import json
import os
import warnings
from copy import deepcopy
from functools import partial
from typing import Any, Callable, Optional, Union

import numpy as np

from .dynamic_module_utils import custom_object_save
from .image_processing_utils import BatchFeature, get_size_dict
from .image_processing_utils_fast import BaseImageProcessorFast
from .image_utils import ChannelDimension, SizeDict, validate_kwargs
from .processing_utils import Unpack, VideosKwargs
from .utils import (
    IMAGE_PROCESSOR_NAME,
    PROCESSOR_NAME,
    VIDEO_PROCESSOR_NAME,
    TensorType,
    add_start_docstrings,
    copy_func,
    download_url,
    is_offline_mode,
    is_remote_url,
    is_torch_available,
    is_torchcodec_available,
    is_torchvision_available,
    is_torchvision_v2_available,
    logging,
)
from .utils.hub import cached_file
from .utils.import_utils import requires
from .video_utils import (
    VideoInput,
    VideoMetadata,
    group_videos_by_shape,
    is_valid_video,
    load_video,
    make_batched_metadata,
    make_batched_videos,
    reorder_videos,
    to_channel_dimension_format,
)


if is_torch_available():
    import torch

if is_torchvision_available():
    if is_torchvision_v2_available():
        from torchvision.transforms.v2 import functional as F
    else:
        from torchvision.transforms import functional as F


logger = logging.get_logger(__name__)


BASE_VIDEO_PROCESSOR_DOCSTRING = r"""
    Args:
        do_resize (`bool`, *optional*, defaults to `self.do_resize`):
            Whether to resize the video's (height, width) dimensions to the specified `size`. Can be overridden by the
            `do_resize` parameter in the `preprocess` method.
        size (`dict`, *optional*, defaults to `self.size`):
            Size of the output video after resizing. Can be overridden by the `size` parameter in the `preprocess`
            method.
        size_divisor (`int`, *optional*, defaults to `self.size_divisor`):
            The size by which to ensure that both the height and width can be evenly divided.
        default_to_square (`bool`, *optional*, defaults to `self.default_to_square`):
            Whether to default to a square video when resizing, if size is an int.
        resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
            Resampling filter to use if resizing the video. Only has an effect if `do_resize` is set to `True`. Can be
            overridden by the `resample` parameter in the `preprocess` method.
        do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
            Whether to center crop the video to the specified `crop_size`. Can be overridden by `do_center_crop` in the
            `preprocess` method.
        do_pad (`bool`, *optional*):
            Whether to pad the video to the `(max_height, max_width)` of the videos in the batch.
        crop_size (`dict[str, int]` *optional*, defaults to `self.crop_size`):
            Size of the output video after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess`
            method.
        do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
            Whether to rescale the video by the specified scale `rescale_factor`. Can be overridden by the
            `do_rescale` parameter in the `preprocess` method.
        rescale_factor (`int` or `float`, *optional*, defaults to `self.rescale_factor`):
            Scale factor to use if rescaling the video. Only has an effect if `do_rescale` is set to `True`. Can be
            overridden by the `rescale_factor` parameter in the `preprocess` method.
        do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
            Whether to normalize the video. Can be overridden by the `do_normalize` parameter in the `preprocess`
            method.
        image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
            Mean to use if normalizing the video. This is a float or list of floats the length of the number of
            channels in the video. Can be overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
            Standard deviation to use if normalizing the video. This is a float or list of floats the length of the
            number of channels in the video. Can be overridden by the `image_std` parameter in the `preprocess` method.
        do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
            Whether to convert the video to RGB.
        video_metadata (`VideoMetadata`, *optional*):
            Metadata of the video containing information about total duration, fps and total number of frames.
        do_sample_frames (`bool`, *optional*, defaults to `self.do_sample_frames`):
            Whether to sample frames from the video before processing or to process the whole video.
        num_frames (`int`, *optional*, defaults to `self.num_frames`):
            Maximum number of frames to sample when `do_sample_frames=True`.
        fps (`int` or `float`, *optional*, defaults to `self.fps`):
            Target frames to sample per second when `do_sample_frames=True`.
        return_tensors (`str` or `TensorType`, *optional*):
            Returns stacked tensors if set to `pt`, otherwise returns a list of tensors.
        data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
            The channel dimension format for the output video. Can be one of:
            - `"channels_first"` or `ChannelDimension.FIRST`: video in (num_channels, height, width) format.
            - `"channels_last"` or `ChannelDimension.LAST`: video in (height, width, num_channels) format.
            - Unset: Use the channel dimension format of the input video.
        input_data_format (`ChannelDimension` or `str`, *optional*):
            The channel dimension format for the input video. If unset, the channel dimension format is inferred
            from the input video. Can be one of:
            - `"channels_first"` or `ChannelDimension.FIRST`: video in (num_channels, height, width) format.
            - `"channels_last"` or `ChannelDimension.LAST`: video in (height, width, num_channels) format.
            - `"none"` or `ChannelDimension.NONE`: video in (height, width) format.
        device (`torch.device`, *optional*):
            The device to process the videos on. If unset, the device is inferred from the input videos.
        return_metadata (`bool`, *optional*):
            Whether to return video metadata or not.
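
    Example (a minimal sketch added for illustration; the checkpoint name, frame count, and resolution below are
    assumptions, not values defined in this module):

    ```python
    import torch

    from transformers import AutoVideoProcessor

    # Any checkpoint that ships a video processor config works here; this one is only illustrative.
    video_processor = AutoVideoProcessor.from_pretrained("llava-hf/llava-onevision-qwen2-0.5b-ov-hf")

    # A fake clip: 8 RGB frames of 224x224 with values in [0, 255].
    video = torch.randint(0, 256, (8, 3, 224, 224), dtype=torch.uint8)

    inputs = video_processor(video, return_tensors="pt")
    # Shape depends on the checkpoint's size/crop settings, e.g. (1, 8, 3, H, W).
    print(inputs["pixel_values_videos"].shape)
    ```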
"""


@add_start_docstrings(
    "Constructs a base VideoProcessor.",
    BASE_VIDEO_PROCESSOR_DOCSTRING,
)
@requires(backends=("vision", "torchvision"))
class BaseVideoProcessor(BaseImageProcessorFast):
    _auto_class = None

    resample = None
    image_mean = None
    image_std = None
    size = None
    size_divisor = None
    default_to_square = True
    crop_size = None
    do_resize = None
    do_center_crop = None
    do_pad = None
    do_rescale = None
    rescale_factor = 1 / 255
    do_normalize = None
    do_convert_rgb = None
    do_sample_frames = None
    video_metadata = None
    fps = None
    num_frames = None

    valid_kwargs = VideosKwargs
    model_input_names = ["pixel_values_videos"]

    def __init__(self, **kwargs: Unpack[VideosKwargs]) -> None:
        super().__init__()
        self._processor_class = kwargs.pop("processor_class", None)

        # Additional attributes without default values
        for key, value in kwargs.items():
            try:
                setattr(self, key, value)
            except AttributeError as err:
                logger.error(f"Can't set {key} with value {value} for {self}")
                raise err

        size = kwargs.pop("size", self.size)
        self.size = (
            get_size_dict(size, default_to_square=kwargs.pop("default_to_square", self.default_to_square))
            if size is not None
            else None
        )

        crop_size = kwargs.pop("crop_size", self.crop_size)
        self.crop_size = get_size_dict(crop_size, param_name="crop_size") if crop_size is not None else None

        # Copy every kwarg accepted by this processor onto the instance, falling back to the class defaults.
        self.model_valid_processing_keys = list(self.valid_kwargs.__annotations__.keys())
        for key in self.model_valid_processing_keys:
            if kwargs.get(key) is not None:
                setattr(self, key, kwargs[key])
            else:
                setattr(self, key, deepcopy(getattr(self, key, None)))

    def __call__(self, videos, **kwargs) -> BatchFeature:
        return self.preprocess(videos, **kwargs)

    def convert_to_rgb(
        self,
        video: "torch.Tensor",
    ) -> VideoInput:
        """
        Converts a video to RGB format.

        Args:
            video (`"torch.Tensor"`):
                The video to convert.

        Returns:
            `torch.Tensor`: The converted video.
        """
        video = F.grayscale_to_rgb(video)
        if video.shape[-3] == 3 or not (video[..., 3, :, :] < 255).any():
            return video

        # There is a transparency layer, blend it with a white background.
        # Calculate the alpha proportion for blending.
        alpha = video[..., 3, :, :] / 255.0
        video = (1 - alpha[..., None, :, :]) * 255 + alpha[..., None, :, :] * video[..., :3, :, :]
        return video

    def sample_frames(
        self,
        metadata: VideoMetadata,
        num_frames: Optional[int] = None,
        fps: Optional[Union[int, float]] = None,
        **kwargs,
    ):
        """
        Default sampling function which uniformly samples the desired number of frames between 0 and total number of frames.
        If `fps` is passed along with metadata, `fps` frames per second are sampled uniformly. Arguments `num_frames`
        and `fps` are mutually exclusive.

        Args:
            metadata (`VideoMetadata`):
                Metadata of the video containing information about total duration, fps and total number of frames.
            num_frames (`int`, *optional*):
                Maximum number of frames to sample. Defaults to `self.num_frames`.
            fps (`int` or `float`, *optional*):
                Target frames to sample per second. Defaults to `self.fps`.

        Returns:
            np.ndarray:
                Indices to sample video frames.
        """
        if fps is not None and num_frames is not None:
            raise ValueError(
                "`num_frames`, `fps`, and `sample_indices_fn` are mutually exclusive arguments, please use only one!"
            )

        num_frames = num_frames if num_frames is not None else self.num_frames
        fps = fps if fps is not None else self.fps
        total_num_frames = metadata.total_num_frames

        # If `num_frames` is not given but `fps` is, infer it from the clip length and the source fps.
        if num_frames is None and fps is not None:
            if metadata is None or metadata.fps is None:
                raise ValueError(
                    "Asked to sample `fps` frames per second but no video metadata was provided which is required "
                    "when sampling with `fps`. Please pass in `VideoMetadata` object or use a fixed `num_frames` per input video"
                )
            num_frames = int(total_num_frames / metadata.fps * fps)

        if num_frames is not None and num_frames > total_num_frames:
            raise ValueError(
                f"Video can't be sampled. The `num_frames={num_frames}` exceeds `total_num_frames={total_num_frames}`. "
            )

        if num_frames is not None:
            indices = torch.arange(0, total_num_frames, total_num_frames / num_frames).int()
        else:
            indices = torch.arange(0, total_num_frames).int()
        return indices

    def _decode_and_sample_videos(
        self,
        videos: VideoInput,
        video_metadata: Optional[Union[VideoMetadata, dict]] = None,
        do_sample_frames: bool = False,
        sample_indices_fn: Optional[Callable] = None,
    ):
        """
        Decode input videos and sample frames if needed.
        """
        videos = make_batched_videos(videos)
        video_metadata = make_batched_metadata(videos, video_metadata=video_metadata)

        if is_valid_video(videos[0]) and do_sample_frames:
            # Already-decoded videos: sample the requested frames and keep track of the chosen indices.
            sampled_videos = []
            sampled_metadata = []
            for video, metadata in zip(videos, video_metadata):
                indices = sample_indices_fn(metadata=metadata)
                metadata.frames_indices = indices
                sampled_videos.append(video[indices])
                sampled_metadata.append(metadata)
            videos = sampled_videos
            video_metadata = sampled_metadata
        elif not is_valid_video(videos[0]) and isinstance(videos[0], list):
            # Each video was passed as a list of PIL images: stack the frames into one tensor per video.
            videos = [torch.stack([F.pil_to_tensor(image) for image in images], dim=0) for images in videos]
            if do_sample_frames:
                raise ValueError(
                    "Sampling frames from a list of images is not supported! Set `do_sample_frames=False`."
                )
        else:
            # URLs or file paths: decode (and optionally sample) with the video loader.
            videos, video_metadata = self.fetch_videos(videos, sample_indices_fn=sample_indices_fn)

        return videos, video_metadata

    def _prepare_input_videos(
        self,
        videos: VideoInput,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        device: Optional[str] = None,
    ) -> list["torch.Tensor"]:
        """
        Prepare the input videos for processing.
        """
        processed_videos = []
        for video in videos:
            if isinstance(video, np.ndarray):
                video = to_channel_dimension_format(video, ChannelDimension.FIRST, input_data_format)
                # not using F.to_tensor as it doesn't handle (C, H, W) numpy arrays
                video = torch.from_numpy(video).contiguous()

            if device is not None:
                video = video.to(device)

            processed_videos.append(video)
        return processed_videos

    @add_start_docstrings(BASE_VIDEO_PROCESSOR_DOCSTRING)
    def preprocess(
        self,
        videos: VideoInput,
        **kwargs: Unpack[VideosKwargs],
    ) -> BatchFeature:
        validate_kwargs(
            captured_kwargs=kwargs.keys(),
            valid_processor_keys=list(self.valid_kwargs.__annotations__.keys()) + ["return_tensors"],
        )

        # Fill in unset kwargs from the instance attributes.
        for kwarg_name in self.valid_kwargs.__annotations__:
            kwargs.setdefault(kwarg_name, getattr(self, kwarg_name, None))

        input_data_format = kwargs.pop("input_data_format")
        do_sample_frames = kwargs.pop("do_sample_frames")
        device = kwargs.pop("device")
        video_metadata = kwargs.pop("video_metadata")

        sample_indices_fn = partial(self.sample_frames, **kwargs) if do_sample_frames else None
        videos, video_metadata = self._decode_and_sample_videos(
            videos,
            video_metadata=video_metadata,
            do_sample_frames=do_sample_frames,
            sample_indices_fn=sample_indices_fn,
        )
        videos = self._prepare_input_videos(videos=videos, input_data_format=input_data_format, device=device)

        kwargs = self._further_process_kwargs(**kwargs)
        self._validate_preprocess_kwargs(**kwargs)

        kwargs.pop("data_format")
        return_metadata = kwargs.pop("return_metadata")

        preprocessed_videos = self._preprocess(videos=videos, **kwargs)
        if return_metadata:
            preprocessed_videos["video_metadata"] = video_metadata
        return preprocessed_videos

    def _preprocess(
        self,
        videos: list["torch.Tensor"],
        do_convert_rgb: bool,
        do_resize: bool,
        size: SizeDict,
        size_divisor: Optional[int],
        interpolation: Optional["F.InterpolationMode"],
        do_center_crop: bool,
        crop_size: SizeDict,
        do_rescale: bool,
        do_pad: bool,
        rescale_factor: float,
        do_normalize: bool,
        image_mean: Optional[Union[float, list[float]]],
        image_std: Optional[Union[float, list[float]]],
        return_tensors: Optional[Union[str, TensorType]],
        **kwargs,
    ) -> BatchFeature:
        # Group videos by size for batched resizing
        grouped_videos, grouped_videos_index = group_videos_by_shape(videos)
        resized_videos_grouped = {}
        for shape, stacked_videos in grouped_videos.items():
            if do_convert_rgb:
                stacked_videos = self.convert_to_rgb(stacked_videos)
            if do_resize:
                stacked_videos = self.resize(
                    stacked_videos, size=size, size_divisor=size_divisor, interpolation=interpolation
                )
            resized_videos_grouped[shape] = stacked_videos
        resized_videos = reorder_videos(resized_videos_grouped, grouped_videos_index)

        # Group videos by size again for further processing.
        # Needed in case do_resize is False, or resize returns videos with different sizes.
        grouped_videos, grouped_videos_index = group_videos_by_shape(resized_videos)
        processed_videos_grouped = {}
        for shape, stacked_videos in grouped_videos.items():
            if do_center_crop:
                stacked_videos = self.center_crop(stacked_videos, crop_size)
            # Fused rescale and normalize
            stacked_videos = self.rescale_and_normalize(
                stacked_videos, do_rescale, rescale_factor, do_normalize, image_mean, image_std
            )
            processed_videos_grouped[shape] = stacked_videos

        processed_videos = reorder_videos(processed_videos_grouped, grouped_videos_index)
        processed_videos = torch.stack(processed_videos, dim=0) if return_tensors else processed_videos

        return BatchFeature(data={"pixel_values_videos": processed_videos}, tensor_type=return_tensors)

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Union[str, os.PathLike],
        cache_dir: Optional[Union[str, os.PathLike]] = None,
        force_download: bool = False,
        local_files_only: bool = False,
        token: Optional[Union[str, bool]] = None,
        revision: str = "main",
        **kwargs,
    ):
        r"""
        Instantiate a type of [`~video_processing_utils.VideoProcessorBase`] from a video processor.

        Args:
            pretrained_model_name_or_path (`str` or `os.PathLike`):
                This can be either:

                - a string, the *model id* of a pretrained video hosted inside a model repo on
                  huggingface.co.
                - a path to a *directory* containing a video processor file saved using the
                  [`~video_processing_utils.VideoProcessorBase.save_pretrained`] method, e.g.,
                  `./my_model_directory/`.
                - a path or url to a saved video processor JSON *file*, e.g.,
                  `./my_model_directory/video_preprocessor_config.json`.
            cache_dir (`str` or `os.PathLike`, *optional*):
                Path to a directory in which a downloaded pretrained model video processor should be cached if the
                standard cache should not be used.
            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force to (re-)download the video processor files and override the cached versions if
                they exist.
            resume_download:
                Deprecated and ignored. All downloads are now resumed by default when possible.
                Will be removed in v5 of Transformers.
            proxies (`dict[str, str]`, *optional*):
                A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
            token (`str` or `bool`, *optional*):
                The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use
                the token generated when running `hf auth login` (stored in `~/.huggingface`).
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
                git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
                identifier allowed by git.


                <Tip>

                To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`.

                </Tip>

            return_unused_kwargs (`bool`, *optional*, defaults to `False`):
                If `False`, then this function returns just the final video processor object. If `True`, then this
                functions returns a `Tuple(video_processor, unused_kwargs)` where *unused_kwargs* is a dictionary
                consisting of the key/value pairs whose keys are not video processor attributes: i.e., the part of
                `kwargs` which has not been used to update `video_processor` and is otherwise ignored.
            subfolder (`str`, *optional*, defaults to `""`):
                In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can
                specify the folder name here.
            kwargs (`dict[str, Any]`, *optional*):
                The values in kwargs of any keys which are video processor attributes will be used to override the
                loaded values. Behavior concerning key/value pairs whose keys are *not* video processor attributes is
                controlled by the `return_unused_kwargs` keyword parameter.

        Returns:
            A video processor of type [`~video_processing_utils.VideoProcessorBase`].

        Examples:

        ```python
        # We can't instantiate directly the base class *VideoProcessorBase* so let's show the examples on a
        # derived class: *LlavaOnevisionVideoProcessor*
        video_processor = LlavaOnevisionVideoProcessor.from_pretrained(
            "llava-hf/llava-onevision-qwen2-0.5b-ov-hf"
        )  # Download video_processing_config from huggingface.co and cache.
        video_processor = LlavaOnevisionVideoProcessor.from_pretrained(
            "./test/saved_model/"
        )  # E.g. video processor (or model) was saved using *save_pretrained('./test/saved_model/')*
        video_processor = LlavaOnevisionVideoProcessor.from_pretrained("./test/saved_model/video_preprocessor_config.json")
        video_processor = LlavaOnevisionVideoProcessor.from_pretrained(
            "llava-hf/llava-onevision-qwen2-0.5b-ov-hf", do_normalize=False, foo=False
        )
        assert video_processor.do_normalize is False
        video_processor, unused_kwargs = LlavaOnevisionVideoProcessor.from_pretrained(
            "llava-hf/llava-onevision-qwen2-0.5b-ov-hf", do_normalize=False, foo=False, return_unused_kwargs=True
        )
        assert video_processor.do_normalize is False
        assert unused_kwargs == {"foo": False}
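        # Illustrative only: the loading kwargs documented above can be combined as needed,
        # assuming the same public checkpoint as in the examples above.
        video_processor = LlavaOnevisionVideoProcessor.from_pretrained(
            "llava-hf/llava-onevision-qwen2-0.5b-ov-hf", revision="main", cache_dir="./hf-cache"
        )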
        ```"""
        kwargs["cache_dir"] = cache_dir
        kwargs["force_download"] = force_download
        kwargs["local_files_only"] = local_files_only
        kwargs["revision"] = revision

        use_auth_token = kwargs.pop("use_auth_token", None)
        if use_auth_token is not None:
            warnings.warn(
                "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
                FutureWarning,
            )
            if token is not None:
                raise ValueError(
                    "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
                )
            token = use_auth_token

        if token is not None:
            kwargs["token"] = token

        video_processor_dict, kwargs = cls.get_video_processor_dict(pretrained_model_name_or_path, **kwargs)

        return cls.from_dict(video_processor_dict, **kwargs)

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        """
        Save a video processor object to the directory `save_directory`, so that it can be re-loaded using the
        [`~video_processing_utils.VideoProcessorBase.from_pretrained`] class method.

        Args:
            save_directory (`str` or `os.PathLike`):
                Directory where the video processor JSON file will be saved (will be created if it does not exist).
            push_to_hub (`bool`, *optional*, defaults to `False`):
                Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
                repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
                namespace).
            kwargs (`dict[str, Any]`, *optional*):
                Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
        """
        use_auth_token = kwargs.pop("use_auth_token", None)

        if use_auth_token is not None:
            warnings.warn(
                "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
                FutureWarning,
            )
            if kwargs.get("token", None) is not None:
                raise ValueError(
                    "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
                )
            kwargs["token"] = use_auth_token

        if os.path.isfile(save_directory):
            raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file")

        os.makedirs(save_directory, exist_ok=True)

        if push_to_hub:
            commit_message = kwargs.pop("commit_message", None)
            repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1])
            repo_id = self._create_repo(repo_id, **kwargs)
            files_timestamps = self._get_files_timestamps(save_directory)

        # If we have a custom config, we copy the file defining it in the folder and set the attributes so it can be
        # loaded from the Hub.
        if self._auto_class is not None:
            custom_object_save(self, save_directory, config=self)

        # If we save using the predefined names, we can load using `from_pretrained`
        output_video_processor_file = os.path.join(save_directory, VIDEO_PROCESSOR_NAME)

        self.to_json_file(output_video_processor_file)
        logger.info(f"Video processor saved in {output_video_processor_file}")

        if push_to_hub:
            self._upload_modified_files(
                save_directory,
                repo_id,
                files_timestamps,
                commit_message=commit_message,
                token=kwargs.get("token"),
            )

        return [output_video_processor_file]

    @classmethod
    def get_video_processor_dict(
        cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
    ) -> tuple[dict[str, Any], dict[str, Any]]:
        """
        From a `pretrained_model_name_or_path`, resolve to a dictionary of parameters, to be used for instantiating a
        video processor of type [`~video_processing_utils.VideoProcessorBase`] using `from_dict`.

        Parameters:
            pretrained_model_name_or_path (`str` or `os.PathLike`):
                The identifier of the pre-trained checkpoint from which we want the dictionary of parameters.
            subfolder (`str`, *optional*, defaults to `""`):
                In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can
                specify the folder name here.

        Returns:
            `tuple[Dict, Dict]`: The dictionary(ies) that will be used to instantiate the video processor object.
        """
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", None)
        proxies = kwargs.pop("proxies", None)
        token = kwargs.pop("token", None)
        use_auth_token = kwargs.pop("use_auth_token", None)
        local_files_only = kwargs.pop("local_files_only", False)
        revision = kwargs.pop("revision", None)
        subfolder = kwargs.pop("subfolder", "")

        from_pipeline = kwargs.pop("_from_pipeline", None)
        from_auto_class = kwargs.pop("_from_auto", False)

        if use_auth_token is not None:
            warnings.warn(
                "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
                FutureWarning,
            )
            if token is not None:
                raise ValueError(
                    "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
                )
            token = use_auth_token

        user_agent = {"file_type": "video processor", "from_auto_class": from_auto_class}
        if from_pipeline is not None:
            user_agent["using_pipeline"] = from_pipeline

        if is_offline_mode() and not local_files_only:
            logger.info("Offline mode: forcing local_files_only=True")
            local_files_only = True

        pretrained_model_name_or_path = str(pretrained_model_name_or_path)
        is_local = os.path.isdir(pretrained_model_name_or_path)
        if os.path.isdir(pretrained_model_name_or_path):
            resolved_video_processor_file = pretrained_model_name_or_path
            is_local = True
        elif is_remote_url(pretrained_model_name_or_path):
            video_processor_file = pretrained_model_name_or_path
            resolved_video_processor_file = download_url(pretrained_model_name_or_path)
        else:
            video_processor_file = VIDEO_PROCESSOR_NAME
            try:
                # Look for the dedicated video processor config first, then fall back to the processor and image
                # processor config names used by older checkpoints.
                resolved_video_processor_files = [
                    resolved_file
                    for filename in [VIDEO_PROCESSOR_NAME, PROCESSOR_NAME, IMAGE_PROCESSOR_NAME]
                    if (
                        resolved_file := cached_file(
                            pretrained_model_name_or_path,
                            filename=filename,
                            cache_dir=cache_dir,
                            force_download=force_download,
                            proxies=proxies,
                            resume_download=resume_download,
                            local_files_only=local_files_only,
                            token=token,
                            user_agent=user_agent,
                            revision=revision,
                            subfolder=subfolder,
                            _raise_exceptions_for_missing_entries=False,
                        )
                    )
                    is not None
                ]
                resolved_video_processor_file = resolved_video_processor_files[0]
            except EnvironmentError:
                # Raise any environment error raised by `cached_file`. It will have a helpful error message adapted
                # to the original exception.
                raise
            except Exception:
                # For any other exception, we throw a generic error.
                raise OSError(
                    f"Can't load video processor for '{pretrained_model_name_or_path}'. If you were trying to load"
                    " it from 'https://huggingface.co/models', make sure you don't have a local directory with the"
                    f" same name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a"
                    f" directory containing a {VIDEO_PROCESSOR_NAME} file"
                )

        try:
            # Load video_processor dict
            with open(resolved_video_processor_file, "r", encoding="utf-8") as reader:
                text = reader.read()
            video_processor_dict = json.loads(text)
            # Nested processor configs store the video processor under a dedicated key.
            video_processor_dict = video_processor_dict.get("video_processor", video_processor_dict)

        except json.JSONDecodeError:
            raise OSError(
                f"It looks like the config file at '{resolved_video_processor_file}' is not a valid JSON file."
            )

        if is_local:
            logger.info(f"loading configuration file {resolved_video_processor_file}")
        else:
            logger.info(
                f"loading configuration file {video_processor_file} from cache at {resolved_video_processor_file}"
            )

        return video_processor_dict, kwargs

    @classmethod
    def from_dict(cls, video_processor_dict: dict[str, Any], **kwargs):
        """
        Instantiates a type of [`~video_processing_utils.VideoProcessorBase`] from a Python dictionary of parameters.

        Args:
            video_processor_dict (`dict[str, Any]`):
                Dictionary that will be used to instantiate the video processor object. Such a dictionary can be
                retrieved from a pretrained checkpoint by leveraging the
                [`~video_processing_utils.VideoProcessorBase.to_dict`] method.
            kwargs (`dict[str, Any]`):
                Additional parameters from which to initialize the video processor object.

        Returns:
            [`~video_processing_utils.VideoProcessorBase`]: The video processor object instantiated from those
            parameters.
        """
        video_processor_dict = video_processor_dict.copy()
        return_unused_kwargs = kwargs.pop("return_unused_kwargs", False)

        # The `size` parameter is a dict and was previously an int or tuple in feature extractors.
        # We set `size` here directly to the `video_processor_dict` so that it is converted to the appropriate
        # dict within `__init__`.
        if "size" in kwargs and "size" in video_processor_dict:
            video_processor_dict["size"] = kwargs.pop("size")
        if "crop_size" in kwargs and "crop_size" in video_processor_dict:
            video_processor_dict["crop_size"] = kwargs.pop("crop_size")

        video_processor = cls(**video_processor_dict)

        # Update video_processor with kwargs if needed
        to_remove = []
        for key, value in kwargs.items():
            if hasattr(video_processor, key):
                setattr(video_processor, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)

        logger.info(f"Video processor {video_processor}")
        if return_unused_kwargs:
            return video_processor, kwargs
        else:
            return video_processor

    def to_dict(self) -> dict[str, Any]:
        """
        Serializes this instance to a Python dictionary.

        Returns:
            `dict[str, Any]`: Dictionary of all the attributes that make up this video processor instance.
        """
        output = deepcopy(self.__dict__)
        output.pop("model_valid_processing_keys", None)
        output.pop("_valid_kwargs_names", None)
        output["video_processor_type"] = self.__class__.__name__

        return output

    def to_json_string(self) -> str:
        """
        Serializes this instance to a JSON string.

        Returns:
            `str`: String containing all the attributes that make up this video processor instance in JSON format.
        """
        dictionary = self.to_dict()

        for key, value in dictionary.items():
            if isinstance(value, np.ndarray):
                dictionary[key] = value.tolist()

        # make sure private name "_processor_class" is correctly
        # saved as "processor_class"
        _processor_class = dictionary.pop("_processor_class", None)
        if _processor_class is not None:
            dictionary["processor_class"] = _processor_class

        return json.dumps(dictionary, indent=2, sort_keys=True) + "\n"

    def to_json_file(self, json_file_path: Union[str, os.PathLike]):
        """
        Save this instance to a JSON file.

        Args:
            json_file_path (`str` or `os.PathLike`):
                Path to the JSON file in which this video processor instance's parameters will be saved.
        """
        with open(json_file_path, "w", encoding="utf-8") as writer:
            writer.write(self.to_json_string())

    def __repr__(self):
        return f"{self.__class__.__name__} {self.to_json_string()}"

    @classmethod
    def from_json_file(cls, json_file: Union[str, os.PathLike]):
        """
        Instantiates a video processor of type [`~video_processing_utils.VideoProcessorBase`] from the path to a JSON
        file of parameters.

        Args:
            json_file (`str` or `os.PathLike`):
                Path to the JSON file containing the parameters.

        Returns:
            A video processor of type [`~video_processing_utils.VideoProcessorBase`]: The video_processor object
            instantiated from that JSON file.
        """
        with open(json_file, "r", encoding="utf-8") as reader:
            text = reader.read()
        video_processor_dict = json.loads(text)
        return cls(**video_processor_dict)

    @classmethod
    def register_for_auto_class(cls, auto_class="AutoVideoProcessor"):
        """
        Register this class with a given auto class. This should only be used for custom video processors as the ones
        in the library are already mapped with `AutoVideoProcessor`.

        <Tip warning={true}>

        This API is experimental and may have some slight breaking changes in the next releases.

        </Tip>

        Args:
            auto_class (`str` or `type`, *optional*, defaults to `"AutoVideoProcessor"`):
                The auto class to register this new video processor with.
        """
        if not isinstance(auto_class, str):
            auto_class = auto_class.__name__

        import transformers.models.auto as auto_module

        if not hasattr(auto_module, auto_class):
            raise ValueError(f"{auto_class} is not a valid auto class.")

        cls._auto_class = auto_class

    def fetch_videos(self, video_url_or_urls: Union[str, list[str], list[list[str]]], sample_indices_fn=None):
        """
        Convert a single or a list of urls into the corresponding `np.array` objects.

        If a single url is passed, the return value will be a single object. If a list is passed a list of objects is
        returned.
        """
        backend = "torchcodec"
        if not is_torchcodec_available():
            logger.warning(
                "`torchcodec` is not installed and cannot be used to decode the video by default. Falling back to "
                "`torchvision`. Note that `torchvision` decoding is deprecated and will be removed in future versions. "
            )
            backend = "torchvision"

        if isinstance(video_url_or_urls, list):
            return list(zip(*[self.fetch_videos(x, sample_indices_fn=sample_indices_fn) for x in video_url_or_urls]))
        else:
            return load_video(video_url_or_urls, backend=backend, sample_indices_fn=sample_indices_fn)


BaseVideoProcessor.push_to_hub = copy_func(BaseVideoProcessor.push_to_hub)
if BaseVideoProcessor.push_to_hub.__doc__ is not None:
    BaseVideoProcessor.push_to_hub.__doc__ = BaseVideoProcessor.push_to_hub.__doc__.format(
        object="video processor", object_class="AutoVideoProcessor", object_files="video processor file"
    )