
"""PyTorch VideoMAE (masked autoencoder) model."""

import collections.abc
from copy import deepcopy
from dataclasses import dataclass
from typing import Callable, Optional

import numpy as np
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import MSELoss

from ...activations import ACT2FN
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutput, ImageClassifierOutput
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, logging
from ...utils.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from ...utils.generic import can_return_tuple, check_model_inputs
from .configuration_videomae import VideoMAEConfig


logger = logging.get_logger(__name__)


@dataclass
@auto_docstring(
    custom_intro="""
    Class for VideoMAEDecoder's outputs, with potential hidden states and attentions.
    """
)
class VideoMAEDecoderOutput(ModelOutput):
    r"""
    logits (`torch.FloatTensor` of shape `(batch_size, patch_size ** 2 * num_channels)`):
        Pixel reconstruction logits.
    """

    logits: Optional[torch.FloatTensor] = None
    hidden_states: Optional[tuple[torch.FloatTensor]] = None
    attentions: Optional[tuple[torch.FloatTensor]] = None


@dataclass
@auto_docstring(
    custom_intro="""
    Class for VideoMAEForPreTraining's outputs, with potential hidden states and attentions.
    """
)
class VideoMAEForPreTrainingOutput(ModelOutput):
    r"""
    loss (`torch.FloatTensor` of shape `(1,)`):
        Pixel reconstruction loss.
    logits (`torch.FloatTensor` of shape `(batch_size, patch_size ** 2 * num_channels)`):
        Pixel reconstruction logits.
    """

    loss: Optional[torch.FloatTensor] = None
    logits: Optional[torch.FloatTensor] = None
    hidden_states: Optional[tuple[torch.FloatTensor]] = None
    attentions: Optional[tuple[torch.FloatTensor]] = None


def get_sinusoid_encoding_table(n_position, d_hid):
    """Sinusoid position encoding table"""

    def get_position_angle_vec(position):
        return [position / np.power(10000, 2 * (hid_j // 2) / d_hid) for hid_j in range(d_hid)]

    sinusoid_table = np.array([get_position_angle_vec(pos_i) for pos_i in range(n_position)])
    sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2])  # dim 2i
    sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2])  # dim 2i + 1

    return torch.FloatTensor(sinusoid_table).unsqueeze(0)


class VideoMAEEmbeddings(nn.Module):
    """
    Construct the patch and position embeddings.

    """

    def __init__(self, config):
        super().__init__()

        self.patch_embeddings = VideoMAEPatchEmbeddings(config)
        self.num_patches = self.patch_embeddings.num_patches
        # fixed sin-cos position embedding
        self.position_embeddings = get_sinusoid_encoding_table(self.num_patches, config.hidden_size)
        self.config = config

    def forward(self, pixel_values, bool_masked_pos):
        # create patch embeddings
        embeddings = self.patch_embeddings(pixel_values)

        # add position embeddings
        embeddings = embeddings + self.position_embeddings.detach().type_as(embeddings).to(
            device=embeddings.device, copy=True
        )

        # only keep visible patches (~bool_masked_pos means visible)
        if bool_masked_pos is not None:
            batch_size, _, num_channels = embeddings.shape
            embeddings = embeddings[~bool_masked_pos]
            embeddings = embeddings.reshape(batch_size, -1, num_channels)

        return embeddings


class VideoMAEPatchEmbeddings(nn.Module):
    """
    Video to Patch Embedding. This module turns a batch of videos of shape (batch_size, num_frames, num_channels,
    height, width) into a tensor of shape (batch_size, seq_len, hidden_size) to be consumed by a Transformer encoder.

    The seq_len (the number of patches) equals (number of frames // tubelet_size) * (height // patch_size) * (width //
    patch_size).

    """

    def __init__(self, config):
        super().__init__()

        image_size = config.image_size
        patch_size = config.patch_size
        num_channels = config.num_channels
        hidden_size = config.hidden_size
        num_frames = config.num_frames
        tubelet_size = config.tubelet_size

        image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        self.image_size = image_size
        self.patch_size = patch_size
        self.tubelet_size = int(tubelet_size)
        num_patches = (
            (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) * (num_frames // self.tubelet_size)
        )
        self.num_channels = num_channels
        self.num_patches = num_patches
        self.projection = nn.Conv3d(
            in_channels=num_channels,
            out_channels=hidden_size,
            kernel_size=(self.tubelet_size, patch_size[0], patch_size[1]),
            stride=(self.tubelet_size, patch_size[0], patch_size[1]),
        )

    def forward(self, pixel_values):
        batch_size, num_frames, num_channels, height, width = pixel_values.shape
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        if height != self.image_size[0] or width != self.image_size[1]:
            raise ValueError(
                f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]})."
            )
        # permute to (batch_size, num_channels, num_frames, height, width)
        pixel_values = pixel_values.permute(0, 2, 1, 3, 4)
        embeddings = self.projection(pixel_values).flatten(2).transpose(1, 2)
        return embeddings


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    # standard scaled dot-product attention, computed eagerly
    attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)

    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)

    # mask heads if requested
    if attention_mask is not None:
        attn_weights = attn_weights * attention_mask

    attn_output = torch.matmul(attn_weights, value)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights


class VideoMAESelfAttention(nn.Module):
    def __init__(self, config: VideoMAEConfig) -> None:
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
                f"heads {config.num_attention_heads}."
            )

        self.config = config
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.dropout_prob = config.attention_probs_dropout_prob
        self.scaling = self.attention_head_size**-0.5
        self.is_causal = False

        self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=False)
        self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=False)
        self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=False)

        if config.qkv_bias:
            self.q_bias = nn.Parameter(torch.zeros(self.all_head_size))
            self.v_bias = nn.Parameter(torch.zeros(self.all_head_size))
        else:
            self.q_bias = None
            self.v_bias = None

    def forward(
        self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None
    ) -> tuple[torch.Tensor, torch.Tensor]:
        batch_size, seq_length, _ = hidden_states.shape
        # the key projection never carries a bias, so a zero bias is used whenever q/v biases are enabled
        k_bias = torch.zeros_like(self.v_bias, requires_grad=False) if self.q_bias is not None else None
        keys = nn.functional.linear(input=hidden_states, weight=self.key.weight, bias=k_bias)
        values = nn.functional.linear(input=hidden_states, weight=self.value.weight, bias=self.v_bias)
        queries = nn.functional.linear(input=hidden_states, weight=self.query.weight, bias=self.q_bias)

        key_layer = keys.view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
        value_layer = values.view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
        query_layer = queries.view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        context_layer, attention_probs = attention_interface(
            self,
            query_layer,
            key_layer,
            value_layer,
            head_mask,
            is_causal=self.is_causal,
            scaling=self.scaling,
            dropout=0.0 if not self.training else self.dropout_prob,
        )

        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.reshape(new_context_layer_shape)

        return context_layer, attention_probs


class VideoMAESelfOutput(nn.Module):
    """
    The residual connection is defined in VideoMAELayer instead of here (as is the case with other models), due to the
    layernorm applied before each block.
    """

    def __init__(self, config: VideoMAEConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)

        return hidden_states


class VideoMAEAttention(nn.Module):
    def __init__(self, config: VideoMAEConfig) -> None:
        super().__init__()
        self.attention = VideoMAESelfAttention(config)
        self.output = VideoMAESelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.attention.query = prune_linear_layer(self.attention.query, index)
        self.attention.key = prune_linear_layer(self.attention.key, index)
        self.attention.value = prune_linear_layer(self.attention.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
        self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
        self_attn_output, _ = self.attention(hidden_states, head_mask)
        attention_output = self.output(self_attn_output, hidden_states)

        return attention_output


class VideoMAEIntermediate(nn.Module):
    def __init__(self, config: VideoMAEConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)

        return hidden_states


class VideoMAEOutput(nn.Module):
    def __init__(self, config: VideoMAEConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)

        hidden_states = hidden_states + input_tensor

        return hidden_states


class VideoMAELayer(GradientCheckpointingLayer):
    """This corresponds to the Block class in the timm implementation."""

    def __init__(self, config: VideoMAEConfig) -> None:
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = VideoMAEAttention(config)
        self.intermediate = VideoMAEIntermediate(config)
        self.output = VideoMAEOutput(config)
        self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
        # in VideoMAE, layernorm is applied before self-attention
        hidden_states_norm = self.layernorm_before(hidden_states)
        attention_output = self.attention(hidden_states_norm, head_mask)

        # first residual connection
        hidden_states = attention_output + hidden_states

        # in VideoMAE, layernorm is also applied after self-attention
        layer_output = self.layernorm_after(hidden_states)
        layer_output = self.intermediate(layer_output)

        # second residual connection is done here
        layer_output = self.output(layer_output, hidden_states)

        return layer_output


class VideoMAEEncoder(nn.Module):
    def __init__(self, config: VideoMAEConfig) -> None:
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([VideoMAELayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None) -> BaseModelOutput:
        for i, layer_module in enumerate(self.layer):
            layer_head_mask = head_mask[i] if head_mask is not None else None
            hidden_states = layer_module(hidden_states, layer_head_mask)

        return BaseModelOutput(last_hidden_state=hidden_states)


@auto_docstring
class VideoMAEPreTrainedModel(PreTrainedModel):
    config: VideoMAEConfig
    base_model_prefix = "videomae"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True
    _supports_sdpa = True
    _supports_flash_attn = True
    _supports_flex_attn = True
    _supports_attention_backend = True
    _can_record_outputs = {
        "hidden_states": VideoMAELayer,
        "attentions": VideoMAESelfAttention,
    }

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv3d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


@auto_docstring
class VideoMAEModel(VideoMAEPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = VideoMAEEmbeddings(config)
        self.encoder = VideoMAEEncoder(config)

        if config.use_mean_pooling:
            self.layernorm = None
        else:
            self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @check_model_inputs
    @auto_docstring
    def forward(
        self,
        pixel_values: torch.FloatTensor,
        bool_masked_pos: Optional[torch.BoolTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutput:
        r"""
        bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). Each video in the
            batch must have the same number of masked patches. If `None`, then all patches are considered. Sequence
            length is `(num_frames // tubelet_size) * (image_size // patch_size) ** 2`.

        Examples:

        ```python
        >>> import av
        >>> import numpy as np

        >>> from transformers import AutoImageProcessor, VideoMAEModel
        >>> from huggingface_hub import hf_hub_download

        >>> np.random.seed(0)


        >>> def read_video_pyav(container, indices):
        ...     '''
        ...     Decode the video with PyAV decoder.
        ...     Args:
        ...         container (`av.container.input.InputContainer`): PyAV container.
        ...         indices (`list[int]`): List of frame indices to decode.
        ...     Returns:
        ...         result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
        ...     '''
        ...     frames = []
        ...     container.seek(0)
        ...     start_index = indices[0]
        ...     end_index = indices[-1]
        ...     for i, frame in enumerate(container.decode(video=0)):
        ...         if i > end_index:
        ...             break
        ...         if i >= start_index and i in indices:
        ...             frames.append(frame)
        ...     return np.stack([x.to_ndarray(format="rgb24") for x in frames])


        >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
        ...     '''
        ...     Sample a given number of frame indices from the video.
        ...     Args:
        ...         clip_len (`int`): Total number of frames to sample.
        ...         frame_sample_rate (`int`): Sample every n-th frame.
        ...         seg_len (`int`): Maximum allowed index of sample's last frame.
        ...     Returns:
        ...         indices (`list[int]`): List of sampled frame indices
        ...     '''
        ...     converted_len = int(clip_len * frame_sample_rate)
        ...     end_idx = np.random.randint(converted_len, seg_len)
        ...     start_idx = end_idx - converted_len
        ...     indices = np.linspace(start_idx, end_idx, num=clip_len)
        ...     indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
        ...     return indices


        >>> # video clip consists of 300 frames (10 seconds at 30 FPS)
        >>> file_path = hf_hub_download(
        ...     repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
        ... )
        >>> container = av.open(file_path)

        >>> # sample 16 frames
        >>> indices = sample_frame_indices(clip_len=16, frame_sample_rate=1, seg_len=container.streams.video[0].frames)
        >>> video = read_video_pyav(container, indices)

        >>> image_processor = AutoImageProcessor.from_pretrained("MCG-NJU/videomae-base")
        >>> model = VideoMAEModel.from_pretrained("MCG-NJU/videomae-base")

        >>> # prepare video for the model
        >>> inputs = image_processor(list(video), return_tensors="pt")

        >>> # forward pass
        >>> outputs = model(**inputs)
        >>> last_hidden_states = outputs.last_hidden_state
        >>> list(last_hidden_states.shape)
        [1, 1568, 768]
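
        >>> # Worked check (sketch, assuming the default videomae-base config): the sequence length equals
        >>> # (num_frames // tubelet_size) * (image_size // patch_size) ** 2 = (16 // 2) * (224 // 16) ** 2 = 1568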
        ```"""
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(pixel_values, bool_masked_pos)

        encoder_outputs = self.encoder(embedding_output, head_mask=head_mask)
        sequence_output = encoder_outputs.last_hidden_state
        if self.layernorm is not None:
            sequence_output = self.layernorm(sequence_output)

        return BaseModelOutput(last_hidden_state=sequence_output)


class VideoMAEDecoder(nn.Module):
    def __init__(self, config: VideoMAEConfig):
        super().__init__()

        decoder_num_labels = config.num_channels * config.tubelet_size * config.patch_size**2

        decoder_config = deepcopy(config)
        decoder_config.hidden_size = config.decoder_hidden_size
        decoder_config.num_hidden_layers = config.decoder_num_hidden_layers
        decoder_config.num_attention_heads = config.decoder_num_attention_heads
        decoder_config.intermediate_size = config.decoder_intermediate_size
        self.decoder_layers = nn.ModuleList(
            [VideoMAELayer(decoder_config) for _ in range(config.decoder_num_hidden_layers)]
        )

        self.norm = nn.LayerNorm(config.decoder_hidden_size)
        self.head = (
            nn.Linear(config.decoder_hidden_size, decoder_num_labels) if decoder_num_labels > 0 else nn.Identity()
        )

        self.gradient_checkpointing = False
        self.config = config

    def forward(self, hidden_states: torch.Tensor, return_token_num: int):
        # apply Transformer layers (blocks)
        for layer_module in self.decoder_layers:
            hidden_states = layer_module(hidden_states, head_mask=None)

        if return_token_num > 0:
            # only keep the tokens corresponding to the masked patches
            hidden_states = hidden_states[:, -return_token_num:]

        # predictor projection
        hidden_states = self.norm(hidden_states)
        logits = self.head(hidden_states)

        return VideoMAEDecoderOutput(logits=logits)


@auto_docstring(
    custom_intro="""
    The VideoMAE Model transformer with the decoder on top for self-supervised pre-training.
    """
)
class VideoMAEForPreTraining(VideoMAEPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.videomae = VideoMAEModel(config)

        self.encoder_to_decoder = nn.Linear(config.hidden_size, config.decoder_hidden_size, bias=False)
        self.mask_token = nn.Parameter(torch.zeros(1, 1, config.decoder_hidden_size))
        self.position_embeddings = get_sinusoid_encoding_table(
            self.videomae.embeddings.num_patches, config.decoder_hidden_size
        )

        self.decoder = VideoMAEDecoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        pixel_values: torch.FloatTensor,
        bool_masked_pos: torch.BoolTensor,
        head_mask: Optional[torch.Tensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> VideoMAEForPreTrainingOutput:
        r"""
        bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, sequence_length)`):
            Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). Each video in the
            batch must have the same number of masked patches. Sequence length is `(num_frames // tubelet_size) *
            (image_size // patch_size) ** 2`.

        Examples:
        ```python
        >>> from transformers import AutoImageProcessor, VideoMAEForPreTraining
        >>> import numpy as np
        >>> import torch

        >>> num_frames = 16
        >>> video = list(np.random.randint(0, 256, (num_frames, 3, 224, 224)))

        >>> image_processor = AutoImageProcessor.from_pretrained("MCG-NJU/videomae-base")
        >>> model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base")

        >>> pixel_values = image_processor(video, return_tensors="pt").pixel_values

        >>> num_patches_per_frame = (model.config.image_size // model.config.patch_size) ** 2
        >>> seq_length = (num_frames // model.config.tubelet_size) * num_patches_per_frame
        >>> bool_masked_pos = torch.randint(0, 2, (1, seq_length)).bool()

        >>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos)
        >>> loss = outputs.loss
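
        >>> # Note: the loss is a mean squared error between the decoder's reconstruction and the true pixel values
        >>> # of the masked patches (normalized per patch when `config.norm_pix_loss` is enabled, the default).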
        ```"""
        outputs = self.videomae(pixel_values, bool_masked_pos=bool_masked_pos, head_mask=head_mask, **kwargs)

        sequence_output = outputs.last_hidden_state
        sequence_output = self.encoder_to_decoder(sequence_output)  # [batch_size, num_visible_patches, decoder_hidden_size]
        batch_size, seq_len, num_channels = sequence_output.shape

        if bool_masked_pos is None:
            raise ValueError("One must provide a boolean mask")

        # the visible tokens are not unshuffled; instead, the position embeddings are gathered accordingly
        expanded_position_embeddings = self.position_embeddings.expand(batch_size, -1, -1).type_as(pixel_values)
        expanded_position_embeddings = expanded_position_embeddings.to(pixel_values.device, copy=True).detach()
        pos_emb_visible = expanded_position_embeddings[~bool_masked_pos].reshape(batch_size, -1, num_channels)
        pos_emb_mask = expanded_position_embeddings[bool_masked_pos].reshape(batch_size, -1, num_channels)

        # [batch_size, num_patches, decoder_hidden_size]
        x_full = torch.cat([sequence_output + pos_emb_visible, self.mask_token + pos_emb_mask], dim=1)

        # [batch_size, num_masked_patches, num_channels * patch_size * patch_size]
        decoder_outputs = self.decoder(x_full, pos_emb_mask.shape[1])
        logits = decoder_outputs.logits

        loss = None
        with torch.no_grad():
            # calculate the labels to be predicted
            if self.config.num_channels != 3:
                # can't unnormalize with the default ImageNet means/stds
                frames = pixel_values
            else:
                # first, unnormalize the frames
                device = pixel_values.device
                dtype = pixel_values.dtype
                mean = torch.as_tensor(IMAGENET_DEFAULT_MEAN).to(device=device, dtype=dtype)[None, None, :, None, None]
                std = torch.as_tensor(IMAGENET_DEFAULT_STD).to(device=device, dtype=dtype)[None, None, :, None, None]
                frames = pixel_values * std + mean  # in [0, 1]

            batch_size, time, num_channels, height, width = frames.shape
            tubelet_size, patch_size = self.config.tubelet_size, self.config.patch_size
            if self.config.norm_pix_loss:
                # step 1: split up dimensions (time by tubelet_size, height by patch_size, width by patch_size)
                frames = frames.view(
                    batch_size,
                    time // tubelet_size,
                    tubelet_size,
                    num_channels,
                    height // patch_size,
                    patch_size,
                    width // patch_size,
                    patch_size,
                )
                # step 2: move dimensions to concatenate:
                frames = frames.permute(0, 1, 4, 6, 2, 5, 7, 3).contiguous()
                # step 3: concatenate:
                frames = frames.view(
                    batch_size,
                    time // tubelet_size * height // patch_size * width // patch_size,
                    tubelet_size * patch_size * patch_size,
                    num_channels,
                )
                # step 4: normalize. The authors find that the mean is about 0.48 and standard deviation is about 0.08.
                frames_norm = (frames - frames.mean(dim=-2, keepdim=True)) / (
                    frames.var(dim=-2, unbiased=True, keepdim=True).sqrt() + 1e-6
                )
                # step 5: reshape to (batch_size, T // ts * H // ps * W // ps, ts * ps * ps * C)
                videos_patch = frames_norm.view(
                    batch_size,
                    time // tubelet_size * height // patch_size * width // patch_size,
                    tubelet_size * patch_size * patch_size * num_channels,
                )
            else:
                if self.config.num_channels != 3:
                    raise ValueError(
                        "Can't unnormalize non-RGB images. Consider setting config.norm_pix_loss to False."
                    )
                # step 1: split up dimensions (time by tubelet_size, height by patch_size, width by patch_size)
                frames = frames.view(
                    batch_size,
                    time // tubelet_size,
                    tubelet_size,
                    num_channels,
                    height // patch_size,
                    patch_size,
                    width // patch_size,
                    patch_size,
                )
                # step 2: move dimensions to concatenate: (batch_size, T // ts, H // ps, W // ps, ts, ps, ps, C)
                frames = frames.permute(0, 1, 4, 6, 2, 5, 7, 3).contiguous()
                # step 3: concatenate
                videos_patch = frames.view(
                    batch_size,
                    time // tubelet_size * height // patch_size * width // patch_size,
                    tubelet_size * patch_size * patch_size * num_channels,
                )

            batch_size, _, num_channels = videos_patch.shape
            # the reconstruction targets are the (normalized) pixels of the masked patches only
            labels = videos_patch[bool_masked_pos].reshape(batch_size, -1, num_channels)

        loss_fct = MSELoss()
        loss = loss_fct(logits, labels)

        return VideoMAEForPreTrainingOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@auto_docstring(
    custom_intro="""
    VideoMAE Model transformer with a video classification head on top (a linear layer on top of the average pooled hidden
    states of all tokens) e.g. for ImageNet.
    """
)
class VideoMAEForVideoClassification(VideoMAEPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.num_labels = config.num_labels
        self.videomae = VideoMAEModel(config)

        # Classifier head
        self.fc_norm = nn.LayerNorm(config.hidden_size) if config.use_mean_pooling else None
        self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> ImageClassifierOutput:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).

        Examples:

        ```python
        >>> import av
        >>> import torch
        >>> import numpy as np

        >>> from transformers import AutoImageProcessor, VideoMAEForVideoClassification
        >>> from huggingface_hub import hf_hub_download

        >>> np.random.seed(0)


        >>> def read_video_pyav(container, indices):
        ...     '''
        ...     Decode the video with PyAV decoder.
        ...     Args:
        ...         container (`av.container.input.InputContainer`): PyAV container.
        ...         indices (`list[int]`): List of frame indices to decode.
        ...     Returns:
        ...         result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
        ...     '''
        ...     frames = []
        ...     container.seek(0)
        ...     start_index = indices[0]
        ...     end_index = indices[-1]
        ...     for i, frame in enumerate(container.decode(video=0)):
        ...         if i > end_index:
        ...             break
        ...         if i >= start_index and i in indices:
        ...             frames.append(frame)
        ...     return np.stack([x.to_ndarray(format="rgb24") for x in frames])


        >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
        ...     '''
        ...     Sample a given number of frame indices from the video.
        ...     Args:
        ...         clip_len (`int`): Total number of frames to sample.
        ...         frame_sample_rate (`int`): Sample every n-th frame.
        ...         seg_len (`int`): Maximum allowed index of sample's last frame.
        ...     Returns:
        ...         indices (`list[int]`): List of sampled frame indices
        ...     '''
        ...     converted_len = int(clip_len * frame_sample_rate)
        ...     end_idx = np.random.randint(converted_len, seg_len)
        ...     start_idx = end_idx - converted_len
        ...     indices = np.linspace(start_idx, end_idx, num=clip_len)
        ...     indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
        ...     return indices


        >>> # video clip consists of 300 frames (10 seconds at 30 FPS)
        >>> file_path = hf_hub_download(
        ...     repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
        ... )
        >>> container = av.open(file_path)

        >>> # sample 16 frames
        >>> indices = sample_frame_indices(clip_len=16, frame_sample_rate=1, seg_len=container.streams.video[0].frames)
        >>> video = read_video_pyav(container, indices)

        >>> image_processor = AutoImageProcessor.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics")
        >>> model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics")

        >>> inputs = image_processor(list(video), return_tensors="pt")

        >>> with torch.no_grad():
        ...     outputs = model(**inputs)
        ...     logits = outputs.logits

        >>> # model predicts one of the 400 Kinetics-400 classes
        >>> predicted_label = logits.argmax(-1).item()
        >>> print(model.config.id2label[predicted_label])
        eating spaghetti
        ```r   Nr   r   rL  )
r  r  rd  r  re  loss_functionrN   r   r!   r"   )
rP   r\   r   r^  r   rT  r/  r   r    r0   s
             r-   rb   z&VideoMAEForVideoClassification.forward  s    x $14==#]#]V\#]!33<<#$))!,F\\&)F$QT*F(%4%%ffdkkLVLD$!//))	
 	
r,   )NNN)r#   r$   r%   rH   r   r   r   r'   r   r   r   r   rb   rd   re   s   @r-   ra  ra    s      04,0)-	n
u||,n
 ELL)n
 &	n

 +,n
 
n
  n
r,   ra  )rA  r  r
  ra  )r   )Fr&   collections.abcrq   rU   r   dataclassesr   typingr   r   numpyr5   r'   torch.utils.checkpointr   torch.nnr   activationsr
   modeling_layersr   modeling_outputsr   r   modeling_utilsr   r   processing_utilsr   pytorch_utilsr   r   utilsr   r   r   r   utils.constantsr   r   utils.genericr   r   configuration_videomaer   
get_loggerr#   loggerr   r/   rB   ModulerD   rI   r   floatr   r   r   r   r   r   r   r   r
  r  r2  rA  ra  __all__r+   r,   r-   <module>r}     s\   3   ! %      ! 9 F F & Q M M J A 2 
		H	% 
:K : : 
:; : : : B2bii 2z %II%<<% 
% <<	%
 U\\*% % %<9.BII 9.z $		 @299  
RYY 
. >@bii @ *o * *6 BB+ BB BBJ#4bii #4L 
c
4 c

c
L ~
%< ~
~
B sr,   