
"""PyTorch ViViT model."""

from typing import Callable, Optional

import torch
import torch.utils.checkpoint
from torch import nn

from ...activations import ACT2FN
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import TransformersKwargs, auto_docstring, logging, torch_int
from ...utils.generic import can_return_tuple, check_model_inputs
from .configuration_vivit import VivitConfig


logger = logging.get_logger(__name__)


class VivitTubeletEmbeddings(nn.Module):
    """
    Construct Vivit Tubelet embeddings.

    This module turns a batch of videos of shape (batch_size, num_frames, num_channels, height, width) into a tensor of
    shape (batch_size, seq_len, hidden_size) to be consumed by a Transformer encoder.

    The seq_len (the number of patches) equals (number of frames // tubelet_size[0]) * (height // tubelet_size[1]) *
    (width // tubelet_size[2]).
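
    Example (illustrative, assuming the `google/vivit-b-16x2-kinetics400` defaults: 32 frames of 224x224 pixels and
    a tubelet size of `[2, 16, 16]`):

    ```python
    >>> num_patches = (32 // 2) * (224 // 16) * (224 // 16)
    >>> num_patches
    3136
    ```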
    """

    def __init__(self, config: VivitConfig):
        super().__init__()
        self.num_frames = config.num_frames
        self.image_size = config.image_size
        self.patch_size = config.tubelet_size
        self.num_patches = (
            (self.image_size // self.patch_size[2])
            * (self.image_size // self.patch_size[1])
            * (self.num_frames // self.patch_size[0])
        )
        self.embed_dim = config.hidden_size

        self.projection = nn.Conv3d(
            config.num_channels, config.hidden_size, kernel_size=config.tubelet_size, stride=config.tubelet_size
        )

    def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool = False) -> torch.Tensor:
        batch_size, num_frames, num_channels, height, width = pixel_values.shape
        if not interpolate_pos_encoding and (height != self.image_size or width != self.image_size):
            raise ValueError(
                f"Image image size ({height}*{width}) doesn't match model"
                f" ({self.image_size}*{self.image_size})."
            )

        # permute to (batch_size, num_channels, num_frames, height, width)
        pixel_values = pixel_values.permute(0, 2, 1, 3, 4)

        x = self.projection(pixel_values)
        # flatten the (frames, height, width) tubelet grid into a sequence of patch embeddings
        x = x.flatten(2).transpose(1, 2)
        return x


class VivitEmbeddings(nn.Module):
    """
    Vivit Embeddings.

    Creates embeddings from a video using VivitTubeletEmbeddings, adds CLS token and positional embeddings.
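
    Example (illustrative sketch, assuming the default `VivitConfig`, i.e. 32 frames of 224x224 pixels and a hidden
    size of 768):

    ```python
    >>> import torch
    >>> from transformers import VivitConfig
    >>> from transformers.models.vivit.modeling_vivit import VivitEmbeddings

    >>> embeddings = VivitEmbeddings(VivitConfig())
    >>> video = torch.randn(1, 32, 3, 224, 224)
    >>> embeddings(video).shape  # 3136 tubelet patches + 1 [CLS] token
    torch.Size([1, 3137, 768])
    ```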
    r   c                    t         |           t        j                  t	        j
                  dd|j                              | _        t        |      | _	        t        j                  t	        j
                  d| j                  j                  dz   |j                              | _        t        j                  |j                        | _        |j                  dd  | _        || _        y )Nr   )r    r!   r   	ParameterrF   zerosr'   	cls_tokenr   patch_embeddingsr&   position_embeddingsDropouthidden_dropout_probdropoutr$   r%   r   r,   s     r/   r!   zVivitEmbeddings.__init__X   s    ekk!Q8J8J&KL 6v >#%<<KK400<<q@&BTBTU$
  zz&"<"<= --ab1r0   
embeddingsr=   r>   r3   c                    |j                   d   dz
  }| j                  j                   d   dz
  }t        j                  j	                         s||k(  r||k(  r| j                  S | j                  ddddf   }| j                  ddddf   }|j                   d   }|| j
                  d   z  }	|| j
                  d   z  }
t        |dz        }|j                  d|||      }|j                  dddd      }t        j                  j                  ||	|
fdd	
      }|j                  dddd      j                  dd|      }t        j                  ||fd      S )a   
        This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
        images. This method is also adapted to support torch.jit tracing.

        Adapted from:
        - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
        - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
        """
        num_patches = embeddings.shape[1] - 1
        num_positions = self.position_embeddings.shape[1] - 1

        # always interpolate when tracing to ensure the exported model works for dynamic input shapes
        if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
            return self.position_embeddings

        class_pos_embed = self.position_embeddings[:, :1]
        patch_pos_embed = self.position_embeddings[:, 1:]

        dim = embeddings.shape[-1]

        new_height = height // self.patch_size[0]
        new_width = width // self.patch_size[1]

        sqrt_num_positions = torch_int(num_positions**0.5)
        patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
        patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)

        patch_pos_embed = nn.functional.interpolate(
            patch_pos_embed,
            size=(new_height, new_width),
            mode="bicubic",
            align_corners=False,
        )

        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)

        return torch.cat((class_pos_embed, patch_pos_embed), dim=1)

    def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool = False) -> torch.Tensor:
        batch_size, num_frames, num_channels, height, width = pixel_values.shape
        embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)

        cls_tokens = self.cls_token.tile([batch_size, 1, 1])
        embeddings = torch.cat((cls_tokens, embeddings), dim=1)

        # add positional encoding to each token
        if interpolate_pos_encoding:
            embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
        else:
            embeddings = embeddings + self.position_embeddings

        embeddings = self.dropout(embeddings)

        return embeddings


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    # Take the dot product between "query" and "key" to get the raw attention scores.
    attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling

    # Normalize the attention scores to probabilities.
    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)

    # This is actually dropping out entire tokens to attend to, which might
    # seem a bit unusual, but is taken from the original Transformer paper.
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)

    # Mask heads if we want to
    if attention_mask is not None:
        attn_weights = attn_weights * attention_mask

    attn_output = torch.matmul(attn_weights, value)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights


class VivitSelfAttention(nn.Module):
    def __init__(self, config: VivitConfig) -> None:
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
                f"heads {config.num_attention_heads}."
            )

        self.config = config
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.dropout_prob = config.attention_probs_dropout_prob
        self.scaling = self.attention_head_size**-0.5
        self.is_causal = False

        self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
        self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
        self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)

    def forward(
        self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None
    ) -> tuple[torch.Tensor, torch.Tensor]:
        batch_size = hidden_states.shape[0]
        new_shape = batch_size, -1, self.num_attention_heads, self.attention_head_size

        key_layer = self.key(hidden_states).view(*new_shape).transpose(1, 2)
        value_layer = self.value(hidden_states).view(*new_shape).transpose(1, 2)
        query_layer = self.query(hidden_states).view(*new_shape).transpose(1, 2)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        context_layer, attention_probs = attention_interface(
            self,
            query_layer,
            key_layer,
            value_layer,
            head_mask,
            is_causal=self.is_causal,
            scaling=self.scaling,
            dropout=0.0 if not self.training else self.dropout_prob,
        )

        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.reshape(new_context_layer_shape)

        return context_layer, attention_probs


class VivitSelfOutput(nn.Module):
    """
    The residual connection is defined in VivitLayer instead of here (as is the case with other models), due to the
    layernorm applied before each block.
    """

    def __init__(self, config: VivitConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)

        return hidden_states


class VivitAttention(nn.Module):
    def __init__(self, config: VivitConfig) -> None:
        super().__init__()
        self.attention = VivitSelfAttention(config)
        self.output = VivitSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.attention.query = prune_linear_layer(self.attention.query, index)
        self.attention.key = prune_linear_layer(self.attention.key, index)
        self.attention.value = prune_linear_layer(self.attention.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
        self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
        self_attn_output, _ = self.attention(hidden_states, head_mask)
        attention_output = self.output(self_attn_output, hidden_states)
        return attention_output


class VivitIntermediate(nn.Module):
    def __init__(self, config: VivitConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        hidden_states = self.dropout(hidden_states)

        return hidden_states


class VivitOutput(nn.Module):
    def __init__(self, config: VivitConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)

        hidden_states = hidden_states + input_tensor

        return hidden_states


class VivitLayer(GradientCheckpointingLayer):
    """This corresponds to the EncoderBlock class in the scenic/vivit implementation."""

    def __init__(self, config: VivitConfig) -> None:
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = VivitAttention(config)
        self.intermediate = VivitIntermediate(config)
        self.output = VivitOutput(config)
        self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
        # in Vivit, layernorm is applied before self-attention
        hidden_states_norm = self.layernorm_before(hidden_states)
        attention_output = self.attention(hidden_states_norm, head_mask)

        # first residual connection
        hidden_states = attention_output + hidden_states

        # in Vivit, layernorm is also applied after self-attention
        layer_output = self.layernorm_after(hidden_states)
        layer_output = self.intermediate(layer_output)

        # second residual connection is done here
        layer_output = self.output(layer_output, hidden_states)

        return layer_output


class VivitEncoder(nn.Module):
    def __init__(self, config: VivitConfig) -> None:
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([VivitLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None) -> BaseModelOutput:
        for i, layer_module in enumerate(self.layer):
            layer_head_mask = head_mask[i] if head_mask is not None else None
            hidden_states = layer_module(hidden_states, layer_head_mask)

        return BaseModelOutput(last_hidden_state=hidden_states)


class VivitPooler(nn.Module):
    def __init__(self, config: VivitConfig):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token.
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output


@auto_docstring
class VivitPreTrainedModel(PreTrainedModel):
    config: VivitConfig
    base_model_prefix = "vivit"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True
    _no_split_modules = []
    _supports_sdpa = True
    _supports_flash_attn = True
    _supports_flex_attn = True
    _supports_attention_backend = True
    _can_record_outputs = {
        "hidden_states": VivitLayer,
        "attentions": VivitSelfAttention,
    }

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv3d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, VivitEmbeddings):
            module.cls_token.data.zero_()
            module.position_embeddings.data.zero_()


@auto_docstring
class VivitModel(VivitPreTrainedModel):
    def __init__(self, config: VivitConfig, add_pooling_layer: bool = True):
        r"""
        add_pooling_layer (bool, *optional*, defaults to `True`):
            Whether to add a pooling layer
        """
        super().__init__(config)
        self.config = config

        self.embeddings = VivitEmbeddings(config)
        self.encoder = VivitEncoder(config)

        self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.pooler = VivitPooler(config) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model.

        Args:
            heads_to_prune:
                dict of {layer_num: list of heads to prune in this layer}
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @check_model_inputs
    @auto_docstring
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        interpolate_pos_encoding: bool = False,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPooling:
        r"""
        Examples:

        ```python
        >>> import av
        >>> import numpy as np

        >>> from transformers import VivitImageProcessor, VivitModel
        >>> from huggingface_hub import hf_hub_download

        >>> np.random.seed(0)


        >>> def read_video_pyav(container, indices):
        ...     '''
        ...     Decode the video with PyAV decoder.
        ...     Args:
        ...         container (`av.container.input.InputContainer`): PyAV container.
        ...         indices (`list[int]`): List of frame indices to decode.
        ...     Returns:
        ...         result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
        ...     '''
        ...     frames = []
        ...     container.seek(0)
        ...     start_index = indices[0]
        ...     end_index = indices[-1]
        ...     for i, frame in enumerate(container.decode(video=0)):
        ...         if i > end_index:
        ...             break
        ...         if i >= start_index and i in indices:
        ...             frames.append(frame)
        ...     return np.stack([x.to_ndarray(format="rgb24") for x in frames])


        >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
        ...     '''
        ...     Sample a given number of frame indices from the video.
        ...     Args:
        ...         clip_len (`int`): Total number of frames to sample.
        ...         frame_sample_rate (`int`): Sample every n-th frame.
        ...         seg_len (`int`): Maximum allowed index of sample's last frame.
        ...     Returns:
        ...         indices (`list[int]`): List of sampled frame indices
        ...     '''
        ...     converted_len = int(clip_len * frame_sample_rate)
        ...     end_idx = np.random.randint(converted_len, seg_len)
        ...     start_idx = end_idx - converted_len
        ...     indices = np.linspace(start_idx, end_idx, num=clip_len)
        ...     indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
        ...     return indices


        >>> # video clip consists of 300 frames (10 seconds at 30 FPS)
        >>> file_path = hf_hub_download(
        ...     repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
        ... )
        >>> container = av.open(file_path)

        >>> # sample 32 frames
        >>> indices = sample_frame_indices(clip_len=32, frame_sample_rate=1, seg_len=container.streams.video[0].frames)
        >>> video = read_video_pyav(container=container, indices=indices)

        >>> image_processor = VivitImageProcessor.from_pretrained("google/vivit-b-16x2-kinetics400")
        >>> model = VivitModel.from_pretrained("google/vivit-b-16x2-kinetics400")

        >>> # prepare video for the model
        >>> inputs = image_processor(list(video), return_tensors="pt")
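        >>> # inputs["pixel_values"] has shape (batch_size, num_frames, num_channels, height, width) == (1, 32, 3, 224, 224)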

        >>> # forward pass
        >>> outputs = model(**inputs)
        >>> last_hidden_states = outputs.last_hidden_state
        >>> list(last_hidden_states.shape)
        [1, 3137, 768]
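        >>> # 3137 = 1 [CLS] token + (32 // 2) * (224 // 16) * (224 // 16) tubelet patches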
        ```"""
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        # Prepare head mask if needed (1.0 in head_mask indicates we keep the head)
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)

        encoder_outputs = self.encoder(embedding_output, head_mask=head_mask)
        sequence_output = encoder_outputs.last_hidden_state
        sequence_output = self.layernorm(sequence_output)
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        return BaseModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output)


@auto_docstring(
    custom_intro="""
        ViViT Transformer model with a video classification head on top (a linear layer on top of the final hidden state of the
    [CLS] token) e.g. for Kinetics-400.

        <Tip>

            Note that it's possible to fine-tune ViT on higher resolution images than the ones it has been trained on, by
            setting `interpolate_pos_encoding` to `True` in the forward of the model. This will interpolate the pre-trained
            position embeddings to the higher resolution.

        </Tip>
    """
)
class VivitForVideoClassification(VivitPreTrainedModel):
    def __init__(self, config: VivitConfig) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.vivit = VivitModel(config, add_pooling_layer=False)

        # Classifier head
        self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        interpolate_pos_encoding: bool = False,
        **kwargs: Unpack[TransformersKwargs],
    ) -> ImageClassifierOutput:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).

        Examples:

        ```python
        >>> import av
        >>> import numpy as np
        >>> import torch

        >>> from transformers import VivitImageProcessor, VivitForVideoClassification
        >>> from huggingface_hub import hf_hub_download

        >>> np.random.seed(0)


        >>> def read_video_pyav(container, indices):
        ...     '''
        ...     Decode the video with PyAV decoder.
        ...     Args:
        ...         container (`av.container.input.InputContainer`): PyAV container.
        ...         indices (`list[int]`): List of frame indices to decode.
        ...     Returns:
        ...         result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
        ...     '''
        ...     frames = []
        ...     container.seek(0)
        ...     start_index = indices[0]
        ...     end_index = indices[-1]
        ...     for i, frame in enumerate(container.decode(video=0)):
        ...         if i > end_index:
        ...             break
        ...         if i >= start_index and i in indices:
        ...             frames.append(frame)
        ...     return np.stack([x.to_ndarray(format="rgb24") for x in frames])


        >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
        ...     '''
        ...     Sample a given number of frame indices from the video.
        ...     Args:
        ...         clip_len (`int`): Total number of frames to sample.
        ...         frame_sample_rate (`int`): Sample every n-th frame.
        ...         seg_len (`int`): Maximum allowed index of sample's last frame.
        ...     Returns:
        ...         indices (`list[int]`): List of sampled frame indices
        ...     '''
        ...     converted_len = int(clip_len * frame_sample_rate)
        ...     end_idx = np.random.randint(converted_len, seg_len)
        ...     start_idx = end_idx - converted_len
        ...     indices = np.linspace(start_idx, end_idx, num=clip_len)
        ...     indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
        ...     return indices


        >>> # video clip consists of 300 frames (10 seconds at 30 FPS)
        >>> file_path = hf_hub_download(
        ...     repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
        ... )
        >>> container = av.open(file_path)

        >>> # sample 32 frames
        >>> indices = sample_frame_indices(clip_len=32, frame_sample_rate=4, seg_len=container.streams.video[0].frames)
        >>> video = read_video_pyav(container=container, indices=indices)

        >>> image_processor = VivitImageProcessor.from_pretrained("google/vivit-b-16x2-kinetics400")
        >>> model = VivitForVideoClassification.from_pretrained("google/vivit-b-16x2-kinetics400")

        >>> inputs = image_processor(list(video), return_tensors="pt")

        >>> with torch.no_grad():
        ...     outputs = model(**inputs)
        ...     logits = outputs.logits

        >>> # model predicts one of the 400 Kinetics-400 classes
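        >>> # logits has shape (batch_size, config.num_labels) == (1, 400)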
        >>> predicted_label = logits.argmax(-1).item()
        >>> print(model.config.id2label[predicted_label])
        LABEL_116
        ```"""
        outputs = self.vivit(
            pixel_values,
            head_mask=head_mask,
            interpolate_pos_encoding=interpolate_pos_encoding,
            **kwargs,
        )

        sequence_output = outputs.last_hidden_state

        logits = self.classifier(sequence_output[:, 0, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(labels, logits, self.config, **kwargs)

        return ImageClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


__all__ = ["VivitModel", "VivitPreTrainedModel", "VivitForVideoClassification"]