"""PyTorch ViT model."""

import collections.abc
import math
from typing import Callable, Optional, Union

import torch
import torch.utils.checkpoint
from torch import nn

from ...activations import ACT2FN
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import (
    BaseModelOutput,
    BaseModelOutputWithPooling,
    ImageClassifierOutput,
    MaskedImageModelingOutput,
)
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import TransformersKwargs, auto_docstring, logging, torch_int
from ...utils.generic import can_return_tuple, check_model_inputs
from .configuration_vit import ViTConfig


logger = logging.get_logger(__name__)

class ViTEmbeddings(nn.Module):
    """
    Construct the CLS token, position and patch embeddings. Optionally, also the mask token.
    """

    def __init__(self, config: ViTConfig, use_mask_token: bool = False) -> None:
        super().__init__()

        self.cls_token = nn.Parameter(torch.randn(1, 1, config.hidden_size))
        self.mask_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) if use_mask_token else None
        self.patch_embeddings = ViTPatchEmbeddings(config)
        num_patches = self.patch_embeddings.num_patches
        self.position_embeddings = nn.Parameter(torch.randn(1, num_patches + 1, config.hidden_size))
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.patch_size = config.patch_size
        self.config = config

    def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
        """
        This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
        images. This method is also adapted to support torch.jit tracing.

        Adapted from:
        - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
        - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
        """

        num_patches = embeddings.shape[1] - 1
        num_positions = self.position_embeddings.shape[1] - 1

        # always interpolate when tracing to ensure the exported model works for dynamic input shapes
        if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
            return self.position_embeddings

        class_pos_embed = self.position_embeddings[:, :1]
        patch_pos_embed = self.position_embeddings[:, 1:]

        dim = embeddings.shape[-1]

        new_height = height // self.patch_size
        new_width = width // self.patch_size

        sqrt_num_positions = torch_int(num_positions**0.5)
        patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
        patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)

        patch_pos_embed = nn.functional.interpolate(
            patch_pos_embed,
            size=(new_height, new_width),
            mode="bicubic",
            align_corners=False,
        )

        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)

        return torch.cat((class_pos_embed, patch_pos_embed), dim=1)

    def forward(
        self,
        pixel_values: torch.Tensor,
        bool_masked_pos: Optional[torch.BoolTensor] = None,
        interpolate_pos_encoding: bool = False,
    ) -> torch.Tensor:
        batch_size, num_channels, height, width = pixel_values.shape
        embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)

        if bool_masked_pos is not None:
            seq_length = embeddings.shape[1]
            mask_tokens = self.mask_token.expand(batch_size, seq_length, -1)
            # replace the masked visual tokens by mask_tokens
            mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
            embeddings = embeddings * (1.0 - mask) + mask_tokens * mask

        # add the [CLS] token to the embedded patch tokens
        cls_tokens = self.cls_token.expand(batch_size, -1, -1)
        embeddings = torch.cat((cls_tokens, embeddings), dim=1)

        # add positional encoding to each token
        if interpolate_pos_encoding:
            embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
        else:
            embeddings = embeddings + self.position_embeddings

        embeddings = self.dropout(embeddings)

        return embeddings

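# Shape note for `ViTEmbeddings.interpolate_pos_encoding` (illustrative numbers only, assuming the
# standard ViT-Base/16 configuration pre-trained at 224x224):
#   - pre-training grid: 224 / 16 = 14 patches per side -> 196 patch positions (+ 1 CLS position).
#   - fine-tuning at 384x384: 384 / 16 = 24 patches per side -> 576 positions are needed.
# The 196 learned patch position embeddings are reshaped to (1, 14, 14, hidden_size), bicubically
# resized to (1, 24, 24, hidden_size), flattened back to (1, 576, hidden_size), and re-concatenated
# with the CLS position embedding, which is never interpolated.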
class ViTPatchEmbeddings(nn.Module):
    """
    This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
    `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
    Transformer.
    """

    def __init__(self, config: ViTConfig) -> None:
        super().__init__()
        image_size, patch_size = config.image_size, config.patch_size
        num_channels, hidden_size = config.num_channels, config.hidden_size

        image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_patches = num_patches

        # a Conv2d with kernel_size == stride == patch_size is equivalent to splitting the image
        # into non-overlapping patches and linearly projecting each one
        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)

    def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool = False) -> torch.Tensor:
        batch_size, num_channels, height, width = pixel_values.shape
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
                f" Expected {self.num_channels} but got {num_channels}."
            )
        if not interpolate_pos_encoding:
            if height != self.image_size[0] or width != self.image_size[1]:
                raise ValueError(
                    f"Input image size ({height}*{width}) doesn't match model"
                    f" ({self.image_size[0]}*{self.image_size[1]})."
                )
        embeddings = self.projection(pixel_values).flatten(2).transpose(1, 2)
        return embeddings


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    # Take the dot product between "query" and "key" to get the raw attention scores.
    attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling

    # Normalize the attention scores to probabilities (in float32 for numerical stability).
    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)

    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)

    # Mask heads if we want to
    if attention_mask is not None:
        attn_weights = attn_weights * attention_mask

    attn_output = torch.matmul(attn_weights, value)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights


class ViTSelfAttention(nn.Module):
    def __init__(self, config: ViTConfig) -> None:
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
                f"heads {config.num_attention_heads}."
            )

        self.config = config
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.dropout_prob = config.attention_probs_dropout_prob
        self.scaling = self.attention_head_size**-0.5
        self.is_causal = False

        self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
        self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
        self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)

    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        batch_size = hidden_states.shape[0]
        new_shape = (batch_size, -1, self.num_attention_heads, self.attention_head_size)

        key_layer = self.key(hidden_states).view(*new_shape).transpose(1, 2)
        value_layer = self.value(hidden_states).view(*new_shape).transpose(1, 2)
        query_layer = self.query(hidden_states).view(*new_shape).transpose(1, 2)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        context_layer, attention_probs = attention_interface(
            self,
            query_layer,
            key_layer,
            value_layer,
            head_mask,
            is_causal=self.is_causal,
            scaling=self.scaling,
            dropout=0.0 if not self.training else self.dropout_prob,
        )

        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.reshape(new_context_layer_shape)

        return context_layer, attention_probs

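# Shape walk-through for the patch/attention pipeline (illustrative, assuming ViT-Base/16 on a
# 224x224 RGB input; exact sizes depend on the ViTConfig in use):
#   pixel_values                     (batch, 3, 224, 224)
#   ViTPatchEmbeddings            -> (batch, 196, 768)   # 14 x 14 patches, each projected to 768
#   + CLS token (ViTEmbeddings)   -> (batch, 197, 768)
#   ViTSelfAttention              -> queries/keys/values of shape (batch, 12 heads, 197, 64),
#                                    with scores scaled by 1/sqrt(64) before the softmax.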
class ViTSelfOutput(nn.Module):
    """
    The residual connection is defined in ViTLayer instead of here (as is the case with other models), due to the
    layernorm applied before each block.
    """

    def __init__(self, config: ViTConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        return hidden_states


class ViTAttention(nn.Module):
    def __init__(self, config: ViTConfig) -> None:
        super().__init__()
        self.attention = ViTSelfAttention(config)
        self.output = ViTSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads: set[int]) -> None:
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.attention.query = prune_linear_layer(self.attention.query, index)
        self.attention.key = prune_linear_layer(self.attention.key, index)
        self.attention.value = prune_linear_layer(self.attention.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
        self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        self_attn_output, _ = self.attention(hidden_states, head_mask)
        attention_output = self.output(self_attn_output, hidden_states)
        return attention_output


class ViTIntermediate(nn.Module):
    def __init__(self, config: ViTConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states


class ViTOutput(nn.Module):
    def __init__(self, config: ViTConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = hidden_states + input_tensor
        return hidden_states


class ViTLayer(GradientCheckpointingLayer):
    """This corresponds to the Block class in the timm implementation."""

    def __init__(self, config: ViTConfig) -> None:
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = ViTAttention(config)
        self.intermediate = ViTIntermediate(config)
        self.output = ViTOutput(config)
        self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        # in ViT, layernorm is applied before self-attention
        hidden_states_norm = self.layernorm_before(hidden_states)
        attention_output = self.attention(hidden_states_norm, head_mask)

        # first residual connection
        hidden_states = attention_output + hidden_states

        # in ViT, layernorm is also applied after self-attention
        layer_output = self.layernorm_after(hidden_states)
        layer_output = self.intermediate(layer_output)

        # second residual connection is done here
        layer_output = self.output(layer_output, hidden_states)

        return layer_output


class ViTEncoder(nn.Module):
    def __init__(self, config: ViTConfig) -> None:
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([ViTLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
    ) -> BaseModelOutput:
        for i, layer_module in enumerate(self.layer):
            layer_head_mask = head_mask[i] if head_mask is not None else None
            hidden_states = layer_module(hidden_states, layer_head_mask)

        return BaseModelOutput(last_hidden_state=hidden_states)


@auto_docstring
class ViTPreTrainedModel(PreTrainedModel):
    config: ViTConfig
    base_model_prefix = "vit"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True
    _no_split_modules = ["ViTEmbeddings", "ViTLayer"]
    _supports_sdpa = True
    _supports_flash_attn = True
    _supports_flex_attn = True
    _supports_attention_backend = True
    _can_record_outputs = {
        "hidden_states": ViTLayer,
        "attentions": ViTSelfAttention,
    }

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            # Upcast to float32 for `trunc_normal_`, then cast back to the original dtype.
            module.weight.data = nn.init.trunc_normal_(
                module.weight.data.to(torch.float32), mean=0.0, std=self.config.initializer_range
            ).to(module.weight.dtype)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, ViTEmbeddings):
            module.position_embeddings.data = nn.init.trunc_normal_(
                module.position_embeddings.data.to(torch.float32),
                mean=0.0,
                std=self.config.initializer_range,
            ).to(module.position_embeddings.dtype)
            module.cls_token.data = nn.init.trunc_normal_(
                module.cls_token.data.to(torch.float32),
                mean=0.0,
                std=self.config.initializer_range,
            ).to(module.cls_token.dtype)
            if module.mask_token is not None:
                module.mask_token.data.zero_()

@auto_docstring
class ViTModel(ViTPreTrainedModel):
    def __init__(self, config: ViTConfig, add_pooling_layer: bool = True, use_mask_token: bool = False):
        r"""
        add_pooling_layer (bool, *optional*, defaults to `True`):
            Whether to add a pooling layer
        use_mask_token (`bool`, *optional*, defaults to `False`):
            Whether to use a mask token for masked image modeling.
        """
        super().__init__(config)
        self.config = config

        self.embeddings = ViTEmbeddings(config, use_mask_token=use_mask_token)
        self.encoder = ViTEncoder(config)

        self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.pooler = ViTPooler(config) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> ViTPatchEmbeddings:
        return self.embeddings.patch_embeddings

    def _prune_heads(self, heads_to_prune: dict[int, list[int]]) -> None:
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @check_model_inputs
    @auto_docstring
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        bool_masked_pos: Optional[torch.BoolTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        interpolate_pos_encoding: Optional[bool] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPooling:
        r"""
        bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`, *optional*):
            Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
        """
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        # Prepare head mask if needed (1.0 in head_mask indicates we keep the head)
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        # Cast the input to the dtype of the patch projection if needed
        expected_dtype = self.embeddings.patch_embeddings.projection.weight.dtype
        if pixel_values.dtype != expected_dtype:
            pixel_values = pixel_values.to(expected_dtype)

        embedding_output = self.embeddings(
            pixel_values, bool_masked_pos=bool_masked_pos, interpolate_pos_encoding=interpolate_pos_encoding
        )

        encoder_outputs: BaseModelOutput = self.encoder(embedding_output, head_mask=head_mask)

        sequence_output = encoder_outputs.last_hidden_state
        sequence_output = self.layernorm(sequence_output)
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        return BaseModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output)

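# Minimal feature-extraction sketch for ViTModel (kept as a comment so it is not executed on
# import; the checkpoint name is only an example -- any ViT checkpoint works):
#
#   from transformers import AutoImageProcessor, ViTModel
#   processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k")
#   model = ViTModel.from_pretrained("google/vit-base-patch16-224-in21k")
#   inputs = processor(images=image, return_tensors="pt")
#   outputs = model(**inputs)
#   # outputs.last_hidden_state: (1, 197, 768) for ViT-Base/16 at 224x224 (196 patches + 1 CLS)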
class ViTPooler(nn.Module):
    def __init__(self, config: ViTConfig):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.pooler_output_size)
        self.activation = ACT2FN[config.pooler_act]

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # We "pool" the model by simply taking the hidden state corresponding to the first token.
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output

@auto_docstring(
    custom_intro="""
    ViT Model with a decoder on top for masked image modeling, as proposed in [SimMIM](https://huggingface.co/papers/2111.09886).

    <Tip>

    Note that we provide a script to pre-train this model on custom data in our [examples
    directory](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining).

    </Tip>
    """
)
class ViTForMaskedImageModeling(ViTPreTrainedModel):
    def __init__(self, config: ViTConfig) -> None:
        super().__init__(config)

        self.vit = ViTModel(config, add_pooling_layer=False, use_mask_token=True)

        self.decoder = nn.Sequential(
            nn.Conv2d(
                in_channels=config.hidden_size,
                out_channels=config.encoder_stride**2 * config.num_channels,
                kernel_size=1,
            ),
            nn.PixelShuffle(config.encoder_stride),
        )

        # Initialize weights and apply final processing
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        bool_masked_pos: Optional[torch.BoolTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        interpolate_pos_encoding: Optional[bool] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> MaskedImageModelingOutput:
        r"""
        bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`):
            Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).

        Examples:
        ```python
        >>> from transformers import AutoImageProcessor, ViTForMaskedImageModeling
        >>> import torch
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k")
        >>> model = ViTForMaskedImageModeling.from_pretrained("google/vit-base-patch16-224-in21k")

        >>> num_patches = (model.config.image_size // model.config.patch_size) ** 2
        >>> pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
        >>> # create random boolean mask of shape (batch_size, num_patches)
        >>> bool_masked_pos = torch.randint(low=0, high=2, size=(1, num_patches)).bool()

        >>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos)
        >>> loss, reconstructed_pixel_values = outputs.loss, outputs.reconstruction
        >>> list(reconstructed_pixel_values.shape)
        [1, 3, 224, 224]
        ```"""
        if bool_masked_pos is not None and (self.config.patch_size != self.config.encoder_stride):
            raise ValueError(
                "When `bool_masked_pos` is provided, `patch_size` must be equal to `encoder_stride` to ensure that "
                "the reconstructed image has the same dimensions as the input. "
                f"Got `patch_size` = {self.config.patch_size} and `encoder_stride` = {self.config.encoder_stride}."
            )

        outputs: BaseModelOutputWithPooling = self.vit(
            pixel_values,
            bool_masked_pos=bool_masked_pos,
            head_mask=head_mask,
            interpolate_pos_encoding=interpolate_pos_encoding,
            **kwargs,
        )

        sequence_output = outputs.last_hidden_state

        # Reshape to (batch_size, num_channels, height, width), dropping the CLS token
        sequence_output = sequence_output[:, 1:]
        batch_size, sequence_length, num_channels = sequence_output.shape
        height = width = math.floor(sequence_length**0.5)
        sequence_output = sequence_output.permute(0, 2, 1).reshape(batch_size, num_channels, height, width)

        # Reconstruct pixel values
        reconstructed_pixel_values = self.decoder(sequence_output)

        masked_im_loss = None
        if bool_masked_pos is not None:
            size = self.config.image_size // self.config.patch_size
            bool_masked_pos = bool_masked_pos.reshape(-1, size, size)
            mask = (
                bool_masked_pos.repeat_interleave(self.config.patch_size, 1)
                .repeat_interleave(self.config.patch_size, 2)
                .unsqueeze(1)
                .contiguous()
            )
            reconstruction_loss = nn.functional.l1_loss(pixel_values, reconstructed_pixel_values, reduction="none")
            masked_im_loss = (reconstruction_loss * mask).sum() / (mask.sum() + 1e-5) / self.config.num_channels

        return MaskedImageModelingOutput(
            loss=masked_im_loss,
            reconstruction=reconstructed_pixel_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

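# Decoder shape note for ViTForMaskedImageModeling (illustrative, assuming the default ViT-Base/16
# configuration where encoder_stride == patch_size == 16 and num_channels == 3):
#   encoder tokens (without CLS)   (batch, 196, 768)
#   -> reshape to a feature map    (batch, 768, 14, 14)
#   -> Conv2d 1x1                  (batch, 16 * 16 * 3 = 768, 14, 14)
#   -> PixelShuffle(16)            (batch, 3, 224, 224)   # reconstructed pixel values
# The L1 loss is then computed only over the masked patches given by `bool_masked_pos`.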
@auto_docstring(
    custom_intro="""
    ViT Model transformer with an image classification head on top (a linear layer on top of the final hidden state of
    the [CLS] token) e.g. for ImageNet.

    <Tip>

        Note that it's possible to fine-tune ViT on higher resolution images than the ones it has been trained on, by
        setting `interpolate_pos_encoding` to `True` in the forward of the model. This will interpolate the pre-trained
        position embeddings to the higher resolution.

    </Tip>
    """
)
class ViTForImageClassification(ViTPreTrainedModel):
    def __init__(self, config: ViTConfig) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.vit = ViTModel(config, add_pooling_layer=False)

        # Classifier head
        self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        interpolate_pos_encoding: Optional[bool] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> ImageClassifierOutput:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        outputs: BaseModelOutputWithPooling = self.vit(
            pixel_values,
            head_mask=head_mask,
            interpolate_pos_encoding=interpolate_pos_encoding,
            **kwargs,
        )

        sequence_output = outputs.last_hidden_state

        # classify from the final hidden state of the [CLS] token
        pooled_output = sequence_output[:, 0, :]
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            loss = self.loss_function(labels, logits, self.config, **kwargs)

        return ImageClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

__all__ = ["ViTForImageClassification", "ViTForMaskedImageModeling", "ViTModel", "ViTPreTrainedModel"]
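# Classification usage sketch for ViTForImageClassification (kept as a comment so it is not
# executed on import; the checkpoint name and the 384x384 size are only examples):
#
#   from transformers import AutoImageProcessor, ViTForImageClassification
#   processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
#   model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
#   inputs = processor(images=image, return_tensors="pt")
#   logits = model(**inputs).logits                 # shape: (1, num_labels)
#   predicted_class = logits.argmax(-1).item()
#
#   # For higher-resolution inputs, resize the image (e.g. to 384x384, giving a (1, 3, 384, 384)
#   # pixel_values tensor) and pass `interpolate_pos_encoding=True` so the 224x224 position
#   # embeddings are bicubically interpolated to the larger patch grid:
#   outputs = model(pixel_values, interpolate_pos_encoding=True)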