
import math
from collections import OrderedDict
from functools import partial
from typing import Any, Callable, NamedTuple, Optional

import torch
import torch.nn as nn

from ..ops.misc import Conv2dNormActivation, MLP
from ..transforms._presets import ImageClassification, InterpolationMode
from ..utils import _log_api_usage_once
from ._api import register_model, Weights, WeightsEnum
from ._meta import _IMAGENET_CATEGORIES
from ._utils import _ovewrite_named_param, handle_legacy_interface


__all__ = [
    "VisionTransformer",
    "ViT_B_16_Weights",
    "ViT_B_32_Weights",
    "ViT_L_16_Weights",
    "ViT_L_32_Weights",
    "ViT_H_14_Weights",
    "vit_b_16",
    "vit_b_32",
    "vit_l_16",
    "vit_l_32",
    "vit_h_14",
]


class ConvStemConfig(NamedTuple):
    out_channels: int
    kernel_size: int
    stride: int
    norm_layer: Callable[..., nn.Module] = nn.BatchNorm2d
    activation_layer: Callable[..., nn.Module] = nn.ReLU


class MLPBlock(MLP):
    """Transformer MLP block."""

    _version = 2

    def __init__(self, in_dim: int, mlp_dim: int, dropout: float):
        super().__init__(in_dim, [mlp_dim, in_dim], activation_layer=nn.GELU, inplace=None, dropout=dropout)

        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight)
                if m.bias is not None:
                    nn.init.normal_(m.bias, std=1e-6)

    def _load_from_state_dict(
        self,
        state_dict,
        prefix,
        local_metadata,
        strict,
        missing_keys,
        unexpected_keys,
        error_msgs,
    ):
        version = local_metadata.get("version", None)

        if version is None or version < 2:
            # Replacing legacy MLPBlock with MLP. See https://github.com/pytorch/vision/pull/6053
            for i in range(2):
                for type in ["weight", "bias"]:
                    old_key = f"{prefix}linear_{i+1}.{type}"
                    new_key = f"{prefix}{3*i}.{type}"
                    if old_key in state_dict:
                        state_dict[new_key] = state_dict.pop(old_key)

        super()._load_from_state_dict(
            state_dict,
            prefix,
            local_metadata,
            strict,
            missing_keys,
            unexpected_keys,
            error_msgs,
        )


class EncoderBlock(nn.Module):
    """Transformer encoder block."""

    def __init__(
        self,
        num_heads: int,
        hidden_dim: int,
        mlp_dim: int,
        dropout: float,
        attention_dropout: float,
        norm_layer: Callable[..., torch.nn.Module] = partial(nn.LayerNorm, eps=1e-6),
    ):
        super().__init__()
        self.num_heads = num_heads

        # Attention block
        self.ln_1 = norm_layer(hidden_dim)
        self.self_attention = nn.MultiheadAttention(hidden_dim, num_heads, dropout=attention_dropout, batch_first=True)
        self.dropout = nn.Dropout(dropout)

        # MLP block
        self.ln_2 = norm_layer(hidden_dim)
        self.mlp = MLPBlock(hidden_dim, mlp_dim, dropout)

    def forward(self, input: torch.Tensor):
        torch._assert(input.dim() == 3, f"Expected (batch_size, seq_length, hidden_dim) got {input.shape}")
        x = self.ln_1(input)
        x, _ = self.self_attention(x, x, x, need_weights=False)
        x = self.dropout(x)
        x = x + input

        y = self.ln_2(x)
        y = self.mlp(y)
        return x + y


class Encoder(nn.Module):
    """Transformer Model Encoder for sequence to sequence translation."""

    def __init__(
        self,
        seq_length: int,
        num_layers: int,
        num_heads: int,
        hidden_dim: int,
        mlp_dim: int,
        dropout: float,
        attention_dropout: float,
        norm_layer: Callable[..., torch.nn.Module] = partial(nn.LayerNorm, eps=1e-6),
    ):
        super().__init__()
        # Note that batch_size is on the first dim because
        # we have batch_first=True in nn.MultiheadAttention() by default
        self.pos_embedding = nn.Parameter(torch.empty(1, seq_length, hidden_dim).normal_(std=0.02))  # from BERT
        self.dropout = nn.Dropout(dropout)
        layers: OrderedDict[str, nn.Module] = OrderedDict()
        for i in range(num_layers):
            layers[f"encoder_layer_{i}"] = EncoderBlock(
                num_heads,
                hidden_dim,
                mlp_dim,
                dropout,
                attention_dropout,
                norm_layer,
            )
        self.layers = nn.Sequential(layers)
        self.ln = norm_layer(hidden_dim)

    def forward(self, input: torch.Tensor):
        torch._assert(input.dim() == 3, f"Expected (batch_size, seq_length, hidden_dim) got {input.shape}")
        input = input + self.pos_embedding
        return self.ln(self.layers(self.dropout(input)))


class VisionTransformer(nn.Module):
    """Vision Transformer as per https://arxiv.org/abs/2010.11929."""

    def __init__(
        self,
        image_size: int,
        patch_size: int,
        num_layers: int,
        num_heads: int,
        hidden_dim: int,
        mlp_dim: int,
        dropout: float = 0.0,
        attention_dropout: float = 0.0,
        num_classes: int = 1000,
        representation_size: Optional[int] = None,
        norm_layer: Callable[..., torch.nn.Module] = partial(nn.LayerNorm, eps=1e-6),
        conv_stem_configs: Optional[list[ConvStemConfig]] = None,
    ):
        super().__init__()
        _log_api_usage_once(self)
        torch._assert(image_size % patch_size == 0, "Input shape indivisible by patch size!")
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_dim = hidden_dim
        self.mlp_dim = mlp_dim
        self.attention_dropout = attention_dropout
        self.dropout = dropout
        self.num_classes = num_classes
        self.representation_size = representation_size
        self.norm_layer = norm_layer

        if conv_stem_configs is not None:
            # As per https://arxiv.org/abs/2106.14881
            seq_proj = nn.Sequential()
            prev_channels = 3
            for i, conv_stem_layer_config in enumerate(conv_stem_configs):
                seq_proj.add_module(
                    f"conv_bn_relu_{i}",
                    Conv2dNormActivation(
                        in_channels=prev_channels,
                        out_channels=conv_stem_layer_config.out_channels,
                        kernel_size=conv_stem_layer_config.kernel_size,
                        stride=conv_stem_layer_config.stride,
                        norm_layer=conv_stem_layer_config.norm_layer,
                        activation_layer=conv_stem_layer_config.activation_layer,
                    ),
                )
                prev_channels = conv_stem_layer_config.out_channels
            seq_proj.add_module(
                "conv_last", nn.Conv2d(in_channels=prev_channels, out_channels=hidden_dim, kernel_size=1)
            )
            self.conv_proj: nn.Module = seq_proj
        else:
            self.conv_proj = nn.Conv2d(
                in_channels=3, out_channels=hidden_dim, kernel_size=patch_size, stride=patch_size
            )

        seq_length = (image_size // patch_size) ** 2

        # Add a class token
        self.class_token = nn.Parameter(torch.zeros(1, 1, hidden_dim))
        seq_length += 1

        self.encoder = Encoder(
            seq_length,
            num_layers,
            num_heads,
            hidden_dim,
            mlp_dim,
            dropout,
            attention_dropout,
            norm_layer,
        )
        self.seq_length = seq_length

        heads_layers: OrderedDict[str, nn.Module] = OrderedDict()
        if representation_size is None:
            heads_layers["head"] = nn.Linear(hidden_dim, num_classes)
        else:
            heads_layers["pre_logits"] = nn.Linear(hidden_dim, representation_size)
            heads_layers["act"] = nn.Tanh()
            heads_layers["head"] = nn.Linear(representation_size, num_classes)

        self.heads = nn.Sequential(heads_layers)

        if isinstance(self.conv_proj, nn.Conv2d):
            # Init the patchify stem
            fan_in = self.conv_proj.in_channels * self.conv_proj.kernel_size[0] * self.conv_proj.kernel_size[1]
            nn.init.trunc_normal_(self.conv_proj.weight, std=math.sqrt(1 / fan_in))
            if self.conv_proj.bias is not None:
                nn.init.zeros_(self.conv_proj.bias)
        elif self.conv_proj.conv_last is not None and isinstance(self.conv_proj.conv_last, nn.Conv2d):
            # Init the last 1x1 conv of the conv stem
            nn.init.normal_(
                self.conv_proj.conv_last.weight, mean=0.0, std=math.sqrt(2.0 / self.conv_proj.conv_last.out_channels)
            )
            if self.conv_proj.conv_last.bias is not None:
                nn.init.zeros_(self.conv_proj.conv_last.bias)

        if hasattr(self.heads, "pre_logits") and isinstance(self.heads.pre_logits, nn.Linear):
            fan_in = self.heads.pre_logits.in_features
            nn.init.trunc_normal_(self.heads.pre_logits.weight, std=math.sqrt(1 / fan_in))
            nn.init.zeros_(self.heads.pre_logits.bias)

        if isinstance(self.heads.head, nn.Linear):
            nn.init.zeros_(self.heads.head.weight)
            nn.init.zeros_(self.heads.head.bias)

    def _process_input(self, x: torch.Tensor) -> torch.Tensor:
        n, c, h, w = x.shape
        p = self.patch_size
        torch._assert(h == self.image_size, f"Wrong image height! Expected {self.image_size} but got {h}!")
        torch._assert(w == self.image_size, f"Wrong image width! Expected {self.image_size} but got {w}!")
        n_h = h // p
        n_w = w // p

        # (n, c, h, w) -> (n, hidden_dim, n_h, n_w)
        x = self.conv_proj(x)
        # (n, hidden_dim, n_h, n_w) -> (n, hidden_dim, (n_h * n_w))
        x = x.reshape(n, self.hidden_dim, n_h * n_w)

        # (n, hidden_dim, (n_h * n_w)) -> (n, (n_h * n_w), hidden_dim)
        # The self attention layer expects inputs in the format (N, S, E)
        # where S is the source sequence length, N is the batch size, E is the
        # embedding dimension
        x = x.permute(0, 2, 1)

        return x

    def forward(self, x: torch.Tensor):
        # Reshape and permute the input tensor
        x = self._process_input(x)
        n = x.shape[0]

        # Expand the class token to the full batch
        batch_class_token = self.class_token.expand(n, -1, -1)
        x = torch.cat([batch_class_token, x], dim=1)

        x = self.encoder(x)

        # Classifier "token" as used by standard language architectures
        x = x[:, 0]

        x = self.heads(x)

        return x


def _vision_transformer(
    patch_size: int,
    num_layers: int,
    num_heads: int,
    hidden_dim: int,
    mlp_dim: int,
    weights: Optional[WeightsEnum],
    progress: bool,
    **kwargs: Any,
) -> VisionTransformer:
    if weights is not None:
        _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
        assert weights.meta["min_size"][0] == weights.meta["min_size"][1]
        _ovewrite_named_param(kwargs, "image_size", weights.meta["min_size"][0])
    image_size = kwargs.pop("image_size", 224)

    model = VisionTransformer(
        image_size=image_size,
        patch_size=patch_size,
        num_layers=num_layers,
        num_heads=num_heads,
        hidden_dim=hidden_dim,
        mlp_dim=mlp_dim,
        **kwargs,
    )

    if weights:
        model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))

    return model


_COMMON_META: dict[str, Any] = {
    "categories": _IMAGENET_CATEGORIES,
}

_COMMON_SWAG_META = {
    **_COMMON_META,
    "recipe": "https://github.com/facebookresearch/SWAG",
    "license": "https://github.com/facebookresearch/SWAG/blob/main/LICENSE",
}


class ViT_B_16_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/vit_b_16-c867db91.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 86567656,
            "min_size": (224, 224),
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#vit_b_16",
            "_metrics": {"ImageNet-1K": {"acc@1": 81.072, "acc@5": 95.318}},
            "_ops": 17.564,
            "_file_size": 330.285,
            "_docs": """
                These weights were trained from scratch by using a modified version of `DeIT
                <https://arxiv.org/abs/2012.12877>`_'s training recipe.
            """,
        },
    )
    IMAGENET1K_SWAG_E2E_V1 = Weights(
        url="https://download.pytorch.org/models/vit_b_16_swag-9ac1b537.pth",
        transforms=partial(
            ImageClassification, crop_size=384, resize_size=384, interpolation=InterpolationMode.BICUBIC
        ),
        meta={
            **_COMMON_SWAG_META,
            "num_params": 86859496,
            "min_size": (384, 384),
            "_metrics": {"ImageNet-1K": {"acc@1": 85.304, "acc@5": 97.650}},
            "_ops": 55.484,
            "_file_size": 331.398,
            "_docs": """
                These weights are learnt via transfer learning by end-to-end fine-tuning the original
                `SWAG <https://arxiv.org/abs/2201.08371>`_ weights on ImageNet-1K data.
            """,
        },
    )
    IMAGENET1K_SWAG_LINEAR_V1 = Weights(
        url="https://download.pytorch.org/models/vit_b_16_lc_swag-4e70ced5.pth",
        transforms=partial(
            ImageClassification, crop_size=224, resize_size=224, interpolation=InterpolationMode.BICUBIC
        ),
        meta={
            **_COMMON_SWAG_META,
            "recipe": "https://github.com/pytorch/vision/pull/5793",
            "num_params": 86567656,
            "min_size": (224, 224),
            "_metrics": {"ImageNet-1K": {"acc@1": 81.886, "acc@5": 96.180}},
            "_ops": 17.564,
            "_file_size": 330.285,
            "_docs": """
                These weights are composed of the original frozen `SWAG <https://arxiv.org/abs/2201.08371>`_ trunk
                weights and a linear classifier learnt on top of them trained on ImageNet-1K data.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V1


class ViT_B_32_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/vit_b_32-d86f8d99.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 88224232,
            "min_size": (224, 224),
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#vit_b_32",
            "_metrics": {"ImageNet-1K": {"acc@1": 75.912, "acc@5": 92.466}},
            "_ops": 4.409,
            "_file_size": 336.604,
            "_docs": """
                These weights were trained from scratch by using a modified version of `DeIT
                <https://arxiv.org/abs/2012.12877>`_'s training recipe.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V1


class ViT_L_16_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/vit_l_16-852ce7e3.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=242),
        meta={
            **_COMMON_META,
            "num_params": 304326632,
            "min_size": (224, 224),
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#vit_l_16",
            "_metrics": {"ImageNet-1K": {"acc@1": 79.662, "acc@5": 94.638}},
            "_ops": 61.555,
            "_file_size": 1161.023,
            "_docs": """
                These weights were trained from scratch by using a modified version of TorchVision's
                `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    IMAGENET1K_SWAG_E2E_V1 = Weights(
        url="https://download.pytorch.org/models/vit_l_16_swag-4f3808c9.pth",
        transforms=partial(
            ImageClassification, crop_size=512, resize_size=512, interpolation=InterpolationMode.BICUBIC
        ),
        meta={
            **_COMMON_SWAG_META,
            "num_params": 305174504,
            "min_size": (512, 512),
            "_metrics": {"ImageNet-1K": {"acc@1": 88.064, "acc@5": 98.512}},
            "_ops": 361.986,
            "_file_size": 1164.258,
            "_docs": """
                These weights are learnt via transfer learning by end-to-end fine-tuning the original
                `SWAG <https://arxiv.org/abs/2201.08371>`_ weights on ImageNet-1K data.
            """,
        },
    )
    IMAGENET1K_SWAG_LINEAR_V1 = Weights(
        url="https://download.pytorch.org/models/vit_l_16_lc_swag-4d563306.pth",
        transforms=partial(
            ImageClassification, crop_size=224, resize_size=224, interpolation=InterpolationMode.BICUBIC
        ),
        meta={
            **_COMMON_SWAG_META,
            "recipe": "https://github.com/pytorch/vision/pull/5793",
            "num_params": 304326632,
            "min_size": (224, 224),
            "_metrics": {"ImageNet-1K": {"acc@1": 85.146, "acc@5": 97.422}},
            "_ops": 61.555,
            "_file_size": 1161.023,
            "_docs": """
                These weights are composed of the original frozen `SWAG <https://arxiv.org/abs/2201.08371>`_ trunk
                weights and a linear classifier learnt on top of them trained on ImageNet-1K data.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V1


class ViT_L_32_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/vit_l_32-c7638314.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 306535400,
            "min_size": (224, 224),
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#vit_l_32",
            "_metrics": {"ImageNet-1K": {"acc@1": 76.972, "acc@5": 93.07}},
            "_ops": 15.378,
            "_file_size": 1169.449,
            "_docs": """
                These weights were trained from scratch by using a modified version of `DeIT
                <https://arxiv.org/abs/2012.12877>`_'s training recipe.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V1


class ViT_H_14_Weights(WeightsEnum):
    IMAGENET1K_SWAG_E2E_V1 = Weights(
        url="https://download.pytorch.org/models/vit_h_14_swag-80465313.pth",
        transforms=partial(
            ImageClassification, crop_size=518, resize_size=518, interpolation=InterpolationMode.BICUBIC
        ),
        meta={
            **_COMMON_SWAG_META,
            "num_params": 633470440,
            "min_size": (518, 518),
            "_metrics": {"ImageNet-1K": {"acc@1": 88.552, "acc@5": 98.694}},
            "_ops": 1016.717,
            "_file_size": 2416.643,
            "_docs": """
                These weights are learnt via transfer learning by end-to-end fine-tuning the original
                `SWAG <https://arxiv.org/abs/2201.08371>`_ weights on ImageNet-1K data.
            """,
        },
    )
    IMAGENET1K_SWAG_LINEAR_V1 = Weights(
        url="https://download.pytorch.org/models/vit_h_14_lc_swag-c1eb923e.pth",
        transforms=partial(
            ImageClassification, crop_size=224, resize_size=224, interpolation=InterpolationMode.BICUBIC
        ),
        meta={
            **_COMMON_SWAG_META,
            "recipe": "https://github.com/pytorch/vision/pull/5793",
            "num_params": 632045800,
            "min_size": (224, 224),
            "_metrics": {"ImageNet-1K": {"acc@1": 85.708, "acc@5": 97.730}},
            "_ops": 167.295,
            "_file_size": 2411.209,
            "_docs": """
                These weights are composed of the original frozen `SWAG <https://arxiv.org/abs/2201.08371>`_ trunk
                weights and a linear classifier learnt on top of them trained on ImageNet-1K data.
            """,
        },
    )
    DEFAULT = IMAGENET1K_SWAG_E2E_V1


@register_model()
@handle_legacy_interface(weights=("pretrained", ViT_B_16_Weights.IMAGENET1K_V1))
def vit_b_16(*, weights: Optional[ViT_B_16_Weights] = None, progress: bool = True, **kwargs: Any) -> VisionTransformer:
    """
    Constructs a vit_b_16 architecture from
    `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale <https://arxiv.org/abs/2010.11929>`_.

    Args:
        weights (:class:`~torchvision.models.ViT_B_16_Weights`, optional): The pretrained
            weights to use. See :class:`~torchvision.models.ViT_B_16_Weights`
            below for more details and possible values. By default, no pre-trained weights are used.
        progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.vision_transformer.VisionTransformer``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/vision_transformer.py>`_
            for more details about this class.
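
    A minimal usage sketch (the random input tensor is illustrative; real images
    should be preprocessed with ``ViT_B_16_Weights.IMAGENET1K_V1.transforms()``)::

        model = vit_b_16(weights=ViT_B_16_Weights.IMAGENET1K_V1)
        model.eval()
        with torch.no_grad():
            logits = model(torch.rand(1, 3, 224, 224))  # (1, 1000) ImageNet scores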

    .. autoclass:: torchvision.models.ViT_B_16_Weights
        :members:
    """
    weights = ViT_B_16_Weights.verify(weights)

    return _vision_transformer(
        patch_size=16,
        num_layers=12,
        num_heads=12,
        hidden_dim=768,
        mlp_dim=3072,
        weights=weights,
        progress=progress,
        **kwargs,
    )


@register_model()
@handle_legacy_interface(weights=("pretrained", ViT_B_32_Weights.IMAGENET1K_V1))
def vit_b_32(*, weights: Optional[ViT_B_32_Weights] = None, progress: bool = True, **kwargs: Any) -> VisionTransformer:
    """
    Constructs a vit_b_32 architecture from
    `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale <https://arxiv.org/abs/2010.11929>`_.

    Args:
        weights (:class:`~torchvision.models.ViT_B_32_Weights`, optional): The pretrained
            weights to use. See :class:`~torchvision.models.ViT_B_32_Weights`
            below for more details and possible values. By default, no pre-trained weights are used.
        progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.vision_transformer.VisionTransformer``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/vision_transformer.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.ViT_B_32_Weights
        :members:
    """
    weights = ViT_B_32_Weights.verify(weights)

    return _vision_transformer(
        patch_size=32,
        num_layers=12,
        num_heads=12,
        hidden_dim=768,
        mlp_dim=3072,
        weights=weights,
        progress=progress,
        **kwargs,
    )


@register_model()
@handle_legacy_interface(weights=("pretrained", ViT_L_16_Weights.IMAGENET1K_V1))
def vit_l_16(*, weights: Optional[ViT_L_16_Weights] = None, progress: bool = True, **kwargs: Any) -> VisionTransformer:
    """
    Constructs a vit_l_16 architecture from
    `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale <https://arxiv.org/abs/2010.11929>`_.

    Args:
        weights (:class:`~torchvision.models.ViT_L_16_Weights`, optional): The pretrained
            weights to use. See :class:`~torchvision.models.ViT_L_16_Weights`
            below for more details and possible values. By default, no pre-trained weights are used.
        progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.vision_transformer.VisionTransformer``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/vision_transformer.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.ViT_L_16_Weights
        :members:
    """
    weights = ViT_L_16_Weights.verify(weights)

    return _vision_transformer(
        patch_size=16,
        num_layers=24,
        num_heads=16,
        hidden_dim=1024,
        mlp_dim=4096,
        weights=weights,
        progress=progress,
        **kwargs,
    )


@register_model()
@handle_legacy_interface(weights=("pretrained", ViT_L_32_Weights.IMAGENET1K_V1))
def vit_l_32(*, weights: Optional[ViT_L_32_Weights] = None, progress: bool = True, **kwargs: Any) -> VisionTransformer:
    """
    Constructs a vit_l_32 architecture from
    `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale <https://arxiv.org/abs/2010.11929>`_.

    Args:
        weights (:class:`~torchvision.models.ViT_L_32_Weights`, optional): The pretrained
            weights to use. See :class:`~torchvision.models.ViT_L_32_Weights`
            below for more details and possible values. By default, no pre-trained weights are used.
        progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.vision_transformer.VisionTransformer``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/vision_transformer.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.ViT_L_32_Weights
        :members:
    """
    weights = ViT_L_32_Weights.verify(weights)

    return _vision_transformer(
        patch_size=32,
        num_layers=24,
        num_heads=16,
        hidden_dim=1024,
        mlp_dim=4096,
        weights=weights,
        progress=progress,
        **kwargs,
    )


@register_model()
@handle_legacy_interface(weights=("pretrained", None))
def vit_h_14(*, weights: Optional[ViT_H_14_Weights] = None, progress: bool = True, **kwargs: Any) -> VisionTransformer:
    """
    Constructs a vit_h_14 architecture from
    `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale <https://arxiv.org/abs/2010.11929>`_.

    Args:
        weights (:class:`~torchvision.models.ViT_H_14_Weights`, optional): The pretrained
            weights to use. See :class:`~torchvision.models.ViT_H_14_Weights`
            below for more details and possible values. By default, no pre-trained weights are used.
        progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.vision_transformer.VisionTransformer``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/vision_transformer.py>`_
            for more details about this class.
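
    A minimal usage sketch (:class:`~torchvision.models.ViT_H_14_Weights` only ships
    SWAG-based weights; the end-to-end variant expects 518x518 inputs, which its
    bundled ``transforms()`` produces, so the random tensor below is illustrative)::

        weights = ViT_H_14_Weights.IMAGENET1K_SWAG_E2E_V1
        model = vit_h_14(weights=weights)
        model.eval()
        with torch.no_grad():
            logits = model(torch.rand(1, 3, 518, 518))  # (1, 1000) ImageNet scores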

    .. autoclass:: torchvision.models.ViT_H_14_Weights
        :members:
    """
    weights = ViT_H_14_Weights.verify(weights)

    return _vision_transformer(
        patch_size=14,
        num_layers=32,
        num_heads=16,
        hidden_dim=1280,
        mlp_dim=5120,
        weights=weights,
        progress=progress,
        **kwargs,
    )


def interpolate_embeddings(
    image_size: int,
    patch_size: int,
    model_state: "OrderedDict[str, torch.Tensor]",
    interpolation_mode: str = "bicubic",
    reset_heads: bool = False,
) -> "OrderedDict[str, torch.Tensor]":
    """This function helps interpolate positional embeddings during checkpoint loading,
    especially when you want to apply a pre-trained model on images with different resolution.

    Args:
        image_size (int): Image size of the new model.
        patch_size (int): Patch size of the new model.
        model_state (OrderedDict[str, torch.Tensor]): State dict of the pre-trained model.
        interpolation_mode (str): The algorithm used for upsampling. Default: bicubic.
        reset_heads (bool): If true, not copying the state of heads. Default: False.

    Returns:
        OrderedDict[str, torch.Tensor]: A state dict which can be loaded into the new model.
    """
    # Shape of pos_embedding is (1, seq_length, hidden_dim)
    pos_embedding = model_state["encoder.pos_embedding"]
    n, seq_length, hidden_dim = pos_embedding.shape
    if n != 1:
        raise ValueError(f"Unexpected position embedding shape: {pos_embedding.shape}")

    new_seq_length = (image_size // patch_size) ** 2 + 1

    # Need to interpolate the weights for the position embedding.
    # We do this by reshaping the positions embeddings to a 2d grid, performing
    # an interpolation in the (h, w) space and then reshaping back to a 1d grid.
    if new_seq_length != seq_length:
        # The class token embedding shouldn't be interpolated, so we split it up.
        seq_length -= 1
        new_seq_length -= 1
        pos_embedding_token = pos_embedding[:, :1, :]
        pos_embedding_img = pos_embedding[:, 1:, :]

        # (1, seq_length, hidden_dim) -> (1, hidden_dim, seq_length)
        pos_embedding_img = pos_embedding_img.permute(0, 2, 1)
        seq_length_1d = int(math.sqrt(seq_length))
        if seq_length_1d * seq_length_1d != seq_length:
            raise ValueError(
                f"seq_length is not a perfect square! Instead got seq_length_1d * seq_length_1d = {seq_length_1d * seq_length_1d} and seq_length = {seq_length}"
            )

        # (1, hidden_dim, seq_length) -> (1, hidden_dim, seq_l_1d, seq_l_1d)
        pos_embedding_img = pos_embedding_img.reshape(1, hidden_dim, seq_length_1d, seq_length_1d)
        new_seq_length_1d = image_size // patch_size

        # Perform interpolation.
        # (1, hidden_dim, seq_l_1d, seq_l_1d) -> (1, hidden_dim, new_seq_l_1d, new_seq_l_1d)
        new_pos_embedding_img = nn.functional.interpolate(
            pos_embedding_img,
            size=new_seq_length_1d,
            mode=interpolation_mode,
            align_corners=True,
        )

        # (1, hidden_dim, new_seq_l_1d, new_seq_l_1d) -> (1, hidden_dim, new_seq_length)
        new_pos_embedding_img = new_pos_embedding_img.reshape(1, hidden_dim, new_seq_length)

        # (1, hidden_dim, new_seq_length) -> (1, new_seq_length, hidden_dim)
        new_pos_embedding_img = new_pos_embedding_img.permute(0, 2, 1)
        new_pos_embedding = torch.cat([pos_embedding_token, new_pos_embedding_img], dim=1)

        model_state["encoder.pos_embedding"] = new_pos_embedding

        if reset_heads:
            model_state_copy: "OrderedDict[str, torch.Tensor]" = OrderedDict()
            for k, v in model_state.items():
                if not k.startswith("heads"):
                    model_state_copy[k] = v
            model_state = model_state_copy

    return model_state