
import copy
import math
from collections.abc import Sequence
from dataclasses import dataclass
from functools import partial
from typing import Any, Callable, Optional, Union

import torch
from torch import nn, Tensor
from torchvision.ops import StochasticDepth

from ..ops.misc import Conv2dNormActivation, SqueezeExcitation
from ..transforms._presets import ImageClassification, InterpolationMode
from ..utils import _log_api_usage_once
from ._api import register_model, Weights, WeightsEnum
from ._meta import _IMAGENET_CATEGORIES
from ._utils import _make_divisible, _ovewrite_named_param, handle_legacy_interface


__all__ = [
    "EfficientNet",
    "EfficientNet_B0_Weights",
    "EfficientNet_B1_Weights",
    "EfficientNet_B2_Weights",
    "EfficientNet_B3_Weights",
    "EfficientNet_B4_Weights",
    "EfficientNet_B5_Weights",
    "EfficientNet_B6_Weights",
    "EfficientNet_B7_Weights",
    "EfficientNet_V2_S_Weights",
    "EfficientNet_V2_M_Weights",
    "EfficientNet_V2_L_Weights",
    "efficientnet_b0",
    "efficientnet_b1",
    "efficientnet_b2",
    "efficientnet_b3",
    "efficientnet_b4",
    "efficientnet_b5",
    "efficientnet_b6",
    "efficientnet_b7",
    "efficientnet_v2_s",
    "efficientnet_v2_m",
    "efficientnet_v2_l",
]

           e Zd ZU eed<   eed<   eed<   eed<   eed<   eed<   edej                  f   ed<   e	dd
edede
e   defd       Zy	)_MBConvConfigexpand_ratiokernelstrideinput_channelsout_channels
num_layers.blockNchannels
width_mult	min_valuereturnc                 "    t        | |z  d|      S )N   )r   )r;   r<   r=   s      ]/var/www/html/eduruby.in/venv/lib/python3.12/site-packages/torchvision/models/efficientnet.pyadjust_channelsz_MBConvConfig.adjust_channels9   s    x*4aCC    N)__name__
__module____qualname__float__annotations__intr   r
   Modulestaticmethodr   rB    rC   rA   r3   r3   /   so    KKOCN##D# D5 DXc] D^a D DrC   r3   c                        e Zd Z	 	 	 ddededededededed	ed
eedej                  f      ddf fdZ	e
ded	efd       Z xZS )MBConvConfigNr4   r5   r6   r7   r8   r9   r<   
depth_multr:   .r>   c
           	          | j                  ||      }| j                  ||      }| j                  ||      }|	t        }	t        
|   |||||||	       y rD   )rB   adjust_depthMBConvsuper__init__)selfr4   r5   r6   r7   r8   r9   r<   rP   r:   	__class__s             rA   rU   zMBConvConfig.__init__@   s`     --njI++L*E&&z:>
=Evv~|U_afgrC   c                 D    t        t        j                  | |z              S rD   )rJ   mathceil)r9   rP   s     rA   rR   zMBConvConfig.adjust_depthS   s    499Z*4566rC   )      ?r[   N)rE   rF   rG   rH   rJ   r   r   r
   rK   rU   rL   rR   __classcell__rW   s   @rA   rO   rO   >   s      48hh h 	h
 h h h h h bii01h 
h& 7 7% 7 7rC   rO   c                   h     e Zd Z	 ddededededededeed	ej                  f      d
df fdZ	 xZ
S )FusedMBConvConfigNr4   r5   r6   r7   r8   r9   r:   .r>   c           	      @    |t         }t        | 	  |||||||       y rD   )FusedMBConvrT   rU   )	rV   r4   r5   r6   r7   r8   r9   r:   rW   s	           rA   rU   zFusedMBConvConfig.__init__Z   s*     =Evv~|U_afgrC   rD   )rE   rF   rG   rH   rJ   r   r   r
   rK   rU   r\   r]   s   @rA   r_   r_   X   s|     59hh h 	h
 h h h bii01h 
h hrC   r_   c                        e Zd Zefdedededej                  f   dedej                  f   ddf
 fdZ	d	e
de
fd
Z xZS )rS   cnfstochastic_depth_prob
norm_layer.se_layerr>   Nc                 ~   t         	|           d|j                  cxk  rdk  st        d       t        d      |j                  dk(  xr |j                  |j
                  k(  | _        g }t        j                  }|j                  |j                  |j                        }||j                  k7  r)|j                  t        |j                  |d||             |j                  t        |||j                  |j                  |||             t        d|j                  dz        }|j                   |||t        t        j                  d      	             |j                  t        ||j
                  d|d              t        j                   | | _        t%        |d
      | _        |j
                  | _        y )Nr   r   illegal stride valuekernel_sizere   activation_layer)rj   r6   groupsre   rk      T)inplace)
activationrow)rT   rU   r6   
ValueErrorr7   r8   use_res_connectr
   SiLUrB   r4   appendr   r5   maxr   
Sequentialr:   r   stochastic_depth)
rV   rc   rd   re   rf   layersrk   expanded_channelssqueeze_channelsrW   s
            rA   rU   zMBConv.__init__j   s    	SZZ$1$344 %344"zzQY33E3EIYIY3Y"$77  //0B0BCDTDTU 2 22MM$&&% !)%5 	 !!JJzz(%!1
	
 q#"4"4"9:h02BwWYW^W^hlOmno 	 !3#3#3zlp	
 ]]F+
 /0Eu M,,rC   inputc                 l    | j                  |      }| j                  r| j                  |      }||z  }|S rD   r:   rr   rw   rV   r{   results      rA   forwardzMBConv.forward   7    E"**62FeOFrC   )rE   rF   rG   r   rO   rH   r   r
   rK   rU   r   r   r\   r]   s   @rA   rS   rS   i   sk     .?8-8-  %8- S"))^,	8-
 3		>*8- 
8-tV  rC   rS   c                   ^     e Zd Zdedededej                  f   ddf fdZde	de	fd	Z
 xZS )
ra   rc   rd   re   .r>   Nc           
      8   t         |           d|j                  cxk  rdk  st        d       t        d      |j                  dk(  xr |j                  |j
                  k(  | _        g }t        j                  }|j                  |j                  |j                        }||j                  k7  rh|j                  t        |j                  ||j                  |j                  ||             |j                  t        ||j
                  d|d              nH|j                  t        |j                  |j
                  |j                  |j                  ||             t        j                  | | _        t!        |d      | _        |j
                  | _        y )Nr   r   rh   rj   r6   re   rk   ri   rp   )rT   rU   r6   rq   r7   r8   rr   r
   rs   rB   r4   rt   r   r5   rv   r:   r   rw   )rV   rc   rd   re   rx   rk   ry   rW   s          rA   rU   zFusedMBConv.__init__   si    	SZZ$1$344 %344"zzQY33E3EIYIY3Y"$77//0B0BCDTDTU 2 22MM$&&% #

::)%5	 MM$%s'7'7QS]pt MM$&&$$ #

::)%5	 ]]F+
 /0Eu M,,rC   r{   c                 l    | j                  |      }| j                  r| j                  |      }||z  }|S rD   r}   r~   s      rA   r   zFusedMBConv.forward   r   rC   )rE   rF   rG   r_   rH   r   r
   rK   rU   r   r   r\   r]   s   @rA   ra   ra      sO    2-2-  %2- S"))^,	2-
 
2-hV  rC   ra   c                        e Zd Z	 	 	 	 ddeeeef      dededede	e
dej                  f      de	e   d	df fd
Zded	efdZded	efdZ xZS )r   Ninverted_residual_settingdropoutrd   num_classesre   .last_channelr>   c           
         t         |           t        |        |st        d      t	        |t
              r't        |D cg c]  }t	        |t               c}      st        d      |t        j                  }g }|d   j                  }	|j                  t        d|	dd|t        j                               t        d |D              }
d}|D ]  }g }t!        |j"                        D ]i  }t%        j$                  |      }|r|j&                  |_        d	|_        |t+        |      z  |
z  }|j                  |j-                  |||             |d	z  }k |j                  t        j.                  |         |d
   j&                  }||nd|z  }|j                  t        ||d	|t        j                               t        j.                  | | _        t        j2                  d	      | _        t        j.                  t        j6                  |d      t        j8                  ||            | _        | j=                         D ]  }t	        |t        j>                        rbt        j@                  jC                  |jD                  d       |jF                  Vt        j@                  jI                  |jF                         t	        |t        j                  t        jJ                  f      rSt        j@                  jM                  |jD                         t        j@                  jI                  |jF                         t	        |t        j8                        sdtO        jP                  |jR                        z  }t        j@                  jU                  |jD                  | |       t        j@                  jI                  |jF                          yc c}w )a  
        EfficientNet V1 and V2 main class

        Args:
            inverted_residual_setting (Sequence[Union[MBConvConfig, FusedMBConvConfig]]): Network structure
            dropout (float): The droupout probability
            stochastic_depth_prob (float): The stochastic depth probability
            num_classes (int): Number of classes
            norm_layer (Optional[Callable[..., nn.Module]]): Module specifying the normalization layer to use
            last_channel (int): The number of channels on the penultimate layer
        z1The inverted_residual_setting should not be emptyz:The inverted_residual_setting should be List[MBConvConfig]Nr      r   r   c              3   4   K   | ]  }|j                     y wrD   )r9   ).0rc   s     rA   	<genexpr>z(EfficientNet.__init__.<locals>.<genexpr>  s      UC Us   r   rm   ri   T)prn   fan_out)moder[   )+rT   rU   r   rq   
isinstancer   allr3   	TypeErrorr
   BatchNorm2dr7   rt   r   rs   sumranger9   copyr8   r6   rH   r:   rv   featuresAdaptiveAvgPool2davgpoolDropoutLinear
classifiermodulesConv2dinitkaiming_normal_weightbiaszeros_	GroupNormones_rY   sqrtout_featuresuniform_)rV   r   r   rd   r   re   r   srx   firstconv_output_channelstotal_stage_blocksstage_block_idrc   stage_	block_cnfsd_problastconv_input_channelslastconv_output_channelsm
init_rangerW   s                        rA   rU   zEfficientNet.__init__   s    ( 	D!(PQQ0(;;TUaZ=1UVXYYJ"$ %>a$@$O$O! ,!AR\oqovov	
 ! U;T UU, 	1C%'E3>>* $ IIcN	 /8/E/EI,'(I$ 0%2GGJ\\Y__YLM!#$ MM"--/0#	1( #<B"?"L"L3?3K<QRUlQl  '(%!#	
 v.++A.--JJ$/II.<

  	'A!RYY'''y'A66%GGNN166*A=>ahh'qvv&Aryy) 499Q^^#<<
  J;
Cqvv&	'w Vs   Oxc                     | j                  |      }| j                  |      }t        j                  |d      }| j	                  |      }|S )Nr   )r   r   torchflattenr   rV   r   s     rA   _forward_implzEfficientNet._forward_implM  s@    MM!LLOMM!QOOArC   c                 $    | j                  |      S rD   )r   r   s     rA   r   zEfficientNet.forwardW  s    !!!$$rC   )皙?i  NN)rE   rF   rG   r   r	   rO   r_   rH   rJ   r   r   r
   rK   rU   r   r   r   r\   r]   s   @rA   r   r      s    
 (+9=&*a'#+E,@Q2Q,R#Sa' a'  %	a'
 a' Xc299n56a' sma' 
a'Fv & % %F %rC   r   r   r   r   weightsprogresskwargsr>   c                     |#t        |dt        |j                  d                t        | |fd|i|}|"|j	                  |j                  |d             |S )Nr   
categoriesr   T)r   
check_hash)r   lenmetar   load_state_dictget_state_dict)r   r   r   r   r   r   models          rA   _efficientnetr   [  sf     fmSl9S5TU2Ga,aZ`aEg44hSW4XYLrC   archc                 F   | j                  d      rt        t        |j                  d      |j                  d            } |dddddd       |d	dd
ddd
       |d	dd
ddd
       |d	dd
ddd       |d	ddddd       |d	dd
ddd       |d	ddddd      g}d }||fS | j                  d      rbt	        dddddd
      t	        ddd
ddd      t	        ddd
ddd      t        ddd
ddd	      t        d	ddddd      t        d	dd
ddd      g}d}||fS | j                  d      rqt	        dddddd      t	        ddd
ddd      t	        ddd
ddd      t        ddd
ddd      t        d	ddddd      t        d	dd
dd d!      t        d	ddd d"d      g}d}||fS | j                  d#      rqt	        dddddd      t	        ddd
ddd      t	        ddd
dd$d      t        ddd
d$dd%      t        d	dddd&d'      t        d	dd
d&d(d)      t        d	ddd(d*d      g}d}||fS t        d+|        ),Nefficientnet_br<   rP   r<   rP   r   r             r         (   P   p      rm   @  r/   0   @         	         i   r0            i0     i   r1   `   
              i  zUnsupported model type )
startswithr   rO   popr_   rq   )r   r   
bneck_confr   r   s        rA   _efficientnet_confr   n  s   
 '(\fjj6N[a[e[efr[st
q!QB*q!QB*q!QB*q!QB*q!QC+q!QS!,q!QS!,%
! H %l22G 
,	-aAr2q1aAr2q1aAr2q1Aq"c1-Aq#sA.Aq#sB/%
! 4 %l223 
,	-aAr2q1aAr2q1aAr2q1Aq"c1-Aq#sB/Aq#sB/Aq#sA.%
!  %l22 
,	-aAr2q1aAr2q1aAr2q1Aq"c2.Aq#sB/Aq#sB/Aq#sA.%
!  %l22 24&9::rC   r   _COMMON_META)r   r   zUhttps://github.com/pytorch/vision/tree/main/references/classification#efficientnet-v1)min_sizerecipe)!   r   zUhttps://github.com/pytorch/vision/tree/main/references/classification#efficientnet-v2c                   l    e Zd Z ed eeddej                        i eddddd	id
ddd      Z	e	Z
y)r   zJhttps://download.pytorch.org/models/efficientnet_b0_rwightman-7f5810bc.pthr   r   	crop_sizeresize_sizeinterpolationidP ImageNet-1Kg?5^IlS@g5^IbW@zacc@1zacc@5gNbX9?g~jts4@1These weights are ported from the original paper.
num_params_metrics_ops
_file_size_docsurl
transformsr   NrE   rF   rG   r   r   r   r   BICUBIC_COMMON_META_V1IMAGENET1K_V1DEFAULTrM   rC   rA   r   r     a    X3CO`OhOh


!##   L
M( GrC   r   c                       e Zd Z ed eeddej                        i eddddd	id
ddd      Z	 ed eeddej                        i edddddd	id
ddd      ZeZy)r   zJhttps://download.pytorch.org/models/efficientnet_b1_rwightman-bac287d4.pth   r   r   iv r   g+S@gClW@r   gCl?gM">@r   r   r  z@https://download.pytorch.org/models/efficientnet_b1-c27df63c.pth   zOhttps://github.com/pytorch/vision/issues/3995#new-recipe-with-lr-wd-crop-tuninggʡS@gƻW@gA`">@$  
                These weights improve upon the results of the original paper by using a modified version of TorchVision's
                `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V2


class EfficientNet_B2_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/efficientnet_b2_rwightman-c35c1473.pth",
        transforms=partial(
            ImageClassification, crop_size=288, resize_size=288, interpolation=InterpolationMode.BICUBIC
        ),
        meta={
            **_COMMON_META_V1,
            "num_params": 9109994,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 80.608,
                    "acc@5": 95.310,
                }
            },
            "_ops": 1.09,
            "_file_size": 35.2,
            "_docs": """These weights are ported from the original paper.""",
        },
    )
    DEFAULT = IMAGENET1K_V1


class EfficientNet_B3_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/efficientnet_b3_rwightman-b3899882.pth",
        transforms=partial(
            ImageClassification, crop_size=300, resize_size=320, interpolation=InterpolationMode.BICUBIC
        ),
        meta={
            **_COMMON_META_V1,
            "num_params": 12233232,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 82.008,
                    "acc@5": 96.054,
                }
            },
            "_ops": 1.83,
            "_file_size": 47.2,
            "_docs": """These weights are ported from the original paper.""",
        },
    )
    DEFAULT = IMAGENET1K_V1


class EfficientNet_B4_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/efficientnet_b4_rwightman-23ab8bcd.pth",
        transforms=partial(
            ImageClassification, crop_size=380, resize_size=384, interpolation=InterpolationMode.BICUBIC
        ),
        meta={
            **_COMMON_META_V1,
            "num_params": 19341616,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 83.384,
                    "acc@5": 96.594,
                }
            },
            "_ops": 4.39,
            "_file_size": 74.5,
            "_docs": """These weights are ported from the original paper.""",
        },
    )
    DEFAULT = IMAGENET1K_V1


class EfficientNet_B5_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/efficientnet_b5_lukemelas-1a07897c.pth",
        transforms=partial(
            ImageClassification, crop_size=456, resize_size=456, interpolation=InterpolationMode.BICUBIC
        ),
        meta={
            **_COMMON_META_V1,
            "num_params": 30389784,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 83.444,
                    "acc@5": 96.628,
                }
            },
            "_ops": 10.27,
            "_file_size": 116.9,
            "_docs": """These weights are ported from the original paper.""",
        },
    )
    DEFAULT = IMAGENET1K_V1


class EfficientNet_B6_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/efficientnet_b6_lukemelas-24a108a5.pth",
        transforms=partial(
            ImageClassification, crop_size=528, resize_size=528, interpolation=InterpolationMode.BICUBIC
        ),
        meta={
            **_COMMON_META_V1,
            "num_params": 43040704,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 84.008,
                    "acc@5": 96.916,
                }
            },
            "_ops": 19.07,
            "_file_size": 165.4,
            "_docs": """These weights are ported from the original paper.""",
        },
    )
    DEFAULT = IMAGENET1K_V1


class EfficientNet_B7_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/efficientnet_b7_lukemelas-c5b4e57e.pth",
        transforms=partial(
            ImageClassification, crop_size=600, resize_size=600, interpolation=InterpolationMode.BICUBIC
        ),
        meta={
            **_COMMON_META_V1,
            "num_params": 66347960,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 84.122,
                    "acc@5": 96.908,
                }
            },
            "_ops": 37.75,
            "_file_size": 254.7,
            "_docs": """These weights are ported from the original paper.""",
        },
    )
    DEFAULT = IMAGENET1K_V1


class EfficientNet_V2_S_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/efficientnet_v2_s-dd5fe13b.pth",
        transforms=partial(
            ImageClassification,
            crop_size=384,
            resize_size=384,
            interpolation=InterpolationMode.BILINEAR,
        ),
        meta={
            **_COMMON_META_V2,
            "num_params": 21458488,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 84.228,
                    "acc@5": 96.878,
                }
            },
            "_ops": 8.37,
            "_file_size": 82.7,
            "_docs": """
                These weights improve upon the results of the original paper by using a modified version of TorchVision's
                `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V1


class EfficientNet_V2_M_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/efficientnet_v2_m-dc08266a.pth",
        transforms=partial(
            ImageClassification,
            crop_size=480,
            resize_size=480,
            interpolation=InterpolationMode.BILINEAR,
        ),
        meta={
            **_COMMON_META_V2,
            "num_params": 54139356,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 85.112,
                    "acc@5": 97.156,
                }
            },
            "_ops": 24.58,
            "_file_size": 208.0,
            "_docs": """
                These weights improve upon the results of the original paper by using a modified version of TorchVision's
                `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V1


class EfficientNet_V2_L_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/efficientnet_v2_l-59c71312.pth",
        transforms=partial(
            ImageClassification,
            crop_size=480,
            resize_size=480,
            interpolation=InterpolationMode.BICUBIC,
            mean=(0.5, 0.5, 0.5),
            std=(0.5, 0.5, 0.5),
        ),
        meta={
            **_COMMON_META_V2,
            "num_params": 118515272,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 85.808,
                    "acc@5": 97.788,
                }
            },
            "_ops": 56.08,
            "_file_size": 455.8,
            "_docs": """These weights are ported from the original paper.""",
        },
    )
    DEFAULT = IMAGENET1K_V1


@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_B0_Weights.IMAGENET1K_V1))
def efficientnet_b0(
    *, weights: Optional[EfficientNet_B0_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """EfficientNet B0 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_B0_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_B0_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
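
    Example (illustrative sketch): classify a single image with the pretrained weights and their
    bundled preprocessing; ``"dog.jpg"`` is a hypothetical local image file::

        from torchvision.io import read_image

        weights = EfficientNet_B0_Weights.DEFAULT
        model = efficientnet_b0(weights=weights).eval()
        preprocess = weights.transforms()
        batch = preprocess(read_image("dog.jpg")).unsqueeze(0)
        scores = model(batch).squeeze(0).softmax(0)
        class_id = int(scores.argmax())
        print(weights.meta["categories"][class_id], float(scores[class_id]))
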
    .. autoclass:: torchvision.models.EfficientNet_B0_Weights
        :members:
    """
    weights = EfficientNet_B0_Weights.verify(weights)

    inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_b0", width_mult=1.0, depth_mult=1.0)
    return _efficientnet(inverted_residual_setting, kwargs.pop("dropout", 0.2), last_channel, weights, progress, **kwargs)


@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_B1_Weights.IMAGENET1K_V1))
def efficientnet_b1(
    *, weights: Optional[EfficientNet_B1_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """EfficientNet B1 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_B1_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_B1_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
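
    Example (sketch): B1 ships two sets of weights; their metadata can be compared before picking one::

        for w in EfficientNet_B1_Weights:
            m = w.meta
            print(w.name, m["_metrics"]["ImageNet-1K"]["acc@1"], m["recipe"])
        model = efficientnet_b1(weights=EfficientNet_B1_Weights.IMAGENET1K_V2)
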
    .. autoclass:: torchvision.models.EfficientNet_B1_Weights
        :members:
    """
    weights = EfficientNet_B1_Weights.verify(weights)

    inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_b1", width_mult=1.0, depth_mult=1.1)
    return _efficientnet(inverted_residual_setting, kwargs.pop("dropout", 0.2), last_channel, weights, progress, **kwargs)


@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_B2_Weights.IMAGENET1K_V1))
def efficientnet_b2(
    *, weights: Optional[EfficientNet_B2_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """EfficientNet B2 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_B2_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_B2_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.EfficientNet_B2_Weights
        :members:
    """
    weights = EfficientNet_B2_Weights.verify(weights)

    inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_b2", width_mult=1.1, depth_mult=1.2)
    return _efficientnet(inverted_residual_setting, kwargs.pop("dropout", 0.3), last_channel, weights, progress, **kwargs)


@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_B3_Weights.IMAGENET1K_V1))
def efficientnet_b3(
    *, weights: Optional[EfficientNet_B3_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """EfficientNet B3 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_B3_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_B3_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
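
    Example (illustrative sketch of adapting the classifier head; the 10-class target is arbitrary)::

        # Random initialization with a custom head (num_classes can only be changed
        # when no pretrained weights are requested).
        model = efficientnet_b3(num_classes=10)

        # Or fine-tune: load the ImageNet weights and swap the final linear layer.
        model = efficientnet_b3(weights=EfficientNet_B3_Weights.IMAGENET1K_V1)
        model.classifier[1] = nn.Linear(model.classifier[1].in_features, 10)
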
    .. autoclass:: torchvision.models.EfficientNet_B3_Weights
        :members:
    """
    weights = EfficientNet_B3_Weights.verify(weights)

    inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_b3", width_mult=1.2, depth_mult=1.4)
    return _efficientnet(
        inverted_residual_setting, kwargs.pop("dropout", 0.3), last_channel, weights, progress, **kwargs
    )


@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_B4_Weights.IMAGENET1K_V1))
def efficientnet_b4(
    *, weights: Optional[EfficientNet_B4_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """EfficientNet B4 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_B4_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_B4_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
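
    Example (sketch): each B-variant is evaluated at its own resolution, which the bundled preset carries::

        preprocess = EfficientNet_B4_Weights.IMAGENET1K_V1.transforms()
        print(preprocess.crop_size, preprocess.resize_size)   # [380], [384] for B4
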
    .. autoclass:: torchvision.models.EfficientNet_B4_Weights
        :members:
    """
    weights = EfficientNet_B4_Weights.verify(weights)

    inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_b4", width_mult=1.4, depth_mult=1.8)
    return _efficientnet(
        inverted_residual_setting, kwargs.pop("dropout", 0.4), last_channel, weights, progress, **kwargs
    )


@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_B5_Weights.IMAGENET1K_V1))
def efficientnet_b5(
    *, weights: Optional[EfficientNet_B5_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """EfficientNet B5 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_B5_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_B5_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.EfficientNet_B5_Weights
        :members:
    """
    weights = EfficientNet_B5_Weights.verify(weights)

    inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_b5", width_mult=1.6, depth_mult=2.2)
    return _efficientnet(
        inverted_residual_setting,
        kwargs.pop("dropout", 0.4),
        last_channel,
        weights,
        progress,
        norm_layer=partial(nn.BatchNorm2d, eps=0.001, momentum=0.01),
        **kwargs,
    )


@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_B6_Weights.IMAGENET1K_V1))
def efficientnet_b6(
    *, weights: Optional[EfficientNet_B6_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """EfficientNet B6 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_B6_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_B6_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.EfficientNet_B6_Weights
        :members:
    """
    weights = EfficientNet_B6_Weights.verify(weights)

    inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_b6", width_mult=1.8, depth_mult=2.6)
    return _efficientnet(
        inverted_residual_setting,
        kwargs.pop("dropout", 0.5),
        last_channel,
        weights,
        progress,
        norm_layer=partial(nn.BatchNorm2d, eps=0.001, momentum=0.01),
        **kwargs,
    )


@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_B7_Weights.IMAGENET1K_V1))
def efficientnet_b7(
    *, weights: Optional[EfficientNet_B7_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """EfficientNet B7 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_B7_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_B7_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.EfficientNet_B7_Weights
        :members:
    """
    weights = EfficientNet_B7_Weights.verify(weights)

    inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_b7", width_mult=2.0, depth_mult=3.1)
    return _efficientnet(
        inverted_residual_setting,
        kwargs.pop("dropout", 0.5),
        last_channel,
        weights,
        progress,
        norm_layer=partial(nn.BatchNorm2d, eps=0.001, momentum=0.01),
        **kwargs,
    )


@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_V2_S_Weights.IMAGENET1K_V1))
def efficientnet_v2_s(
    *, weights: Optional[EfficientNet_V2_S_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """
    Constructs an EfficientNetV2-S architecture from
    `EfficientNetV2: Smaller Models and Faster Training <https://arxiv.org/abs/2104.00298>`_.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_V2_S_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_V2_S_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
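
    Example (sketch): the convolutional trunk can also be used on its own as a feature extractor::

        weights = EfficientNet_V2_S_Weights.DEFAULT
        model = efficientnet_v2_s(weights=weights).eval()
        x = torch.rand(1, 3, 384, 384)               # 384 matches the eval preset of these weights
        feats = model.features(x)                    # (1, 1280, 12, 12)
        embedding = model.avgpool(feats).flatten(1)  # (1, 1280)
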
    .. autoclass:: torchvision.models.EfficientNet_V2_S_Weights
        :members:
    """
    weights = EfficientNet_V2_S_Weights.verify(weights)

    inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_v2_s")
    return _efficientnet(
        inverted_residual_setting,
        kwargs.pop("dropout", 0.2),
        last_channel,
        weights,
        progress,
        norm_layer=partial(nn.BatchNorm2d, eps=1e-03),
        **kwargs,
    )


@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_V2_M_Weights.IMAGENET1K_V1))
def efficientnet_v2_m(
    *, weights: Optional[EfficientNet_V2_M_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """
    Constructs an EfficientNetV2-M architecture from
    `EfficientNetV2: Smaller Models and Faster Training <https://arxiv.org/abs/2104.00298>`_.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_V2_M_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_V2_M_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.EfficientNet_V2_M_Weights
        :members:
    """
    weights = EfficientNet_V2_M_Weights.verify(weights)

    inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_v2_m")
    return _efficientnet(
        inverted_residual_setting,
        kwargs.pop("dropout", 0.3),
        last_channel,
        weights,
        progress,
        norm_layer=partial(nn.BatchNorm2d, eps=1e-03),
        **kwargs,
    )


@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_V2_L_Weights.IMAGENET1K_V1))
def efficientnet_v2_l(
    *, weights: Optional[EfficientNet_V2_L_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """
    Constructs an EfficientNetV2-L architecture from
    `EfficientNetV2: Smaller Models and Faster Training <https://arxiv.org/abs/2104.00298>`_.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_V2_L_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_V2_L_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
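
    Example (sketch): a quick size check of the largest variant before committing to it::

        model = efficientnet_v2_l()   # random init, no download
        n_params = sum(p.numel() for p in model.parameters())
        print(f"{n_params / 1e6:.1f}M parameters")   # roughly 118.5M
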
    .. autoclass:: torchvision.models.EfficientNet_V2_L_Weights
        :members:
    """
    weights = EfficientNet_V2_L_Weights.verify(weights)

    inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_v2_l")
    return _efficientnet(
        inverted_residual_setting,
        kwargs.pop("dropout", 0.4),
        last_channel,
        weights,
        progress,
        norm_layer=partial(nn.BatchNorm2d, eps=1e-03),
        **kwargs,
    )