
    h5                        d dl Z d dlmZ d dlmZmZmZ d dlZd dlmZ ddl	m
Z
mZ ej                  j                  j                  Z G d dej                  j                        Z G d	 d
ej                  j"                        Z G d de      Z G d de      Z G d dej                  j                        Z G d dej                  j"                        Z G d dej                  j                        Zy)    N)Sequence)CallableOptionalUnion)Tensor   )_log_api_usage_once_make_ntuplec                        e Zd ZdZ	 ddedef fdZdededede	d	e
e   d
e
e   de
e   f fdZdedefdZdefdZ xZS )FrozenBatchNorm2da!  
    BatchNorm2d where the batch statistics and the affine parameters are fixed

    Args:
        num_features (int): Number of features ``C`` from an expected input of size ``(N, C, H, W)``
        eps (float): a value added to the denominator for numerical stability. Default: 1e-5
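
    Example:

        A minimal usage sketch; the channel count and input shape below are
        illustrative assumptions, not part of the API::

            >>> bn = FrozenBatchNorm2d(64)
            >>> out = bn(torch.rand(2, 64, 8, 8))  # stats stay frozen in train() and eval()
            >>> out.shape
            torch.Size([2, 64, 8, 8])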
    num_featuresepsc                 n   t         |           t        |        || _        | j	                  dt        j                  |             | j	                  dt        j                  |             | j	                  dt        j                  |             | j	                  dt        j                  |             y )Nweightbiasrunning_meanrunning_var)super__init__r	   r   register_buffertorchoneszeros)selfr   r   	__class__s      R/var/www/html/eduruby.in/venv/lib/python3.12/site-packages/torchvision/ops/misc.pyr   zFrozenBatchNorm2d.__init__   s    
 	D!Xuzz,'?@VU[[%>?^U[[-FG]EJJ|,DE    
state_dictprefixlocal_metadatastrictmissing_keysunexpected_keys
error_msgsc           	      H    |dz   }||v r||= t         	|   |||||||       y )Nnum_batches_tracked)r   _load_from_state_dict)
r   r   r   r    r!   r"   r#   r$   num_batches_tracked_keyr   s
            r   r'   z'FrozenBatchNorm2d._load_from_state_dict$   s?     #)+@"@"j023%oWa	
r   xreturnc                 R   | j                   j                  dddd      }| j                  j                  dddd      }| j                  j                  dddd      }| j                  j                  dddd      }||| j
                  z   j                         z  }|||z  z
  }||z  |z   S )N   )r   reshaper   r   r   r   rsqrt)r   r)   wbrvrmscaler   s           r   forwardzFrozenBatchNorm2d.forward6   s     KK2q!,IIaQ*%%aQ2&&q"a3R$((]))++2:~5y4r   c                     | j                   j                   d| j                  j                  d    d| j                   dS )N(r   z, eps=))r   __name__r   shaper   )r   s    r   __repr__zFrozenBatchNorm2d.__repr__A   s;    ..))*!DKK,=,=a,@+AzQRSSr   )gh㈵>)r9   
__module____qualname____doc__intfloatr   dictstrboollistr'   r   r5   r;   __classcell__r   s   @r   r   r      s     FF F

 
 	

 
 3i
 c
 I
$	  	 F 	 T# Tr   r   c                       e Zd Zddddej                  j
                  ej                  j                  dddej                  j                  f
dedede	ee
edf   f   d	e	ee
edf   f   d
ee	ee
edf   ef      dedeedej                  j                  f      deedej                  j                  f      de	ee
edf   f   dee   dee   dedej                  j                  f   ddf fdZ xZS )ConvNormActivation   r,   NTin_channelsout_channelskernel_size.stridepaddinggroups
norm_layeractivation_layerdilationinplacer   
conv_layerr*   c           
      N  	 |t        t              rt        	t              rdz
  dz  	z  }n\t        t              rt              n
t        	      }t	        |      t	        	|      	t        	fdt        |      D              }||d u } |||||	||      g}||j                   ||             ||
i nd|
i}|j                   |di |       t        | $  |  t        |        || _        | j                  t        k(  rt        j                  d       y y )Nr,   r   c              3   @   K   | ]  }|   d z
  dz  |   z    yw)r,   r   N ).0irR   rL   s     r   	<genexpr>z.ConvNormActivation.__init__.<locals>.<genexpr>]   s(     bAQ!!3 9HQK Gbs   )rR   rO   r   rS   zhDon't use ConvNormActivation directly, please use Conv2dNormActivation and Conv3dNormActivation instead.rW   )
isinstancer?   r   lenr
   tuplerangeappendr   r   r	   rK   r   rH   warningswarn)r   rJ   rK   rL   rM   rN   rO   rP   rQ   rR   rS   r   rT   	_conv_dimlayersparamsr   s      `     `      r   r   zConvNormActivation.__init__F   s5     ?+s+
8S0I&?q08;0:;0QC,WZ[cWd	*;	B')<bQVW`Qabb<%D !	
 !MM*\23'"?RG0DFMM*4V45&!D!(>>//MMz 0r   )r9   r<   r=   r   nnBatchNorm2dReLUConv2dr?   r   r]   r   rB   r   ModulerC   r   rE   rF   s   @r   rH   rH   E   s^   
 45./>B?Dxx?S?SEJXX]]01"&#5:XX__55 5 3c3h/0	5
 c5c?*+5 %U38_c 9:;5 5 Xc588??&:;<5 #8C,@#AB5 U38_,-5 $5 tn5 S%((//125 
5 5r   rH   c                       e Zd ZdZddddej
                  j                  ej
                  j                  dddf	dedede	ee
eef   f   d	e	ee
eef   f   d
ee	ee
eef   ef      dedeedej
                  j                  f      deedej
                  j                  f      de	ee
eef   f   dee   dee   ddf fdZ xZS )Conv2dNormActivationa  
    Configurable block used for Convolution2d-Normalization-Activation blocks.

    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the Convolution-Normalization-Activation block
        kernel_size (int, optional): Size of the convolving kernel. Default: 3
        stride (int, optional): Stride of the convolution. Default: 1
        padding (int, tuple or str, optional): Padding added to all four sides of the input. Default: None, in which case it will be calculated as ``padding = (kernel_size - 1) // 2 * dilation``
        groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
        norm_layer (Callable[..., torch.nn.Module], optional): Norm layer that will be stacked on top of the convolution layer. If ``None`` this layer won't be used. Default: ``torch.nn.BatchNorm2d``
        activation_layer (Callable[..., torch.nn.Module], optional): Activation function which will be stacked on top of the normalization layer (if not None), otherwise on top of the conv layer. If ``None`` this layer won't be used. Default: ``torch.nn.ReLU``
        dilation (int): Spacing between kernel elements. Default: 1
        inplace (bool): Parameter for the activation layer, which can optionally do the operation in-place. Default ``True``
        bias (bool, optional): Whether to use bias in the convolution layer. By default, biases are included if ``norm_layer is None``.
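
    Example:

        A minimal sketch; the channel counts and input shape are illustrative
        assumptions::

            >>> block = Conv2dNormActivation(3, 16, kernel_size=3, stride=2)
            >>> out = block(torch.rand(1, 3, 32, 32))  # Conv2d -> BatchNorm2d -> ReLU
            >>> out.shape
            torch.Size([1, 16, 16, 16])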

    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, tuple[int, int]] = 3,
        stride: Union[int, tuple[int, int]] = 1,
        padding: Optional[Union[int, tuple[int, int], str]] = None,
        groups: int = 1,
        norm_layer: Optional[Callable[..., torch.nn.Module]] = torch.nn.BatchNorm2d,
        activation_layer: Optional[Callable[..., torch.nn.Module]] = torch.nn.ReLU,
        dilation: Union[int, tuple[int, int]] = 1,
        inplace: Optional[bool] = True,
        bias: Optional[bool] = None,
    ) -> None:
        super().__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            padding,
            groups,
            norm_layer,
            activation_layer,
            dilation,
            inplace,
            bias,
            torch.nn.Conv2d,
        )


class Conv3dNormActivation(ConvNormActivation):
    """
    Configurable block used for Convolution3d-Normalization-Activation blocks.

    Args:
        in_channels (int): Number of channels in the input video.
        out_channels (int): Number of channels produced by the Convolution-Normalization-Activation block
        kernel_size (int, optional): Size of the convolving kernel. Default: 3
        stride (int, optional): Stride of the convolution. Default: 1
        padding (int, tuple or str, optional): Padding added to all four sides of the input. Default: None, in which case it will be calculated as ``padding = (kernel_size - 1) // 2 * dilation``
        groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
        norm_layer (Callable[..., torch.nn.Module], optional): Norm layer that will be stacked on top of the convolution layer. If ``None`` this layer won't be used. Default: ``torch.nn.BatchNorm3d``
        activation_layer (Callable[..., torch.nn.Module], optional): Activation function which will be stacked on top of the normalization layer (if not None), otherwise on top of the conv layer. If ``None`` this layer won't be used. Default: ``torch.nn.ReLU``
        dilation (int): Spacing between kernel elements. Default: 1
        inplace (bool): Parameter for the activation layer, which can optionally do the operation in-place. Default ``True``
        bias (bool, optional): Whether to use bias in the convolution layer. By default, biases are included if ``norm_layer is None``.
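
    Example:

        A minimal sketch; the channel counts and clip shape are illustrative
        assumptions::

            >>> block = Conv3dNormActivation(3, 8, kernel_size=3)
            >>> out = block(torch.rand(1, 3, 4, 32, 32))  # Conv3d -> BatchNorm3d -> ReLU
            >>> out.shape
            torch.Size([1, 8, 4, 32, 32])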
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, tuple[int, int, int]] = 3,
        stride: Union[int, tuple[int, int, int]] = 1,
        padding: Optional[Union[int, tuple[int, int, int], str]] = None,
        groups: int = 1,
        norm_layer: Optional[Callable[..., torch.nn.Module]] = torch.nn.BatchNorm3d,
        activation_layer: Optional[Callable[..., torch.nn.Module]] = torch.nn.ReLU,
        dilation: Union[int, tuple[int, int, int]] = 1,
        inplace: Optional[bool] = True,
        bias: Optional[bool] = None,
    ) -> None:
        super().__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            padding,
            groups,
            norm_layer,
            activation_layer,
            dilation,
            inplace,
            bias,
            torch.nn.Conv3d,
        )


class SqueezeExcitation(torch.nn.Module):
    """
    This block implements the Squeeze-and-Excitation block from https://arxiv.org/abs/1709.01507 (see Fig. 1).
    Parameters ``activation`` and ``scale_activation`` correspond to ``delta`` and ``sigma`` in eq. 3.

    Args:
        input_channels (int): Number of channels in the input image
        squeeze_channels (int): Number of squeeze channels
        activation (Callable[..., torch.nn.Module], optional): ``delta`` activation. Default: ``torch.nn.ReLU``
        scale_activation (Callable[..., torch.nn.Module]): ``sigma`` activation. Default: ``torch.nn.Sigmoid``
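
    Example:

        A minimal sketch; the channel counts are illustrative assumptions::

            >>> se = SqueezeExcitation(input_channels=64, squeeze_channels=16)
            >>> x = torch.rand(2, 64, 8, 8)
            >>> out = se(x)  # x rescaled per channel by sigmoid-gated weights
            >>> out.shape
            torch.Size([2, 64, 8, 8])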
    """

    def __init__(
        self,
        input_channels: int,
        squeeze_channels: int,
        activation: Callable[..., torch.nn.Module] = torch.nn.ReLU,
        scale_activation: Callable[..., torch.nn.Module] = torch.nn.Sigmoid,
    ) -> None:
        super().__init__()
        _log_api_usage_once(self)
        self.avgpool = torch.nn.AdaptiveAvgPool2d(1)
        self.fc1 = torch.nn.Conv2d(input_channels, squeeze_channels, 1)
        self.fc2 = torch.nn.Conv2d(squeeze_channels, input_channels, 1)
        self.activation = activation()
        self.scale_activation = scale_activation()

    def _scale(self, input: Tensor) -> Tensor:
        scale = self.avgpool(input)
        scale = self.fc1(scale)
        scale = self.activation(scale)
        scale = self.fc2(scale)
        return self.scale_activation(scale)

    def forward(self, input: Tensor) -> Tensor:
        scale = self._scale(input)
        return scale * input


class MLP(torch.nn.Sequential):
    """This block implements the multi-layer perceptron (MLP) module.

    Args:
        in_channels (int): Number of channels of the input
        hidden_channels (List[int]): List of the hidden channel dimensions
        norm_layer (Callable[..., torch.nn.Module], optional): Norm layer that will be stacked on top of the linear layer. If ``None`` this layer won't be used. Default: ``None``
        activation_layer (Callable[..., torch.nn.Module], optional): Activation function which will be stacked on top of the normalization layer (if not None), otherwise on top of the linear layer. If ``None`` this layer won't be used. Default: ``torch.nn.ReLU``
        inplace (bool, optional): Parameter for the activation layer, which can optionally do the operation in-place.
            Default is ``None``, which uses the respective default values of the ``activation_layer`` and Dropout layer.
        bias (bool): Whether to use bias in the linear layer. Default ``True``
        dropout (float): The probability for the dropout layer. Default: 0.0
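
    Example:

        A minimal sketch; the layer sizes and batch shape are illustrative
        assumptions::

            >>> mlp = MLP(in_channels=32, hidden_channels=[64, 10])
            >>> out = mlp(torch.rand(8, 32))  # Linear -> ReLU -> Dropout(p=0.0) -> Linear -> Dropout(p=0.0)
            >>> out.shape
            torch.Size([8, 10])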
    """

    def __init__(
        self,
        in_channels: int,
        hidden_channels: list[int],
        norm_layer: Optional[Callable[..., torch.nn.Module]] = None,
        activation_layer: Optional[Callable[..., torch.nn.Module]] = torch.nn.ReLU,
        inplace: Optional[bool] = None,
        bias: bool = True,
        dropout: float = 0.0,
    ):
        params = {} if inplace is None else {"inplace": inplace}

        layers = []
        in_dim = in_channels
        for hidden_dim in hidden_channels[:-1]:
            layers.append(torch.nn.Linear(in_dim, hidden_dim, bias=bias))
            if norm_layer is not None:
                layers.append(norm_layer(hidden_dim))
            layers.append(activation_layer(**params))
            layers.append(torch.nn.Dropout(dropout, **params))
            in_dim = hidden_dim

        layers.append(torch.nn.Linear(in_dim, hidden_channels[-1], bias=bias))
        layers.append(torch.nn.Dropout(dropout, **params))

        super().__init__(*layers)
        _log_api_usage_once(self)


class Permute(torch.nn.Module):
    """This module returns a view of the tensor input with its dimensions permuted.

    Args:
        dims (List[int]): The desired ordering of dimensions
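
    Example:

        A minimal sketch, moving channels last (NCHW to NHWC); the shape is an
        illustrative assumption::

            >>> p = Permute([0, 2, 3, 1])
            >>> p(torch.rand(1, 3, 4, 5)).shape
            torch.Size([1, 4, 5, 3])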
    """

    def __init__(self, dims: list[int]):
        super().__init__()
        self.dims = dims

    def forward(self, x: Tensor) -> Tensor:
        return torch.permute(x, self.dims)