import warnings
from typing import Callable, List, Optional, Sequence, Tuple, Union

import torch
from torch import Tensor

from ..utils import _log_api_usage_once, _make_ntuple


interpolate = torch.nn.functional.interpolate


class FrozenBatchNorm2d(torch.nn.Module):
    """
    BatchNorm2d where the batch statistics and the affine parameters are fixed

    Args:
        num_features (int): Number of features ``C`` from an expected input of size ``(N, C, H, W)``
        eps (float): a value added to the denominator for numerical stability. Default: 1e-5
    """

    def __init__(self, num_features: int, eps: float = 1e-5):
        super().__init__()
        _log_api_usage_once(self)
        self.eps = eps
        self.register_buffer("weight", torch.ones(num_features))
        self.register_buffer("bias", torch.zeros(num_features))
        self.register_buffer("running_mean", torch.zeros(num_features))
        self.register_buffer("running_var", torch.ones(num_features))

    def _load_from_state_dict(
        self, state_dict: dict, prefix: str, local_metadata: dict, strict: bool,
        missing_keys: List[str], unexpected_keys: List[str], error_msgs: List[str],
    ):
        # ``num_batches_tracked`` is not used by the frozen statistics, so drop it
        # from checkpoints produced by a regular ``BatchNorm2d``.
        num_batches_tracked_key = prefix + "num_batches_tracked"
        if num_batches_tracked_key in state_dict:
            del state_dict[num_batches_tracked_key]

        super()._load_from_state_dict(
            state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
        )

    def forward(self, x: Tensor) -> Tensor:
        # Move the reshapes to the beginning of the computation to make the
        # expression fuser-friendly.
        w = self.weight.reshape(1, -1, 1, 1)
        b = self.bias.reshape(1, -1, 1, 1)
        rv = self.running_var.reshape(1, -1, 1, 1)
        rm = self.running_mean.reshape(1, -1, 1, 1)
        scale = w * (rv + self.eps).rsqrt()
        bias = b - rm * scale
        return x * scale + bias

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self.weight.shape[0]}, eps={self.eps})"
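

# A minimal illustrative sketch (not part of the upstream module): freezing the
# first BatchNorm layer of a torchvision backbone. ``resnet18`` and ``bn1`` are
# assumptions for the example; any ``BatchNorm2d`` state_dict works, since
# ``_load_from_state_dict`` above discards ``num_batches_tracked`` automatically.
#
#   from torchvision.models import resnet18
#   model = resnet18(weights="DEFAULT")
#   frozen = FrozenBatchNorm2d(model.bn1.num_features)
#   frozen.load_state_dict(model.bn1.state_dict())
#   model.bn1 = frozen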


class ConvNormActivation(torch.nn.Sequential):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, ...]] = 3,
        stride: Union[int, Tuple[int, ...]] = 1,
        padding: Optional[Union[int, Tuple[int, ...], str]] = None,
        groups: int = 1,
        norm_layer: Optional[Callable[..., torch.nn.Module]] = torch.nn.BatchNorm2d,
        activation_layer: Optional[Callable[..., torch.nn.Module]] = torch.nn.ReLU,
        dilation: Union[int, Tuple[int, ...]] = 1,
        inplace: Optional[bool] = True,
        bias: Optional[bool] = None,
        conv_layer: Callable[..., torch.nn.Module] = torch.nn.Conv2d,
    ) -> None:
        # Default to "same" padding for the given kernel size and dilation.
        if padding is None:
            if isinstance(kernel_size, int) and isinstance(dilation, int):
                padding = (kernel_size - 1) // 2 * dilation
            else:
                _conv_dim = len(kernel_size) if isinstance(kernel_size, Sequence) else len(dilation)
                kernel_size = _make_ntuple(kernel_size, _conv_dim)
                dilation = _make_ntuple(dilation, _conv_dim)
                padding = tuple((kernel_size[i] - 1) // 2 * dilation[i] for i in range(_conv_dim))
        # The convolution only needs a bias when no normalization follows it.
        if bias is None:
            bias = norm_layer is None

        layers = [
            conv_layer(
                in_channels, out_channels, kernel_size, stride, padding,
                dilation=dilation, groups=groups, bias=bias,
            )
        ]

        if norm_layer is not None:
            layers.append(norm_layer(out_channels))

        if activation_layer is not None:
            params = {} if inplace is None else {"inplace": inplace}
            layers.append(activation_layer(**params))
        super().__init__(*layers)
        _log_api_usage_once(self)
        self.out_channels = out_channels

        if self.__class__ == ConvNormActivation:
            warnings.warn(
                "Don't use ConvNormActivation directly, please use Conv2dNormActivation and "
                "Conv3dNormActivation instead."
            )


class Conv2dNormActivation(ConvNormActivation):
    """
    Configurable block used for Convolution2d-Normalization-Activation blocks.

    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the Convolution-Normalization-Activation block
        kernel_size (int, optional): Size of the convolving kernel. Default: 3
        stride (int, optional): Stride of the convolution. Default: 1
        padding (int, tuple or str, optional): Padding added to all four sides of the input. Default: None, in which case it will be calculated as ``padding = (kernel_size - 1) // 2 * dilation``
        groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
        norm_layer (Callable[..., torch.nn.Module], optional): Norm layer that will be stacked on top of the convolution layer. If ``None`` this layer won't be used. Default: ``torch.nn.BatchNorm2d``
        activation_layer (Callable[..., torch.nn.Module], optional): Activation function which will be stacked on top of the normalization layer (if not None), otherwise on top of the conv layer. If ``None`` this layer won't be used. Default: ``torch.nn.ReLU``
        dilation (int): Spacing between kernel elements. Default: 1
        inplace (bool): Parameter for the activation layer, which can optionally do the operation in-place. Default ``True``
        bias (bool, optional): Whether to use bias in the convolution layer. By default, biases are included if ``norm_layer is None``.

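    Example (an illustrative sketch; shapes and channel counts are arbitrary)::

        >>> block = Conv2dNormActivation(3, 16, kernel_size=3, stride=2)
        >>> out = block(torch.rand(1, 3, 32, 32))
        >>> out.shape
        torch.Size([1, 16, 16, 16])
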
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]] = 3,
        stride: Union[int, Tuple[int, int]] = 1,
        padding: Optional[Union[int, Tuple[int, int], str]] = None,
        groups: int = 1,
        norm_layer: Optional[Callable[..., torch.nn.Module]] = torch.nn.BatchNorm2d,
        activation_layer: Optional[Callable[..., torch.nn.Module]] = torch.nn.ReLU,
        dilation: Union[int, Tuple[int, int]] = 1,
        inplace: Optional[bool] = True,
        bias: Optional[bool] = None,
    ) -> None:
        super().__init__(
            in_channels, out_channels, kernel_size, stride, padding, groups,
            norm_layer, activation_layer, dilation, inplace, bias, torch.nn.Conv2d,
        )


class Conv3dNormActivation(ConvNormActivation):
    """
    Configurable block used for Convolution3d-Normalization-Activation blocks.

    Args:
        in_channels (int): Number of channels in the input video.
        out_channels (int): Number of channels produced by the Convolution-Normalization-Activation block
        kernel_size (int, optional): Size of the convolving kernel. Default: 3
        stride (int, optional): Stride of the convolution. Default: 1
        padding (int, tuple or str, optional): Padding added to all sides of the input. Default: None, in which case it will be calculated as ``padding = (kernel_size - 1) // 2 * dilation``
        groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
        norm_layer (Callable[..., torch.nn.Module], optional): Norm layer that will be stacked on top of the convolution layer. If ``None`` this layer won't be used. Default: ``torch.nn.BatchNorm3d``
        activation_layer (Callable[..., torch.nn.Module], optional): Activation function which will be stacked on top of the normalization layer (if not None), otherwise on top of the conv layer. If ``None`` this layer won't be used. Default: ``torch.nn.ReLU``
        dilation (int): Spacing between kernel elements. Default: 1
        inplace (bool): Parameter for the activation layer, which can optionally do the operation in-place. Default ``True``
        bias (bool, optional): Whether to use bias in the convolution layer. By default, biases are included if ``norm_layer is None``.
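
    Example (an illustrative sketch; shapes and channel counts are arbitrary)::

        >>> block = Conv3dNormActivation(3, 8, kernel_size=3)
        >>> out = block(torch.rand(1, 3, 8, 32, 32))
        >>> out.shape
        torch.Size([1, 8, 8, 32, 32])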
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int, int]] = 3,
        stride: Union[int, Tuple[int, int, int]] = 1,
        padding: Optional[Union[int, Tuple[int, int, int], str]] = None,
        groups: int = 1,
        norm_layer: Optional[Callable[..., torch.nn.Module]] = torch.nn.BatchNorm3d,
        activation_layer: Optional[Callable[..., torch.nn.Module]] = torch.nn.ReLU,
        dilation: Union[int, Tuple[int, int, int]] = 1,
        inplace: Optional[bool] = True,
        bias: Optional[bool] = None,
    ) -> None:
        super().__init__(
            in_channels, out_channels, kernel_size, stride, padding, groups,
            norm_layer, activation_layer, dilation, inplace, bias, torch.nn.Conv3d,
        )


class SqueezeExcitation(torch.nn.Module):
    """
    This block implements the Squeeze-and-Excitation block from https://arxiv.org/abs/1709.01507 (see Fig. 1).
    Parameters ``activation`` and ``scale_activation`` correspond to ``delta`` and ``sigma`` in eq. 3.

    Args:
        input_channels (int): Number of channels in the input image
        squeeze_channels (int): Number of squeeze channels
        activation (Callable[..., torch.nn.Module], optional): ``delta`` activation. Default: ``torch.nn.ReLU``
        scale_activation (Callable[..., torch.nn.Module]): ``sigma`` activation. Default: ``torch.nn.Sigmoid``
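
    Example (an illustrative sketch; the channel counts are arbitrary)::

        >>> se = SqueezeExcitation(input_channels=64, squeeze_channels=16)
        >>> out = se(torch.rand(1, 64, 32, 32))
        >>> out.shape
        torch.Size([1, 64, 32, 32])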
    """

    def __init__(
        self,
        input_channels: int,
        squeeze_channels: int,
        activation: Callable[..., torch.nn.Module] = torch.nn.ReLU,
        scale_activation: Callable[..., torch.nn.Module] = torch.nn.Sigmoid,
    ) -> None:
        super().__init__()
        _log_api_usage_once(self)
        self.avgpool = torch.nn.AdaptiveAvgPool2d(1)
        self.fc1 = torch.nn.Conv2d(input_channels, squeeze_channels, 1)
        self.fc2 = torch.nn.Conv2d(squeeze_channels, input_channels, 1)
        self.activation = activation()
        self.scale_activation = scale_activation()

    def _scale(self, input: Tensor) -> Tensor:
        scale = self.avgpool(input)
        scale = self.fc1(scale)
        scale = self.activation(scale)
        scale = self.fc2(scale)
        return self.scale_activation(scale)

    def forward(self, input: Tensor) -> Tensor:
        scale = self._scale(input)
        return scale * input


class MLP(torch.nn.Sequential):
    """This block implements the multi-layer perceptron (MLP) module.

    Args:
        in_channels (int): Number of channels of the input
        hidden_channels (List[int]): List of the hidden channel dimensions
        norm_layer (Callable[..., torch.nn.Module], optional): Norm layer that will be stacked on top of the linear layer. If ``None`` this layer won't be used. Default: ``None``
        activation_layer (Callable[..., torch.nn.Module], optional): Activation function which will be stacked on top of the normalization layer (if not None), otherwise on top of the linear layer. If ``None`` this layer won't be used. Default: ``torch.nn.ReLU``
        inplace (bool, optional): Parameter for the activation layer, which can optionally do the operation in-place.
            Default is ``None``, which uses the respective default values of the ``activation_layer`` and Dropout layer.
        bias (bool): Whether to use bias in the linear layer. Default ``True``
        dropout (float): The probability for the dropout layer. Default: 0.0
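
    Example (an illustrative sketch; the dimensions are arbitrary)::

        >>> mlp = MLP(in_channels=32, hidden_channels=[64, 10])
        >>> out = mlp(torch.rand(8, 32))
        >>> out.shape
        torch.Size([8, 10])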
    """

    def __init__(
        self,
        in_channels: int,
        hidden_channels: List[int],
        norm_layer: Optional[Callable[..., torch.nn.Module]] = None,
        activation_layer: Optional[Callable[..., torch.nn.Module]] = torch.nn.ReLU,
        inplace: Optional[bool] = None,
        bias: bool = True,
        dropout: float = 0.0,
    ):
        # ``inplace`` is forwarded to both the activation and Dropout layers;
        # ``None`` keeps each layer's own default.
        params = {} if inplace is None else {"inplace": inplace}

        layers = []
        in_dim = in_channels
        for hidden_dim in hidden_channels[:-1]:
            layers.append(torch.nn.Linear(in_dim, hidden_dim, bias=bias))
            if norm_layer is not None:
                layers.append(norm_layer(hidden_dim))
            layers.append(activation_layer(**params))
            layers.append(torch.nn.Dropout(dropout, **params))
            in_dim = hidden_dim

        layers.append(torch.nn.Linear(in_dim, hidden_channels[-1], bias=bias))
        layers.append(torch.nn.Dropout(dropout, **params))

        super().__init__(*layers)
        _log_api_usage_once(self)


class Permute(torch.nn.Module):
    """This module returns a view of the tensor input with its dimensions permuted.

    Args:
        dims (List[int]): The desired ordering of dimensions
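
    Example (an illustrative sketch)::

        >>> permute = Permute([0, 2, 1])
        >>> permute(torch.rand(2, 3, 4)).shape
        torch.Size([2, 4, 3])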
    """

    def __init__(self, dims: List[int]):
        super().__init__()
        self.dims = dims

    def forward(self, x: Tensor) -> Tensor:
        return torch.permute(x, self.dims)