import copy
import math
from dataclasses import dataclass
from functools import partial
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union

import torch
from torch import nn, Tensor
from torchvision.ops import StochasticDepth

from ..ops.misc import Conv2dNormActivation, SqueezeExcitation
from ..transforms._presets import ImageClassification, InterpolationMode
from ..utils import _log_api_usage_once
from ._api import register_model, Weights, WeightsEnum
from ._meta import _IMAGENET_CATEGORIES
from ._utils import _make_divisible, _ovewrite_named_param, handle_legacy_interface


__all__ = [
    "EfficientNet",
    "EfficientNet_B0_Weights",
    "EfficientNet_B1_Weights",
    "EfficientNet_B2_Weights",
    "EfficientNet_B3_Weights",
    "EfficientNet_B4_Weights",
    "EfficientNet_B5_Weights",
    "EfficientNet_B6_Weights",
    "EfficientNet_B7_Weights",
    "EfficientNet_V2_S_Weights",
    "EfficientNet_V2_M_Weights",
    "EfficientNet_V2_L_Weights",
    "efficientnet_b0",
    "efficientnet_b1",
    "efficientnet_b2",
    "efficientnet_b3",
    "efficientnet_b4",
    "efficientnet_b5",
    "efficientnet_b6",
    "efficientnet_b7",
    "efficientnet_v2_s",
    "efficientnet_v2_m",
    "efficientnet_v2_l",
]


@dataclass
class _MBConvConfig:
    expand_ratio: float
    kernel: int
    stride: int
    input_channels: int
    out_channels: int
    num_layers: int
    block: Callable[..., nn.Module]

    @staticmethod
    def adjust_channels(channels: int, width_mult: float, min_value: Optional[int] = None) -> int:
        return _make_divisible(channels * width_mult, 8, min_value)


class MBConvConfig(_MBConvConfig):
    # Stores the per-stage settings of the EfficientNet (V1) architecture table.
    def __init__(
        self,
        expand_ratio: float,
        kernel: int,
        stride: int,
        input_channels: int,
        out_channels: int,
        num_layers: int,
        width_mult: float = 1.0,
        depth_mult: float = 1.0,
        block: Optional[Callable[..., nn.Module]] = None,
    ) -> None:
        input_channels = self.adjust_channels(input_channels, width_mult)
        out_channels = self.adjust_channels(out_channels, width_mult)
        num_layers = self.adjust_depth(num_layers, depth_mult)
        if block is None:
            block = MBConv
        super().__init__(expand_ratio, kernel, stride, input_channels, out_channels, num_layers, block)

    @staticmethod
    def adjust_depth(num_layers: int, depth_mult: float) -> int:
        return int(math.ceil(num_layers * depth_mult))


class FusedMBConvConfig(_MBConvConfig):
    # Stores the per-stage settings of the EfficientNetV2 architecture table.
    def __init__(
        self,
        expand_ratio: float,
        kernel: int,
        stride: int,
        input_channels: int,
        out_channels: int,
        num_layers: int,
        block: Optional[Callable[..., nn.Module]] = None,
    ) -> None:
        if block is None:
            block = FusedMBConv
        super().__init__(expand_ratio, kernel, stride, input_channels, out_channels, num_layers, block)


class MBConv(nn.Module):
    def __init__(
        self,
        cnf: MBConvConfig,
        stochastic_depth_prob: float,
        norm_layer: Callable[..., nn.Module],
        se_layer: Callable[..., nn.Module] = SqueezeExcitation,
    ) -> None:
        super().__init__()

        if not (1 <= cnf.stride <= 2):
            raise ValueError("illegal stride value")

        self.use_res_connect = cnf.stride == 1 and cnf.input_channels == cnf.out_channels

        layers: List[nn.Module] = []
        activation_layer = nn.SiLU

        # expand
        expanded_channels = cnf.adjust_channels(cnf.input_channels, cnf.expand_ratio)
        if expanded_channels != cnf.input_channels:
            layers.append(
                Conv2dNormActivation(
                    cnf.input_channels,
                    expanded_channels,
                    kernel_size=1,
                    norm_layer=norm_layer,
                    activation_layer=activation_layer,
                )
            )

        # depthwise
        layers.append(
            Conv2dNormActivation(
                expanded_channels,
                expanded_channels,
                kernel_size=cnf.kernel,
                stride=cnf.stride,
                groups=expanded_channels,
                norm_layer=norm_layer,
                activation_layer=activation_layer,
            )
        )

        # squeeze and excitation
        squeeze_channels = max(1, cnf.input_channels // 4)
        layers.append(se_layer(expanded_channels, squeeze_channels, activation=partial(nn.SiLU, inplace=True)))

        # project
        layers.append(
            Conv2dNormActivation(
                expanded_channels, cnf.out_channels, kernel_size=1, norm_layer=norm_layer, activation_layer=None
            )
        )

        self.block = nn.Sequential(*layers)
        self.stochastic_depth = StochasticDepth(stochastic_depth_prob, "row")
        self.out_channels = cnf.out_channels

    def forward(self, input: Tensor) -> Tensor:
        result = self.block(input)
        if self.use_res_connect:
            result = self.stochastic_depth(result)
            result += input
        return result


class FusedMBConv(nn.Module):
    def __init__(
        self,
        cnf: FusedMBConvConfig,
        stochastic_depth_prob: float,
        norm_layer: Callable[..., nn.Module],
    ) -> None:
        super().__init__()

        if not (1 <= cnf.stride <= 2):
            raise ValueError("illegal stride value")

        self.use_res_connect = cnf.stride == 1 and cnf.input_channels == cnf.out_channels

        layers: List[nn.Module] = []
        activation_layer = nn.SiLU

        expanded_channels = cnf.adjust_channels(cnf.input_channels, cnf.expand_ratio)
        if expanded_channels != cnf.input_channels:
            # fused expand
            layers.append(
                Conv2dNormActivation(
                    cnf.input_channels,
                    expanded_channels,
                    kernel_size=cnf.kernel,
                    stride=cnf.stride,
                    norm_layer=norm_layer,
                    activation_layer=activation_layer,
                )
            )

            # project
            layers.append(
                Conv2dNormActivation(
                    expanded_channels, cnf.out_channels, kernel_size=1, norm_layer=norm_layer, activation_layer=None
                )
            )
        else:
            layers.append(
                Conv2dNormActivation(
                    cnf.input_channels,
                    cnf.out_channels,
                    kernel_size=cnf.kernel,
                    stride=cnf.stride,
                    norm_layer=norm_layer,
                    activation_layer=activation_layer,
                )
            )

        self.block = nn.Sequential(*layers)
        self.stochastic_depth = StochasticDepth(stochastic_depth_prob, "row")
        self.out_channels = cnf.out_channels

    def forward(self, input: Tensor) -> Tensor:
        result = self.block(input)
        if self.use_res_connect:
            result = self.stochastic_depth(result)
            result += input
        return result


class EfficientNet(nn.Module):
    def __init__(
        self,
        inverted_residual_setting: Sequence[Union[MBConvConfig, FusedMBConvConfig]],
        dropout: float,
        stochastic_depth_prob: float = 0.2,
        num_classes: int = 1000,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
        last_channel: Optional[int] = None,
    ) -> None:
        """
        EfficientNet V1 and V2 main class

        Args:
            inverted_residual_setting (Sequence[Union[MBConvConfig, FusedMBConvConfig]]): Network structure
            dropout (float): The dropout probability
            stochastic_depth_prob (float): The stochastic depth probability
            num_classes (int): Number of classes
            norm_layer (Optional[Callable[..., nn.Module]]): Module specifying the normalization layer to use
            last_channel (int): The number of channels on the penultimate layer
        """
        super().__init__()
        _log_api_usage_once(self)

        if not inverted_residual_setting:
            raise ValueError("The inverted_residual_setting should not be empty")
        elif not (
            isinstance(inverted_residual_setting, Sequence)
            and all([isinstance(s, _MBConvConfig) for s in inverted_residual_setting])
        ):
            raise TypeError("The inverted_residual_setting should be List[MBConvConfig]")

        if norm_layer is None:
            norm_layer = nn.BatchNorm2d

        layers: List[nn.Module] = []

        # building first layer
        firstconv_output_channels = inverted_residual_setting[0].input_channels
        layers.append(
            Conv2dNormActivation(
                3, firstconv_output_channels, kernel_size=3, stride=2, norm_layer=norm_layer, activation_layer=nn.SiLU
            )
        )

        # building inverted residual blocks
        total_stage_blocks = sum(cnf.num_layers for cnf in inverted_residual_setting)
        stage_block_id = 0
        for cnf in inverted_residual_setting:
            stage: List[nn.Module] = []
            for _ in range(cnf.num_layers):
                # copy to avoid modifications; a shallow copy is enough
                block_cnf = copy.copy(cnf)

                # overwrite info if not the first conv in the stage
                if stage:
                    block_cnf.input_channels = block_cnf.out_channels
                    block_cnf.stride = 1

                # adjust stochastic depth probability based on the depth of the stage block
                sd_prob = stochastic_depth_prob * float(stage_block_id) / total_stage_blocks

                stage.append(block_cnf.block(block_cnf, sd_prob, norm_layer))
                stage_block_id += 1

            layers.append(nn.Sequential(*stage))

        # building last several layers
        lastconv_input_channels = inverted_residual_setting[-1].out_channels
        lastconv_output_channels = last_channel if last_channel is not None else 4 * lastconv_input_channels
        layers.append(
            Conv2dNormActivation(
                lastconv_input_channels,
                lastconv_output_channels,
                kernel_size=1,
                norm_layer=norm_layer,
                activation_layer=nn.SiLU,
            )
        )

        self.features = nn.Sequential(*layers)
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Sequential(
            nn.Dropout(p=dropout, inplace=True),
            nn.Linear(lastconv_output_channels, num_classes),
        )

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out")
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                init_range = 1.0 / math.sqrt(m.out_features)
                nn.init.uniform_(m.weight, -init_range, init_range)
                nn.init.zeros_(m.bias)

    def _forward_impl(self, x: Tensor) -> Tensor:
        x = self.features(x)

        x = self.avgpool(x)
        x = torch.flatten(x, 1)

        x = self.classifier(x)

        return x

    def forward(self, x: Tensor) -> Tensor:
        return self._forward_impl(x)


def _efficientnet(
    inverted_residual_setting: Sequence[Union[MBConvConfig, FusedMBConvConfig]],
    dropout: float,
    last_channel: Optional[int],
    weights: Optional[WeightsEnum],
    progress: bool,
    **kwargs: Any,
) -> EfficientNet:
    if weights is not None:
        _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))

    model = EfficientNet(inverted_residual_setting, dropout, last_channel=last_channel, **kwargs)

    if weights is not None:
        model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))

    return model


def _efficientnet_conf(
    arch: str,
    **kwargs: Any,
) -> Tuple[Sequence[Union[MBConvConfig, FusedMBConvConfig]], Optional[int]]:
    inverted_residual_setting: Sequence[Union[MBConvConfig, FusedMBConvConfig]]
    if arch.startswith("efficientnet_b"):
        bneck_conf = partial(MBConvConfig, width_mult=kwargs.pop("width_mult"), depth_mult=kwargs.pop("depth_mult"))
        inverted_residual_setting = [
            bneck_conf(1, 3, 1, 32, 16, 1),
            bneck_conf(6, 3, 2, 16, 24, 2),
            bneck_conf(6, 5, 2, 24, 40, 2),
            bneck_conf(6, 3, 2, 40, 80, 3),
            bneck_conf(6, 5, 1, 80, 112, 3),
            bneck_conf(6, 5, 2, 112, 192, 4),
            bneck_conf(6, 3, 1, 192, 320, 1),
        ]
        last_channel = None
    elif arch.startswith("efficientnet_v2_s"):
        inverted_residual_setting = [
            FusedMBConvConfig(1, 3, 1, 24, 24, 2),
            FusedMBConvConfig(4, 3, 2, 24, 48, 4),
            FusedMBConvConfig(4, 3, 2, 48, 64, 4),
            MBConvConfig(4, 3, 2, 64, 128, 6),
            MBConvConfig(6, 3, 1, 128, 160, 9),
            MBConvConfig(6, 3, 2, 160, 256, 15),
        ]
        last_channel = 1280
    elif arch.startswith("efficientnet_v2_m"):
        inverted_residual_setting = [
            FusedMBConvConfig(1, 3, 1, 24, 24, 3),
            FusedMBConvConfig(4, 3, 2, 24, 48, 5),
            FusedMBConvConfig(4, 3, 2, 48, 80, 5),
            MBConvConfig(4, 3, 2, 80, 160, 7),
            MBConvConfig(6, 3, 1, 160, 176, 14),
            MBConvConfig(6, 3, 2, 176, 304, 18),
            MBConvConfig(6, 3, 1, 304, 512, 5),
        ]
        last_channel = 1280
    elif arch.startswith("efficientnet_v2_l"):
        inverted_residual_setting = [
            FusedMBConvConfig(1, 3, 1, 32, 32, 4),
            FusedMBConvConfig(4, 3, 2, 32, 64, 7),
            FusedMBConvConfig(4, 3, 2, 64, 96, 7),
            MBConvConfig(4, 3, 2, 96, 192, 10),
            MBConvConfig(6, 3, 1, 192, 224, 19),
            MBConvConfig(6, 3, 2, 224, 384, 25),
            MBConvConfig(6, 3, 1, 384, 640, 7),
        ]
        last_channel = 1280
    else:
        raise ValueError(f"Unsupported model type {arch}")

    return inverted_residual_setting, last_channel


_COMMON_META: Dict[str, Any] = {
    "categories": _IMAGENET_CATEGORIES,
}


_COMMON_META_V1 = {
    **_COMMON_META,
    "min_size": (1, 1),
    "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#efficientnet-v1",
}


_COMMON_META_V2 = {
    **_COMMON_META,
    "min_size": (33, 33),
    "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#efficientnet-v2",
}


class EfficientNet_B0_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/efficientnet_b0_rwightman-7f5810bc.pth",
        transforms=partial(
            ImageClassification, crop_size=224, resize_size=256, interpolation=InterpolationMode.BICUBIC
        ),
        meta={
            **_COMMON_META_V1,
            "num_params": 5288548,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 77.692,
                    "acc@5": 93.532,
                }
            },
            "_ops": 0.386,
            "_file_size": 20.451,
            "_docs": """These weights are ported from the original paper.""",
        },
    )
    DEFAULT = IMAGENET1K_V1


class EfficientNet_B1_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/efficientnet_b1_rwightman-bac287d4.pth",
        transforms=partial(
            ImageClassification, crop_size=240, resize_size=256, interpolation=InterpolationMode.BICUBIC
        ),
        meta={
            **_COMMON_META_V1,
            "num_params": 7794184,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 78.642,
                    "acc@5": 94.186,
                }
            },
            "_ops": 0.687,
            "_file_size": 30.134,
            "_docs": """These weights are ported from the original paper.""",
        },
    )
    IMAGENET1K_V2 = Weights(
        url="https://download.pytorch.org/models/efficientnet_b1-c27df63c.pth",
        transforms=partial(
            ImageClassification, crop_size=240, resize_size=255, interpolation=InterpolationMode.BILINEAR
        ),
        meta={
            **_COMMON_META_V1,
            "num_params": 7794184,
            "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-lr-wd-crop-tuning",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 79.838,
                    "acc@5": 94.934,
                }
            },
            "_ops": 0.687,
            "_file_size": 30.136,
            "_docs": """
                These weights improve upon the results of the original paper by using a modified version of TorchVision's
                `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V2


class EfficientNet_B2_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/efficientnet_b2_rwightman-c35c1473.pth",
        transforms=partial(
            ImageClassification, crop_size=288, resize_size=288, interpolation=InterpolationMode.BICUBIC
        ),
        meta={
            **_COMMON_META_V1,
            "num_params": 9109994,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 80.608,
                    "acc@5": 95.310,
                }
            },
            "_ops": 1.088,
            "_file_size": 35.174,
            "_docs": """These weights are ported from the original paper.""",
        },
    )
    DEFAULT = IMAGENET1K_V1


class EfficientNet_B3_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/efficientnet_b3_rwightman-b3899882.pth",
        transforms=partial(
            ImageClassification, crop_size=300, resize_size=320, interpolation=InterpolationMode.BICUBIC
        ),
        meta={
            **_COMMON_META_V1,
            "num_params": 12233232,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 82.008,
                    "acc@5": 96.054,
                }
            },
            "_ops": 1.827,
            "_file_size": 47.184,
            "_docs": """These weights are ported from the original paper.""",
        },
    )
    DEFAULT = IMAGENET1K_V1


class EfficientNet_B4_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/efficientnet_b4_rwightman-23ab8bcd.pth",
        transforms=partial(
            ImageClassification, crop_size=380, resize_size=384, interpolation=InterpolationMode.BICUBIC
        ),
        meta={
            **_COMMON_META_V1,
            "num_params": 19341616,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 83.384,
                    "acc@5": 96.594,
                }
            },
            "_ops": 4.394,
            "_file_size": 74.489,
            "_docs": """These weights are ported from the original paper.""",
        },
    )
    DEFAULT = IMAGENET1K_V1


class EfficientNet_B5_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/efficientnet_b5_lukemelas-1a07897c.pth",
        transforms=partial(
            ImageClassification, crop_size=456, resize_size=456, interpolation=InterpolationMode.BICUBIC
        ),
        meta={
            **_COMMON_META_V1,
            "num_params": 30389784,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 83.444,
                    "acc@5": 96.628,
                }
            },
            "_ops": 10.266,
            "_file_size": 116.864,
            "_docs": """These weights are ported from the original paper.""",
        },
    )
    DEFAULT = IMAGENET1K_V1


class EfficientNet_B6_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/efficientnet_b6_lukemelas-24a108a5.pth",
        transforms=partial(
            ImageClassification, crop_size=528, resize_size=528, interpolation=InterpolationMode.BICUBIC
        ),
        meta={
            **_COMMON_META_V1,
            "num_params": 43040704,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 84.008,
                    "acc@5": 96.916,
                }
            },
            "_ops": 19.068,
            "_file_size": 165.362,
            "_docs": """These weights are ported from the original paper.""",
        },
    )
    DEFAULT = IMAGENET1K_V1


class EfficientNet_B7_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/efficientnet_b7_lukemelas-c5b4e57e.pth",
        transforms=partial(
            ImageClassification, crop_size=600, resize_size=600, interpolation=InterpolationMode.BICUBIC
        ),
        meta={
            **_COMMON_META_V1,
            "num_params": 66347960,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 84.122,
                    "acc@5": 96.908,
                }
            },
            "_ops": 37.746,
            "_file_size": 254.675,
            "_docs": """These weights are ported from the original paper.""",
        },
    )
    DEFAULT = IMAGENET1K_V1


class EfficientNet_V2_S_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/efficientnet_v2_s-dd5fe13b.pth",
        transforms=partial(
            ImageClassification,
            crop_size=384,
            resize_size=384,
            interpolation=InterpolationMode.BILINEAR,
        ),
        meta={
            **_COMMON_META_V2,
            "num_params": 21458488,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 84.228,
                    "acc@5": 96.878,
                }
            },
            "_ops": 8.366,
            "_file_size": 82.704,
            "_docs": """
                These weights improve upon the results of the original paper by using a modified version of TorchVision's
                `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V1


class EfficientNet_V2_M_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/efficientnet_v2_m-dc08266a.pth",
        transforms=partial(
            ImageClassification,
            crop_size=480,
            resize_size=480,
            interpolation=InterpolationMode.BILINEAR,
        ),
        meta={
            **_COMMON_META_V2,
            "num_params": 54139356,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 85.112,
                    "acc@5": 97.156,
                }
            },
            "_ops": 24.582,
            "_file_size": 208.010,
            "_docs": """
                These weights improve upon the results of the original paper by using a modified version of TorchVision's
                `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V1


class EfficientNet_V2_L_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/efficientnet_v2_l-59c71312.pth",
        transforms=partial(
            ImageClassification,
            crop_size=480,
            resize_size=480,
            interpolation=InterpolationMode.BICUBIC,
            mean=(0.5, 0.5, 0.5),
            std=(0.5, 0.5, 0.5),
        ),
        meta={
            **_COMMON_META_V2,
            "num_params": 118515104,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 85.808,
                    "acc@5": 97.788,
                }
            },
            "_ops": 56.080,
            "_file_size": 454.573,
            "_docs": """These weights are ported from the original paper.""",
        },
    )
    DEFAULT = IMAGENET1K_V1


@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_B0_Weights.IMAGENET1K_V1))
def efficientnet_b0(
    *, weights: Optional[EfficientNet_B0_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """EfficientNet B0 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_B0_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_B0_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.EfficientNet_B0_Weights
        :members:
    """
    weights = EfficientNet_B0_Weights.verify(weights)

    inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_b0", width_mult=1.0, depth_mult=1.0)
    return _efficientnet(inverted_residual_setting, kwargs.pop("dropout", 0.2), last_channel, weights, progress, **kwargs)


@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_B1_Weights.IMAGENET1K_V1))
def efficientnet_b1(
    *, weights: Optional[EfficientNet_B1_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """EfficientNet B1 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_B1_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_B1_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.EfficientNet_B1_Weights
        :members:
    """
    weights = EfficientNet_B1_Weights.verify(weights)

    inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_b1", width_mult=1.0, depth_mult=1.1)
    return _efficientnet(inverted_residual_setting, kwargs.pop("dropout", 0.2), last_channel, weights, progress, **kwargs)


@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_B2_Weights.IMAGENET1K_V1))
def efficientnet_b2(
    *, weights: Optional[EfficientNet_B2_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """EfficientNet B2 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_B2_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_B2_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.EfficientNet_B2_Weights
        :members:
    """
    weights = EfficientNet_B2_Weights.verify(weights)

    inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_b2", width_mult=1.1, depth_mult=1.2)
    return _efficientnet(inverted_residual_setting, kwargs.pop("dropout", 0.3), last_channel, weights, progress, **kwargs)


@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_B3_Weights.IMAGENET1K_V1))
def efficientnet_b3(
    *, weights: Optional[EfficientNet_B3_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """EfficientNet B3 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_B3_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_B3_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.EfficientNet_B3_Weights
        :members:
    """
    weights = EfficientNet_B3_Weights.verify(weights)

    inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_b3", width_mult=1.2, depth_mult=1.4)
    return _efficientnet(
        inverted_residual_setting,
        kwargs.pop("dropout", 0.3),
        last_channel,
        weights,
        progress,
        **kwargs,
    )


@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_B4_Weights.IMAGENET1K_V1))
def efficientnet_b4(
    *, weights: Optional[EfficientNet_B4_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """EfficientNet B4 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_B4_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_B4_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.EfficientNet_B4_Weights
        :members:
    """
    weights = EfficientNet_B4_Weights.verify(weights)

    inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_b4", width_mult=1.4, depth_mult=1.8)
    return _efficientnet(
        inverted_residual_setting,
        kwargs.pop("dropout", 0.4),
        last_channel,
        weights,
        progress,
        **kwargs,
    )


@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_B5_Weights.IMAGENET1K_V1))
def efficientnet_b5(
    *, weights: Optional[EfficientNet_B5_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """EfficientNet B5 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_B5_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_B5_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.EfficientNet_B5_Weights
        :members:
    """
    weights = EfficientNet_B5_Weights.verify(weights)

    inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_b5", width_mult=1.6, depth_mult=2.2)
    return _efficientnet(
        inverted_residual_setting,
        kwargs.pop("dropout", 0.4),
        last_channel,
        weights,
        progress,
        norm_layer=partial(nn.BatchNorm2d, eps=0.001, momentum=0.01),
        **kwargs,
    )


@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_B6_Weights.IMAGENET1K_V1))
def efficientnet_b6(
    *, weights: Optional[EfficientNet_B6_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """EfficientNet B6 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_B6_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_B6_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.EfficientNet_B6_Weights
        :members:
    """
    weights = EfficientNet_B6_Weights.verify(weights)

    inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_b6", width_mult=1.8, depth_mult=2.6)
    return _efficientnet(
        inverted_residual_setting,
        kwargs.pop("dropout", 0.5),
        last_channel,
        weights,
        progress,
        norm_layer=partial(nn.BatchNorm2d, eps=0.001, momentum=0.01),
        **kwargs,
    )


@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_B7_Weights.IMAGENET1K_V1))
def efficientnet_b7(
    *, weights: Optional[EfficientNet_B7_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """EfficientNet B7 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_B7_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_B7_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.EfficientNet_B7_Weights
        :members:
    """
    weights = EfficientNet_B7_Weights.verify(weights)

    inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_b7", width_mult=2.0, depth_mult=3.1)
    return _efficientnet(
        inverted_residual_setting,
        kwargs.pop("dropout", 0.5),
        last_channel,
        weights,
        progress,
        norm_layer=partial(nn.BatchNorm2d, eps=0.001, momentum=0.01),
        **kwargs,
    )


@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_V2_S_Weights.IMAGENET1K_V1))
def efficientnet_v2_s(
    *, weights: Optional[EfficientNet_V2_S_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """
    Constructs an EfficientNetV2-S architecture from
    `EfficientNetV2: Smaller Models and Faster Training <https://arxiv.org/abs/2104.00298>`_.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_V2_S_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_V2_S_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.EfficientNet_V2_S_Weights
        :members:
    """
    weights = EfficientNet_V2_S_Weights.verify(weights)

    inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_v2_s")
    return _efficientnet(
        inverted_residual_setting,
        kwargs.pop("dropout", 0.2),
        last_channel,
        weights,
        progress,
        norm_layer=partial(nn.BatchNorm2d, eps=1e-03),
        **kwargs,
    )


@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_V2_M_Weights.IMAGENET1K_V1))
def efficientnet_v2_m(
    *, weights: Optional[EfficientNet_V2_M_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """
    Constructs an EfficientNetV2-M architecture from
    `EfficientNetV2: Smaller Models and Faster Training <https://arxiv.org/abs/2104.00298>`_.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_V2_M_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_V2_M_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.EfficientNet_V2_M_Weights
        :members:
    """
    weights = EfficientNet_V2_M_Weights.verify(weights)

    inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_v2_m")
    return _efficientnet(
        inverted_residual_setting,
        kwargs.pop("dropout", 0.3),
        last_channel,
        weights,
        progress,
        norm_layer=partial(nn.BatchNorm2d, eps=1e-03),
        **kwargs,
    )


@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_V2_L_Weights.IMAGENET1K_V1))
def efficientnet_v2_l(
    *, weights: Optional[EfficientNet_V2_L_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """
    Constructs an EfficientNetV2-L architecture from
    `EfficientNetV2: Smaller Models and Faster Training <https://arxiv.org/abs/2104.00298>`_.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_V2_L_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_V2_L_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.EfficientNet_V2_L_Weights
        :members:
    """
    weights = EfficientNet_V2_L_Weights.verify(weights)

    inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_v2_l")
    return _efficientnet(
        inverted_residual_setting,
        kwargs.pop("dropout", 0.4),
        last_channel,
        weights,
        progress,
        norm_layer=partial(nn.BatchNorm2d, eps=1e-03),
        **kwargs,
    )
4343 8E,(99:;Xc]JK43p & d38n 
eek 0-k -`k 0k 0k 0k 0k 0k 0 < < : ,0G0U0U!VW48401DH[^ X : ,0G0U0U!VW48401DH[^ X : ,0G0U0U!VW48401DH[^ X : ,0G0U0U!VW48401DH[^ X D ,0G0U0U!VW48401DH[^ X D ,0G0U0U!VW484 01 DH [^   X  F ,0G0U0U!VW484 01 DH [^   X  F ,0G0U0U!VW484 01 DH [^   X  F ,0I0W0W!XY6:T!23!FJ!]`!! Z !H ,0I0W0W!XY6:T!23!FJ!]`!! Z !H ,0I0W0W!XY6:T!23!FJ!]`!! Z !rF   