from functools import partial
from typing import Any, Optional, Sequence

import torch
from torch import nn
from torch.nn import functional as F

from ...transforms._presets import SemanticSegmentation
from .._api import register_model, Weights, WeightsEnum
from .._meta import _VOC_CATEGORIES
from .._utils import _ovewrite_value_param, handle_legacy_interface, IntermediateLayerGetter
from ..mobilenetv3 import mobilenet_v3_large, MobileNet_V3_Large_Weights, MobileNetV3
from ..resnet import ResNet, resnet101, ResNet101_Weights, resnet50, ResNet50_Weights
from ._utils import _SimpleSegmentationModel
from .fcn import FCNHead


__all__ = [
    "DeepLabV3",
    "DeepLabV3_ResNet50_Weights",
    "DeepLabV3_ResNet101_Weights",
    "DeepLabV3_MobileNet_V3_Large_Weights",
    "deeplabv3_mobilenet_v3_large",
    "deeplabv3_resnet50",
    "deeplabv3_resnet101",
]


class DeepLabV3(_SimpleSegmentationModel):
    """
    Implements DeepLabV3 model from
    `"Rethinking Atrous Convolution for Semantic Image Segmentation"
    <https://arxiv.org/abs/1706.05587>`_.

    Args:
        backbone (nn.Module): the network used to compute the features for the model.
            The backbone should return an OrderedDict[Tensor], with the key being
            "out" for the last feature map used, and "aux" if an auxiliary classifier
            is used.
        classifier (nn.Module): module that takes the "out" element returned from
            the backbone and returns a dense prediction.
        aux_classifier (nn.Module, optional): auxiliary classifier used during training
    """

    pass


class DeepLabHead(nn.Sequential):
    def __init__(self, in_channels: int, num_classes: int, atrous_rates: Sequence[int] = (12, 24, 36)) -> None:
        super().__init__(
            ASPP(in_channels, atrous_rates),
            nn.Conv2d(256, 256, 3, padding=1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(),
            nn.Conv2d(256, num_classes, 1),
        )


class ASPPConv(nn.Sequential):
    def __init__(self, in_channels: int, out_channels: int, dilation: int) -> None:
        modules = [
            nn.Conv2d(in_channels, out_channels, 3, padding=dilation, dilation=dilation, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
        ]
        super().__init__(*modules)


class ASPPPooling(nn.Sequential):
    def __init__(self, in_channels: int, out_channels: int) -> None:
        super().__init__(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(in_channels, out_channels, 1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        size = x.shape[-2:]
        for mod in self:
            x = mod(x)
        return F.interpolate(x, size=size, mode="bilinear", align_corners=False)


class ASPP(nn.Module):
    def __init__(self, in_channels: int, atrous_rates: Sequence[int], out_channels: int = 256) -> None:
        super().__init__()
        modules = []
        # 1x1 projection branch
        modules.append(
            nn.Sequential(nn.Conv2d(in_channels, out_channels, 1, bias=False), nn.BatchNorm2d(out_channels), nn.ReLU())
        )

        # one dilated 3x3 branch per atrous rate
        rates = tuple(atrous_rates)
        for rate in rates:
            modules.append(ASPPConv(in_channels, out_channels, rate))

        # global average pooling branch
        modules.append(ASPPPooling(in_channels, out_channels))

        self.convs = nn.ModuleList(modules)

        self.project = nn.Sequential(
            nn.Conv2d(len(self.convs) * out_channels, out_channels, 1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
            nn.Dropout(0.5),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        _res = []
        for conv in self.convs:
            _res.append(conv(x))
        res = torch.cat(_res, dim=1)
        return self.project(res)


def _deeplabv3_resnet(
    backbone: ResNet,
    num_classes: int,
    aux: Optional[bool],
) -> DeepLabV3:
    return_layers = {"layer4": "out"}
    if aux:
        return_layers["layer3"] = "aux"
    backbone = IntermediateLayerGetter(backbone, return_layers=return_layers)

    aux_classifier = FCNHead(1024, num_classes) if aux else None
    classifier = DeepLabHead(2048, num_classes)
    return DeepLabV3(backbone, classifier, aux_classifier)


_COMMON_META = {
    "categories": _VOC_CATEGORIES,
    "min_size": (1, 1),
    "_docs": """
        These weights were trained on a subset of COCO, using only the 20 categories that are present in the Pascal VOC
        dataset.
    """,
}


class DeepLabV3_ResNet50_Weights(WeightsEnum):
    COCO_WITH_VOC_LABELS_V1 = Weights(
        url="https://download.pytorch.org/models/deeplabv3_resnet50_coco-cd0a2569.pth",
        transforms=partial(SemanticSegmentation, resize_size=520),
        meta={
            **_COMMON_META,
            "num_params": 42004074,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/segmentation#deeplabv3_resnet50",
            "_metrics": {
                "COCO-val2017-VOC-labels": {
                    "miou": 66.4,
                    "pixel_acc": 92.4,
                }
            },
            "_ops": 178.722,
            "_file_size": 160.515,
        },
    )
    DEFAULT = COCO_WITH_VOC_LABELS_V1


class DeepLabV3_ResNet101_Weights(WeightsEnum):
    COCO_WITH_VOC_LABELS_V1 = Weights(
        url="https://download.pytorch.org/models/deeplabv3_resnet101_coco-586e9e4e.pth",
        transforms=partial(SemanticSegmentation, resize_size=520),
        meta={
            **_COMMON_META,
            "num_params": 60996202,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/segmentation#fcn_resnet101",
            "_metrics": {
                "COCO-val2017-VOC-labels": {
                    "miou": 67.4,
                    "pixel_acc": 92.4,
                }
            },
            "_ops": 258.743,
            "_file_size": 233.217,
        },
    )
    DEFAULT = COCO_WITH_VOC_LABELS_V1


class DeepLabV3_MobileNet_V3_Large_Weights(WeightsEnum):
    COCO_WITH_VOC_LABELS_V1 = Weights(
        url="https://download.pytorch.org/models/deeplabv3_mobilenet_v3_large-fc3c493d.pth",
        transforms=partial(SemanticSegmentation, resize_size=520),
        meta={
            **_COMMON_META,
            "num_params": 11029328,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/segmentation#deeplabv3_mobilenet_v3_large",
            "_metrics": {
                "COCO-val2017-VOC-labels": {
                    "miou": 60.3,
                    "pixel_acc": 91.2,
                }
            },
            "_ops": 10.452,
            "_file_size": 42.301,
        },
    )
    DEFAULT = COCO_WITH_VOC_LABELS_V1


def _deeplabv3_mobilenetv3(
    backbone: MobileNetV3,
    num_classes: int,
    aux: Optional[bool],
) -> DeepLabV3:
    backbone = backbone.features
    # Gather the indices of blocks which are strided. These are the locations of C1, ..., Cn-1 blocks.
    # The first and last blocks are always included because they are the C0 (conv1) and Cn.
    stage_indices = [0] + [i for i, b in enumerate(backbone) if getattr(b, "_is_cn", False)] + [len(backbone) - 1]
    out_pos = stage_indices[-1]  # use C5 which has output_stride = 16
    out_inplanes = backbone[out_pos].out_channels
    aux_pos = stage_indices[-4]  # use C2 here which has output_stride = 8
    aux_inplanes = backbone[aux_pos].out_channels
    return_layers = {str(out_pos): "out"}
    if aux:
        return_layers[str(aux_pos)] = "aux"
    backbone = IntermediateLayerGetter(backbone, return_layers=return_layers)

    aux_classifier = FCNHead(aux_inplanes, num_classes) if aux else None
    classifier = DeepLabHead(out_inplanes, num_classes)
    return DeepLabV3(backbone, classifier, aux_classifier)


@register_model()
@handle_legacy_interface(
    weights=("pretrained", DeepLabV3_ResNet50_Weights.COCO_WITH_VOC_LABELS_V1),
    weights_backbone=("pretrained_backbone", ResNet50_Weights.IMAGENET1K_V1),
)
def deeplabv3_resnet50(
    *,
    weights: Optional[DeepLabV3_ResNet50_Weights] = None,
    progress: bool = True,
    num_classes: Optional[int] = None,
    aux_loss: Optional[bool] = None,
    weights_backbone: Optional[ResNet50_Weights] = ResNet50_Weights.IMAGENET1K_V1,
    **kwargs: Any,
) -> DeepLabV3:
    """Constructs a DeepLabV3 model with a ResNet-50 backbone.

    .. betastatus:: segmentation module

    Reference: `Rethinking Atrous Convolution for Semantic Image Segmentation <https://arxiv.org/abs/1706.05587>`__.

    Args:
        weights (:class:`~torchvision.models.segmentation.DeepLabV3_ResNet50_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.segmentation.DeepLabV3_ResNet50_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        num_classes (int, optional): number of output classes of the model (including the background)
        aux_loss (bool, optional): If True, it uses an auxiliary loss
        weights_backbone (:class:`~torchvision.models.ResNet50_Weights`, optional): The pretrained weights for the
            backbone
        **kwargs: unused

    .. autoclass:: torchvision.models.segmentation.DeepLabV3_ResNet50_Weights
        :members:
    """
    weights = DeepLabV3_ResNet50_Weights.verify(weights)
    weights_backbone = ResNet50_Weights.verify(weights_backbone)

    if weights is not None:
        weights_backbone = None
        num_classes = _ovewrite_value_param("num_classes", num_classes, len(weights.meta["categories"]))
        aux_loss = _ovewrite_value_param("aux_loss", aux_loss, True)
    elif num_classes is None:
        num_classes = 21

    backbone = resnet50(weights=weights_backbone, replace_stride_with_dilation=[False, True, True])
    model = _deeplabv3_resnet(backbone, num_classes, aux_loss)

    if weights is not None:
        model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))

    return model


@register_model()
@handle_legacy_interface(
    weights=("pretrained", DeepLabV3_ResNet101_Weights.COCO_WITH_VOC_LABELS_V1),
    weights_backbone=("pretrained_backbone", ResNet101_Weights.IMAGENET1K_V1),
)
def deeplabv3_resnet101(
    *,
    weights: Optional[DeepLabV3_ResNet101_Weights] = None,
    progress: bool = True,
    num_classes: Optional[int] = None,
    aux_loss: Optional[bool] = None,
    weights_backbone: Optional[ResNet101_Weights] = ResNet101_Weights.IMAGENET1K_V1,
    **kwargs: Any,
) -> DeepLabV3:
    """Constructs a DeepLabV3 model with a ResNet-101 backbone.

    .. betastatus:: segmentation module

    Reference: `Rethinking Atrous Convolution for Semantic Image Segmentation <https://arxiv.org/abs/1706.05587>`__.

    Args:
        weights (:class:`~torchvision.models.segmentation.DeepLabV3_ResNet101_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.segmentation.DeepLabV3_ResNet101_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        num_classes (int, optional): number of output classes of the model (including the background)
        aux_loss (bool, optional): If True, it uses an auxiliary loss
        weights_backbone (:class:`~torchvision.models.ResNet101_Weights`, optional): The pretrained weights for the
            backbone
        **kwargs: unused

    .. autoclass:: torchvision.models.segmentation.DeepLabV3_ResNet101_Weights
        :members:
    """
    weights = DeepLabV3_ResNet101_Weights.verify(weights)
    weights_backbone = ResNet101_Weights.verify(weights_backbone)

    if weights is not None:
        weights_backbone = None
        num_classes = _ovewrite_value_param("num_classes", num_classes, len(weights.meta["categories"]))
        aux_loss = _ovewrite_value_param("aux_loss", aux_loss, True)
    elif num_classes is None:
        num_classes = 21

    backbone = resnet101(weights=weights_backbone, replace_stride_with_dilation=[False, True, True])
    model = _deeplabv3_resnet(backbone, num_classes, aux_loss)

    if weights is not None:
        model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))

    return model


@register_model()
@handle_legacy_interface(
    weights=("pretrained", DeepLabV3_MobileNet_V3_Large_Weights.COCO_WITH_VOC_LABELS_V1),
    weights_backbone=("pretrained_backbone", MobileNet_V3_Large_Weights.IMAGENET1K_V1),
)
def deeplabv3_mobilenet_v3_large(
    *,
    weights: Optional[DeepLabV3_MobileNet_V3_Large_Weights] = None,
    progress: bool = True,
    num_classes: Optional[int] = None,
    aux_loss: Optional[bool] = None,
    weights_backbone: Optional[MobileNet_V3_Large_Weights] = MobileNet_V3_Large_Weights.IMAGENET1K_V1,
    **kwargs: Any,
) -> DeepLabV3:
    """Constructs a DeepLabV3 model with a MobileNetV3-Large backbone.

    Reference: `Rethinking Atrous Convolution for Semantic Image Segmentation <https://arxiv.org/abs/1706.05587>`__.

    Args:
        weights (:class:`~torchvision.models.segmentation.DeepLabV3_MobileNet_V3_Large_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.segmentation.DeepLabV3_MobileNet_V3_Large_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        num_classes (int, optional): number of output classes of the model (including the background)
        aux_loss (bool, optional): If True, it uses an auxiliary loss
        weights_backbone (:class:`~torchvision.models.MobileNet_V3_Large_Weights`, optional): The pretrained weights
            for the backbone
        **kwargs: unused

    .. autoclass:: torchvision.models.segmentation.DeepLabV3_MobileNet_V3_Large_Weights
        :members:
    """
    weights = DeepLabV3_MobileNet_V3_Large_Weights.verify(weights)
    weights_backbone = MobileNet_V3_Large_Weights.verify(weights_backbone)

    if weights is not None:
        weights_backbone = None
        num_classes = _ovewrite_value_param("num_classes", num_classes, len(weights.meta["categories"]))
        aux_loss = _ovewrite_value_param("aux_loss", aux_loss, True)
    elif num_classes is None:
        num_classes = 21

    backbone = mobilenet_v3_large(weights=weights_backbone, dilated=True)
    model = _deeplabv3_mobilenetv3(backbone, num_classes, aux_loss)

    if weights is not None:
        model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))

    return model
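

# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the upstream module): how the
# builders above are typically used. It is kept as comments so importing this
# file has no side effects such as weight downloads; the input tensor is a
# stand-in for a real image batch.
#
#   import torch
#   from torchvision.models.segmentation import (
#       DeepLabV3_ResNet50_Weights,
#       deeplabv3_resnet50,
#   )
#
#   weights = DeepLabV3_ResNet50_Weights.DEFAULT
#   model = deeplabv3_resnet50(weights=weights).eval()
#   preprocess = weights.transforms()
#
#   img = torch.rand(3, 520, 520)             # dummy image, (C, H, W)
#   batch = preprocess(img).unsqueeze(0)      # resize/normalize, add batch dim
#   with torch.no_grad():
#       out = model(batch)["out"]             # dense logits, (1, 21, H, W)
#   mask = out.argmax(dim=1)                  # per-pixel class indices
#
# The pretrained builders attach the auxiliary FCNHead (aux_loss=True), so the
# returned dict also contains an "aux" entry computed from an intermediate
# feature map, as described in the DeepLabV3 class docstring.
#
# The classifier head can also be inspected in isolation: DeepLabHead chains an
# ASPP block (a 1x1 branch, one dilated 3x3 branch per atrous rate, and a
# pooled branch) with a 3x3 fuse convolution and a final 1x1 projection to
# num_classes channels, preserving spatial size. A quick shape check, again
# only as an illustration:
#
#   head = DeepLabHead(in_channels=2048, num_classes=21)
#   feats = torch.rand(1, 2048, 65, 65)       # e.g. ResNet "layer4" features
#   print(head(feats).shape)                  # torch.Size([1, 21, 65, 65])
# ---------------------------------------------------------------------------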