from functools import partial
from typing import Any, Callable, List, Optional, Sequence

import torch
from torch import nn, Tensor

from ..ops.misc import Conv2dNormActivation, SqueezeExcitation as SElayer
from ..transforms._presets import ImageClassification
from ..utils import _log_api_usage_once
from ._api import register_model, Weights, WeightsEnum
from ._meta import _IMAGENET_CATEGORIES
from ._utils import _make_divisible, _ovewrite_named_param, handle_legacy_interface


__all__ = [
    "MobileNetV3",
    "MobileNet_V3_Large_Weights",
    "MobileNet_V3_Small_Weights",
    "mobilenet_v3_large",
    "mobilenet_v3_small",
]


class InvertedResidualConfig:
    # Stores the hyper-parameters of one bottleneck block (Tables 1 and 2 of the MobileNetV3 paper)
    def __init__(self, input_channels: int, kernel: int, expanded_channels: int, out_channels: int,
                 use_se: bool, activation: str, stride: int, dilation: int, width_mult: float):
        self.input_channels = self.adjust_channels(input_channels, width_mult)
        self.kernel = kernel
        self.expanded_channels = self.adjust_channels(expanded_channels, width_mult)
        self.out_channels = self.adjust_channels(out_channels, width_mult)
        self.use_se = use_se
        self.use_hs = activation == "HS"
        self.stride = stride
        self.dilation = dilation

    @staticmethod
    def adjust_channels(channels: int, width_mult: float):
        return _make_divisible(channels * width_mult, 8)


class InvertedResidual(nn.Module):
    # Inverted residual block with optional squeeze-and-excitation, as described in section 5 of the paper
    def __init__(
        self,
        cnf: InvertedResidualConfig,
        norm_layer: Callable[..., nn.Module],
        se_layer: Callable[..., nn.Module] = partial(SElayer, scale_activation=nn.Hardsigmoid),
    ):
        super().__init__()
        if not (1 <= cnf.stride <= 2):
            raise ValueError("illegal stride value")

        self.use_res_connect = cnf.stride == 1 and cnf.input_channels == cnf.out_channels

        layers: List[nn.Module] = []
        activation_layer = nn.Hardswish if cnf.use_hs else nn.ReLU

        # expand with a 1x1 pointwise convolution when the block widens the channels
        if cnf.expanded_channels != cnf.input_channels:
            layers.append(
                Conv2dNormActivation(cnf.input_channels, cnf.expanded_channels, kernel_size=1,
                                     norm_layer=norm_layer, activation_layer=activation_layer)
            )

        # depthwise convolution
        stride = 1 if cnf.dilation > 1 else cnf.stride
        layers.append(
            Conv2dNormActivation(cnf.expanded_channels, cnf.expanded_channels, kernel_size=cnf.kernel,
                                 stride=stride, dilation=cnf.dilation, groups=cnf.expanded_channels,
                                 norm_layer=norm_layer, activation_layer=activation_layer)
        )
        if cnf.use_se:
            squeeze_channels = _make_divisible(cnf.expanded_channels // 4, 8)
            layers.append(se_layer(cnf.expanded_channels, squeeze_channels))

        # project back down with a linear 1x1 convolution
        layers.append(
            Conv2dNormActivation(cnf.expanded_channels, cnf.out_channels, kernel_size=1,
                                 norm_layer=norm_layer, activation_layer=None)
        )

        self.block = nn.Sequential(*layers)
        self.out_channels = cnf.out_channels
        self._is_cn = cnf.stride > 1

    def forward(self, input: Tensor) -> Tensor:
        result = self.block(input)
        if self.use_res_connect:
            result += input
        return result


class MobileNetV3(nn.Module):
    def __init__(
        self,
        inverted_residual_setting: List[InvertedResidualConfig],
        last_channel: int,
        num_classes: int = 1000,
        block: Optional[Callable[..., nn.Module]] = None,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
        dropout: float = 0.2,
        **kwargs: Any,
    ) -> None:
        """
        MobileNet V3 main class

        Args:
            inverted_residual_setting (List[InvertedResidualConfig]): Network structure
            last_channel (int): The number of channels on the penultimate layer
            num_classes (int): Number of classes
            block (Optional[Callable[..., nn.Module]]): Module specifying inverted residual building block for mobilenet
            norm_layer (Optional[Callable[..., nn.Module]]): Module specifying the normalization layer to use
            dropout (float): The dropout probability
        """
        super().__init__()
        _log_api_usage_once(self)

        if not inverted_residual_setting:
            raise ValueError("The inverted_residual_setting should not be empty")
        elif not (
            isinstance(inverted_residual_setting, Sequence)
            and all([isinstance(s, InvertedResidualConfig) for s in inverted_residual_setting])
        ):
            raise TypeError("The inverted_residual_setting should be List[InvertedResidualConfig]")

        if block is None:
            block = InvertedResidual

        if norm_layer is None:
            norm_layer = partial(nn.BatchNorm2d, eps=0.001, momentum=0.01)

        layers: List[nn.Module] = []

        # building first layer
        firstconv_output_channels = inverted_residual_setting[0].input_channels
        layers.append(
            Conv2dNormActivation(3, firstconv_output_channels, kernel_size=3, stride=2,
                                 norm_layer=norm_layer, activation_layer=nn.Hardswish)
        )

        # building inverted residual blocks
        for cnf in inverted_residual_setting:
            layers.append(block(cnf, norm_layer))

        # building last several layers
        lastconv_input_channels = inverted_residual_setting[-1].out_channels
        lastconv_output_channels = 6 * lastconv_input_channels
        layers.append(
            Conv2dNormActivation(lastconv_input_channels, lastconv_output_channels, kernel_size=1,
                                 norm_layer=norm_layer, activation_layer=nn.Hardswish)
        )

        self.features = nn.Sequential(*layers)
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Sequential(
            nn.Linear(lastconv_output_channels, last_channel),
            nn.Hardswish(inplace=True),
            nn.Dropout(p=dropout, inplace=True),
            nn.Linear(last_channel, num_classes),
        )

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out")
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)

    def _forward_impl(self, x: Tensor) -> Tensor:
        x = self.features(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.classifier(x)
        return x

    def forward(self, x: Tensor) -> Tensor:
        return self._forward_impl(x)


def _mobilenet_v3_conf(arch: str, width_mult: float = 1.0, reduced_tail: bool = False, dilated: bool = False, **kwargs: Any):
    reduce_divider = 2 if reduced_tail else 1
    dilation = 2 if dilated else 1

    bneck_conf = partial(InvertedResidualConfig, width_mult=width_mult)
    adjust_channels = partial(InvertedResidualConfig.adjust_channels, width_mult=width_mult)

    if arch == "mobilenet_v3_large":
        inverted_residual_setting = [
            bneck_conf(16, 3, 16, 16, False, "RE", 1, 1),
            bneck_conf(16, 3, 64, 24, False, "RE", 2, 1),  # C1
            bneck_conf(24, 3, 72, 24, False, "RE", 1, 1),
            bneck_conf(24, 5, 72, 40, True, "RE", 2, 1),  # C2
            bneck_conf(40, 5, 120, 40, True, "RE", 1, 1),
            bneck_conf(40, 5, 120, 40, True, "RE", 1, 1),
            bneck_conf(40, 3, 240, 80, False, "HS", 2, 1),  # C3
            bneck_conf(80, 3, 200, 80, False, "HS", 1, 1),
            bneck_conf(80, 3, 184, 80, False, "HS", 1, 1),
            bneck_conf(80, 3, 184, 80, False, "HS", 1, 1),
            bneck_conf(80, 3, 480, 112, True, "HS", 1, 1),
            bneck_conf(112, 3, 672, 112, True, "HS", 1, 1),
            bneck_conf(112, 5, 672, 160 // reduce_divider, True, "HS", 2, dilation),  # C4
            bneck_conf(160 // reduce_divider, 5, 960 // reduce_divider, 160 // reduce_divider, True, "HS", 1, dilation),
            bneck_conf(160 // reduce_divider, 5, 960 // reduce_divider, 160 // reduce_divider, True, "HS", 1, dilation),
        ]
        last_channel = adjust_channels(1280 // reduce_divider)  # C5
    elif arch == "mobilenet_v3_small":
        inverted_residual_setting = [
            bneck_conf(16, 3, 16, 16, True, "RE", 2, 1),  # C1
            bneck_conf(16, 3, 72, 24, False, "RE", 2, 1),  # C2
            bneck_conf(24, 3, 88, 24, False, "RE", 1, 1),
            bneck_conf(24, 5, 96, 40, True, "HS", 2, 1),  # C3
            bneck_conf(40, 5, 240, 40, True, "HS", 1, 1),
            bneck_conf(40, 5, 240, 40, True, "HS", 1, 1),
            bneck_conf(40, 5, 120, 48, True, "HS", 1, 1),
            bneck_conf(48, 5, 144, 48, True, "HS", 1, 1),
            bneck_conf(48, 5, 288, 96 // reduce_divider, True, "HS", 2, dilation),  # C4
            bneck_conf(96 // reduce_divider, 5, 576 // reduce_divider, 96 // reduce_divider, True, "HS", 1, dilation),
            bneck_conf(96 // reduce_divider, 5, 576 // reduce_divider, 96 // reduce_divider, True, "HS", 1, dilation),
        ]
        last_channel = adjust_channels(1024 // reduce_divider)  # C5
    else:
        raise ValueError(f"Unsupported model type {arch}")

    return inverted_residual_setting, last_channel


def _mobilenet_v3(inverted_residual_setting: List[InvertedResidualConfig], last_channel: int,
                  weights: Optional[WeightsEnum], progress: bool, **kwargs: Any) -> MobileNetV3:
    if weights is not None:
        _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))

    model = MobileNetV3(inverted_residual_setting, last_channel, **kwargs)

    if weights is not None:
        model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))

    return model


_COMMON_META = {"min_size": (1, 1), "categories": _IMAGENET_CATEGORIES}


# The numeric metadata below (num_params, accuracy metrics, _ops, _file_size) follows the values
# published in the torchvision model documentation; treat them as reported figures.
class MobileNet_V3_Large_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/mobilenet_v3_large-8738ca79.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 5483032,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#mobilenetv3-large--small",
            "_metrics": {"ImageNet-1K": {"acc@1": 74.042, "acc@5": 91.340}},
            "_ops": 0.217,
            "_file_size": 21.114,
            "_docs": """These weights were trained from scratch by using a simple training recipe.""",
        },
    )
    IMAGENET1K_V2 = Weights(
        url="https://download.pytorch.org/models/mobilenet_v3_large-5c1a4163.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "num_params": 5483032,
            "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-reg-tuning",
            "_metrics": {"ImageNet-1K": {"acc@1": 75.274, "acc@5": 92.566}},
            "_ops": 0.217,
            "_file_size": 21.107,
            "_docs": """
                These weights improve marginally upon the results of the original paper by using a modified version of
                TorchVision's `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V2


class MobileNet_V3_Small_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/mobilenet_v3_small-047dcff4.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 2542856,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#mobilenetv3-large--small",
            "_metrics": {"ImageNet-1K": {"acc@1": 67.668, "acc@5": 87.402}},
            "_ops": 0.057,
            "_file_size": 9.829,
            "_docs": """
                These weights improve upon the results of the original paper by using a simple training recipe.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V1


@register_model()
@handle_legacy_interface(weights=("pretrained", MobileNet_V3_Large_Weights.IMAGENET1K_V1))
def mobilenet_v3_large(
    *, weights: Optional[MobileNet_V3_Large_Weights] = None, progress: bool = True, **kwargs: Any
) -> MobileNetV3:
    """
    Constructs a large MobileNetV3 architecture from
    `Searching for MobileNetV3 <https://arxiv.org/abs/1905.02244>`__.

    Args:
        weights (:class:`~torchvision.models.MobileNet_V3_Large_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.MobileNet_V3_Large_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.mobilenet.MobileNetV3``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/mobilenetv3.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.MobileNet_V3_Large_Weights
        :members:
    """
    weights = MobileNet_V3_Large_Weights.verify(weights)

    inverted_residual_setting, last_channel = _mobilenet_v3_conf("mobilenet_v3_large", **kwargs)
    return _mobilenet_v3(inverted_residual_setting, last_channel, weights, progress, **kwargs)


@register_model()
@handle_legacy_interface(weights=("pretrained", MobileNet_V3_Small_Weights.IMAGENET1K_V1))
def mobilenet_v3_small(
    *, weights: Optional[MobileNet_V3_Small_Weights] = None, progress: bool = True, **kwargs: Any
) -> MobileNetV3:
    """
    Constructs a small MobileNetV3 architecture from
    `Searching for MobileNetV3 <https://arxiv.org/abs/1905.02244>`__.

    Args:
        weights (:class:`~torchvision.models.MobileNet_V3_Small_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.MobileNet_V3_Small_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.mobilenet.MobileNetV3``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/mobilenetv3.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.MobileNet_V3_Small_Weights
        :members:
    """
    weights = MobileNet_V3_Small_Weights.verify(weights)

    inverted_residual_setting, last_channel = _mobilenet_v3_conf("mobilenet_v3_small", **kwargs)
    return _mobilenet_v3(inverted_residual_setting, last_channel, weights, progress, **kwargs)