from functools import partial
from typing import Any, Callable, List, Optional

import torch
from torch import nn, Tensor

from ..ops.misc import Conv2dNormActivation
from ..transforms._presets import ImageClassification
from ..utils import _log_api_usage_once
from ._api import register_model, Weights, WeightsEnum
from ._meta import _IMAGENET_CATEGORIES
from ._utils import _make_divisible, _ovewrite_named_param, handle_legacy_interface


__all__ = ["MobileNetV2", "MobileNet_V2_Weights", "mobilenet_v2"]

class InvertedResidual(nn.Module):
    def __init__(
        self, inp: int, oup: int, stride: int, expand_ratio: int, norm_layer: Optional[Callable[..., nn.Module]] = None
    ) -> None:
        super().__init__()
        self.stride = stride
        if stride not in [1, 2]:
            raise ValueError(f"stride should be 1 or 2 instead of {stride}")

        if norm_layer is None:
            norm_layer = nn.BatchNorm2d

        hidden_dim = int(round(inp * expand_ratio))
        self.use_res_connect = self.stride == 1 and inp == oup

        layers: List[nn.Module] = []
        if expand_ratio != 1:
            # pointwise expansion
            layers.append(
                Conv2dNormActivation(inp, hidden_dim, kernel_size=1, norm_layer=norm_layer, activation_layer=nn.ReLU6)
            )
        layers.extend(
            [
                # depthwise convolution
                Conv2dNormActivation(
                    hidden_dim, hidden_dim, stride=stride, groups=hidden_dim,
                    norm_layer=norm_layer, activation_layer=nn.ReLU6,
                ),
                # pointwise linear projection back to oup channels
                nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
                norm_layer(oup),
            ]
        )
        self.conv = nn.Sequential(*layers)
        self.out_channels = oup
        self._is_cn = stride > 1

    def forward(self, x: Tensor) -> Tensor:
        if self.use_res_connect:
            return x + self.conv(x)
        else:
            return self.conv(x)

class MobileNetV2(nn.Module):
    def __init__(
        self,
        num_classes: int = 1000,
        width_mult: float = 1.0,
        inverted_residual_setting: Optional[List[List[int]]] = None,
        round_nearest: int = 8,
        block: Optional[Callable[..., nn.Module]] = None,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
        dropout: float = 0.2,
    ) -> None:
        """
        MobileNet V2 main class

        Args:
            num_classes (int): Number of classes
            width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
            inverted_residual_setting: Network structure
            round_nearest (int): Round the number of channels in each layer to be a multiple of this number.
                Set to 1 to turn off rounding
            block: Module specifying inverted residual building block for mobilenet
            norm_layer: Module specifying the normalization layer to use
            dropout (float): The dropout probability
        """
        super().__init__()
        _log_api_usage_once(self)

        if block is None:
            block = InvertedResidual

        if norm_layer is None:
            norm_layer = nn.BatchNorm2d

        input_channel = 32
        last_channel = 1280

        if inverted_residual_setting is None:
            inverted_residual_setting = [
                # t (expand ratio), c (channels), n (repeats), s (stride)
                [1, 16, 1, 1],
                [6, 24, 2, 2],
                [6, 32, 3, 2],
                [6, 64, 4, 2],
                [6, 96, 3, 1],
                [6, 160, 3, 2],
                [6, 320, 1, 1],
            ]

        # only check the first element, assuming the user knows t, c, n, s are required
        if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
            raise ValueError(
                f"inverted_residual_setting should be non-empty or a 4-element list, got {inverted_residual_setting}"
            )

        # building first layer
        input_channel = _make_divisible(input_channel * width_mult, round_nearest)
        self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
        features: List[nn.Module] = [
            Conv2dNormActivation(3, input_channel, stride=2, norm_layer=norm_layer, activation_layer=nn.ReLU6)
        ]
        # building inverted residual blocks
        for t, c, n, s in inverted_residual_setting:
            output_channel = _make_divisible(c * width_mult, round_nearest)
            for i in range(n):
                stride = s if i == 0 else 1
                features.append(block(input_channel, output_channel, stride, expand_ratio=t, norm_layer=norm_layer))
                input_channel = output_channel
        # building last several layers
        features.append(
            Conv2dNormActivation(
                input_channel, self.last_channel, kernel_size=1, norm_layer=norm_layer, activation_layer=nn.ReLU6
            )
        )
        self.features = nn.Sequential(*features)

        # building classifier
        self.classifier = nn.Sequential(
            nn.Dropout(p=dropout),
            nn.Linear(self.last_channel, num_classes),
        )

        # weight initialization
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out")
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)

    def _forward_impl(self, x: Tensor) -> Tensor:
        x = self.features(x)
        # Cannot use "squeeze" as batch-size can be 1
        x = nn.functional.adaptive_avg_pool2d(x, (1, 1))
        x = torch.flatten(x, 1)
        x = self.classifier(x)
        return x

    def forward(self, x: Tensor) -> Tensor:
        return self._forward_impl(x)

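# Minimal illustrative sketch, not part of the original torchvision module (the helper name
# `_demo_mobilenet_v2_backbone` is hypothetical): constructing the backbone directly, as
# described in the docstring above. A width_mult of 0.5 halves the per-layer channel counts
# (rounded to a multiple of round_nearest), while last_channel stays at max(1.0, width_mult) * 1280.
def _demo_mobilenet_v2_backbone() -> None:
    model = MobileNetV2(num_classes=10, width_mult=0.5, dropout=0.2)
    logits = model(torch.randn(2, 3, 224, 224))  # features -> global avg-pool -> classifier
    assert logits.shape == (2, 10)
    assert model.last_channel == 1280  # classifier input width is not shrunk below 1280
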
_COMMON_META = {
    "num_params": 3504872,
    "min_size": (1, 1),
    "categories": _IMAGENET_CATEGORIES,
}


class MobileNet_V2_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/mobilenet_v2-b0353104.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#mobilenetv2",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 71.878,
                    "acc@5": 90.286,
                }
            },
            "_ops": 0.301,
            "_file_size": 13.555,
            "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
        },
    )
    IMAGENET1K_V2 = Weights(
        url="https://download.pytorch.org/models/mobilenet_v2-7ebf99e0.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-reg-tuning",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 72.154,
                    "acc@5": 90.822,
                }
            },
            "_ops": 0.301,
            "_file_size": 13.598,
            "_docs": """
                These weights improve upon the results of the original paper by using a modified version of TorchVision's
                `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V2

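# Minimal illustrative sketch, not part of the original torchvision module (the helper name
# `_demo_weights_metadata` is hypothetical): each enum member above bundles the checkpoint URL,
# the matching inference transforms, and metadata such as the accuracy metrics and category names.
def _demo_weights_metadata() -> None:
    weights = MobileNet_V2_Weights.DEFAULT  # currently the IMAGENET1K_V2 entry
    preprocess = weights.transforms()  # ImageClassification(crop_size=224, resize_size=232)
    batch = preprocess(torch.randint(0, 256, (3, 300, 300), dtype=torch.uint8)).unsqueeze(0)
    categories = weights.meta["categories"]  # ImageNet-1K class names
    assert batch.shape == (1, 3, 224, 224) and len(categories) == 1000
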
@register_model()
@handle_legacy_interface(weights=("pretrained", MobileNet_V2_Weights.IMAGENET1K_V1))
def mobilenet_v2(
    *, weights: Optional[MobileNet_V2_Weights] = None, progress: bool = True, **kwargs: Any
) -> MobileNetV2:
    """MobileNetV2 architecture from the `MobileNetV2: Inverted Residuals and Linear
    Bottlenecks <https://arxiv.org/abs/1801.04381>`_ paper.

    Args:
        weights (:class:`~torchvision.models.MobileNet_V2_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.MobileNet_V2_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.mobilenetv2.MobileNetV2``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/mobilenetv2.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.MobileNet_V2_Weights
        :members:
    """
    weights = MobileNet_V2_Weights.verify(weights)

    if weights is not None:
        _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))

    model = MobileNetV2(**kwargs)

    if weights is not None:
        model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))

    return model

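# Minimal illustrative sketch, not part of the original torchvision module (the helper name
# `_demo_mobilenet_v2_inference` is hypothetical): the usual inference flow with the builder
# above — load pretrained weights, apply the matching transforms, and read the top-1 class name
# from the weights metadata. Downloading the checkpoint requires network access.
def _demo_mobilenet_v2_inference(img: Tensor) -> str:
    weights = MobileNet_V2_Weights.IMAGENET1K_V2
    model = mobilenet_v2(weights=weights).eval()
    batch = weights.transforms()(img).unsqueeze(0)
    with torch.no_grad():
        probs = model(batch).softmax(dim=1)
    return weights.meta["categories"][int(probs.argmax())]
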

)#	functoolsr   typingr   r   r   r   re   r   r   Zops.miscr
   Ztransforms._presetsr   utilsr   Z_apir   r   r   _metar   Z_utilsr   r   r   __all__r=   r   r   rp   r   rq   boolr   r3   r3   r3   r4   <module>   s6   
0o*   