from functools import partial
from typing import Any, Optional, Union

from torch import nn, Tensor
from torch.ao.quantization import DeQuantStub, QuantStub
from torchvision.models.mobilenetv2 import InvertedResidual, MobileNet_V2_Weights, MobileNetV2

from ...ops.misc import Conv2dNormActivation
from ...transforms._presets import ImageClassification
from .._api import register_model, Weights, WeightsEnum
from .._meta import _IMAGENET_CATEGORIES
from .._utils import _ovewrite_named_param, handle_legacy_interface
from .utils import _fuse_modules, _replace_relu, quantize_model


__all__ = [
    "QuantizableMobileNetV2",
    "MobileNet_V2_QuantizedWeights",
    "mobilenet_v2",
]


class QuantizableInvertedResidual(InvertedResidual):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        # FloatFunctional replaces the plain ``+`` so the residual addition is quantizable.
        self.skip_add = nn.quantized.FloatFunctional()

    def forward(self, x: Tensor) -> Tensor:
        if self.use_res_connect:
            return self.skip_add.add(x, self.conv(x))
        else:
            return self.conv(x)

    def fuse_model(self, is_qat: Optional[bool] = None) -> None:
        # Fuse each Conv2d with the module that immediately follows it (its BatchNorm).
        for idx in range(len(self.conv)):
            if type(self.conv[idx]) is nn.Conv2d:
                _fuse_modules(self.conv, [str(idx), str(idx + 1)], is_qat, inplace=True)


class QuantizableMobileNetV2(MobileNetV2):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        """
        MobileNet V2 main class

        Args:
           Inherits args from floating point MobileNetV2
        """
        super().__init__(*args, **kwargs)
        self.quant = QuantStub()
        self.dequant = DeQuantStub()

    def forward(self, x: Tensor) -> Tensor:
        x = self.quant(x)
        x = self._forward_impl(x)
        x = self.dequant(x)
        return x

    def fuse_model(self, is_qat: Optional[bool] = None) -> None:
        for m in self.modules():
            if type(m) is Conv2dNormActivation:
                _fuse_modules(m, ["0", "1", "2"], is_qat, inplace=True)
            if type(m) is QuantizableInvertedResidual:
                m.fuse_model(is_qat)


class MobileNet_V2_QuantizedWeights(WeightsEnum):
    IMAGENET1K_QNNPACK_V1 = Weights(
        url="https://download.pytorch.org/models/quantized/mobilenet_v2_qnnpack_37f702c5.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            "num_params": 3504872,
            "min_size": (1, 1),
            "categories": _IMAGENET_CATEGORIES,
            "backend": "qnnpack",
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#qat-mobilenetv2",
            "unquantized": MobileNet_V2_Weights.IMAGENET1K_V1,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 71.658,
                    "acc@5": 90.150,
                }
            },
            "_ops": 0.301,
            "_file_size": 3.423,
            "_docs": """
                These weights were produced by doing Quantization Aware Training (eager mode) on top of the unquantized
                weights listed below.
            """,
        },
    )
    DEFAULT = IMAGENET1K_QNNPACK_V1


@register_model(name="quantized_mobilenet_v2")
@handle_legacy_interface(
    weights=(
        "pretrained",
        lambda kwargs: MobileNet_V2_QuantizedWeights.IMAGENET1K_QNNPACK_V1
        if kwargs.get("quantize", False)
        else MobileNet_V2_Weights.IMAGENET1K_V1,
    )
)
def mobilenet_v2(
    *,
    weights: Optional[Union[MobileNet_V2_QuantizedWeights, MobileNet_V2_Weights]] = None,
    progress: bool = True,
    quantize: bool = False,
    **kwargs: Any,
) -> QuantizableMobileNetV2:
    """
    Constructs a MobileNetV2 architecture from
    `MobileNetV2: Inverted Residuals and Linear Bottlenecks
    <https://arxiv.org/abs/1801.04381>`_.

    .. note::
        Note that ``quantize = True`` returns a quantized model with 8 bit
        weights. Quantized models only support inference and run on CPUs.
        GPU inference is not yet supported.

    Args:
        weights (:class:`~torchvision.models.quantization.MobileNet_V2_QuantizedWeights` or :class:`~torchvision.models.MobileNet_V2_Weights`, optional): The
            pretrained weights for the model. See
            :class:`~torchvision.models.quantization.MobileNet_V2_QuantizedWeights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
        quantize (bool, optional): If True, returns a quantized version of the model. Default is False.
        **kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableMobileNetV2``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/mobilenetv2.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.quantization.MobileNet_V2_QuantizedWeights
        :members:
    .. autoclass:: torchvision.models.MobileNet_V2_Weights
        :members:
        :noindex:
    """
    weights = (MobileNet_V2_QuantizedWeights if quantize else MobileNet_V2_Weights).verify(weights)

    if weights is not None:
        _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
        if "backend" in weights.meta:
            _ovewrite_named_param(kwargs, "backend", weights.meta["backend"])
    backend = kwargs.pop("backend", "qnnpack")

    model = QuantizableMobileNetV2(block=QuantizableInvertedResidual, **kwargs)
    _replace_relu(model)
    if quantize:
        quantize_model(model, backend)

    if weights is not None:
        model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))

    return model
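

if __name__ == "__main__":
    # Illustrative usage sketch, not part of the original torchvision module: it loads the
    # pretrained QNNPACK-quantized weights and runs a single forward pass on a random image.
    # Assumes a PyTorch build that ships the qnnpack quantized engine and network access to
    # download the checkpoint. Because this file uses relative imports, run it as a module,
    # e.g. ``python -m torchvision.models.quantization.mobilenetv2``. The 224x224 input
    # matches the crop_size of the weights' ImageClassification eval transform.
    import torch

    weights = MobileNet_V2_QuantizedWeights.IMAGENET1K_QNNPACK_V1
    model = mobilenet_v2(weights=weights, quantize=True)
    model.eval()
    with torch.no_grad():
        logits = model(torch.rand(1, 3, 224, 224))
    top_class = logits.argmax(dim=1).item()
    print("predicted category:", weights.meta["categories"][top_class])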