U
    h                  	   @   sj  d dl Z d dlmZ d dlmZmZmZ d dlZd dlm	Z	 d dlm
Z
 d dlmZ ddlmZ dd	lmZmZmZ dd
lmZ ddlmZmZ ddlmZmZmZmZmZmZ ddlm Z m!Z!m"Z" dddgZ#G dd deZ$G dd deZ%G dd deZ&G dd deZ'G dd deZ(eddeddd fddd d!d"eee(ef  e)e)ee'd#d$dZdS )%    N)partial)AnyOptionalUnion)Tensor)
functional   )ImageClassification   )register_modelWeightsWeightsEnum)_IMAGENET_CATEGORIES)_ovewrite_named_paramhandle_legacy_interface)BasicConv2d	GoogLeNetGoogLeNet_WeightsGoogLeNetOutputs	InceptionInceptionAux   )_fuse_modules_replace_reluquantize_modelQuantizableGoogLeNetGoogLeNet_QuantizedWeights	googlenetc                       sL   e Zd Zeedd fddZeedddZdee ddd	d
Z	  Z
S )QuantizableBasicConv2dNargskwargsreturnc                    s   t  j|| t | _d S N)super__init__nnReLUreluselfr    r!   	__class__ [/var/www/html/venv/lib/python3.8/site-packages/torchvision/models/quantization/googlenet.pyr%      s    zQuantizableBasicConv2d.__init__xr"   c                 C   s"   |  |}| |}| |}|S r#   )convbnr(   r*   r0   r-   r-   r.   forward   s    


zQuantizableBasicConv2d.forwardis_qatr"   c                 C   s   t | dddg|dd d S )Nr1   r2   r(   T)Zinplace)r   )r*   r6   r-   r-   r.   
fuse_model$   s    z!QuantizableBasicConv2d.fuse_model)N)__name__
__module____qualname__r   r%   r   r4   r   boolr7   __classcell__r-   r-   r+   r.   r      s   r   c                       s6   e Zd Zeedd fddZeedddZ  ZS )QuantizableInceptionNr   c                    s&   t  j|dti| tj | _d S NZ
conv_block)r$   r%   r   r&   Z	quantizedZFloatFunctionalcatr)   r+   r-   r.   r%   )   s    zQuantizableInception.__init__r/   c                 C   s   |  |}| j|dS )Nr   )_forwardr?   )r*   r0   outputsr-   r-   r.   r4   -   s    
zQuantizableInception.forwardr8   r9   r:   r   r%   r   r4   r<   r-   r-   r+   r.   r=   (   s   r=   c                       s6   e Zd Zeedd fddZeedddZ  ZS )QuantizableInceptionAuxNr   c                    s$   t  j|dti| t | _d S r>   )r$   r%   r   r&   r'   r(   r)   r+   r-   r.   r%   4   s    z QuantizableInceptionAux.__init__r/   c                 C   sJ   t |d}| |}t|d}| | |}| |}| |}|S )N)   rD   r   )	FZadaptive_avg_pool2dr1   torchflattenr(   Zfc1ZdropoutZfc2r3   r-   r-   r.   r4   8   s    


zQuantizableInceptionAux.forwardrB   r-   r-   r+   r.   rC   2   s   rC   c                       sL   e Zd Zeedd fddZeedddZdee	 ddd	d
Z
  ZS )r   Nr   c                    s<   t  j|dtttgi| tjj | _	tjj
 | _d S )Nblocks)r$   r%   r   r=   rC   rF   ZaoZquantizationZ	QuantStubquantZDeQuantStubdequantr)   r+   r-   r.   r%   L   s    zQuantizableGoogLeNet.__init__r/   c                 C   sp   |  |}| |}| |\}}}| |}| jo8| j}tj r^|sRt	
d t|||S | |||S d S )NzCScripted QuantizableGoogleNet always returns GoogleNetOutputs Tuple)Z_transform_inputrI   r@   rJ   Ztraining
aux_logitsrF   ZjitZis_scriptingwarningswarnr   Zeager_outputs)r*   r0   aux1aux2Zaux_definedr-   r-   r.   r4   S   s    




zQuantizableGoogLeNet.forwardr5   c                 C   s(   |   D ]}t|tkr|| qdS )a  Fuse conv/bn/relu modules in googlenet model

        Fuse conv+bn+relu/ conv+relu/conv+bn modules to prepare for quantization.
        Model is modified in place.  Note that this operation does not change numerics
        and the model after modification is in floating point
        N)modulestyper   r7   )r*   r6   mr-   r-   r.   r7   `   s    zQuantizableGoogLeNet.fuse_model)N)r8   r9   r:   r   r%   r   r   r4   r   r;   r7   r<   r-   r-   r+   r.   r   J   s   c                   @   sH   e Zd Zedeeddddeddejdd	d
didddd
dZ	e	Z
dS )r   zKhttps://download.pytorch.org/models/quantized/googlenet_fbgemm-c81f6644.pth   )Z	crop_sizeie )   rT   fbgemmzdhttps://github.com/pytorch/vision/tree/main/references/classification#post-training-quantized-modelszImageNet-1Kg/tQ@g`"YV@)zacc@1zacc@5g+?g#~j<)@z
                These weights were produced by doing Post Training Quantization (eager mode) on top of the unquantized
                weights listed below.
            )
Z
num_paramsZmin_size
categoriesbackendZrecipeZunquantizedZ_metricsZ_ops
_file_sizeZ_docs)urlZ
transformsmetaN)r8   r9   r:   r   r   r	   r   r   IMAGENET1K_V1IMAGENET1K_FBGEMM_V1DEFAULTr-   r-   r-   r.   r   m   s(   
Zquantized_googlenet)nameZ
pretrainedc                 C   s   |  ddrtjS tjS )NquantizeF)getr   r\   r   r[   )r!   r-   r-   r.   <lambda>   s    
ra   )weightsTF)rb   progressr_   )rb   rc   r_   r!   r"   c                 K   s   |rt nt| } |dd}| dk	rd|kr:t|dd t|dd t|dd t|dt| jd  d	| jkrt|d	| jd	  |d	d
}tf |}t	| |rt
|| | dk	r|| j|dd |sd|_d|_d|_n
td |S )a  GoogLeNet (Inception v1) model architecture from `Going Deeper with Convolutions <http://arxiv.org/abs/1409.4842>`__.

    .. note::
        Note that ``quantize = True`` returns a quantized model with 8 bit
        weights. Quantized models only support inference and run on CPUs.
        GPU inference is not yet supported.

    Args:
        weights (:class:`~torchvision.models.quantization.GoogLeNet_QuantizedWeights` or :class:`~torchvision.models.GoogLeNet_Weights`, optional): The
            pretrained weights for the model. See
            :class:`~torchvision.models.quantization.GoogLeNet_QuantizedWeights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        quantize (bool, optional): If True, return a quantized version of the model. Default is False.
        **kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableGoogLeNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/googlenet.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.quantization.GoogLeNet_QuantizedWeights
        :members:

    .. autoclass:: torchvision.models.GoogLeNet_Weights
        :members:
        :noindex:
    rK   FNZtransform_inputTZinit_weightsZnum_classesrV   rW   rU   )rc   Z
check_hashz`auxiliary heads in the pretrained googlenet model are NOT pretrained, so make sure to train them)r   r   verifyr`   r   lenrZ   popr   r   r   Zload_state_dictZget_state_dictrK   rN   rO   rL   rM   )rb   rc   r_   r!   Zoriginal_aux_logitsrW   modelr-   r-   r.   r      s2    ,


)*rL   	functoolsr   typingr   r   r   rF   Ztorch.nnr&   r   r   rE   Ztransforms._presetsr	   Z_apir   r   r   _metar   Z_utilsr   r   r   r   r   r   r   r   r   utilsr   r   r   __all__r   r=   rC   r   r   r;   r-   r-   r-   r.   <module>   sJ    
#
