import torch
import torch.nn as nn
from torch.nn.modules.utils import _single, _pair, _triple
from torch.ao.nn.intrinsic import _FusedModule
from typing import Tuple, TypeVar, Union
from torch.nn.common_types import _size_1_t, _size_2_t, _size_3_t

__all__ = ["Conv1d", "Conv2d", "Conv3d"]

MOD = TypeVar("MOD", bound=nn.modules.conv._ConvNd)


class _ConvNd(nn.modules.conv._ConvNd):

    _FLOAT_MODULE = MOD

    def __init__(self, in_channels: int, out_channels: int, kernel_size: Tuple[int, ...],
                 stride: Tuple[int, ...], padding: Tuple[int, ...], dilation: Tuple[int, ...],
                 transposed: bool, output_padding: Tuple[int, ...], groups: int, bias: bool,
                 padding_mode: str, qconfig=None, device=None, dtype=None) -> None:
        factory_kwargs = {"device": device, "dtype": dtype}
        nn.modules.conv._ConvNd.__init__(self, in_channels, out_channels, kernel_size, stride,
                                         padding, dilation, transposed, output_padding, groups,
                                         bias, padding_mode, **factory_kwargs)
        assert qconfig, "qconfig must be provided for QAT module"
        self.qconfig = qconfig
        # the weight fake-quant module is built from the qconfig and applied in forward()
        self.weight_fake_quant = qconfig.weight(factory_kwargs=factory_kwargs)

    def forward(self, input):
        return self._conv_forward(input, self.weight_fake_quant(self.weight), self.bias)

    @staticmethod
    def from_float(cls, mod, use_precomputed_fake_quant=False):
        r"""Create a qat module from a float module

            Args:
               `mod`: a float module, either produced by torch.ao.quantization utilities
               or directly from user
        """
        assert type(mod) == cls._FLOAT_MODULE, (
            "qat." + cls.__name__ + ".from_float only works for " + cls._FLOAT_MODULE.__name__
        )
        assert hasattr(mod, "qconfig"), "Input float module must have qconfig defined"
        assert mod.qconfig, "Input float module must have a valid qconfig"
        if issubclass(type(mod), _FusedModule):
            # for fused modules (e.g. conv + relu), the conv is the first submodule
            mod = mod[0]
        qconfig = mod.qconfig
        qat_conv = cls(mod.in_channels, mod.out_channels, mod.kernel_size,
                       stride=mod.stride, padding=mod.padding, dilation=mod.dilation,
                       groups=mod.groups, bias=mod.bias is not None,
                       padding_mode=mod.padding_mode, qconfig=qconfig)
        qat_conv.weight = mod.weight
        qat_conv.bias = mod.bias
        return qat_conv

    def to_float(self):
        """ This works for both single qat conv, and the qat conv - relu modules
        to convert the qat module to a floating point module
        """
        cls = type(self)
        conv = cls._FLOAT_CONV_MODULE(self.in_channels, self.out_channels, self.kernel_size,
                                      self.stride, self.padding, self.dilation, self.groups,
                                      self.bias is not None, self.padding_mode)
        conv.weight = torch.nn.Parameter(self.weight.detach())
        if self.bias is not None:
            conv.bias = torch.nn.Parameter(self.bias.detach())
        if issubclass(cls, _FusedModule):
            # conv + relu case: rebuild the fused float module around the plain conv
            modules = [conv]
            assert hasattr(cls, "_FLOAT_RELU_MODULE")
            relu = cls._FLOAT_RELU_MODULE()
            modules.append(relu)
            fused = cls._FLOAT_MODULE(*modules)
            fused.train(self.training)
            return fused
        return conv


class Conv1d(_ConvNd, nn.Conv1d):
    r"""
    A Conv1d module attached with FakeQuantize modules for weight,
    used for quantization aware training.

    We adopt the same interface as :class:`~torch.nn.Conv1d`

    Similar to :class:`~torch.nn.Conv2d`, with FakeQuantize modules initialized to
    default.

    Attributes:
        weight_fake_quant: fake quant module for weight
    """
    _FLOAT_MODULE = nn.Conv1d
    _FLOAT_CONV_MODULE = nn.Conv1d

    def __init__(self, in_channels: int, out_channels: int, kernel_size: _size_1_t,
                 stride: _size_1_t = 1, padding: Union[str, _size_1_t] = 0,
                 dilation: _size_1_t = 1, groups: int = 1, bias: bool = True,
                 padding_mode: str = "zeros", qconfig=None, device=None, dtype=None) -> None:
        kernel_size_ = _single(kernel_size)
        stride_ = _single(stride)
        padding_ = padding if isinstance(padding, str) else _single(padding)
        dilation_ = _single(dilation)
        super().__init__(in_channels, out_channels, kernel_size_, stride=stride_,
                         padding=padding_, dilation=dilation_, transposed=False,
                         output_padding=_single(0), groups=groups, bias=bias,
                         padding_mode=padding_mode, qconfig=qconfig, device=device, dtype=dtype)

    @classmethod
    def from_float(cls, mod, use_precomputed_fake_quant=False):
        return super().from_float(cls, mod, use_precomputed_fake_quant=use_precomputed_fake_quant)
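
# Round-trip sketch (illustrative comment, not part of the upstream module): a float
# nn.Conv1d that already carries a qconfig can be wrapped for QAT via from_float(),
# trained with a fake-quantized weight, and turned back into a float module with
# to_float(). The qconfig choice below is only an example; any QAT qconfig works.
#
#     from torch.ao.quantization import get_default_qat_qconfig
#     float_conv = nn.Conv1d(4, 8, kernel_size=3)
#     float_conv.qconfig = get_default_qat_qconfig("fbgemm")
#     qat_conv = Conv1d.from_float(float_conv)   # shares weight/bias with float_conv
#     out = qat_conv(torch.randn(2, 4, 16))      # conv runs on the fake-quantized weight
#     restored = qat_conv.to_float()             # plain nn.Conv1d with the trained weight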


class Conv2d(_ConvNd, nn.Conv2d):
    r"""
    A Conv2d module attached with FakeQuantize modules for weight,
    used for quantization aware training.

    We adopt the same interface as `torch.nn.Conv2d`, please see
    https://pytorch.org/docs/stable/nn.html?highlight=conv2d#torch.nn.Conv2d
    for documentation.

    Similar to `torch.nn.Conv2d`, with FakeQuantize modules initialized to
    default.

    Attributes:
        weight_fake_quant: fake quant module for weight
    """
    _FLOAT_MODULE = nn.Conv2d
    _FLOAT_CONV_MODULE = nn.Conv2d

    def __init__(self, in_channels: int, out_channels: int, kernel_size: _size_2_t,
                 stride: _size_2_t = 1, padding: Union[str, _size_2_t] = 0,
                 dilation: _size_2_t = 1, groups: int = 1, bias: bool = True,
                 padding_mode: str = "zeros", qconfig=None, device=None, dtype=None) -> None:
        kernel_size_ = _pair(kernel_size)
        stride_ = _pair(stride)
        padding_ = padding if isinstance(padding, str) else _pair(padding)
        dilation_ = _pair(dilation)
        super().__init__(in_channels, out_channels, kernel_size_, stride=stride_,
                         padding=padding_, dilation=dilation_, transposed=False,
                         output_padding=_pair(0), groups=groups, bias=bias,
                         padding_mode=padding_mode, qconfig=qconfig, device=device, dtype=dtype)

    def forward(self, input):
        return self._conv_forward(input, self.weight_fake_quant(self.weight), self.bias)

    @classmethod
    def from_float(cls, mod, use_precomputed_fake_quant=False):
        return super().from_float(cls, mod, use_precomputed_fake_quant=use_precomputed_fake_quant)
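
# Direct-construction sketch (illustrative comment; the qconfig choice is an example):
# the QAT Conv2d can also be built directly by passing a qconfig, in which case the
# weight fake-quant module is created immediately and applied on every forward call.
#
#     from torch.ao.quantization import get_default_qat_qconfig
#     qat_conv = Conv2d(3, 16, kernel_size=3, padding=1,
#                       qconfig=get_default_qat_qconfig("fbgemm"))
#     y = qat_conv(torch.randn(1, 3, 8, 8))   # weight passes through weight_fake_quant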


class Conv3d(_ConvNd, nn.Conv3d):
    r"""
    A Conv3d module attached with FakeQuantize modules for weight,
    used for quantization aware training.

    We adopt the same interface as `torch.nn.Conv3d`, please see
    https://pytorch.org/docs/stable/nn.html?highlight=conv3d#torch.nn.Conv3d
    for documentation.

    Similar to `torch.nn.Conv3d`, with FakeQuantize modules initialized to
    default.

    Attributes:
        weight_fake_quant: fake quant module for weight
    """
    _FLOAT_MODULE = nn.Conv3d
    _FLOAT_CONV_MODULE = nn.Conv3d

    def __init__(self, in_channels: int, out_channels: int, kernel_size: _size_3_t,
                 stride: _size_3_t = 1, padding: Union[str, _size_3_t] = 0,
                 dilation: _size_3_t = 1, groups: int = 1, bias: bool = True,
                 padding_mode: str = "zeros", qconfig=None, device=None, dtype=None) -> None:
        kernel_size_ = _triple(kernel_size)
        stride_ = _triple(stride)
        padding_ = padding if isinstance(padding, str) else _triple(padding)
        dilation_ = _triple(dilation)
        super().__init__(in_channels, out_channels, kernel_size_, stride=stride_,
                         padding=padding_, dilation=dilation_, transposed=False,
                         output_padding=_triple(0), groups=groups, bias=bias,
                         padding_mode=padding_mode, qconfig=qconfig, device=device, dtype=dtype)

    def forward(self, input):
        return self._conv_forward(input, self.weight_fake_quant(self.weight), self.bias)

    @classmethod
    def from_float(cls, mod, use_precomputed_fake_quant=False):
        return super().from_float(cls, mod, use_precomputed_fake_quant=use_precomputed_fake_quant)
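
# Typical entry point (sketch of the standard eager-mode QAT workflow; the model and
# tensor shapes are illustrative): users normally do not instantiate these classes by
# hand. Instead, torch.ao.quantization.prepare_qat() swaps float nn.Conv1d/2d/3d modules
# whose qconfig is set for the QAT versions defined in this file.
#
#     import torch
#     from torch.ao.quantization import get_default_qat_qconfig, prepare_qat
#
#     model = torch.nn.Sequential(torch.nn.Conv2d(3, 16, 3), torch.nn.ReLU())
#     model.train()
#     model.qconfig = get_default_qat_qconfig("fbgemm")
#     qat_model = prepare_qat(model)               # nn.Conv2d -> torch.ao.nn.qat.Conv2d
#     out = qat_model(torch.randn(1, 3, 32, 32))   # QAT training steps would follow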