U
    yh4(                      @   s  U d dl mZ d dlm  m  mZ d dlmZmZm	Z	m
Z
mZmZmZmZ d dlmZmZmZ d dlZdddddd	gZd
d Zdd Zdd Zdd Zdd Zejejfeejejejfeejejfeejejejfeejej feejej ejfeejejfeej!ejejfeej"ejejfeej#ej$ejfeej$ejfeej%ejejfeej&ej ejfeej'ej(ejfeej)ejfeej*ej feiZ+eeeej,e	f f e-d< dddZ.dd Z/dd Z0dd Z1eeeeej,e	f f ddd	Z2dS )    N)AnyUnionCallableListTupleDictOptionalType)Patternget_combined_dictMatchAllNodefuse_conv_bnfuse_conv_bn_relufuse_linear_bnfuse_convtranspose_bnget_fuser_methodget_fuser_method_newc                 C   s   |j |j kstdtjtjtjtjtjtj	i}| r|j
|jksHtd|jsVtd|jsdtd|t|d}|dk	r|||S td||f ntj||S dS )a  Return the fused the conv and bn modules.
    Given the conv and bn modules, fuses them and returns the fused module

    Args:
        is_qat: a flag for whether we are using quantization aware training fusion
        or post training quantization fusion
        conv: Module instance of type conv2d/conv3d
        bn: Spatial BN instance that needs to be fused with the conv

    Examples::

        >>> m1 = nn.Conv2d(10, 20, 3)
        >>> b1 = nn.BatchNorm2d(20)
        >>> # xdoctest: +SKIP
        >>> m2 = fuse_conv_bn(m1, b1)
    :Conv and BN both must be in the same mode (train or eval).z?Output channel of Conv2d must match num_features of BatchNorm2dz7Only support fusing BatchNorm2d with affine set to TruezGOnly support fusing BatchNorm2d with tracking_running_stats set to TrueNCannot fuse train modules: )trainingAssertionErrornnConv1dnniZConvBn1dConv2dZConvBn2dConv3dZConvBn3dnum_featuresout_channelsaffinetrack_running_statsgettypeNotImplementedErrorutilsfuse_conv_bn_eval)is_qatconvbnZfused_module_class_mapZfused_module_class r(   ]/var/www/html/venv/lib/python3.8/site-packages/torch/ao/quantization/fuser_method_mappings.pyr      s&       
c                 C   s  |j |j   kr|j ks$n tdd}| rtjtjtjtjtjtj	i}|j
|jks\td|jsjtd|jsxtd|t|d}|dk	r||||S td|||f ndtjtjtjtjtjtji}|t|d}|dk	rtjj||}|||S td|||f dS )aJ  Return the fused conv and bv modules.

    Given the conv and bn modules, fuses them and returns the fused module

    Args:
        is_qat: a flag for whether we are using quantization aware training fusion
        or post training quantization fusion
        conv: Module instance of type conv2d/conv3d
        bn: Spatial BN instance that needs to be fused with the conv

    Examples::

        >>> m1 = nn.Conv2d(10, 20, 3)
        >>> b1 = nn.BatchNorm2d(20)
        >>> r1 = nn.ReLU(inplace=False)
        >>> # xdoctest: +SKIP
        >>> m2 = fuse_conv_bn_relu(m1, b1, r1)
    r   Nz;Output channel of Conv must match num_features of BatchNormz5Only support fusing BatchNorm with affine set to TruezEOnly support fusing BatchNorm with tracking_running_stats set to Truer   zCannot fuse eval modules: )r   r   r   r   r   ZConvBnReLU1dr   ZConvBnReLU2dr   ZConvBnReLU3dr   r   r   r   r    r!   r"   
ConvReLU1d
ConvReLU2d
ConvReLU3dr#   fusionr$   )r%   r&   r'   ZreluZfused_moduleZmap_to_fused_module_trainZmap_to_fused_module_evalZ
fused_convr(   r(   r)   r   8   s>          

c                 C   sh   |j |j kstd| rT|j|jks,td|js:td|jsHtdt||S tj	j
||S dS )a  Return the fused linear and bn modules.
    Given the linear and bn modules, fuses them and returns the fused module

    Args:
        is_qat: a flag for whether we are using quantization aware training fusion
        or post training quantization fusion
        linear: Module instance of type Linear
        bn: BatchNorm1d instance that needs to be fused with the linear layer

    Examples::

        >>> m1 = nn.Linear(20, 10)
        >>> b1 = nn.BatchNorm1d(10)
        >>> # xdoctest: +SKIP
        >>> m2 = fuse_linear_bn(m1, b1)
    z<Linear and BN both must be in the same mode (train or eval).z@Output features of Linear must match num_features of BatchNorm1dz7Only support fusing BatchNorm1d with affine set to TruezGOnly support fusing BatchNorm1d with tracking_running_stats set to TrueN)r   r   r   Zout_featuresr   r   r   Z
LinearBn1dr   r#   r-   Zfuse_linear_bn_eval)r%   Zlinearr'   r(   r(   r)   r   i   s    c                 C   s:   |j |j kstd| r"tdntjjj||ddS dS )a  Return the fused ConvTranspose and bn modules.
    Given ConvTranspose and bn modules, fuses them and returns the fused module

    Args:
        convt: Module instance of type ConvTransposeNd
        bn: BatchNormNd instance that needs to be fused with the linear layer.
            batch norm N should match the ConvTranspose N

    Examples::

        >>> m1 = nn.ConvTranspose2d(10, 20, 3)
        >>> b1 = nn.BatchNorm2d(20)
        >>> # xdoctest: +SKIP
        >>> m2 = fuse_convtranspose_bn(m1, b1)
    zCConvTranspose and BN both must be in the same mode (train or eval).z8Fusing ConvTranspose+BatchNorm not yet supported in QAT.T)Z	transposeN)r   r   	Exceptionr   r#   r-   r$   )r%   Zconvtr'   r(   r(   r)   r      s    
c                    s    fdd}|S )a!  Return a sequential wrapped that for is_qat and two modules.
    Given a sequential class for two modules, return a function that takes
    is_qat, and then two modules as argument, that ignores the is_qat flag
    and always returns the sequential that combines the two input modules
    c                    s
    ||S Nr(   )r%   m1m2
sequentialr(   r)   fuser_method   s    z*_sequential_wrapper2.<locals>.fuser_methodr(   )r3   r4   r(   r2   r)   _sequential_wrapper2   s    r5    _DEFAULT_OP_LIST_TO_FUSER_METHODc                 C   s>   |dkri }t t|}|| d}|dk	s:td|  d|S )zGet fuser method for the given list of module types.

    Get fuser method for the given list of module types,
    return None if fuser method does not exist
    Ndid not find fuser method for:  )r   r6   r    r   )Zop_listZadditional_fuser_method_mappingZall_mappingsr4   r(   r(   r)   r      s    c                    s    fdd}|S )Nc                    s    | ||S r/   r(   )r%   xyfr(   r)   reversed   s    z_reverse2.<locals>.reversedr(   r<   r=   r(   r;   r)   	_reverse2   s    r?   c                    s    fdd}|S )Nc                    s   |\}} | |||S r/   r(   )r%   r9   wr:   zr;   r(   r)   r=      s    z_reverse3.<locals>.reversedr(   r>   r(   r;   r)   	_reverse3   s    rB   c                 C   sF   t | ttfr:g }| D ]}|t| qttj| }n| tg}|S )aQ  Return a list of valid patterns generated from the op_pattern.

    Returns a list of valid patterns generated from the op_pattern,
    since MatchAllNode can match all types of nodes,
    e.g. pattern (torch.nn.Conv2d, torch.add) should also be able to match keys like
    (MatchAllNode, torch.add) and (torch.nn.Conv2d, MatchAllNode)

    Example Input:
    (torch.add, (torch.nn.ReLU, torch.nn.Conv2d))

    Example Output:
    [(torch.add, (torch.nn.ReLU, torch.nn.Conv2d)),
     (torch.add, (torch.nn.ReLU, MatchAllNode)),
     (torch.add, (MatchAllNode, torch.nn.Conv2d)),
     (torch.add, (MatchAllNode, MatchAllNode)),
     (MatchAllNode, (torch.nn.ReLU, torch.nn.Conv2d)),
     (MatchAllNode, (torch.nn.ReLU, MatchAllNode)),
     (MatchAllNode, (MatchAllNode, torch.nn.Conv2d)),
     (MatchAllNode, (MatchAllNode, MatchAllNode)),
    ]
    )
isinstancetuplelistappend_get_valid_patterns	itertoolsproductr   )
op_patternZ	sub_combsZsub_patternresultr(   r(   r)   rG      s    rG   )rJ   fuser_method_mappingc                 C   sJ   t | }d}|D ]} || d}|dk	r q.q|dk	sFtd|  d|S )zGet fuser method.

    This will be made default after we deprecate the get_fuser_method
    Would like to implement this first and have a separate PR for deprecation
    Nr7   r8   )rG   r    r   )rJ   rL   Zop_patternsr4   r(   r(   r)   r      s    )N)3Ztorch.nnr   Ztorch.ao.nn.intrinsicZaoZ	intrinsicr   typingr   r   r   r   r   r   r   r	   Ztorch.ao.quantization.utilsr
   r   r   rH   __all__r   r   r   r   r5   r   ZBatchNorm1dZReLUr   ZBatchNorm2dr   ZBatchNorm3dr*   r+   r,   ZLinearZ
LinearReLUZBNReLU2dZBNReLU3dZConvTranspose1dZConvTranspose2dZConvTranspose3dr6   Z
Sequential__annotations__r   r?   rB   rG   r   r(   r(   r(   r)   <module>   sr    (	&1
  
  
  
 
 
 
 
 
 
 
 
 
 
!