from typing import Optional

import torch
from torch.ao.nn.quantized.modules.utils import _quantize_weight, _hide_packed_params_repr

__all__ = ['LinearPackedParams', 'Linear']


class LinearPackedParams(torch.nn.Module):
    _version = 1

    def __init__(self, row_block_size=1, col_block_size=4, dtype=torch.qint8):
        super().__init__()

        if dtype != torch.qint8:
            raise NotImplementedError("Linear prepacking only supports QINT8")
        self.dtype = dtype
        # Start from a dummy 1x1 quantized weight; the real weight arrives
        # later through set_weight_bias().
        wq = torch._empty_affine_quantized([1, 1], scale=1.0, zero_point=0, dtype=torch.qint8)
        self.set_weight_bias(wq, None, row_block_size, col_block_size)

    def _get_name(self):
        return 'SparseQuantizedLinearPackedParams'

    @torch.jit.export
    def set_weight_bias(self, weight: torch.Tensor, bias: Optional[torch.Tensor],
                        row_block_size: Optional[int], col_block_size: Optional[int]) -> None:
        assert row_block_size is not None and col_block_size is not None
        self._packed_params = torch.ops.sparse.qlinear_prepack(weight, bias, row_block_size, col_block_size)

    @torch.jit.export
    def _weight_bias(self):
        (weight, bias, block_sizes) = torch.ops.sparse.qlinear_unpack(self._packed_params)
        return (weight, bias, block_sizes[0], block_sizes[1])

    def forward(self, x):
        return x

    def _save_to_state_dict(self, destination, prefix, keep_vars):
        super()._save_to_state_dict(destination, prefix, keep_vars)
        destination[prefix + 'dtype'] = self.dtype
        # Serialize the unpacked (weight, bias, block sizes) tuple rather than
        # the opaque packed handle.
        destination[prefix + '_packed_params'] = self._weight_bias()

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):
        version = local_metadata.get('version', None)
        assert version <= self._version

        self.dtype = state_dict.pop(prefix + 'dtype')
        weight, bias, row_block_size, col_block_size = state_dict.pop(prefix + '_packed_params')
        self.set_weight_bias(weight, bias, row_block_size, col_block_size)

        super()._load_from_state_dict(state_dict, prefix, local_metadata, False,
                                      missing_keys, unexpected_keys, error_msgs)

    @torch.jit.export
    def __getstate__(self):
        return self._packed_params, self.training, self.dtype

    @torch.jit.export
    def __setstate__(self, state):
        self._packed_params, self.training, self.dtype = state

    def __repr__(self):
        return self._weight_bias().__repr__()

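# A quick sketch of the serialization contract above (illustrative only, not
# part of the module): the state dict stores the *unpacked* tensors, so
# checkpoints survive changes to the opaque packed format. Assuming a torch
# build where the `sparse::qlinear*` ops are registered:
#
#   pp = LinearPackedParams(row_block_size=1, col_block_size=4)
#   sd = pp.state_dict()
#   sd['dtype']           # torch.qint8
#   sd['_packed_params']  # (weight, bias, row_block_size, col_block_size),
#                         # i.e. the tuple returned by _weight_bias()
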
class Linear(torch.nn.Module):
    r"""
    A quantized sparse linear module with quantized tensor as inputs and outputs.
    """
    _version = 1
    _FLOAT_MODULE = torch.nn.Linear

    def __init__(self, in_features, out_features, row_block_size, col_block_size, bias=True, dtype=torch.qint8):
        super().__init__()

        if dtype != torch.qint8:
            raise NotImplementedError("Only QINT8 is supported for Sparse Quantized Linear")

        self.in_features = in_features
        self.out_features = out_features

        if bias:
            bias = torch.zeros(self.out_features, dtype=torch.float)
        else:
            bias = None

        # Pack a placeholder identity-quantized weight; real values are
        # installed later through set_weight_bias().
        qweight = torch._empty_affine_quantized([out_features, in_features],
                                                scale=1, zero_point=0, dtype=torch.qint8)
        self._packed_params = LinearPackedParams(row_block_size=row_block_size,
                                                 col_block_size=col_block_size,
                                                 dtype=dtype)
        self._packed_params.set_weight_bias(qweight, bias, row_block_size, col_block_size)
        self.scale = 1.0
        self.zero_point = 0

    @classmethod
    def _get_name(cls):
        return 'SparseQuantizedLinear'

    def extra_repr(self):
        return (f'in_features={self.in_features}, out_features={self.out_features}, '
                f'scale={self.scale}, zero_point={self.zero_point}, qscheme={self.weight().qscheme()}')

    def __repr__(self):
        return _hide_packed_params_repr(self, LinearPackedParams)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return torch.ops.sparse.qlinear(x, self._packed_params._packed_params, self.scale, self.zero_point)

    def _save_to_state_dict(self, destination, prefix, keep_vars):
        super()._save_to_state_dict(destination, prefix, keep_vars)
        destination[prefix + 'scale'] = torch.tensor(self.scale)
        destination[prefix + 'zero_point'] = torch.tensor(self.zero_point)

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):
        self.scale = float(state_dict[prefix + 'scale'])
        state_dict.pop(prefix + 'scale')

        self.zero_point = int(state_dict[prefix + 'zero_point'])
        state_dict.pop(prefix + 'zero_point')

        op_type = int(state_dict[prefix + 'op_type'])
        state_dict.pop(prefix + 'op_type')

        version = local_metadata.get('version', None)
        assert version <= self._version

        super()._load_from_state_dict(state_dict, prefix, local_metadata, False,
                                      missing_keys, unexpected_keys, error_msgs)

    def _weight_bias(self):
        return self._packed_params._weight_bias()

    def weight(self):
        return self._weight_bias()[0]

    def bias(self):
        return self._weight_bias()[1]

    def set_weight_bias(self, w: torch.Tensor, b: Optional[torch.Tensor],
                        row_block_size: Optional[int], col_block_size: Optional[int]) -> None:
        assert row_block_size is not None and col_block_size is not None
        self._packed_params.set_weight_bias(w, b, row_block_size, col_block_size)

    @classmethod
    def from_float(cls, mod, use_precomputed_fake_quant=False):
        r"""Create a quantized sparse module from a float module.

        We only care about the convert at this stage, no need for observers just yet.

        TODO(zaf): Need to add the sparse params to the qconfig
        """
        assert type(mod) == cls._FLOAT_MODULE, \
            cls._get_name() + '.from_float only works for ' + cls._FLOAT_MODULE.__name__
        assert hasattr(mod, 'sparse_params'), \
            ('Expecting the Linear to have `sparse_params`. Make sure you have provided arguments '
             'in the `sparsifier.squash_mask(params_to_save=("sparse_block_shape",))` method.')
        sparse_block_shape = mod.sparse_params.get('sparse_block_shape', None)
        assert isinstance(sparse_block_shape, (tuple, list))
        assert len(sparse_block_shape) == 2
        assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined'

        activation_post_process = mod.activation_post_process
        weight_post_process = mod.qconfig.weight()

        # The weight is assumed to be already sparsified by the sparsifier;
        # only observation and quantization happen here.
        weight = mod.weight
        weight_post_process(weight)
        dtype = weight_post_process.dtype
        act_scale, act_zp = activation_post_process.calculate_qparams()
        assert dtype == torch.qint8, 'Weight observer must have dtype torch.qint8'
        w_sc, w_zp = weight_post_process.calculate_qparams()
        if isinstance(w_zp, torch.Tensor):
            assert not torch.any(w_zp.bool()), "All weight zero points must map to 0"
        else:
            assert w_zp == 0, 'Weight zero point must map to 0'
        qweight = _quantize_weight(weight.float(), weight_post_process)

        row_block_size = mod.sparse_params['sparse_block_shape'][0]
        col_block_size = mod.sparse_params['sparse_block_shape'][1]
        qlinear = cls(mod.in_features,
                      mod.out_features,
                      row_block_size,
                      col_block_size,
                      dtype=dtype)
        qlinear.set_weight_bias(qweight, mod.bias, row_block_size, col_block_size)
        qlinear.scale = float(act_scale)
        qlinear.zero_point = int(act_zp)
        return qlinear