import torch
import torch.nn as nn
from torch import Tensor
from torch._jit_internal import Optional, List

from .utils import hide_packed_params_repr
from .utils import _quantize_weight

__all__ = ['EmbeddingPackedParams', 'Embedding', 'EmbeddingBag']


class EmbeddingPackedParams(torch.nn.Module):
    _version = 1

    def __init__(self, num_embeddings, embedding_dim, dtype=torch.quint8):
        super(EmbeddingPackedParams, self).__init__()
        self.dtype = dtype
        if self.dtype in [torch.quint8, torch.quint4x2]:
            scales = torch.ones(num_embeddings, dtype=torch.float)
            zero_points = torch.zeros(num_embeddings, dtype=torch.float)
            wq = torch._empty_per_channel_affine_quantized([num_embeddings, embedding_dim],
                                                           scales=scales, zero_points=zero_points,
                                                           axis=0, dtype=self.dtype)
            self.set_weight(wq)
        else:
            raise NotImplementedError(f'Unsupported dtype on quantized embedding! Supports quint8 and quint4x2. Got dtype: {dtype}')

    @torch.jit.export
    def set_weight(self, weight: torch.Tensor) -> None:
        if self.dtype in [torch.quint8, torch.quint4x2]:
            self._packed_weight = torch.ops.quantized.embedding_bag_prepack(weight)
        else:
            raise NotImplementedError('Unsupported dtype for quantized embedding prepack! Supports quint8 and quint4x2.')

    @torch.jit.export
    def _weight(self):
        if self.dtype in [torch.quint8, torch.quint4x2]:
            return torch.ops.quantized.embedding_bag_unpack(self._packed_weight)
        else:
            raise NotImplementedError('Unsupported dtype for quantized embedding unpack! Supports quint8 and quint4x2.')

    def forward(self, x):
        return x

    # The dtype and the unpacked weight are serialized as plain entries so the
    # state dict stays loadable across versions; the weight is re-packed on load.
    def _save_to_state_dict(self, destination, prefix, keep_vars):
        super(EmbeddingPackedParams, self)._save_to_state_dict(destination, prefix, keep_vars)
        destination[prefix + 'dtype'] = self.dtype
        destination[prefix + '_packed_weight'] = self._weight()

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):
        self.dtype = state_dict.pop(prefix + 'dtype')
        weight = state_dict.pop(prefix + '_packed_weight')
        self.set_weight(weight)
        super(EmbeddingPackedParams, self)._load_from_state_dict(state_dict, prefix, local_metadata, False,
                                                                 missing_keys, unexpected_keys, error_msgs)

    def __repr__(self):
        return self._weight().__repr__()
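
# A minimal usage sketch for EmbeddingPackedParams, kept as a comment so the
# module stays import-safe. The constructor packs a placeholder per-channel
# quantized weight, which `_weight()` unpacks again:
#
#     >>> params = EmbeddingPackedParams(num_embeddings=10, embedding_dim=12)
#     >>> params._weight().shape
#     torch.Size([10, 12])
#     >>> params._weight().dtype
#     torch.quint8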


class Embedding(torch.nn.Module):
    r"""
    A quantized Embedding module with quantized packed weights as inputs.
    We adopt the same interface as `torch.nn.Embedding`, please see
    https://pytorch.org/docs/stable/nn.html#torch.nn.Embedding for documentation.

    Similar to :class:`~torch.nn.Embedding`, attributes will be randomly
    initialized at module creation time and will be overwritten later

    Attributes:
        weight (Tensor): the non-learnable quantized weights of the module of
                         shape :math:`(\text{num\_embeddings}, \text{embedding\_dim})`.

    Examples::
        >>> m = nn.quantized.Embedding(num_embeddings=10, embedding_dim=12)
        >>> indices = torch.tensor([9, 6, 5, 7, 8, 8, 9, 2, 8])
        >>> output = m(indices)
        >>> print(output.size())
        torch.Size([9, 12])

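        >>> # The unpacked quantized weight can be inspected as well:
        >>> m.weight().dtype
        torch.quint8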
    """
    _version = 1

    def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None,
                 max_norm: Optional[float] = None, norm_type: float = 2., scale_grad_by_freq: bool = False,
                 sparse: bool = False, _weight: Optional[Tensor] = None, dtype=torch.quint8) -> None:
        super(Embedding, self).__init__()
        self.num_embeddings = num_embeddings
        self.embedding_dim = embedding_dim
        self.dtype = dtype

        if _weight is None:
            scales = torch.ones(num_embeddings, dtype=torch.float)
            zero_points = torch.zeros(num_embeddings, dtype=torch.float)
            qweight = torch._empty_per_channel_affine_quantized([num_embeddings, embedding_dim],
                                                                scales=scales, zero_points=zero_points,
                                                                axis=0, dtype=torch.quint8)
        else:
            assert list(_weight.shape) == [num_embeddings, embedding_dim], \
                'Shape of weight does not match num_embeddings and embedding_dim'
            qweight = _weight

        self._packed_params = EmbeddingPackedParams(num_embeddings, embedding_dim, dtype)
        self._packed_params.set_weight(qweight)

    def forward(self, indices: Tensor) -> Tensor:
        if self.dtype == torch.quint4x2:
            return torch.ops.quantized.embedding_4bit(self._packed_params._packed_weight, indices)
        else:
            return torch.ops.quantized.embedding_byte(self._packed_params._packed_weight, indices)

    def _get_name(self):
        return 'QuantizedEmbedding'

    def __repr__(self):
        return hide_packed_params_repr(self, EmbeddingPackedParams)

    def extra_repr(self):
        extra_repr_str = 'num_embeddings={}, embedding_dim={}, dtype={}, qscheme={}'.format(
            self.num_embeddings, self.embedding_dim, self.dtype, self.weight().qscheme())
        return extra_repr_str

    def set_weight(self, w: torch.Tensor) -> None:
        self._packed_params.set_weight(w)

    def weight(self):
        return self._packed_params._weight()

    @classmethod
    def from_float(cls, mod):
        r"""Create a quantized embedding module from a float module

        Args:
            mod (Module): a float module, either produced by torch.ao.quantization
                          utilities or provided by user
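
        A minimal eager-mode sketch (assuming the float module has been
        assigned ``float_qparams_weight_only_qconfig``, the qconfig this
        conversion expects)::

            >>> from torch.ao.quantization import float_qparams_weight_only_qconfig
            >>> float_embedding = nn.Embedding(10, 12)
            >>> float_embedding.qconfig = float_qparams_weight_only_qconfig
            >>> q_embedding = Embedding.from_float(float_embedding)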
        """
        if hasattr(mod, 'weight_fake_quant'):
            assert type(mod) == torch.ao.nn.qat.Embedding, 'nnq.' + cls.__name__ + '.from_float ' + \
                'with fake quant only works for ' + torch.ao.nn.qat.Embedding.__name__
            weight_observer = mod.weight_fake_quant
            activation_post_process = mod.activation_post_process
        else:
            assert type(mod) == nn.Embedding, 'nnq.' + cls.__name__ + '.from_float only works for ' + \
                nn.Embedding.__name__
            assert hasattr(mod, 'qconfig'), 'Embedding input float module must have qconfig defined'
            from torch.ao.quantization import float_qparams_weight_only_qconfig
            if mod.qconfig is not None and mod.qconfig.weight is not None:
                weight_observer = mod.qconfig.weight()
            else:
                weight_observer = float_qparams_weight_only_qconfig.weight()

        dtype = weight_observer.dtype
        is_float_qparams_qconfig = weight_observer.qscheme == torch.per_channel_affine_float_qparams
        assert is_float_qparams_qconfig, \
            'Embedding quantization is only supported with float_qparams_weight_only_qconfig.'

        assert dtype == torch.quint8 or dtype == torch.quint4x2, \
            f'The only supported dtype for nnq.Embedding is torch.quint8 and torch.quint4x2, got {dtype}'

        # Run the observer to calculate the qparams.
        weight_observer(mod.weight)
        qweight = _quantize_weight(mod.weight.float(), weight_observer)

        # Create the quantized Embedding module and pass in the quantized weight.
        qembedding = Embedding(mod.num_embeddings, mod.embedding_dim)
        qembedding.set_weight(qweight)
        return qembedding

    @classmethod
    def from_reference(cls, ref_embedding):
        qembedding = cls(
            ref_embedding.num_embeddings,
            ref_embedding.embedding_dim,
            ref_embedding.padding_idx,
            ref_embedding.max_norm,
            ref_embedding.norm_type,
            ref_embedding.scale_grad_by_freq,
            ref_embedding.sparse,
            ref_embedding.get_quantized_weight(),
            ref_embedding.weight_dtype,
        )
        return qembedding


class EmbeddingBag(Embedding):
    r"""
    A quantized EmbeddingBag module with quantized packed weights as inputs.
    We adopt the same interface as `torch.nn.EmbeddingBag`, please see
    https://pytorch.org/docs/stable/nn.html#torch.nn.EmbeddingBag for documentation.

    Similar to :class:`~torch.nn.EmbeddingBag`, attributes will be randomly
    initialized at module creation time and will be overwritten later

    Attributes:
        weight (Tensor): the non-learnable quantized weights of the module of
                         shape :math:`(\text{num\_embeddings}, \text{embedding\_dim})`.

    Examples::
        >>> m = nn.quantized.EmbeddingBag(num_embeddings=10, embedding_dim=12, include_last_offset=True, mode='sum')
        >>> indices = torch.tensor([9, 6, 5, 7, 8, 8, 9, 2, 8, 6, 6, 9, 1, 6, 8, 8, 3, 2, 3, 6, 3, 6, 5, 7, 0, 8, 4, 6, 5, 8, 2, 3])
        >>> offsets = torch.tensor([0, 19, 20, 28, 28, 32])
        >>> output = m(indices, offsets)
        >>> print(output.size())
        torch.Size([5, 12])

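    With ``include_last_offset=True`` the final entry of ``offsets`` must equal
    ``len(indices)`` (32 in the example above), so six offsets describe five bags.
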
    """
    _version = 1

    def __init__(self, num_embeddings: int, embedding_dim: int,
                 max_norm: Optional[float] = None, norm_type: float = 2., scale_grad_by_freq: bool = False,
                 mode: str = 'sum', sparse: bool = False, _weight: Optional[Tensor] = None,
                 include_last_offset: bool = False, dtype=torch.quint8) -> None:
        super(EmbeddingBag, self).__init__(num_embeddings, embedding_dim, _weight=_weight, dtype=dtype)

        self.mode = mode
        self.pruned_weights = False
        self.include_last_offset = include_last_offset
        self.dtype = dtype

    def forward(self, indices: Tensor, offsets: Optional[Tensor] = None, per_sample_weights: Optional[Tensor] = None,
                compressed_indices_mapping: Optional[Tensor] = None) -> Tensor:
        if self.dtype == torch.quint4x2:
            return torch.ops.quantized.embedding_bag_4bit(self._packed_params._packed_weight, indices, offsets,
                                                          False, 0, self.pruned_weights, per_sample_weights,
                                                          compressed_indices_mapping, self.include_last_offset)
        else:
            return torch.ops.quantized.embedding_bag_byte(self._packed_params._packed_weight, indices, offsets,
                                                          False, 0, self.pruned_weights, per_sample_weights,
                                                          compressed_indices_mapping, self.include_last_offset)

    def _get_name(self):
        return 'QuantizedEmbeddingBag'

    @classmethod
    def from_float(cls, mod):
        r"""Create a quantized embedding_bag module from a float module

        Args:
            mod (Module): a float module, either produced by torch.ao.quantization
                          utilities or provided by user
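
        A minimal eager-mode sketch (assuming the float module has been
        assigned ``float_qparams_weight_only_qconfig``, the qconfig this
        conversion expects)::

            >>> from torch.ao.quantization import float_qparams_weight_only_qconfig
            >>> float_embedding_bag = nn.EmbeddingBag(10, 12, mode='sum')
            >>> float_embedding_bag.qconfig = float_qparams_weight_only_qconfig
            >>> q_embedding_bag = EmbeddingBag.from_float(float_embedding_bag)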
        """
        if hasattr(mod, 'weight_fake_quant'):
            weight_observer = mod.weight_fake_quant
        else:
            assert type(mod) == nn.EmbeddingBag, 'nnq.' + cls.__name__ + '.from_float only works for ' + \
                nn.EmbeddingBag.__name__
            assert hasattr(mod, 'qconfig'), 'EmbeddingBag input float module must have qconfig defined'
            from torch.ao.quantization.qconfig import float_qparams_weight_only_qconfig
            if mod.qconfig is not None and mod.qconfig.weight is not None:
                weight_observer = mod.qconfig.weight()
            else:
                weight_observer = float_qparams_weight_only_qconfig.weight()

        dtype = weight_observer.dtype
        is_float_qparams_qconfig = weight_observer.qscheme == torch.per_channel_affine_float_qparams
        assert is_float_qparams_qconfig, \
            'EmbeddingBag quantization is only supported with float_qparams_weight_only_qconfig.'

        assert dtype == torch.quint8 or dtype == torch.quint4x2, \
            f'The only supported dtype for nnq.EmbeddingBag is torch.quint8 and torch.quint4x2, got {dtype}'

        # Run the observer to calculate the qparams.
        weight_observer(mod.weight)
        qweight = _quantize_weight(mod.weight.float(), weight_observer)

        # Create the quantized EmbeddingBag module and pass in the quantized weight.
        qembedding_bag = EmbeddingBag(mod.num_embeddings, mod.embedding_dim, dtype=dtype)
        qembedding_bag.set_weight(qweight)
        return qembedding_bag

    @classmethod
    def from_reference(cls, ref_embedding_bag):
        qembedding_bag = cls(
            ref_embedding_bag.num_embeddings,
            ref_embedding_bag.embedding_dim,
            ref_embedding_bag.max_norm,
            ref_embedding_bag.norm_type,
            ref_embedding_bag.scale_grad_by_freq,
            ref_embedding_bag.mode,
            ref_embedding_bag.sparse,
            ref_embedding_bag.get_quantized_weight(),
            ref_embedding_bag.include_last_offset,
            ref_embedding_bag.weight_dtype,
        )
        return qembedding_bag