import torch


class ReLU6(torch.nn.ReLU):
    r"""Applies the element-wise function:

    :math:`\text{ReLU6}(x) = \min(\max(x_0, x), q(6))`, where :math:`x_0` is the
    zero_point, and :math:`q(6)` is the quantized representation of number 6.

    Args:
        inplace: can optionally do the operation in-place. Default: ``False``

    Shape:
        - Input: :math:`(N, *)` where `*` means, any number of additional
          dimensions
        - Output: :math:`(N, *)`, same shape as the input

    .. image:: ../scripts/activation_images/ReLU6.png

    Examples::

        >>> m = nn.quantized.ReLU6()
        >>> input = torch.randn(2)
        >>> # xdoctest: +SKIP
        >>> input = torch.quantize_per_tensor(input, 1.0, 0, dtype=torch.qint32)
        >>> output = m(input)
    """
    def __init__(self, inplace=False):
        super(ReLU6, self).__init__(inplace)
        self.inplace = inplace

    def forward(self, input):
        return torch.ops.quantized.relu6(input, self.inplace)

    def _get_name(self):
        return 'QuantizedReLU6'

    @staticmethod
    def from_float(mod):
        return ReLU6(mod.inplace)


class Hardswish(torch.nn.Hardswish):
    r"""This is the quantized version of :class:`~torch.nn.Hardswish`.

    Args:
        scale: quantization scale of the output tensor
        zero_point: quantization zero point of the output tensor
    """
    def __init__(self, scale, zero_point):
        super(Hardswish, self).__init__()
        self.scale = scale
        self.zero_point = zero_point

    def forward(self, input):
        return torch.ao.nn.quantized.functional.hardswish(
            input, scale=self.scale, zero_point=self.zero_point)

    def _get_name(self):
        return 'QuantizedHardswish'

    @staticmethod
    def from_float(mod):
        scale, zero_point = mod.activation_post_process.calculate_qparams()
        return Hardswish(float(scale), int(zero_point))

    @classmethod
    def from_reference(cls, mod, scale, zero_point):
        return cls(float(scale), int(zero_point))
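

# Usage sketch added for illustration; the helper below and its quantization
# parameters are placeholders, not part of the original module.
def _example_hardswish():
    m = Hardswish(scale=1.0, zero_point=0)
    x = torch.quantize_per_tensor(torch.randn(4), 1.0, 0, dtype=torch.quint8)
    # The output is quantized with the module's output scale/zero_point.
    return m(x)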


class ELU(torch.nn.ELU):
    r"""This is the quantized equivalent of :class:`~torch.nn.ELU`.

    Args:
        scale: quantization scale of the output tensor
        zero_point: quantization zero point of the output tensor
        alpha: the alpha constant
    """
    def __init__(self, scale, zero_point, alpha=1.):
        super(ELU, self).__init__(alpha)
        self.scale = scale
        self.zero_point = zero_point

    def forward(self, input):
        return torch.ao.nn.quantized.functional.elu(
            input, self.scale, self.zero_point, self.alpha)

    def _get_name(self):
        return 'QuantizedELU'

    @staticmethod
    def from_float(mod):
        scale, zero_point = mod.activation_post_process.calculate_qparams()
        return ELU(float(scale), int(zero_point), mod.alpha)

    @classmethod
    def from_reference(cls, mod, scale, zero_point):
        return cls(float(scale), int(zero_point), mod.alpha)
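

# Usage sketch added for illustration; the helper and its qparams are
# placeholders, not part of the original module. A zero_point of 128 is used
# so the quint8 output range can represent the negative values ELU produces.
def _example_elu():
    m = ELU(scale=0.02, zero_point=128, alpha=1.0)
    x = torch.quantize_per_tensor(torch.randn(4), 0.05, 128, dtype=torch.quint8)
    return m(x)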


class LeakyReLU(torch.nn.LeakyReLU):
    r"""This is the quantized equivalent of :class:`~torch.nn.LeakyReLU`.

    Args:
        scale: quantization scale of the output tensor
        zero_point: quantization zero point of the output tensor
        negative_slope: Controls the angle of the negative slope. Default: 1e-2
    """
    def __init__(self, scale: float, zero_point: int, negative_slope: float = 1e-2,
                 inplace: bool = False, device=None, dtype=None) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        super().__init__(negative_slope, inplace)
        # scale and zero_point are registered as buffers so they are saved in
        # the state_dict and follow the module across devices.
        self.register_buffer('scale', torch.tensor(scale, **factory_kwargs))
        self.register_buffer('zero_point', torch.tensor(zero_point, **factory_kwargs))

    def forward(self, input):
        return torch.ops.quantized.leaky_relu(
            input, self.negative_slope, self.inplace, self.scale, self.zero_point)

    def _get_name(self):
        return 'QuantizedLeakyReLU'

    @classmethod
    def from_float(cls, mod):
        scale, zero_point = mod.activation_post_process.calculate_qparams()
        return cls(float(scale), int(zero_point), mod.negative_slope, mod.inplace)

    @classmethod
    def from_reference(cls, mod, scale, zero_point):
        return cls(float(scale), int(zero_point), mod.negative_slope, mod.inplace)
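

# Usage sketch added for illustration; the helper and its qparams are
# placeholders, not part of the original module.
def _example_leaky_relu():
    m = LeakyReLU(scale=0.05, zero_point=128, negative_slope=0.01)
    x = torch.quantize_per_tensor(torch.randn(4), 0.05, 128, dtype=torch.quint8)
    return m(x)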


class Sigmoid(torch.nn.Sigmoid):
    r"""This is the quantized equivalent of :class:`~torch.nn.Sigmoid`.

    Args:
        scale: quantization scale of the output tensor
        zero_point: quantization zero point of the output tensor
    """
    def __init__(self, output_scale: float, output_zero_point: int):
        super().__init__()
        self.output_scale = output_scale
        self.output_zero_point = output_zero_point

    def forward(self, input):
        return torch.ops.quantized.sigmoid(
            input, self.output_scale, self.output_zero_point)

    @classmethod
    def from_float(cls, mod):
        output_scale, output_zero_point = mod.activation_post_process.calculate_qparams()
        return cls(float(output_scale), int(output_zero_point))
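

# Usage sketch added for illustration; the helper and its qparams are
# placeholders, not part of the original module. Sigmoid outputs lie in
# [0, 1), so an output scale of 1/256 with zero_point 0 spans the quint8 range.
def _example_sigmoid():
    m = Sigmoid(output_scale=1.0 / 256.0, output_zero_point=0)
    x = torch.quantize_per_tensor(torch.randn(4), 0.1, 128, dtype=torch.quint8)
    return m(x)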


class Softmax(torch.nn.Softmax):
    r"""This is the quantized version of :class:`~torch.nn.Softmax`.

    Args:
        dim: A dimension along which Softmax will be computed (so every slice along dim will sum to 1).
        scale: quantization scale of the output tensor
        zero_point: quantization zero point of the output tensor
    """
    def __init__(self, dim=None, scale=1.0, zero_point=0):
        super().__init__()
        self.dim = dim
        self.scale = scale
        self.zero_point = zero_point

    def forward(self, input):
        dim = self.dim
        if dim is None:
            # Mirror the float Softmax default: infer the softmax dimension
            # from the rank of the input.
            stacklevel = 3
            dim = torch.nn.functional._get_softmax_dim(
                "softmax", input.dim(), stacklevel)
        return torch.ops.quantized.softmax(
            input, dim, self.scale, self.zero_point)

    def _get_name(self):
        return 'QuantizedSoftmax'

    @staticmethod
    def from_float(mod):
        scale, zero_point = mod.activation_post_process.calculate_qparams()
        return Softmax(mod.dim, float(scale), int(zero_point))

    @classmethod
    def from_reference(cls, mod, scale, zero_point):
        return cls(mod.dim, float(scale), int(zero_point))
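

# Usage sketch added for illustration; the helper and its qparams are
# placeholders, not part of the original module. Softmax outputs also lie in
# [0, 1], so the same 1/256 output scale is a natural choice; dim=1 normalizes
# across the feature dimension of a 2D input.
def _example_softmax():
    m = Softmax(dim=1, scale=1.0 / 256.0, zero_point=0)
    x = torch.quantize_per_tensor(torch.randn(2, 5), 0.1, 128, dtype=torch.quint8)
    return m(x)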


class MultiheadAttention(torch.ao.nn.quantizable.MultiheadAttention):
    _FLOAT_MODULE = torch.ao.nn.quantizable.MultiheadAttention

    def _get_name(self):
        return "QuantizedMultiheadAttention"

    @classmethod
    def from_float(cls, other):
        # The supported flow is float -> observed -> quantized; this class
        # only handles the observed -> quantized step.
        raise NotImplementedError(
            "It looks like you are trying to convert a non-observed MHA "
            "module. Please, see the examples on quantizable MHAs.")

    @classmethod
    def from_observed(cls, other):
        converted = torch.ao.quantization.convert(other, mapping=None,
                                                  inplace=False,
                                                  remove_qconfig=True,
                                                  convert_custom_config_dict=None)
        converted.__class__ = cls
        # bias_k and bias_v remain float parameters after convert; pop them
        # from _parameters and re-quantize them to quint8.
        if converted.bias_k is not None:
            bias_k = converted._parameters.pop('bias_k')
            sc, zp = torch._choose_qparams_per_tensor(bias_k,
                                                      reduce_range=False)
            bias_k = torch.quantize_per_tensor(bias_k, sc, zp, torch.quint8)
            setattr(converted, 'bias_k', bias_k)

        if converted.bias_v is not None:
            bias_v = converted._parameters.pop('bias_v')
            sc, zp = torch._choose_qparams_per_tensor(bias_v,
                                                      reduce_range=False)
            bias_v = torch.quantize_per_tensor(bias_v, sc, zp, torch.quint8)
            setattr(converted, 'bias_v', bias_v)

        return converted
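

# Added note on the intended flow (inferred from the methods above; the exact
# prepare/convert calls live with the quantizable MHA examples referenced in
# from_float): a float torch.nn.MultiheadAttention is first swapped for the
# observed torch.ao.nn.quantizable.MultiheadAttention and calibrated by the
# caller's quantization workflow, and only then converted here through
# from_observed, which also re-quantizes bias_k/bias_v to torch.quint8.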


class PReLU(torch.nn.Module):
    r"""This is the quantized equivalent of :class:`~torch.nn.PReLU`.

    Args:
        scale: quantization scale of the output tensor
        zero_point: quantization zero point of the output tensor
        num_parameters: number of parameters: 1, or the number of channels at input. Default: 1
       N)r7   r8   num_parametersr2   c                    sN   t    || _|| _|| _tj|tjd}tj|ddtj	d}| 
| d S )N)r4   r-   r   )r   r   r4   )r   r   rN   r   r   r   Zrandnr$   rG   rH   
set_weight)r   r7   r8   rN   wZqwr   r
   r   r      s    
zPReLU.__init__)rP   r2   c                 C   s
   || _ d S r   )weight)r   rP   r
   r
   r   rO      s    zPReLU.set_weight)r   r2   c                 C   s   t jj|| j| j| jS r   )r   r   r   ZprelurQ   r   r   r   r
   r
   r   r      s    zPReLU.forwardc                 C   s   dS )NZQuantizedPReLUr
   r   r
   r
   r   r      s    zPReLU._get_namec           
      C   sl   |j  \}}| t|t||j}|j }|j }| \}}t|t|t|tj	}	|
|	 |S r   )r"   r#   r$   r%   rN   rQ   qconfigr   rG   rH   rO   
r(   r   r   r   ZqpreluZfloat_wtZobserverZwt_scaleZwt_zpZqweightr
   r
   r   r     s    

   
zPReLU.from_floatc           
      C   s^   | t |t||j}|j  }|j }| \}}t|t |t|tj}	|	|	 |S r   )
r$   r%   rN   rQ   rR   r#   r   rG   rH   rO   rS   r
   r
   r   r)     s    

   
zPReLU.from_reference)rM   )r   r   r   r   r$   r%   r   r   ZTensorrO   r   r   r+   r   r)   r   r
   r
   r   r   rL      s     

rL   )r   r    ZReLUr   r   r,   r/   r6   r9   r   rK   r>   ModulerL   r
   r
   r
   r   <module>   s   &&*
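

# Usage sketch added for illustration; the helper and its qparams are
# placeholders, not part of the original module. Here the module keeps its
# placeholder quantized weight; in a real flow the weight comes from
# from_float/from_reference.
def _example_prelu():
    m = PReLU(output_scale=0.05, output_zero_point=128, num_parameters=1)
    x = torch.quantize_per_tensor(torch.randn(2, 3), 0.05, 128, dtype=torch.quint8)
    return m(x)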