import torch
from torch.fx.graph import Node
from .utils import all_node_args_have_no_tensors
from torch.ao.quantization.quantization_types import Pattern, NodePattern
from abc import ABC
from typing import Any, Callable, Dict, Optional


def _default_root_node_getter(node_pattern):
    # Walk a (possibly nested) matched pattern; the root node is taken to be
    # the last element of each pattern tuple.
    if node_pattern is None:
        return node_pattern
    while not isinstance(node_pattern, Node):
        node_pattern = node_pattern[-1]
    return node_pattern


class QuantizeHandler(ABC):
    """ Base handler class for the quantizer patterns
    """
    def __init__(
            self,
            node_pattern: NodePattern,
            modules: Dict[str, torch.nn.Module],
            root_node_getter: Callable = None,
            is_custom_module=False,
            is_standalone_module=False):
        """ Records pattern information in __init__, which will be used
        in convert
        """
        self.node_pattern = node_pattern
        self.modules = modules
        if root_node_getter is None:
            root_node_getter = _default_root_node_getter
        self.root_node = root_node_getter(node_pattern)
        self.is_custom_module_ = is_custom_module
        self.is_standalone_module_ = is_standalone_module
        # count how many args of the root node actually carry Tensors
        # (as opposed to scalars or other non-Tensor values)
        self.num_tensor_args = 0
        if isinstance(self.root_node, Node):
            cache_for_no_tensor_check: Dict[Node, bool] = {}
            for arg_idx in range(len(self.root_node.args)):
                arg = self.root_node.args[arg_idx]
                if isinstance(arg, Node) and (
                        not all_node_args_have_no_tensors(
                            arg, self.modules, cache_for_no_tensor_check)):
                    self.num_tensor_args += 1

    def input_output_observed(self) -> bool:
        """
        Returns True if the pattern matched to this qhandler could be
        observed, and False if it should not be observed.
        """
        return True

    def is_general_tensor_value_op(self) -> bool:
        """
        Returns True if the operator works for both floating point and
        quantized input, and does some computation based on the input Tensor,
        or only re-arranges the Tensor values or queries some metadata
        about the Tensor.
        For such ops we need to insert an observer/fake_quant for the output
        of the operator (the same observer instance as the input), since the
        distribution of values is different for the input and output Tensors
        (for HistogramObserver) while they share the same quantization
        parameters.
        Example operators: avgpool2d, reshape, transpose, maxpool2d
        Example observed operator:
        observer_0 - avgpool2d - observer_0 (same observer instance as input)
        """
        return False

    def get_activation_ctr(
        self,
        qconfig: Any,
        pattern: Pattern,
        is_training: bool,
    ) -> Optional[Callable]:
        """
        Returns the constructor for the activation observer which should be
        used for the pattern matched to this handler. Some handlers override
        this to a different value than what is specified in the qconfig.
        """
        return qconfig.activation

    def is_custom_module(self):
        return self.is_custom_module_

    def is_standalone_module(self):
        return self.is_standalone_module_


class BinaryOpQuantizeHandler(QuantizeHandler):
    pass


class CatQuantizeHandler(QuantizeHandler):
    pass


class ConvReluQuantizeHandler(QuantizeHandler):
    pass


class LinearReLUQuantizeHandler(QuantizeHandler):
    pass


class BatchNormQuantizeHandler(QuantizeHandler):
    pass


class EmbeddingQuantizeHandler(QuantizeHandler):
    pass


class RNNDynamicQuantizeHandler(QuantizeHandler):
    pass


class DefaultNodeQuantizeHandler(QuantizeHandler):
    """ Common quantized op, first input and first output will be quantized
    """
    pass


class FixedQParamsOpQuantizeHandler(QuantizeHandler):
    pass


class CopyNodeQuantizeHandler(QuantizeHandler):
    pass


class GeneralTensorShapeOpQuantizeHandler(QuantizeHandler):
    pass


class CustomModuleQuantizeHandler(QuantizeHandler):
    pass


class StandaloneModuleQuantizeHandler(QuantizeHandler):
    pass
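

if __name__ == "__main__":
    # A minimal usage sketch added for illustration; it is not part of the
    # original module and only runs when this file is executed directly.
    # It shows how a handler records how many pattern args carry Tensors.
    # The toy `_Add` module below is a hypothetical example.
    import torch.fx

    class _Add(torch.nn.Module):
        def forward(self, x, y):
            return x + y

    gm = torch.fx.symbolic_trace(_Add())
    # The traced graph has one call_function node (the add), which plays the
    # role of the matched pattern's root node here.
    add_node = next(n for n in gm.graph.nodes if n.op == "call_function")
    handler = BinaryOpQuantizeHandler(add_node, dict(gm.named_modules()))
    # Both operands are placeholder nodes that produce Tensors, so both count.
    print(handler.num_tensor_args)  # expected: 2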