""" XLNet configuration """


import logging

from .configuration_utils import PretrainedConfig


logger = logging.getLogger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/xlnet-base-cased-config.json",
    "xlnet-large-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/xlnet-large-cased-config.json",
}


class XLNetConfig(PretrainedConfig):
    """
        This is the configuration class to store the configuration of a :class:`~transformers.XLNetModel`.
        It is used to instantiate an XLNet model according to the specified arguments, defining the model
        architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of
        the `xlnet-large-cased <https://huggingface.co/xlnet-large-cased>`__ architecture.

        Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used
        to control the model outputs. Read the documentation from :class:`~transformers.PretrainedConfig`
        for more information.

        Args:
            vocab_size (:obj:`int`, optional, defaults to 32000):
                Vocabulary size of the XLNet model. Defines the number of different tokens that
                can be represented by the :obj:`input_ids` passed to the forward method of :class:`~transformers.XLNetModel`.
            d_model (:obj:`int`, optional, defaults to 1024):
                Dimensionality of the encoder layers and the pooler layer.
            n_layer (:obj:`int`, optional, defaults to 24):
                Number of hidden layers in the Transformer encoder.
            n_head (:obj:`int`, optional, defaults to 16):
                Number of attention heads for each attention layer in the Transformer encoder.
            d_inner (:obj:`int`, optional, defaults to 4096):
                Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
            ff_activation (:obj:`string`, optional, defaults to "gelu"):
                The non-linear activation function (function or string) in the
                encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            untie_r (:obj:`boolean`, optional, defaults to :obj:`True`):
                Whether to untie relative position biases.
            attn_type (:obj:`string`, optional, defaults to "bi"):
                The attention type used by the model. Set 'bi' for XLNet, 'uni' for Transformer-XL.
            initializer_range (:obj:`float`, optional, defaults to 0.02):
                The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
            layer_norm_eps (:obj:`float`, optional, defaults to 1e-12):
                The epsilon used by the layer normalization layers.
            dropout (:obj:`float`, optional, defaults to 0.1):
                The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
            mem_len (:obj:`int` or :obj:`None`, optional, defaults to :obj:`None`):
                The number of tokens to cache. The key/value pairs that have already been pre-computed
                in a previous forward pass won't be re-computed. See the
                `quickstart <https://huggingface.co/transformers/quickstart.html#using-the-past>`__
                for more information.
            reuse_len (:obj:`int` or :obj:`None`, optional, defaults to :obj:`None`):
                The number of tokens in the current batch to be cached and reused in the future.
            bi_data (:obj:`boolean`, optional, defaults to :obj:`False`):
                Whether to use a bidirectional input pipeline. Usually set to :obj:`True` during
                pretraining and :obj:`False` during finetuning.
            clamp_len (:obj:`int`, optional, defaults to -1):
                Clamp all relative distances larger than clamp_len.
                Setting this attribute to -1 means no clamping.
            same_length (:obj:`boolean`, optional, defaults to :obj:`False`):
                Whether to use the same attention length for each token.
            summary_type (:obj:`string`, optional, defaults to "last"):
                Argument used when doing sequence summary. Used for the multiple choice head in
                :class:`~transformers.XLNetForSequenceClassification` and :class:`~transformers.XLNetForMultipleChoice`.
                Is one of the following options:
                    - 'last' => take the last token hidden state (like XLNet)
                    - 'first' => take the first token hidden state (like BERT)
                    - 'mean' => take the mean of all tokens hidden states
                    - 'cls_index' => supply a Tensor of classification token position (GPT/GPT-2)
                    - 'attn' => not implemented for now, use multi-head attention
            summary_use_proj (:obj:`boolean`, optional, defaults to :obj:`True`):
                Argument used when doing sequence summary. Used for the multiple choice head in
                :class:`~transformers.XLNetForSequenceClassification` and :class:`~transformers.XLNetForMultipleChoice`.
                Add a projection after the vector extraction.
            summary_activation (:obj:`string` or :obj:`None`, optional, defaults to :obj:`None`):
                Argument used when doing sequence summary. Used for the multiple choice head in
                :class:`~transformers.XLNetForSequenceClassification` and :class:`~transformers.XLNetForMultipleChoice`.
                'tanh' => add a tanh activation to the output, other => no activation.
            summary_proj_to_labels (:obj:`boolean`, optional, defaults to :obj:`True`):
                Argument used when doing sequence summary. Used for the multiple choice head in
                :class:`~transformers.XLNetForSequenceClassification` and :class:`~transformers.XLNetForMultipleChoice`.
                If :obj:`True`, the projection outputs to ``config.num_labels`` classes (otherwise to ``hidden_size``).
            summary_last_dropout (:obj:`float`, optional, defaults to 0.1):
                Argument used when doing sequence summary. Used for the multiple choice head in
                :class:`~transformers.XLNetForSequenceClassification` and :class:`~transformers.XLNetForMultipleChoice`.
                Add a dropout after the projection and activation.
            start_n_top (:obj:`int`, optional, defaults to 5):
                Used in the SQuAD evaluation script for XLM and XLNet.
            end_n_top (:obj:`int`, optional, defaults to 5):
                Used in the SQuAD evaluation script for XLM and XLNet.

        Example::

            from transformers import XLNetConfig, XLNetModel

            # Initializing a XLNet configuration
            configuration = XLNetConfig()

            # Initializing a model from the configuration
            model = XLNetModel(configuration)

            # Accessing the model configuration
            configuration = model.config
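
            # Initializing a configuration with non-default sizes
            # (a sketch; any argument documented above can be overridden)
            configuration = XLNetConfig(d_model=768, n_layer=12, n_head=12)

            # The generic names resolve to the XLNet-specific attributes
            # through the properties defined on this class
            assert configuration.hidden_size == configuration.d_model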

        Attributes:
            pretrained_config_archive_map (Dict[str, str]):
                A dictionary containing all the available pre-trained checkpoints.
    Zxlnet }              geluTbi{Gz?-q=皙?NFlasttanh   r      c                    s   t  jf |||d| || _|| _|| _|| _|| dksBt|| | _|| _|| _	|| _
|| _|	| _|
| _|| _|| _|| _|| _|| _|| _|| _|| _|| _|| _|| _|| _|| _|| _|| _dS )z Constructs XLNetConfig.
        )pad_token_idbos_token_ideos_token_idr   N)super__init__
vocab_sized_modeln_layern_headAssertionErrorZd_headff_activationd_inneruntie_r	attn_typeinitializer_rangelayer_norm_epsdropoutmem_len	reuse_lenbi_data	clamp_lensame_lengthsummary_typesummary_use_projsummary_activationsummary_last_dropoutstart_n_top	end_n_topr   r   r   )selfr   r   r   r   r   r   r    r!   r"   r#   r$   r%   r&   r'   r(   r)   r*   r+   r,   r-   r.   r/   r   r   r   kwargs	__class__ D/tmp/pip-unpacked-wheel-ymerj3tt/transformers/configuration_xlnet.pyr      s8    
zXLNetConfig.__init__c                 C   s   dS )Nr   r4   r0   r4   r4   r5   max_position_embeddings   s    z#XLNetConfig.max_position_embeddingsc                 C   s   | j S Nr   r6   r4   r4   r5   n_token   s    zXLNetConfig.n_tokenc                 C   s
   || _ d S r8   r9   )r0   valuer4   r4   r5   r:      s    c                 C   s   | j S r8   )r   r6   r4   r4   r5   hidden_size   s    zXLNetConfig.hidden_sizec                 C   s   | j S r8   )r   r6   r4   r4   r5   num_attention_heads   s    zXLNetConfig.num_attention_headsc                 C   s   | j S r8   )r   r6   r4   r4   r5   num_hidden_layers   s    zXLNetConfig.num_hidden_layers)r   r   r   r   r	   r
   Tr   r   r   r   NNFr   Fr   Tr   r   r   r   r   r   r   )__name__
__module____qualname____doc__#XLNET_PRETRAINED_CONFIG_ARCHIVE_MAPZpretrained_config_archive_mapZ
model_typer   propertyr7   r:   setterr<   r=   r>   __classcell__r4   r4   r2   r5   r       sR   b                         @




r   )	rB   loggingZconfiguration_utilsr   	getLoggerr?   loggerrC   r   r4   r4   r4   r5   <module>   s   

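
# A minimal usage sketch of the backward-compatibility aliases defined above
# (illustrative only, not executed by the library; assumes the package is
# importable so the relative import at the top of this file resolves):
#
#     from transformers import XLNetConfig
#
#     config = XLNetConfig()
#     config.n_token = 40000              # the setter forwards to `vocab_size`
#     assert config.vocab_size == 40000
#     assert config.max_position_embeddings == -1  # no fixed length limit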