""" ALBERT model configuration """

from .configuration_utils import PretrainedConfig


ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "albert-base-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-base-v1-config.json",
    "albert-large-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-large-v1-config.json",
    "albert-xlarge-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xlarge-v1-config.json",
    "albert-xxlarge-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xxlarge-v1-config.json",
    "albert-base-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-base-v2-config.json",
    "albert-large-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-large-v2-config.json",
    "albert-xlarge-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xlarge-v2-config.json",
    "albert-xxlarge-v2": "https://s3.amazonaws.com/models.huggingface.co/bert/albert-xxlarge-v2-config.json",
}


class AlbertConfig(PretrainedConfig):
    r"""
        This is the configuration class to store the configuration of an :class:`~transformers.AlbertModel`.
        It is used to instantiate an ALBERT model according to the specified arguments, defining the model
        architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of
        the ALBERT `xxlarge <https://huggingface.co/albert-xxlarge-v2>`__ architecture.

        Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used
        to control the model outputs. Read the documentation from :class:`~transformers.PretrainedConfig`
        for more information.
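
        For example, generic attributes defined on :class:`~transformers.PretrainedConfig` (such as
        :obj:`output_attentions` or :obj:`output_hidden_states`) can be set through keyword arguments.
        A minimal sketch, assuming the standard :class:`~transformers.PretrainedConfig` keyword
        arguments of this library version::

            config = AlbertConfig(output_attentions=True, output_hidden_states=True)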


        Args:
            vocab_size (:obj:`int`, optional, defaults to 30000):
                Vocabulary size of the ALBERT model. Defines the number of different tokens that
                can be represented by the `input_ids` passed to the forward method of :class:`~transformers.AlbertModel`.
            embedding_size (:obj:`int`, optional, defaults to 128):
                Dimensionality of vocabulary embeddings.
            hidden_size (:obj:`int`, optional, defaults to 4096):
                Dimensionality of the encoder layers and the pooler layer.
            num_hidden_layers (:obj:`int`, optional, defaults to 12):
                Number of hidden layers in the Transformer encoder.
            num_hidden_groups (:obj:`int`, optional, defaults to 1):
                Number of groups for the hidden layers; layers in the same group share parameters
                (see the sketch after this list).
            num_attention_heads (:obj:`int`, optional, defaults to 64):
                Number of attention heads for each attention layer in the Transformer encoder.
            intermediate_size (:obj:`int`, optional, defaults to 16384):
                The dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
            inner_group_num (:obj:`int`, optional, defaults to 1):
                The number of inner repetitions of attention and FFN blocks within each layer group.
            hidden_act (:obj:`str` or :obj:`function`, optional, defaults to "gelu_new"):
                The non-linear activation function (function or string) in the encoder and pooler.
                If a string is passed, "gelu", "relu", "swish", and "gelu_new" are supported.
            hidden_dropout_prob (:obj:`float`, optional, defaults to 0):
                The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob (:obj:`float`, optional, defaults to 0):
                The dropout ratio for the attention probabilities.
            max_position_embeddings (:obj:`int`, optional, defaults to 512):
                The maximum sequence length that this model might ever be used with. Typically set this to something
                large (e.g., 512, 1024, or 2048).
            type_vocab_size (:obj:`int`, optional, defaults to 2):
                The vocabulary size of the `token_type_ids` passed into :class:`~transformers.AlbertModel`.
            initializer_range (:obj:`float`, optional, defaults to 0.02):
                The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
            layer_norm_eps (:obj:`float`, optional, defaults to 1e-12):
                The epsilon used by the layer normalization layers.
            classifier_dropout_prob (:obj:`float`, optional, defaults to 0.1):
                The dropout ratio for attached classifiers.
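
        Parameter sharing works as follows: the :obj:`num_hidden_layers` layers are split into
        :obj:`num_hidden_groups` contiguous groups, and every layer in a group reuses the same
        weights. A minimal sketch of the layer-to-group mapping (an illustrative helper, not part
        of this module)::

            def layer_to_group(layer_idx, num_hidden_layers=12, num_hidden_groups=1):
                # With the defaults, all 12 layers fall into group 0 and share one set of weights.
                layers_per_group = num_hidden_layers // num_hidden_groups
                return layer_idx // layers_per_group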

        Example::

            from transformers import AlbertConfig, AlbertModel
            # Initializing an ALBERT-xxlarge style configuration
            albert_xxlarge_configuration = AlbertConfig()

            # Initializing an ALBERT-base style configuration
            albert_base_configuration = AlbertConfig(
                hidden_size=768,
                num_attention_heads=12,
                intermediate_size=3072,
            )

            # Initializing a model from the ALBERT-base style configuration
            model = AlbertModel(albert_base_configuration)

            # Accessing the model configuration
            configuration = model.config
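
            # Saving and reloading the configuration (an illustrative addition; save_pretrained and
            # from_pretrained are inherited from PretrainedConfig, and "./albert-config" is a
            # hypothetical directory)
            albert_base_configuration.save_pretrained("./albert-config")
            reloaded_configuration = AlbertConfig.from_pretrained("./albert-config")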

        Attributes:
            pretrained_config_archive_map (Dict[str, str]):
                A dictionary mapping each available pre-trained checkpoint name to the URL of its
                configuration file.
    Zalberté0u  é€   é   é   r   é@   é @  Úgelu_newé    é   é   ç{®Gáz”?çê-™—q=çš™™™™™¹?é   c                    s~   t ƒ jf |||dœ|—Ž || _|| _|| _|| _|| _|| _|| _|	| _	|| _
|
| _|| _|| _|| _|| _|| _|| _d S )N)Úpad_token_idÚbos_token_idÚeos_token_id)ÚsuperÚ__init__Ú
vocab_sizeÚembedding_sizeÚhidden_sizeÚnum_hidden_layersÚnum_hidden_groupsÚnum_attention_headsÚinner_group_numÚ
hidden_actÚintermediate_sizeÚhidden_dropout_probÚattention_probs_dropout_probÚmax_position_embeddingsÚtype_vocab_sizeÚinitializer_rangeÚlayer_norm_epsÚclassifier_dropout_prob)Úselfr   r   r   r   r   r   r   r   r   r    r!   r"   r#   r$   r%   r&   r   r   r   Úkwargs©Ú	__class__© úE/tmp/pip-unpacked-wheel-ymerj3tt/transformers/configuration_albert.pyr   m   s"    zAlbertConfig.__init__)r   r   r   r   r   r   r	   r   r
   r   r   r   r   r   r   r   r   r   r   )	Ú__name__Ú
__module__Ú__qualname__Ú__doc__Ú$ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAPZpretrained_config_archive_mapZ
model_typer   Ú__classcell__r+   r+   r)   r,   r   !   s.   H                   ìr   N)r0   Zconfiguration_utilsr   r1   r   r+   r+   r+   r,   Ú<module>   s   ø
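

if __name__ == "__main__":
    # Minimal smoke test (an illustrative addition, not part of the original module): round-trip
    # the configuration through save_pretrained/from_pretrained, inherited from PretrainedConfig.
    import tempfile

    config = AlbertConfig(hidden_size=768, num_attention_heads=12, intermediate_size=3072)
    with tempfile.TemporaryDirectory() as tmp_dir:
        config.save_pretrained(tmp_dir)
        restored = AlbertConfig.from_pretrained(tmp_dir)
    assert restored.hidden_size == 768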