""" Auto Tokenizer class. """


import logging
from collections import OrderedDict

from .configuration_auto import (
    AlbertConfig,
    AutoConfig,
    BartConfig,
    BertConfig,
    CamembertConfig,
    CTRLConfig,
    DistilBertConfig,
    ElectraConfig,
    FlaubertConfig,
    GPT2Config,
    OpenAIGPTConfig,
    ReformerConfig,
    RobertaConfig,
    T5Config,
    TransfoXLConfig,
    XLMConfig,
    XLMRobertaConfig,
    XLNetConfig,
)
from .configuration_marian import MarianConfig
from .configuration_utils import PretrainedConfig
from .tokenization_albert import AlbertTokenizer
from .tokenization_bart import BartTokenizer
from .tokenization_bert import BertTokenizer, BertTokenizerFast
from .tokenization_bert_japanese import BertJapaneseTokenizer
from .tokenization_camembert import CamembertTokenizer
from .tokenization_ctrl import CTRLTokenizer
from .tokenization_distilbert import DistilBertTokenizer, DistilBertTokenizerFast
from .tokenization_electra import ElectraTokenizer, ElectraTokenizerFast
from .tokenization_flaubert import FlaubertTokenizer
from .tokenization_gpt2 import GPT2Tokenizer, GPT2TokenizerFast
from .tokenization_marian import MarianTokenizer
from .tokenization_openai import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from .tokenization_reformer import ReformerTokenizer
from .tokenization_roberta import RobertaTokenizer, RobertaTokenizerFast
from .tokenization_t5 import T5Tokenizer
from .tokenization_transfo_xl import TransfoXLTokenizer, TransfoXLTokenizerFast
from .tokenization_xlm import XLMTokenizer
from .tokenization_xlm_roberta import XLMRobertaTokenizer
from .tokenization_xlnet import XLNetTokenizer


logger = logging.getLogger(__name__)


# Maps each config class to a (python tokenizer, fast tokenizer or None) pair.
# Order matters: dispatch is done with `isinstance` and some configs subclass
# others (e.g. CamembertConfig and XLMRobertaConfig derive from RobertaConfig,
# which derives from BertConfig), so more specific configs must come first.
TOKENIZER_MAPPING = OrderedDict(
    [
        (T5Config, (T5Tokenizer, None)),
        (DistilBertConfig, (DistilBertTokenizer, DistilBertTokenizerFast)),
        (AlbertConfig, (AlbertTokenizer, None)),
        (CamembertConfig, (CamembertTokenizer, None)),
        (XLMRobertaConfig, (XLMRobertaTokenizer, None)),
        (MarianConfig, (MarianTokenizer, None)),
        (BartConfig, (BartTokenizer, None)),
        (RobertaConfig, (RobertaTokenizer, RobertaTokenizerFast)),
        (ReformerConfig, (ReformerTokenizer, None)),
        (ElectraConfig, (ElectraTokenizer, ElectraTokenizerFast)),
        (BertConfig, (BertTokenizer, BertTokenizerFast)),
        (OpenAIGPTConfig, (OpenAIGPTTokenizer, OpenAIGPTTokenizerFast)),
        (GPT2Config, (GPT2Tokenizer, GPT2TokenizerFast)),
        (TransfoXLConfig, (TransfoXLTokenizer, TransfoXLTokenizerFast)),
        (XLNetConfig, (XLNetTokenizer, None)),
        (FlaubertConfig, (FlaubertTokenizer, None)),
        (XLMConfig, (XLMTokenizer, None)),
        (CTRLConfig, (CTRLTokenizer, None)),
    ]
)


class AutoTokenizer:
    r""":class:`~transformers.AutoTokenizer` is a generic tokenizer class
        that will be instantiated as one of the tokenizer classes of the library
        when created with the `AutoTokenizer.from_pretrained(pretrained_model_name_or_path)`
        class method.

        The `from_pretrained()` method takes care of returning the correct tokenizer class instance
        based on the `model_type` property of the config object, or, when it's missing,
        falling back to pattern matching on the `pretrained_model_name_or_path` string.

        The tokenizer class to instantiate is selected according to the first matching pattern
        in the `pretrained_model_name_or_path` string (in the following order):
            - contains `t5`: T5Tokenizer (T5 model)
            - contains `distilbert`: DistilBertTokenizer (DistilBert model)
            - contains `albert`: AlbertTokenizer (ALBERT model)
            - contains `camembert`: CamembertTokenizer (CamemBERT model)
            - contains `xlm-roberta`: XLMRobertaTokenizer (XLM-RoBERTa model)
            - contains `roberta`: RobertaTokenizer (RoBERTa model)
            - contains `bert-base-japanese`: BertJapaneseTokenizer (Bert model)
            - contains `bert`: BertTokenizer (Bert model)
            - contains `openai-gpt`: OpenAIGPTTokenizer (OpenAI GPT model)
            - contains `gpt2`: GPT2Tokenizer (OpenAI GPT-2 model)
            - contains `transfo-xl`: TransfoXLTokenizer (Transformer-XL model)
            - contains `xlnet`: XLNetTokenizer (XLNet model)
            - contains `xlm`: XLMTokenizer (XLM model)
            - contains `ctrl`: CTRLTokenizer (Salesforce CTRL model)
            - contains `electra`: ElectraTokenizer (Google ELECTRA model)

        This class cannot be instantiated using `__init__()` (it throws an error).
    """

    def __init__(self):
        raise EnvironmentError(
            "AutoTokenizer is designed to be instantiated "
            "using the `AutoTokenizer.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
        r""" Instantiate one of the tokenizer classes of the library
        from a pre-trained model vocabulary.

        The tokenizer class to instantiate is selected according to the first matching pattern
        in the `pretrained_model_name_or_path` string (in the following order):
            - contains `t5`: T5Tokenizer (T5 model)
            - contains `distilbert`: DistilBertTokenizer (DistilBert model)
            - contains `albert`: AlbertTokenizer (ALBERT model)
            - contains `camembert`: CamembertTokenizer (CamemBERT model)
            - contains `xlm-roberta`: XLMRobertaTokenizer (XLM-RoBERTa model)
            - contains `roberta`: RobertaTokenizer (RoBERTa model)
            - contains `bert-base-japanese`: BertJapaneseTokenizer (Bert model)
            - contains `bert`: BertTokenizer (Bert model)
            - contains `openai-gpt`: OpenAIGPTTokenizer (OpenAI GPT model)
            - contains `gpt2`: GPT2Tokenizer (OpenAI GPT-2 model)
            - contains `transfo-xl`: TransfoXLTokenizer (Transformer-XL model)
            - contains `xlnet`: XLNetTokenizer (XLNet model)
            - contains `xlm`: XLMTokenizer (XLM model)
            - contains `ctrl`: CTRLTokenizer (Salesforce CTRL model)
            - contains `electra`: ElectraTokenizer (Google ELECTRA model)

        Params:
            pretrained_model_name_or_path: either:

                - a string with the `shortcut name` of a predefined tokenizer to load from cache or download, e.g.: ``bert-base-uncased``.
                - a string with the `identifier name` of a predefined tokenizer that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
                - a path to a `directory` containing vocabulary files required by the tokenizer, for instance saved using the :func:`~transformers.PreTrainedTokenizer.save_pretrained` method, e.g.: ``./my_model_directory/``.
                - (not applicable to all derived classes) a path or url to a single saved vocabulary file if and only if the tokenizer only requires a single vocabulary file (e.g. Bert, XLNet), e.g.: ``./my_model_directory/vocab.txt``.

            cache_dir: (`optional`) string:
                Path to a directory in which downloaded predefined tokenizer vocabulary files should be cached if the standard cache should not be used.

            force_download: (`optional`) boolean, default False:
                Force the (re-)download of the vocabulary files and override the cached versions if they exist.

            resume_download: (`optional`) boolean, default False:
                Do not delete an incompletely received file. Attempt to resume the download if such a file exists.

            proxies: (`optional`) dict, default None:
                A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
                The proxies are used on each request.

            use_fast: (`optional`) boolean, default False:
                Indicate if transformers should try to load the fast version of the tokenizer (True) or use the Python one (False).

            inputs: (`optional`) positional arguments: will be passed to the Tokenizer ``__init__`` method.

            kwargs: (`optional`) keyword arguments: will be passed to the Tokenizer ``__init__`` method. Can be used to set special tokens like ``bos_token``, ``eos_token``, ``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``, ``mask_token``, ``additional_special_tokens``. See parameters in the doc string of :class:`~transformers.PreTrainedTokenizer` for details.

        Examples::

            # Download vocabulary from S3 and cache.
            tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')

            # Download vocabulary from S3 (user-uploaded) and cache.
            tokenizer = AutoTokenizer.from_pretrained('dbmdz/bert-base-german-cased')

            # If vocabulary files are in a directory (e.g. tokenizer was saved using `save_pretrained('./test/saved_model/')`)
            tokenizer = AutoTokenizer.from_pretrained('./test/bert_saved_model/')
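
            # If a fast (Rust-backed) implementation exists for the model type,
            # `use_fast=True` selects it (here, BertTokenizerFast)
            tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased', use_fast=True)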

        ÚconfigNzbert-base-japaneseÚuse_fastFz^Unrecognized configuration class {} to build an AutoTokenizer.
Model type should be one of {}.z, c                 s   s   | ]}|j V  qd S )N)Ú__name__)Ú.0Úcr5   r5   r6   Ú	<genexpr>Ð   s     z0AutoTokenizer.from_pretrained.<locals>.<genexpr>)ÚpopÚ
isinstancer   r   Úfrom_pretrainedr   ÚTOKENIZER_MAPPINGÚitemsÚ
ValueErrorÚformatÚ	__class__ÚjoinÚkeys)	ÚclsZpretrained_model_name_or_pathÚinputsÚkwargsr8   r9   Zconfig_classZtokenizer_class_pyZtokenizer_class_fastr5   r5   r6   r@      s"    ?

 þÿzAutoTokenizer.from_pretrainedN)r:   Ú