"""Tokenization classes for python and fast tokenizers. Fast tokenizers are provided by HuggingFace's tokenizers library."""

import copy
import functools
import itertools
import json
import logging
import operator
import os
import re
from collections import UserDict, defaultdict
from contextlib import contextmanager
from typing import Any, Dict, List, NamedTuple, Optional, Sequence, Tuple, Union

from tokenizers import AddedToken as AddedTokenFast
from tokenizers import Encoding as EncodingFast
from tokenizers.decoders import Decoder as DecoderFast
from tokenizers.implementations import BaseTokenizer as BaseTokenizerFast

from .file_utils import (
    cached_path,
    hf_bucket_url,
    is_remote_url,
    is_tf_available,
    is_torch_available,
    torch_required,
)

if is_tf_available():
    import tensorflow as tf
if is_torch_available():
    import torch

logger = logging.getLogger(__name__)

SPECIAL_TOKENS_MAP_FILE = "special_tokens_map.json"
ADDED_TOKENS_FILE = "added_tokens.json"
TOKENIZER_CONFIG_FILE = "tokenizer_config.json"

VERY_LARGE_INTEGER = int(1e30)  # used as the default (effectively unbounded) model_max_length
LARGE_INTEGER = int(1e20)  # slightly smaller sentinel, kept below VERY_LARGE_INTEGER

# Type aliases for the different kinds of tokenizer inputs handled in this module
TextInput = str
PreTokenizedInput = List[str]
EncodedInput = List[int]
TextInputPair = Tuple[str, str]
PreTokenizedInputPair = Tuple[List[str], List[str]]
EncodedInputPair = Tuple[List[int], List[int]]


class CharSpan(NamedTuple):
    """ Character span in the original string

        Args:
            start: index of the first character in the original string
            end: index of the character following the last character in the original string
    """

    start: int
    end: int


class TokenSpan(NamedTuple):
    """ Token span in an encoded string (list of tokens)

        Args:
            start: index of the first token in the span
            end: index of the token following the last token in the span
    """

    start: int
    end: int


def flatten(x: Sequence):
    """
    Flatten the provided (potentially nested) sequence

    Args:
        x (Sequence): Potentially nested sequence to flatten

    Returns:
        list: Flattened sequence
    """
    return functools.reduce(operator.iconcat, x, [])
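# Minimal illustrative sketch of the helpers above (not part of the original module body):
# `flatten` collapses one level of nesting, and the two NamedTuples are plain (start, end) pairs
# used by BatchEncoding below.
#
#     flatten([[1, 2], [3], [4, 5]])     # -> [1, 2, 3, 4, 5]
#     span = CharSpan(start=0, end=5)
#     span.start, span.end               # -> (0, 5)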
@contextmanager
def truncate_and_pad(
    tokenizer: BaseTokenizerFast,
    max_length: int,
    stride: int,
    strategy: str,
    pad_to_max_length: bool,
    padding_side: str,
    pad_token_id: int,
    pad_token_type_id: int,
    pad_token: str,
):
    """ This contextmanager is in charge of defining the truncation and the padding strategies for fast tokenizers
        (provided by HuggingFace tokenizers library) and restores the tokenizer settings afterwards.

        This contextmanager assumes the provided tokenizer has no padding / truncation strategy
        before the managed section. If your tokenizer sets a padding / truncation strategy before,
        then it will be reset to no padding / truncation when exiting the managed section.

        Args:
            tokenizer (BaseTokenizerFast): The tokenizer which will be used
            max_length (int): The maximum size of the sequence
            stride (int): The stride to use when handling overflow
            strategy (str): Overflowing logic to use
            pad_to_max_length (bool): Boolean indicating if the output needs to be padded up to max_length
            padding_side (str): "left" or "right" indicating the direction the output sequence will be padded
            pad_token_id (int): The integer representation of the padding token to use
            pad_token_type_id (int): The integer representation of the padding token type to use
            pad_token (str): The string representation of the padding token to use

    """
    # Handle the truncation and padding strategies on the backend tokenizer
    if max_length is not None:
        tokenizer.enable_truncation(max_length, stride=stride, strategy=strategy)

    if pad_to_max_length and (pad_token and pad_token_id >= 0):
        tokenizer.enable_padding(
            max_length,
            direction=padding_side,
            pad_id=pad_token_id,
            pad_type_id=pad_token_type_id,
            pad_token=pad_token,
        )
    elif pad_to_max_length:
        logger.warning(
            "Disabled padding because no padding token set (pad_token: {}, pad_token_id: {}).\n"
            "To remove this error, you can add a new pad token and then resize model embedding:\n"
            "\ttokenizer.pad_token = '<PAD>'\n"
            "\tmodel.resize_token_embeddings(len(tokenizer))".format(pad_token, pad_token_id)
        )

    yield

    # Restore the tokenizer to its state before the managed section
    if max_length is not None:
        tokenizer.no_truncation()

    if pad_to_max_length and (pad_token and pad_token_id >= 0):
        tokenizer.no_padding()
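# Illustrative sketch of how the context manager above is meant to be used internally by the fast
# tokenizer wrappers. This is hedged: `fast_tokenizer` is a hypothetical `tokenizers` backend
# instance and the concrete values are arbitrary, not defaults taken from this module.
#
#     with truncate_and_pad(
#         tokenizer=fast_tokenizer,
#         max_length=128,
#         stride=0,
#         strategy="longest_first",
#         pad_to_max_length=True,
#         padding_side="right",
#         pad_token_id=0,
#         pad_token_type_id=0,
#         pad_token="[PAD]",
#     ):
#         encodings = fast_tokenizer.encode_batch(["a first sentence", "a second, longer sentence"])
#     # On exit, truncation and padding are reset to "no truncation / no padding".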
class BatchEncoding(UserDict):
    """ BatchEncoding holds the output of the encode and batch_encode methods (tokens, attention_masks, etc).
        This class is derived from a python dictionary and can be used as a dictionary.
        In addition, this class exposes utility methods to map from word/char space to token space.

        Args:
            data (:obj:`dict`): Dictionary of lists/arrays returned by the encode/batch_encode methods ('input_ids', 'attention_mask'...)
            encoding (:obj:`EncodingFast`, :obj:`list(EncodingFast)`, `optional`, defaults to :obj:`None`):
                If the tokenizer is a fast tokenizer which outputs additional information like the mapping from
                word/char space to token space, the `EncodingFast` instance or list of instances (for batches)
                holds this information.

    """

    def __init__(
        self,
        data: Optional[Dict[str, Any]] = None,
        encoding: Optional[Union[EncodingFast, Sequence[EncodingFast]]] = None,
    ):
        super().__init__(data)

        if isinstance(encoding, EncodingFast):
            encoding = [encoding]

        self._encodings = encoding

    def __getitem__(self, item: Union[int, str]) -> Union[Any, EncodingFast]:
        """ If the key is a string, get the value of the dict associated to `key` ('input_ids', 'attention_mask'...)
            If the key is an integer, get the EncodingFast for batch item with index `key`
        """
        if isinstance(item, str):
            return self.data[item]
        elif self._encodings is not None:
            return self._encodings[item]
        else:
            raise KeyError(
                "Indexing with integers (to access backend Encoding for a given batch index) "
                "is not available when using Python based tokenizers"
            )

    def __getattr__(self, item: str):
        return self.data[item]

    def keys(self):
        return self.data.keys()

    def values(self):
        return self.data.values()

    def items(self):
        return self.data.items()

    # Everything below relies on the fast (Rust) backend and raises when a pure-Python tokenizer was used.

    @property
    def encodings(self) -> Optional[List[EncodingFast]]:
        """
        Return the list of all encodings from the tokenization process

        Returns: List[EncodingFast] or None if input was tokenized through a Python (i.e. not fast) tokenizer
        """
        return self._encodings

    def tokens(self, batch_index: int = 0) -> List[str]:
        if not self._encodings:
            raise ValueError("tokens() is not available when using Python based tokenizers")
        return self._encodings[batch_index].tokens

    def words(self, batch_index: int = 0) -> List[Optional[int]]:
        if not self._encodings:
            raise ValueError("words() is not available when using Python based tokenizers")
        return self._encodings[batch_index].words

    def token_to_word(self, batch_or_token_index: int, token_index: Optional[int] = None) -> int:
        """ Get the index of the word corresponding to (i.e. comprising) an encoded token
            in a sequence of the batch.

            Can be called as:
                - self.token_to_word(token_index) if batch size is 1
                - self.token_to_word(batch_index, token_index) if batch size is greater than 1

            This method is particularly suited when the input sequences are provided as
            pre-tokenized sequences (i.e. words are defined by the user). In this case it allows
            to easily associate encoded tokens with provided tokenized words.

            Args:
                batch_or_token_index (:obj:`int`): Index of the sequence in the batch. If the batch only
                    comprises one sequence, this can be the index of the token in the sequence.
                token_index (:obj:`int`, `optional`): If a batch index is provided in
                    `batch_or_token_index`, this can be the index of the token in the sequence.

            Returns:
                word_index (:obj:`int`): index of the word in the input sequence.
        """
        if not self._encodings:
            raise ValueError("token_to_word() is not available when using Python based tokenizers")
        if token_index is not None:
            batch_index = batch_or_token_index
        else:
            batch_index = 0
            token_index = batch_or_token_index
        if batch_index < 0:
            batch_index = self._batch_size + batch_index
        if token_index < 0:
            token_index = self._seq_len + token_index
        return self._encodings[batch_index].token_to_word(token_index)

    def word_to_tokens(self, batch_or_word_index: int, word_index: Optional[int] = None) -> TokenSpan:
        """ Get the encoded token span corresponding to a word in the sequence of the batch.

            Token spans are returned as a TokenSpan NamedTuple with:
                start: index of the first token
                end: index of the token following the last token

            Can be called as:
                - self.word_to_tokens(word_index) if batch size is 1
                - self.word_to_tokens(batch_index, word_index) if batch size is greater or equal to 1

            Args:
                batch_or_word_index (:obj:`int`): Index of the sequence in the batch. If the batch only
                    comprises one sequence, this can be the index of the word in the sequence.
                word_index (:obj:`int`, `optional`): If a batch index is provided in
                    `batch_or_word_index`, this can be the index of the word in the sequence.

            Returns:
                token_span (:obj:`TokenSpan`): Span of tokens in the encoded sequence.
        """
        if not self._encodings:
            raise ValueError("word_to_tokens() is not available when using Python based tokenizers")
        if word_index is not None:
            batch_index = batch_or_word_index
        else:
            batch_index = 0
            word_index = batch_or_word_index
        if batch_index < 0:
            batch_index = self._batch_size + batch_index
        if word_index < 0:
            word_index = self._seq_len + word_index
        return TokenSpan(*self._encodings[batch_index].word_to_tokens(word_index))

    def token_to_chars(self, batch_or_token_index: int, token_index: Optional[int] = None) -> CharSpan:
        """ Get the character span in the original string corresponding to an encoded token in a sequence of the batch.

            Character spans are returned as a CharSpan NamedTuple with:
                start: index of the first character in the original string associated to the token
                end: index of the character following the last character in the original string associated to the token

            Can be called as:
                - self.token_to_chars(token_index) if batch size is 1
                - self.token_to_chars(batch_index, token_index) if batch size is greater or equal to 1

            Args:
                batch_or_token_index (:obj:`int`): Index of the sequence in the batch. If the batch only
                    comprises one sequence, this can be the index of the token in the sequence.
                token_index (:obj:`int`, `optional`): If a batch index is provided in
                    `batch_or_token_index`, this can be the index of the token in the sequence.

            Returns:
                char_span (:obj:`CharSpan`): Span of characters in the original string.
        """
        if not self._encodings:
            raise ValueError("token_to_chars() is not available when using Python based tokenizers")
        if token_index is not None:
            batch_index = batch_or_token_index
        else:
            batch_index = 0
            token_index = batch_or_token_index
        return CharSpan(*self._encodings[batch_index].token_to_chars(token_index))

    def char_to_token(self, batch_or_char_index: int, char_index: Optional[int] = None) -> int:
        """ Get the index of the token in the encoded output comprising a character
            in the original string for a sequence of the batch.

            Can be called as:
                - self.char_to_token(char_index) if batch size is 1
                - self.char_to_token(batch_index, char_index) if batch size is greater or equal to 1

            Args:
                batch_or_char_index (:obj:`int`): Index of the sequence in the batch. If the batch only
                    comprises one sequence, this can be the index of the character in the original string.
                char_index (:obj:`int`, `optional`): If a batch index is provided in
                    `batch_or_char_index`, this can be the index of the character in the original string.

            Returns:
                token_index (:obj:`int`): Index of the token.
        """
        if not self._encodings:
            raise ValueError("char_to_token() is not available when using Python based tokenizers")
        if char_index is not None:
            batch_index = batch_or_char_index
        else:
            batch_index = 0
            char_index = batch_or_char_index
        return self._encodings[batch_index].char_to_token(char_index)

    def word_to_chars(self, batch_or_word_index: int, word_index: Optional[int] = None) -> CharSpan:
        """ Get the character span in the original string corresponding to a given word in a sequence
            of the batch.

            Character spans are returned as a CharSpan NamedTuple with:
                start: index of the first character in the original string
                end: index of the character following the last character in the original string

            Can be called as:
                - self.word_to_chars(word_index) if batch size is 1
                - self.word_to_chars(batch_index, word_index) if batch size is greater or equal to 1

            Args:
                batch_or_word_index (:obj:`int`): Index of the sequence in the batch. If the batch only
                    comprises one sequence, this can be the index of the word in the sequence.
                word_index (:obj:`int`, `optional`): If a batch index is provided in
                    `batch_or_word_index`, this can be the index of the word in the sequence.

            Returns:
                char_span (:obj:`CharSpan` or :obj:`List[CharSpan]`):
                    Span(s) of the associated character or characters in the string.
        """
        if not self._encodings:
            raise ValueError("word_to_chars() is not available when using Python based tokenizers")
        if word_index is not None:
            batch_index = batch_or_word_index
        else:
            batch_index = 0
            word_index = batch_or_word_index
        return CharSpan(*self._encodings[batch_index].word_to_chars(word_index))

    def char_to_word(self, batch_or_char_index: int, char_index: Optional[int] = None) -> int:
        """ Get the word in the original string corresponding to a character in the original string of
            a sequence of the batch.

            Can be called as:
                - self.char_to_word(char_index) if batch size is 1
                - self.char_to_word(batch_index, char_index) if batch size is greater than 1

            Args:
                batch_or_char_index (:obj:`int`): Index of the sequence in the batch. If the batch only
                    comprises one sequence, this can be the index of the character in the original string.
                char_index (:obj:`int`, `optional`): If a batch index is provided in
                    `batch_or_char_index`, this can be the index of the character in the original string.

            Returns:
                token_index (:obj:`int` or :obj:`List[int]`):
                    Index or indices of the associated encoded token(s).
        """
        if not self._encodings:
            raise ValueError("char_to_word() is not available when using Python based tokenizers")
        if char_index is not None:
            batch_index = batch_or_char_index
        else:
            batch_index = 0
            char_index = batch_or_char_index
        return self._encodings[batch_index].char_to_word(char_index)

    @torch_required
    def to(self, device: str):
        """Send all values to device by calling v.to(device)"""
        self.data = {k: v.to(device) for k, v in self.data.items()}
        return self
class SpecialTokensMixin:
    """ SpecialTokensMixin is the base class of ``PreTrainedTokenizer`` and ``PreTrainedTokenizerFast`` and
        handles specific behaviors related to special tokens. In particular, this class holds the
        attributes which can be used to directly access these special tokens in a
        model-independent manner and allows setting and updating the special tokens.
    """

    SPECIAL_TOKENS_ATTRIBUTES = [
        "bos_token",
        "eos_token",
        "unk_token",
        "sep_token",
        "pad_token",
        "cls_token",
        "mask_token",
        "additional_special_tokens",
    ]

    def __init__(self, **kwargs):
        self._bos_token = None
        self._eos_token = None
        self._unk_token = None
        self._sep_token = None
        self._pad_token = None
        self._cls_token = None
        self._mask_token = None
        self._pad_token_type_id = 0
        self._additional_special_tokens = []

        for key, value in kwargs.items():
            if key in self.SPECIAL_TOKENS_ATTRIBUTES:
                if key == "additional_special_tokens":
                    assert isinstance(value, (list, tuple)) and all(isinstance(t, str) for t in value)
                    setattr(self, key, value)
                elif isinstance(value, AddedTokenFast):
                    setattr(self, key, str(value))
                elif isinstance(value, str):
                    setattr(self, key, value)
                else:
                    raise TypeError(
                        "special token {} has to be either str or AddedTokenFast but got: {}".format(key, type(value))
                    )

    @property
    def bos_token(self):
        """ Beginning of sentence token (string). Log an error if used while not having been set. """
        if self._bos_token is None:
            logger.error("Using bos_token, but it is not set yet.")
        return self._bos_token

    @property
    def eos_token(self):
        """ End of sentence token (string). Log an error if used while not having been set. """
        if self._eos_token is None:
            logger.error("Using eos_token, but it is not set yet.")
        return self._eos_token

    @property
    def unk_token(self):
        """ Unknown token (string). Log an error if used while not having been set. """
        if self._unk_token is None:
            logger.error("Using unk_token, but it is not set yet.")
        return self._unk_token

    @property
    def sep_token(self):
        """ Separation token (string). E.g. used to separate context and query in an input sequence. Log an error if used while not having been set. """
        if self._sep_token is None:
            logger.error("Using sep_token, but it is not set yet.")
        return self._sep_token

    @property
    def pad_token(self):
        """ Padding token (string). Log an error if used while not having been set. """
        if self._pad_token is None:
            logger.error("Using pad_token, but it is not set yet.")
        return self._pad_token

    @property
    def cls_token(self):
        """ Classification token (string). E.g. to extract a summary of an input sequence leveraging self-attention along the full depth of the model. Log an error if used while not having been set. """
        if self._cls_token is None:
            logger.error("Using cls_token, but it is not set yet.")
        return self._cls_token

    @property
    def mask_token(self):
        """ Mask token (string). E.g. when training a model with masked-language modeling. Log an error if used while not having been set. """
        if self._mask_token is None:
            logger.error("Using mask_token, but it is not set yet.")
        return self._mask_token

    @property
    def additional_special_tokens(self):
        """ All the additional special tokens you may want to use (list of strings). Log an error if used while not having been set. """
        if self._additional_special_tokens is None:
            logger.error("Using additional_special_tokens, but it is not set yet.")
        return self._additional_special_tokens

    def _maybe_update_backend(self, value):
        """ To be overridden by a derived class if a backend tokenizer has to be updated. """
        pass

    @bos_token.setter
    def bos_token(self, value):
        self._bos_token = value
        self._maybe_update_backend([value])

    @eos_token.setter
    def eos_token(self, value):
        self._eos_token = value
        self._maybe_update_backend([value])

    @unk_token.setter
    def unk_token(self, value):
        self._unk_token = value
        self._maybe_update_backend([value])

    @sep_token.setter
    def sep_token(self, value):
        self._sep_token = value
        self._maybe_update_backend([value])

    @pad_token.setter
    def pad_token(self, value):
        self._pad_token = value
        self._maybe_update_backend([value])

    @cls_token.setter
    def cls_token(self, value):
        self._cls_token = value
        self._maybe_update_backend([value])

    @mask_token.setter
    def mask_token(self, value):
        self._mask_token = value
        self._maybe_update_backend([value])

    @additional_special_tokens.setter
    def additional_special_tokens(self, value):
        self._additional_special_tokens = value
        self._maybe_update_backend(value)

    @property
    def bos_token_id(self):
        """ Id of the beginning of sentence token in the vocabulary. Log an error if used while not having been set. """
        return self.convert_tokens_to_ids(self.bos_token)

    @property
    def eos_token_id(self):
        """ Id of the end of sentence token in the vocabulary. Log an error if used while not having been set. """
        return self.convert_tokens_to_ids(self.eos_token)

    @property
    def unk_token_id(self):
        """ Id of the unknown token in the vocabulary. Log an error if used while not having been set. """
        return self.convert_tokens_to_ids(self.unk_token)

    @property
    def sep_token_id(self):
        """ Id of the separation token in the vocabulary. Log an error if used while not having been set. """
        return self.convert_tokens_to_ids(self.sep_token)

    @property
    def pad_token_id(self):
        """ Id of the padding token in the vocabulary. Log an error if used while not having been set. """
        return self.convert_tokens_to_ids(self.pad_token)

    @property
    def pad_token_type_id(self):
        """ Id of the padding token type in the vocabulary. """
        return self._pad_token_type_id

    @property
    def cls_token_id(self):
        """ Id of the classification token in the vocabulary. Log an error if used while not having been set. """
        return self.convert_tokens_to_ids(self.cls_token)

    @property
    def mask_token_id(self):
        """ Id of the mask token in the vocabulary. Log an error if used while not having been set. """
        return self.convert_tokens_to_ids(self.mask_token)

    @property
    def additional_special_tokens_ids(self):
        """ Ids of all the additional special tokens in the vocabulary (list of integers). Log an error if used while not having been set. """
        return self.convert_tokens_to_ids(self.additional_special_tokens)

    @property
    def special_tokens_map(self):
        """ A dictionary mapping special token class attributes (cls_token, unk_token...) to their
            values ('<unk>', '<cls>'...)
        """
        set_attr = {}
        for attr in self.SPECIAL_TOKENS_ATTRIBUTES:
            attr_value = getattr(self, "_" + attr)
            if attr_value:
                set_attr[attr] = attr_value
        return set_attr

    @property
    def all_special_tokens(self):
        """ List all the special tokens ('<unk>', '<cls>'...) mapped to class attributes
            (cls_token, unk_token...).
        """
        all_toks = []
        set_attr = self.special_tokens_map
        for attr_value in set_attr.values():
            all_toks = all_toks + (list(attr_value) if isinstance(attr_value, (list, tuple)) else [attr_value])
        all_toks = list(set(all_toks))
        return all_toks

    @property
    def all_special_ids(self):
        """ List the vocabulary indices of the special tokens ('<unk>', '<cls>'...) mapped to
            class attributes (cls_token, unk_token...).
        """
        all_toks = self.all_special_tokens
        all_ids = self.convert_tokens_to_ids(all_toks)
        return all_ids
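# Illustrative sketch of the special-token attributes defined above. This is hedged: it assumes a
# cached 'bert-base-uncased' checkpoint, whose vocabulary registers '[PAD]', '[UNK]', '[CLS]',
# '[SEP]' and '[MASK]' out of the box.
#
#     from transformers import BertTokenizer
#     tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
#     tokenizer.pad_token, tokenizer.pad_token_id    # ('[PAD]', 0)
#     tokenizer.special_tokens_map                   # {'unk_token': '[UNK]', 'sep_token': '[SEP]', ...}
#     tokenizer.all_special_tokens                   # list of the registered special token strings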
vocab_size   s    zPreTrainedTokenizer.vocab_sizec                 C   s   dS )NFr#   rP   r#   r#   r$   is_fast  s    zPreTrainedTokenizer.is_fastc                 C   s   | j S )zq Kept here for backward compatibility.
            Now renamed to `model_max_length` to avoid ambiguity.
        )model_max_lengthrP   r#   r#   r$   max_len	  s    zPreTrainedTokenizer.max_lenc                 C   s   | j | jdd S )NFpairr   num_special_tokens_to_addrP   r#   r#   r$   max_len_single_sentence  s    z+PreTrainedTokenizer.max_len_single_sentencec                 C   s   | j | jdd S NTr   r   rP   r#   r#   r$   max_len_sentences_pair  s    z*PreTrainedTokenizer.max_len_sentences_pairc                 C   s.   || j | jdd kr"td ntddS )zM For backward compatibility, allow to try to setup 'max_len_single_sentence' Fr   zXSetting 'max_len_single_sentence' is now deprecated. This value is automatically set up.Nr   r   r8   r9   rW   r   r#   r#   r$   r     s    c                 C   s.   || j | jdd kr"td ntddS )zL For backward compatibility, allow to try to setup 'max_len_sentences_pair' Tr   zWSetting 'max_len_sentences_pair' is now deprecated. This value is automatically set up.Nr   r   r#   r#   r$   r   $  s    c                 C   s
   t  dS )z Returns the vocabulary as a dict of {token: index} pairs. `tokenizer.get_vocab()[token]` is equivalent to `tokenizer.convert_tokens_to_ids(token)` when `token` is in the vocab. Nr   rP   r#   r#   r$   	get_vocab0  s    zPreTrainedTokenizer.get_vocabNc                    s   t  jf | |d k	r|n
|dd }|d k	r2|nt| _|d| j| _| jdksbtd| j |d| j| _i | _t	 | _
i | _d| _i | _d S )Nr   r3   )r   leftzKPadding side should be selected between 'right' and 'left', current value: r   r#   )r@   rA   popVERY_LARGE_INTEGERr   r3   r   r   added_tokens_encoderr   unique_added_tokens_encoderadded_tokens_decoderinit_inputsinit_kwargs)rE   r   r   rF   r#   r$   rA   4  s    
zPreTrainedTokenizer.__init__c                 C   s   | j t| j S )z3 Size of the full vocabulary with the added tokens )r   lenr   rP   r#   r#   r$   __len__M  s    zPreTrainedTokenizer.__len__c                 O   s   | j ||S )a  
        Instantiate a :class:`~transformers.PreTrainedTokenizer` (or a derived class) from a predefined tokenizer.

        Args:
            pretrained_model_name_or_path: either:

                - a string with the `shortcut name` of a predefined tokenizer to load from cache or download, e.g.: ``bert-base-uncased``.
                - a string with the `identifier name` of a predefined tokenizer that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
                - a path to a `directory` containing vocabulary files required by the tokenizer, for instance saved using the :func:`~transformers.PreTrainedTokenizer.save_pretrained` method, e.g.: ``./my_model_directory/``.
                - (not applicable to all derived classes, deprecated) a path or url to a single saved vocabulary file if and only if the tokenizer only requires a single vocabulary file (e.g. Bert, XLNet), e.g.: ``./my_model_directory/vocab.txt``.

            cache_dir: (`optional`) string:
                Path to a directory in which a downloaded predefined tokenizer vocabulary files should be cached if the standard cache should not be used.

            force_download: (`optional`) boolean, default False:
                Force to (re-)download the vocabulary files and override the cached versions if they exists.

            resume_download: (`optional`) boolean, default False:
                Do not delete incompletely recieved file. Attempt to resume the download if such a file exists.

            proxies: (`optional`) dict, default None:
                A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
                The proxies are used on each request.

            inputs: (`optional`) positional arguments: will be passed to the Tokenizer ``__init__`` method.

            kwargs: (`optional`) keyword arguments: will be passed to the Tokenizer ``__init__`` method. Can be used to set special tokens like ``bos_token``, ``eos_token``, ``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``, ``mask_token``, ``additional_special_tokens``. See parameters in the doc string of :class:`~transformers.PreTrainedTokenizer` for details.

        Examples::

            # We can't instantiate directly the base class `PreTrainedTokenizer` so let's show our examples on a derived class: BertTokenizer

            # Download vocabulary from S3 and cache.
            tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')

            # Download vocabulary from S3 (user-uploaded) and cache.
            tokenizer = BertTokenizer.from_pretrained('dbmdz/bert-base-german-cased')

            # If vocabulary files are in a directory (e.g. tokenizer was saved using `save_pretrained('./test/saved_model/')`)
            tokenizer = BertTokenizer.from_pretrained('./test/saved_model/')

            # If the tokenizer uses a single vocabulary file, you can point directly to this file
            tokenizer = BertTokenizer.from_pretrained('./test/saved_model/my_vocab.txt')

            # You can link tokens to special vocabulary when instantiating
            tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', unk_token='<unk>')
            # You should be sure '<unk>' is in the vocabulary when doing that.
            # Otherwise use tokenizer.add_special_tokens({'unk_token': '<unk>'}) instead)
            assert tokenizer.unk_token == '<unk>'

        )_from_pretrained)clsinputsr   r#   r#   r$   from_pretrainedQ  s    5z#PreTrainedTokenizer.from_pretrainedc           $      O   s  | dd }| dd}| dd}| dd }| dd}t| j }	i }
i }||	kr| j D ]\}}|| |
|< qd| jr|| jkr| j|  }nt	d
|d|	| tj|st|rt| jd	krtd

| jtd
| j t| j d }||
|< n|tttd}| j| D ]`\}}tj|rxtj||}tj|st	d
| d }nt||dd}||
|< q0zHi }|
 D ]6\}}|d krd ||< nt||||||d||< qW nN tk
r(   ||	krd}n d
|d|	|t| j }t|Y nX tdd | D rftd
|d|	|t| j |
 D ]@\}}||| krt	d
| nt	d
|||  qn| dd }|d k	r t|dd}t !|}W 5 Q R X | dd}|s|}n|}|"| || jkrX| j| }|d k	rXt#|t$t%frXt&|'dt$d||d< | dd }| d d }| D ]\}}||krx|||< qx|d k	rt|dd}t !|}W 5 Q R X | D ]\}}||kr|||< qz| ||} W n t(k
r   t(d!Y nX || _)|| _*| j+"t,| j- |d k	rt|dd}!t !|!}"W 5 Q R X d"d# |" D }#| j."|" | j/"|# | j+"t,| j.  | S )$N	cache_dirforce_downloadFresume_downloadproxieslocal_files_onlyzModel name '{}' not found in model shortcut name list ({}). Assuming '{}' is a path, a model identifier, or url to a directory containing tokenizer files.z, r   zCalling {}.from_pretrained() with the path to a single file or url is not supported.Use a model identifier or the path to a directory instead.zPCalling {}.from_pretrained() with the path to a single file or url is deprecatedr   )added_tokens_filespecial_tokens_map_filetokenizer_config_filez&Didn't find file {}. We won't load it.)filenameZuse_cdn)r   r   r   r   r   z;Couldn't reach server at '{}' to download vocabulary files.zModel name '{}' was not found in tokenizers model name list ({}). We assumed '{}' was a path or url to a directory containing vocabulary files named {}, but couldn't find such vocabulary files at this path or url.c                 s   s   | ]}|d kV  qd S r?   r#   )rp   full_file_namer#   r#   r$   r     s     z7PreTrainedTokenizer._from_pretrained.<locals>.<genexpr>zModel name '{}' was not found in tokenizers model name list ({}). We assumed '{}' was a path, a model identifier, or url to a directory containing vocabulary files named {} but couldn't find such vocabulary files at this path or url.zloading file {}z loading file {} from cache at {}r   utf-8r>   r   r#   r   r   r   r   zoUnable to load vocabulary from file. Please check that the provided vocabulary is accessible and not corrupted.c                 S   s   i | ]\}}||qS r#   r#   ro   r#   r#   r$   rs   1  s      z8PreTrainedTokenizer._from_pretrained.<locals>.<dictcomp>)0r   r   r   rO   r   rS   r   copyr8   infor:   joinospathisfiler   r   r   rW   r   r9   ADDED_TOKENS_FILESPECIAL_TOKENS_MAP_FILETOKENIZER_CONFIG_FILEisdirexistsr   r   EnvironmentErrorrQ   r   openjsonloadupdaterB   r!   floatmingetOSErrorr   r   r   r   r   r   r   )$r   Zpretrained_model_name_or_pathr   r   r   r   r   r   r   Z	s3_modelsvocab_filesZinit_configurationfile_idZmap_listZadditional_files_names	file_namer   Zresolved_vocab_files	file_pathmsgr   Ztokenizer_config_handler   Zsaved_init_inputsr   r   r   Z	args_nameZspecial_tokens_map_handler   r   r   r.   Zadded_tokens_handleadded_tok_encoderadded_tok_decoderr#   r#   r$   r     s     
  










z$PreTrainedTokenizer._from_pretrainedc           
   	   C   sH  t j|s td| dS t j|t}t j|t}t j|t	}t
| j}t| jdkrtt
| j|d< | j D ]}||d q~t|ddd}|tj|dd	 W 5 Q R X t|ddd}|tj| jdd	 W 5 Q R X t| jdkr2t|ddd }tj| jdd	}|| W 5 Q R X | |}	|	||f S )
ac   Save the tokenizer vocabulary files together with:
                - added tokens,
                - special-tokens-to-class-attributes-mapping,
                - tokenizer instantiation positional and keywords inputs (e.g. do_lower_case for Bert).

            Warning: This won't save modifications you may have applied to the tokenizer after the instantiation
            (e.g. modifying tokenizer.do_lower_case after creation).

            This method make sure the full tokenizer can then be re-loaded using the
            :func:`~transformers.PreTrainedTokenizer.from_pretrained` class method.
        z+Saving directory ({}) should be a directoryNr   r   wr   r   F)ensure_ascii)r   r   r   r8   r   r:   r   r   r   r   r   deepcopyr   r   r   r   rO   r   r   writer   dumpsr   r   save_vocabulary)
rE   save_directoryr   r   r   Ztokenizer_configr   fZout_strr   r#   r#   r$   save_pretrained8  s*     
z#PreTrainedTokenizer.save_pretrainedc                 C   s   t dS )a   Save the tokenizer vocabulary to a directory. This method does *NOT* save added tokens
            and special token mappings.

            Please use :func:`~transformers.PreTrainedTokenizer.save_pretrained` `()` to save the full
            Tokenizer state if you want to reload it using the :func:`~transformers.PreTrainedTokenizer.from_pretrained`
            class method.
        Nr   )rE   r   r#   r#   r$   r   a  s    z#PreTrainedTokenizer.save_vocabularyc                    s   |sdS t |ts|g}g }|D ]p}t |ts2t jddrR| jkrR| }| jkr  	| 	 jkr ||kr |
| td| q t fddt|D }dd | D } j| t j t j _ j| t|S )	aK  
        Add a list of new tokens to the tokenizer class. If the new tokens are not in the
        vocabulary, they are added to it with indices starting from length of the current vocabulary.

        Args:
            new_tokens: string or list of string. Each string is a token to add. Tokens are only added if they are not
            already in the vocabulary (tested by checking if the tokenizer assign the index of the ``unk_token`` to them).

        Returns:
            Number of tokens added to the vocabulary.

        Examples::

            # Let's see how to increase the vocabulary of Bert model and tokenizer
            tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
            model = BertModel.from_pretrained('bert-base-uncased')

            num_added_toks = tokenizer.add_tokens(['new_tok1', 'my_new-tok2'])
            print('We have added', num_added_toks, 'tokens')
            model.resize_token_embeddings(len(tokenizer))  # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e. the length of the tokenizer.
        r   do_lower_caseFzAdding %s to the vocabularyc                 3   s"   | ]\}}|t  | fV  qd S r?   r   )rp   itokrP   r#   r$   r     s     z1PreTrainedTokenizer.add_tokens.<locals>.<genexpr>c                 S   s   i | ]\}}||qS r#   r#   ro   r#   r#   r$   rs     s      z2PreTrainedTokenizer.add_tokens.<locals>.<dictcomp>)rB   r   rJ   r   r   r   r   lowerry   r   appendr8   r   dict	enumeraterS   r   r   r   rO   unionr   r   r   )rE   
new_tokensZto_add_tokenstokenr   r   r#   rP   r$   
add_tokensk  s.    

zPreTrainedTokenizer.add_tokensFc                 C   s    g }g }t | ||r|ndS )a)  
        Returns the number of added tokens when encoding a sequence with special tokens.

        Note:
            This encodes inputs and checks the number of added tokens, and is therefore not efficient. Do not put this
            inside your training loop.

        Args:
            pair: Returns the number of added tokens in the case of a sequence pair if set to True, returns the
                number of added tokens in the case of a single sequence if set to False.

        Returns:
            Number of tokens added to sequences
        N)r    build_inputs_with_special_tokens)rE   r   token_ids_0token_ids_1r#   r#   r$   r     s    z-PreTrainedTokenizer.num_special_tokens_to_addc                 C   s   |sdS d}|  D ]\}}|| jks*t|dkrft|ttfrRtdd |D sVt|| |7 }nt|tstt|| |g7 }t	
d|| t| || q|S )a  
        Add a dictionary of special tokens (eos, pad, cls...) to the encoder and link them
        to class attributes. If special tokens are NOT in the vocabulary, they are added
        to it (indexed starting from the last index of the current vocabulary).

        Using `add_special_tokens` will ensure your special tokens can be used in several ways:

        - special tokens are carefully handled by the tokenizer (they are never split)
        - you can easily refer to special tokens using tokenizer class attributes like `tokenizer.cls_token`. This makes it easy to develop model-agnostic training and fine-tuning scripts.

        When possible, special tokens are already registered for provided pretrained models (ex: BertTokenizer cls_token is already registered to be '[CLS]' and XLM's one is also registered to be '</s>')

        Args:
            special_tokens_dict: dict of string. Keys should be in the list of predefined special attributes:
                [``bos_token``, ``eos_token``, ``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``, ``mask_token``,
                ``additional_special_tokens``].

                Tokens are only added if they are not already in the vocabulary (tested by checking if the tokenizer assign the index of the ``unk_token`` to them).

        Returns:
            Number of tokens added to the vocabulary.

        Examples::

            # Let's see how to add a new classification token to GPT-2
            tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
            model = GPT2Model.from_pretrained('gpt2')

            special_tokens_dict = {'cls_token': '<CLS>'}

            num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
            print('We have added', num_added_toks, 'tokens')
            model.resize_token_embeddings(len(tokenizer))  # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e. the length of the tokenizer.

            assert tokenizer.cls_token == '<CLS>'
        r   r}   c                 s   s   | ]}t |tV  qd S r?   r~   r   r#   r#   r$   r     s     z9PreTrainedTokenizer.add_special_tokens.<locals>.<genexpr>z+Assigning %s to the %s key of the tokenizer)rS   r   r   rB   r   r   r   r  rJ   r8   r   r   )rE   special_tokens_dictadded_tokensr   r   r#   r#   r$   add_special_tokens  s    %$z&PreTrainedTokenizer.add_special_tokens)textc                    s`   j  j|f|} fdd}jddr6||}dd fdd}j}|||}|S )	a   Converts a string in a sequence of tokens (string), using the tokenizer.
            Split in words for word-based vocabulary or sub-words for sub-word-based
            vocabularies (BPE/SentencePieces/WordPieces).

            Take care of added tokens.

            Args:
                text (:obj:`string`): The sequence to be encoded.
                **kwargs (:obj: `dict`): Arguments passed to the model-specific `prepare_for_tokenization` preprocessing method.
        c                    s6   dd  D }dd | d d }t|dd | S )	Nc                 S   s   g | ]}t |qS r#   )reescape)rp   Zs_tokr#   r#   r$   
<listcomp>  s     zHPreTrainedTokenizer.tokenize.<locals>.lowercase_text.<locals>.<listcomp>(|z)|z(.+?)c                 S   s   |   d p|   d  S Nr   r   )groupsr  )mr#   r#   r$   <lambda>      zFPreTrainedTokenizer.tokenize.<locals>.lowercase_text.<locals>.<lambda>)r   r  sub)r   Zescaped_special_tokspattern)r   r#   r$   lowercase_text  s    z4PreTrainedTokenizer.tokenize.<locals>.lowercase_textr  Fc                 S   s~   g }| | }t|D ]b\}}| }|dkr>|s>|| g7 }q|t|d kr`|rx||g7 }qxq|rn||g7 }|| g7 }q|S r  )splitr
  rstripr   )r  r  resultZ
split_textr  sub_textr#   r#   r$   split_on_token  s    

z4PreTrainedTokenizer.tokenize.<locals>.split_on_tokenc                    s   |  sg S | s |S g }|g}| D ]:}g }|D ](}| jkrR|||7 }q4||g7 }q4|}q(ttj fdd|D S )Nc                 3   s(   | ] }| j kr |n|gV  qd S r?   )r   	_tokenize)rp   r  rP   r#   r$   r   "  s   zHPreTrainedTokenizer.tokenize.<locals>.split_on_tokens.<locals>.<genexpr>)stripr(  r   r   	itertoolschainfrom_iterable)Ztok_listr  tokenized_textZ	text_listr  r&  )rE   r'  r#   r$   split_on_tokens  s(    


z5PreTrainedTokenizer.tokenize.<locals>.split_on_tokens)r   prepare_for_tokenizationr   r   r   )rE   r  r   r"  r.  r  r-  r#   )r   rE   r'  r$   tokenize  s    
zPreTrainedTokenizer.tokenizec                 K   s   t dS )a   Converts a string in a sequence of tokens (string), using the tokenizer.
            Split in words for word-based vocabulary or sub-words for sub-word-based
            vocabularies (BPE/SentencePieces/WordPieces).

            Do NOT take care of added tokens.
        Nr   rE   r  r   r#   r#   r$   r(  -  s    zPreTrainedTokenizer._tokenizec                 C   sB   |dkrdS t |tr | |S g }|D ]}|| | q(|S )z Converts a token string (or a sequence of tokens) in a single integer id
            (or a sequence of ids), using the vocabulary.
        N)rB   rJ   #_convert_token_to_id_with_added_vocr  )rE   rX   idsr  r#   r#   r$   r   6  s    

z)PreTrainedTokenizer.convert_tokens_to_idsc                 C   s*   |d krd S || j kr | j | S | |S r?   )r   _convert_token_to_idrE   r  r#   r#   r$   r2  E  s
    

z7PreTrainedTokenizer._convert_token_to_id_with_added_vocc                 C   s   t d S r?   r   r5  r#   r#   r$   r4  M  s    z(PreTrainedTokenizer._convert_token_to_idTr   longest_first)r  	text_pairr  r/   r0   truncation_strategyr2   return_tensorsc	              
   K   s*   | j |f|||||||d|	}
|
d S )a@  
        Converts a string in a sequence of ids (integer), using the tokenizer and vocabulary.

        Same as doing ``self.convert_tokens_to_ids(self.tokenize(text))``.

        Args:
            text (:obj:`str`, :obj:`List[str]` or :obj:`List[int]`):
                The first sequence to be encoded. This can be a string, a list of strings (tokenized string using
                the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
                method)
            text_pair (:obj:`str`, :obj:`List[str]` or :obj:`List[int]`, `optional`, defaults to :obj:`None`):
                Optional second sequence to be encoded. This can be a string, a list of strings (tokenized
                string using the `tokenize` method) or a list of integers (tokenized string ids using the
                `convert_tokens_to_ids` method)
            add_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`True`):
                If set to ``True``, the sequences will be encoded with the special tokens relative
                to their model.
            max_length (:obj:`int`, `optional`, defaults to :obj:`None`):
                If set to a number, will limit the total sequence returned so that it has a maximum length.
                If there are overflowing tokens, those will be added to the returned dictionary.
                You can set it to the maximal input size of the model with `max_length = tokenizer.model_max_length`.
            stride (:obj:`int`, `optional`, defaults to ``0``):
                If set to a number along with max_length, the overflowing tokens returned will contain some tokens
                from the main sequence returned. The value of this argument defines the number of additional tokens.
            truncation_strategy (:obj:`str`, `optional`, defaults to `longest_first`):
                String selected in the following options:

                - 'longest_first' (default) Iteratively reduce the inputs sequence until the input is under max_length
                  starting from the longest one at each token (when there is a pair of input sequences)
                - 'only_first': Only truncate the first sequence
                - 'only_second': Only truncate the second sequence
                - 'do_not_truncate': Does not truncate (raise an error if the input sequence is longer than max_length)
            pad_to_max_length (:obj:`bool`, `optional`, defaults to :obj:`False`):
                If set to True, the returned sequences will be padded according to the model's padding side and
                padding index, up to their max length. If no max length is specified, the padding is done up to the
                model's max length. The tokenizer padding sides are handled by the class attribute `padding_side`
                which can be set to the following strings:

                - 'left': pads on the left of the sequences
                - 'right': pads on the right of the sequences
                Defaults to False: no padding.
            return_tensors (:obj:`str`, `optional`, defaults to :obj:`None`):
                Can be set to 'tf' or 'pt' to return respectively TensorFlow :obj:`tf.constant`
                or PyTorch :obj:`torch.Tensor` instead of a list of python integers.
            **kwargs: passed to the `self.tokenize()` method
        )r7  r/   r  r0   r8  r2   r9  	input_ids)encode_plus)rE   r  r7  r  r/   r0   r8  r2   r9  r   encoded_inputsr#   r#   r$   encodeP  s    :	zPreTrainedTokenizer.encode)r  r7  r  r/   r0   r8  r2   is_pretokenizedr9  return_token_type_idsreturn_attention_maskreturn_overflowing_tokensreturn_special_tokens_maskreturn_offsets_mappingrI   c                    sp    fdd}|rt d|r2jdkr2td||}|dk	rJ||nd}j|||| |||	||
||dS )aI  
        Returns a dictionary containing the encoded sequence or sequence pair and additional information:
        the mask for sequence classification and the overflowing elements if a ``max_length`` is specified.

        Args:
            text (:obj:`str`, :obj:`List[str]` or :obj:`List[int]` (the later only for not-fast tokenizers)):
                The first sequence to be encoded. This can be a string, a list of strings (tokenized string using
                the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
                method)
            text_pair (:obj:`str`, :obj:`List[str]` or :obj:`List[int]`, `optional`, defaults to :obj:`None`):
                Optional second sequence to be encoded. This can be a string, a list of strings (tokenized
                string using the `tokenize` method) or a list of integers (tokenized string ids using the
                `convert_tokens_to_ids` method)
            add_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`True`):
                If set to ``True``, the sequences will be encoded with the special tokens relative
                to their model.
            max_length (:obj:`int`, `optional`, defaults to :obj:`None`):
                If set to a number, will limit the total sequence returned so that it has a maximum length.
                If there are overflowing tokens, those will be added to the returned dictionary
                You can set it to the maximal input size of the model with `max_length = tokenizer.model_max_length`.
            stride (:obj:`int`, `optional`, defaults to ``0``):
                If set to a number along with max_length, the overflowing tokens returned will contain some tokens
                from the main sequence returned. The value of this argument defines the number of additional tokens.
            truncation_strategy (:obj:`str`, `optional`, defaults to `longest_first`):
                String selected in the following options:

                - 'longest_first' (default): iteratively reduce the input sequences until the total length is under max_length,
                  removing one token at a time from the longest sequence (when there is a pair of input sequences)
                - 'only_first': Only truncate the first sequence
                - 'only_second': Only truncate the second sequence
                - 'do_not_truncate': Does not truncate (raise an error if the input sequence is longer than max_length)
            pad_to_max_length (:obj:`bool`, `optional`, defaults to :obj:`False`):
                If set to True, the returned sequences will be padded according to the model's padding side and
                padding index, up to their max length. If no max length is specified, the padding is done up to the
                model's max length. The tokenizer padding sides are handled by the class attribute `padding_side`
                which can be set to the following strings:

                - 'left': pads on the left of the sequences
                - 'right': pads on the right of the sequences
                Defaults to False: no padding.
            is_pretokenized (:obj:`bool`, defaults to :obj:`False`):
                Set to True to indicate the input is already tokenized
            return_tensors (:obj:`str`, `optional`, defaults to :obj:`None`):
                Can be set to 'tf' or 'pt' to return respectively TensorFlow :obj:`tf.constant`
                or PyTorch :obj:`torch.Tensor` instead of a list of python integers.
            return_token_type_ids (:obj:`bool`, `optional`, defaults to :obj:`None`):
                Whether to return token type IDs. If left to the default, will return the token type IDs according
                to the specific tokenizer's default, defined by the :obj:`return_outputs` attribute.

                `What are token type IDs? <../glossary.html#token-type-ids>`_
             return_attention_mask (:obj:`bool`, `optional`, defaults to :obj:`None`):
                Whether to return the attention mask. If left to the default, will return the attention mask according
                to the specific tokenizer's default, defined by the :obj:`return_outputs` attribute.

                `What are attention masks? <../glossary.html#attention-mask>`__
            return_overflowing_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Set to True to return overflowing token information (default False).
            return_special_tokens_mask (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Set to True to return special tokens mask information (default False).
            return_offsets_mapping (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Set to True to return (char_start, char_end) for each token (default False).
                If using Python's tokenizer, this method will raise NotImplementedError.
                This one is only available on fast tokenizers inheriting from PreTrainedTokenizerFast.
            **kwargs: passed to the `self.tokenize()` method

        Return:
            A Dictionary of shape::

                {
                    input_ids: list[int],
                    token_type_ids: list[int] if return_token_type_ids is True (default)
                    attention_mask: list[int] if return_attention_mask is True (default)
                    overflowing_tokens: list[int] if a ``max_length`` is specified and return_overflowing_tokens is True
                    num_truncated_tokens: int if a ``max_length`` is specified and return_overflowing_tokens is True
                    special_tokens_mask: list[int] if ``add_special_tokens`` if set to ``True``
                    and return_special_tokens_mask is True
                }

            With the fields:

            - ``input_ids``: list of token ids to be fed to a model
            - ``token_type_ids``: list of token type ids to be fed to a model
            - ``attention_mask``: list of indices specifying which tokens should be attended to by the model
            - ``overflowing_tokens``: list of overflowing tokens if a max length is specified.
             - ``num_truncated_tokens``: number of overflowing tokens if a ``max_length`` is specified
             - ``special_tokens_mask``: if adding special tokens, this is a list of [0, 1], with 1 specifying special added
               tokens and 0 specifying sequence tokens.
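
            Example (an illustrative sketch, not part of the original docstring)::

                from transformers import BertTokenizer

                tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
                enc = tokenizer.encode_plus(
                    "How old are you?", "I'm six years old",
                    max_length=16, pad_to_max_length=True,
                    return_special_tokens_mask=True,
                )
                # enc contains 'input_ids', 'token_type_ids', 'attention_mask'
                # and, because it was requested, 'special_tokens_mask'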
        [compiled bytecode and constants of PreTrainedTokenizer.encode_plus, including its error messages for
         return_offsets_mapping on Python tokenizers and for padding without a pad token, followed by the signature
         and compiled body of PreTrainedTokenizer.batch_encode_plus; its docstring follows]
        Returns a dictionary containing the encoded sequence or sequence pair and additional information:
        the mask for sequence classification and the overflowing elements if a ``max_length`` is specified.

        Args:
            batch_text_or_text_pairs (:obj:`List[str]`,  :obj:`List[Tuple[str, str]]`,
                                      :obj:`List[List[str]]`,  :obj:`List[Tuple[List[str], List[str]]]`,
                                      and for not-fast tokenizers, also:
                                      :obj:`List[List[int]]`,  :obj:`List[Tuple[List[int], List[int]]]`):
                Batch of sequences or pair of sequences to be encoded.
                This can be a list of string/string-sequences/int-sequences or a list of pair of
                string/string-sequences/int-sequence (see details in encode_plus)
            add_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`True`):
                If set to ``True``, the sequences will be encoded with the special tokens relative
                to their model.
            max_length (:obj:`int`, `optional`, defaults to :obj:`None`):
                If set to a number, will limit the total sequence returned so that it has a maximum length.
                If there are overflowing tokens, those will be added to the returned dictionary
            stride (:obj:`int`, `optional`, defaults to ``0``):
                If set to a number along with max_length, the overflowing tokens returned will contain some tokens
                from the main sequence returned. The value of this argument defines the number of additional tokens.
            truncation_strategy (:obj:`str`, `optional`, defaults to `longest_first`):
                String selected in the following options:

                - 'longest_first' (default): iteratively reduce the input sequences until the total length is under max_length,
                  removing one token at a time from the longest sequence (when there is a pair of input sequences)
                - 'only_first': Only truncate the first sequence
                - 'only_second': Only truncate the second sequence
                - 'do_not_truncate': Does not truncate (raise an error if the input sequence is longer than max_length)
            pad_to_max_length (:obj:`bool`, `optional`, defaults to :obj:`False`):
                If set to True, the returned sequences will be padded according to the model's padding side and
                padding index, up to their max length. If no max length is specified, the padding is done up to the
                model's max length. The tokenizer padding sides are handled by the class attribute `padding_side`
                which can be set to the following strings:

                - 'left': pads on the left of the sequences
                - 'right': pads on the right of the sequences
                Defaults to False: no padding.
            is_pretokenized (:obj:`bool`, defaults to :obj:`False`):
                Set to True to indicate the input is already tokenized
            return_tensors (:obj:`str`, `optional`, defaults to :obj:`None`):
                Can be set to 'tf' or 'pt' to return respectively TensorFlow :obj:`tf.constant`
                or PyTorch :obj:`torch.Tensor` instead of a list of python integers.
            return_token_type_ids (:obj:`bool`, `optional`, defaults to :obj:`None`):
                Whether to return token type IDs. If left to the default, will return the token type IDs according
                to the specific tokenizer's default, defined by the :obj:`return_outputs` attribute.

                `What are token type IDs? <../glossary.html#token-type-ids>`_
             return_attention_masks (:obj:`bool`, `optional`, defaults to :obj:`None`):
                Whether to return the attention mask. If left to the default, will return the attention mask according
                to the specific tokenizer's default, defined by the :obj:`return_outputs` attribute.

                `What are attention masks? <../glossary.html#attention-mask>`__
            return_overflowing_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Set to True to return overflowing token information (default False).
            return_special_tokens_masks (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Set to True to return special tokens mask information (default False).
            return_offsets_mapping (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Set to True to return (char_start, char_end) for each token (default False).
                If using Python's tokenizer, this method will raise NotImplementedError. This one is only available on
                Rust-based tokenizers inheriting from PreTrainedTokenizerFast.
            return_lengths (:obj:`bool`, `optional`, defaults to :obj:`False`):
                If set the resulting dictionary will include the length of each encoded inputs
            **kwargs: passed to the `self.tokenize()` method

        Return:
            A Dictionary of shape::

                {
                    input_ids: list[List[int]],
                    token_type_ids: list[List[int]] if return_token_type_ids is True (default)
                    attention_mask: list[List[int]] if return_attention_mask is True (default)
                    overflowing_tokens: list[List[int]] if a ``max_length`` is specified and return_overflowing_tokens is True
                    num_truncated_tokens: List[int] if a ``max_length`` is specified and return_overflowing_tokens is True
                    special_tokens_mask: list[List[int]] if ``add_special_tokens`` if set to ``True`` and return_special_tokens_mask is True
                }

            With the fields:

            - ``input_ids``: list of token ids to be fed to a model
            - ``token_type_ids``: list of token type ids to be fed to a model
            - ``attention_mask``: list of indices specifying which tokens should be attended to by the model
            - ``overflowing_tokens``: list of overflowing tokens if a max length is specified.
             - ``num_truncated_tokens``: number of overflowing tokens if a ``max_length`` is specified
             - ``special_tokens_mask``: if adding special tokens, this is a list of [0, 1], with 1 specifying special added
               tokens and 0 specifying sequence tokens.
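
            Example (an illustrative sketch, not part of the original docstring; return_tensors='pt' assumes PyTorch is installed)::

                from transformers import BertTokenizer

                tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
                batch = tokenizer.batch_encode_plus(
                    ["a short sentence", "a slightly longer example sentence"],
                    max_length=12, pad_to_max_length=True, return_tensors='pt',
                )
                # batch['input_ids'] is a (2, 12) tensor, padded with the model's pad token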
        [compiled bytecode and constants of PreTrainedTokenizer.batch_encode_plus, the PreTrainedTokenizer.convert_to_tensors_
         helper (conversion of the batch dictionary to 'tf' or 'pt' tensors, with error messages for missing frameworks,
         a missing pad token and uneven sequence lengths), and the signature and compiled body of
         PreTrainedTokenizer.prepare_for_model; its docstring follows]
   Prepares a sequence of input ids, or a pair of sequences of input ids, so that it can be used by the model.
        It adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
        manages a moving window (with user defined stride) for overflowing tokens

        Args:
            ids: list of tokenized input ids. Can be obtained from a string by chaining the
                `tokenize` and `convert_tokens_to_ids` methods.
            pair_ids: Optional second list of input ids. Can be obtained from a string by chaining the
                `tokenize` and `convert_tokens_to_ids` methods.
            max_length: maximum length of the returned list. Will truncate by taking into account the special tokens.
            add_special_tokens: if set to ``True``, the sequences will be encoded with the special tokens relative
                to their model.
            stride: window stride for overflowing tokens. Can be useful to remove edge effects when using a sequential
                list of inputs. The overflowing tokens will contain a part of the previous window of tokens.
            truncation_strategy: string selected in the following options:
                - 'longest_first' (default): iteratively reduce the input sequences until the total length is under max_length,
                    removing one token at a time from the longest sequence (when there is a pair of input sequences)
                - 'only_first': Only truncate the first sequence
                - 'only_second': Only truncate the second sequence
                - 'do_not_truncate': Does not truncate (raise an error if the input sequence is longer than max_length)
            pad_to_max_length: if set to True, the returned sequences will be padded according to the model's padding side and
                padding index, up to their max length. If no max length is specified, the padding is done up to the model's max length.
                The tokenizer padding sides are handled by the following strings:
                - 'left': pads on the left of the sequences
                - 'right': pads on the right of the sequences
                Defaults to False: no padding.
            return_tensors: (optional) can be set to 'tf' or 'pt' to return respectively TensorFlow tf.constant
                or PyTorch torch.Tensor instead of a list of python integers.
            return_token_type_ids: (optional) Set to False to avoid returning token_type_ids (default: set to model specifics).
            return_attention_mask: (optional) Set to False to avoid returning attention mask (default: set to model specifics)
            return_overflowing_tokens: (optional) Set to True to return overflowing token information (default False).
            return_special_tokens_mask: (optional) Set to True to return special tokens mask information (default False).
            return_lengths (:obj:`bool`, `optional`, defaults to :obj:`False`):
                If set the resulting dictionary will include the length of each encoded inputs

        Return:
            A Dictionary of shape::

                {
                    input_ids: list[int],
                    token_type_ids: list[int] if return_token_type_ids is True (default)
                    overflowing_tokens: list[int] if a ``max_length`` is specified and return_overflowing_tokens is True
                    num_truncated_tokens: int if a ``max_length`` is specified and return_overflowing_tokens is True
                    special_tokens_mask: list[int] if ``add_special_tokens`` if set to ``True`` and return_special_tokens_mask is True
                    length: int if return_lengths is True
                }

            With the fields:
                - ``input_ids``: list of token ids to be fed to a model
                - ``token_type_ids``: list of token type ids to be fed to a model

                - ``overflowing_tokens``: list of overflowing tokens if a max length is specified.
                - ``num_truncated_tokens``: number of overflowing tokens if a ``max_length`` is specified
                - ``special_tokens_mask``: if adding special tokens, this is a list of [0, 1], with 1 specifying special added
                    tokens and 0 specifying sequence tokens.
                - ``length``: this is the length of ``input_ids``
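
                Example (an illustrative sketch, not part of the original docstring), mirroring what ``encode_plus`` does internally::

                    from transformers import BertTokenizer

                    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
                    ids = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("Hello world"))
                    outputs = tokenizer.prepare_for_model(ids, max_length=8, pad_to_max_length=True)
                    # outputs['input_ids'] now includes the special tokens plus padding up to 8 ids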
        [compiled bytecode and constants of PreTrainedTokenizer.prepare_for_model, including its warning about sequences
         longer than the model maximum length and its padding-strategy errors, followed by
         PreTrainedTokenizer.prepare_for_tokenization (performs any necessary transformations before tokenization)
         and the compiled body of PreTrainedTokenizer.truncate_sequences; its docstring follows]
        Truncates a sequence pair in place to the maximum length.

        Args:
            ids: list of tokenized input ids. Can be obtained from a string by chaining the
                `tokenize` and `convert_tokens_to_ids` methods.
            pair_ids: Optional second list of input ids. Can be obtained from a string by chaining the
                `tokenize` and `convert_tokens_to_ids` methods.
            num_tokens_to_remove (:obj:`int`, `optional`, defaults to ``0``):
                number of tokens to remove using the truncation strategy
            truncation_strategy: string selected in the following options:
                - 'longest_first' (default): iteratively reduce the input sequences until the total length is under max_length,
                    removing one token at a time from the longest sequence (when there is a pair of input sequences).
                    Overflowing tokens only contain overflow from the first sequence.
                - 'only_first': Only truncate the first sequence. Raises an error if the first sequence is shorter than or equal to num_tokens_to_remove.
                - 'only_second': Only truncate the second sequence
                - 'do_not_truncate': Does not truncate (raise an error if the input sequence is longer than max_length)
            stride (:obj:`int`, `optional`, defaults to ``0``):
                If set to a number along with max_length, the overflowing tokens returned will contain some tokens
                from the main sequence returned. The value of this argument defines the number of additional tokens.
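
                Example (an illustrative sketch, not part of the original docstring; the ids are hypothetical)::

                    from transformers import BertTokenizer

                    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
                    ids = [101, 2023, 2003, 1037, 2146, 5164, 102]   # hypothetical first sequence
                    pair = [101, 2460, 102]                          # hypothetical second sequence
                    ids, pair, overflow = tokenizer.truncate_sequences(
                        ids, pair_ids=pair, num_tokens_to_remove=2, truncation_strategy='longest_first'
                    )
                    # with 'longest_first', both removed tokens come from `ids`, the longer sequence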
        [compiled bytecode and constants of PreTrainedTokenizer.truncate_sequences, including its truncation-strategy
         error messages, followed by the compiled bodies of PreTrainedTokenizer.create_token_type_ids_from_sequences
         and PreTrainedTokenizer.build_inputs_with_special_tokens; the docstring of the latter follows]
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks
        by concatenating and adding special tokens.
        This base implementation simply concatenates the sequences without adding any special tokens;
        model-specific subclasses override it. For example, a RoBERTa sequence has the following format:
            single sequence: <s> X </s>
            pair of sequences: <s> A </s></s> B </s>
        [compiled bytecode: remaining constants of PreTrainedTokenizer.build_inputs_with_special_tokens, followed by the
         signature and body of PreTrainedTokenizer.get_special_tokens_mask (the base implementation returns a list of zeros);
         its docstring follows]
        Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer ``prepare_for_model`` or ``encode_plus`` methods.

        Args:
            token_ids_0: list of ids (must not contain special tokens)
            token_ids_1: Optional list of ids (must not contain special tokens), necessary when fetching sequence ids
                for sequence pairs
            already_has_special_tokens: (default False) Set to True if the token list is already formatted with
                special tokens for the model

        Returns:
            A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
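
            Example (an illustrative sketch, not part of the original docstring, using a model-specific subclass)::

                from transformers import BertTokenizer

                tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
                ids = tokenizer.encode("Hello world")        # [CLS] ... [SEP]
                mask = tokenizer.get_special_tokens_mask(ids, already_has_special_tokens=True)
                # mask[0] and mask[-1] are 1 ([CLS] and [SEP]); the inner sequence tokens are 0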
        [compiled bytecode: remainder of PreTrainedTokenizer.get_special_tokens_mask, followed by the signature and body of
         PreTrainedTokenizer.convert_ids_to_tokens; its docstring follows]
        Converts a single index or a sequence of indices (integers) to a token
            (resp.) a sequence of tokens (str), using the vocabulary and added tokens.

            Args:
                skip_special_tokens: Don't decode special tokens (self.all_special_tokens). Default: False
        )rB   r!   r   _convert_id_to_tokenr   r  )rE   r3  rs  rX   indexr#   r#   r$   convert_ids_to_tokens?  s    	




z)PreTrainedTokenizer.convert_ids_to_tokensru  rI   c                 C   s   t d S r?   r   rE   ru  r#   r#   r$   rt  X  s    z(PreTrainedTokenizer._convert_id_to_token)rX   rI   c                 C   s   d | |S )z Converts a sequence of tokens (string) in a single string.
            The simplest way to do it is ' '.join(self.convert_ids_to_tokens(token_ids)),
            but we often want to remove sub-word tokenization artifacts at the same time.
         )r   rv  )rE   rX   r#   r#   r$   convert_tokens_to_string[  s    z,PreTrainedTokenizer.convert_tokens_to_string	token_idsrs  clean_up_tokenization_spacesrI   c           
      C   s   | j ||d}g }g }|D ]L}|r.|| jkr.q|| jkr\|rP|| | g }|| q|| q|r||| | d|}|r| |}	|	S |S dS )aH  
        Converts a sequence of ids (integers) into a string, using the tokenizer and vocabulary
        with options to remove special tokens and clean up tokenization spaces.
        Similar to doing ``self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))``.

        Args:
            token_ids: list of tokenized input ids. Can be obtained using the `encode` or `encode_plus` methods.
            skip_special_tokens: if set to True, special tokens will be removed from the decoded string.
            clean_up_tokenization_spaces: if set to True, will clean up the tokenization spaces.
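
            Example (an illustrative sketch, not part of the original docstring)::

                from transformers import BertTokenizer

                tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
                ids = tokenizer.encode("Don't hesitate!")
                print(tokenizer.decode(ids))                            # keeps [CLS] / [SEP]
                print(tokenizer.decode(ids, skip_special_tokens=True))  # special tokens removed, spacing cleaned up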
        )rs  ry  N)rv  r   r   r  rz  r   clean_up_tokenization)
rE   r|  rs  r}  Zfiltered_tokensZ	sub_textsZcurrent_sub_textr  r  
clean_textr#   r#   r$   decodeb  s&    


zPreTrainedTokenizer.decode)
out_stringrI   c                 C   sX   |  dd dd dd dd d	d
 dd dd dd dd dd} | S )zx Clean up a list of simple English tokenization artifacts like spaces before punctuations and abreviated forms.
        z ..z ??z !!z ,,z ' 'z n'tzn'tz 'mz'mz 'sz'sz 'vez'vez 'rez're)replace)r  r#   r#   r$   r~    s<            	 z)PreTrainedTokenizer.clean_up_tokenization)N)F)NTNr   r6  FN)NTNr   r6  FFNNNFFF)TNr   r6  FFNNNFFFF)NNTr   r6  FNNNFFF)Nr   r6  r   )N)N)NF)F)FT)Fr   r   r   r    r   r   rJ   r"   r   r   r   r   r!   r   r   r3   r`  ra  rt   r   r   r   r   r   r   r   rA   r   classmethodr   r   r  r   r  r   r  	TextInputr0  r(  r   r2  r4  r   PreTokenizedInputEncodedInputr	   ri  r=  r<   r;  TextInputPairPreTokenizedInputPairEncodedInputPairrY  r	  rV  rL  r/  r   rj  rk  r  rl  rv  rt  rz  r  staticmethodr~  ru   r#   r#   rF   r$   r     s  
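
        [illustrative sketch, not part of the original file: ``clean_up_tokenization`` is the static helper documented just
        above; it removes simple detokenization artifacts such as spaces before punctuation and around contractions]::

            from transformers import PreTrainedTokenizer

            print(PreTrainedTokenizer.clean_up_tokenization("do n't stop , please !"))
            # -> "don't stop, please!"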
6






6
 0)
1
6H	       K              &              D             
 I    
<            (r   c                       s(  e Zd ZdZed fddZeedddZeeddd	Z	ee
dd
dZeedddZedddZdd Zd;eee
 ee
 ee
 e
e
e
eeef dddZeedddZeee dddZd<ee e
edddZeeeef  ed d!d"Zeed# fd$d%Zd=e
ed&d'd(Zd>e ee  e
ee d)d*d+Z!d?eee  ee" ee# ee$ f e
ee eee
e
ee ee
 ee
 e
e
e
e
e%d/d0d1Z&d@ee e#f eee e#f  e
ee e
eee
ee
 ee
 ee
 e
e
e
e%d2d3d4Z'dAee e
e
ed5d6d7Z(ee)e d8d9d:Z*  Z+S )BPreTrainedTokenizerFasta2   Base class for all fast tokenizers (wrapping HuggingFace tokenizers library).

    Inherit from PreTrainedTokenizer.

    Handle all the shared methods for tokenization and special tokens as well as methods
    downloading/caching/loading pretrained tokenizers as well as adding tokens to the vocabulary.

    This class also contain the added tokens in a unified way on top of all tokenizers so we don't
    have to handle the specific vocabulary augmentation methods of the various underlying
    dictionary structures (BPE, sentencepiece...).

    Class attributes (overridden by derived classes):

        - ``vocab_files_names``: a python ``dict`` with, as keys, the ``__init__`` keyword name of each vocabulary file
            required by the model, and as associated values, the filename for saving the associated file (string).
        - ``pretrained_vocab_files_map``: a python ``dict`` of ``dict``, with the high-level keys
            being the ``__init__`` keyword name of each vocabulary file required by the model, the low-level being the
            `short-cut-names` (string) of the pretrained models with, as associated values, the `url` (string) to the
            associated pretrained vocabulary file.
        - ``max_model_input_sizes``: a python ``dict`` with, as keys, the `short-cut-names` (string) of the pretrained
            models, and as associated values, the maximum length of the sequence inputs of this model, or None if the
            model has no maximum input size.
        - ``pretrained_init_configuration``: a python ``dict`` with, as keys, the `short-cut-names` (string) of the
            pretrained models, and as associated values, a dictionary of specific arguments to pass to the
            ``__init__`` method of the tokenizer class for this pretrained model when loading the tokenizer with the
            ``from_pretrained()`` method.

    Args:
        - ``tokenizer`` (`BaseTokenizerFast`): A Fast tokenizer from the HuggingFace tokenizer library (in low level Rust language)
        - ``model_max_length``: (`Optional`) int: the maximum length in number of tokens for the inputs to the transformer model.
            When the tokenizer is loaded with `from_pretrained`, this will be set to the value stored for the associated
            model in ``max_model_input_sizes`` (see above). If no value is provided, or if no associated max_length can be
            found in ``max_model_input_sizes``, it will default to VERY_LARGE_INTEGER (`int(1e30)`).
        - ``padding_side``: (`Optional`) string: the side on which the model should have padding applied.
            Should be selected between ['right', 'left']
        - ``model_input_names``: (`Optional`) List[string]: the list of the forward pass inputs accepted by the
            model ("token_type_ids", "attention_mask"...).
        - ``bos_token``: (`Optional`) string: a beginning of sentence token.
            Will be associated to ``self.bos_token`` and ``self.bos_token_id``
        - ``eos_token``: (`Optional`) string: an end of sentence token.
            Will be associated to ``self.eos_token`` and ``self.eos_token_id``
        - ``unk_token``: (`Optional`) string: an unknown token.
            Will be associated to ``self.unk_token`` and ``self.unk_token_id``
        - ``sep_token``: (`Optional`) string: a separation token (e.g. to separate context and query in an input sequence).
            Will be associated to ``self.sep_token`` and ``self.sep_token_id``
        - ``pad_token``: (`Optional`) string: a padding token.
            Will be associated to ``self.pad_token`` and ``self.pad_token_id``
        - ``cls_token``: (`Optional`) string: a classification token (e.g. to extract a summary of an input sequence
            leveraging self-attention along the full depth of the model).
            Will be associated to ``self.cls_token`` and ``self.cls_token_id``
        - ``mask_token``: (`Optional`) string: a masking token (e.g. when training a model with masked-language
            modeling). Will be associated to ``self.mask_token`` and ``self.mask_token_id``
        - ``additional_special_tokens``: (`Optional`) list: a list of additional special tokens.
            Adding all special tokens here ensure they won't be split by the tokenization process.
            Will be associated to ``self.additional_special_tokens`` and ``self.additional_special_tokens_ids``
    )r.   c                    s*   t |tstd|| _t jf | d S )NzZTokenizer should be an instance of a Tokenizer provided by HuggingFace tokenizers library.)rB   BaseTokenizerFastrW   
_tokenizerr@   rA   )rE   r.   r   rF   r#   r$   rA     s    
z PreTrainedTokenizerFast.__init__rT   c                 C   s   | j S r?   )r  rP   r#   r#   r$   backend_tokenizer  s    z)PreTrainedTokenizerFast.backend_tokenizerc                 C   s
   | j j jS r?   )r  decoderrP   r#   r#   r$   r    s    zPreTrainedTokenizerFast.decoderc                 C   s   dS )NTr#   rP   r#   r#   r$   r     s    zPreTrainedTokenizerFast.is_fastc                 C   s   | j jddS )NFZwith_added_tokensr  Zget_vocab_sizerP   r#   r#   r$   r     s    z"PreTrainedTokenizerFast.vocab_sizec                 C   s   | j jddS )NTr  r  rP   r#   r#   r$   r     s    zPreTrainedTokenizerFast.__len__c                 C   s   | j | dS )zc Update the backend fast tokenizer.
            Override method from base class SpecialTokensMixin N)r  r  r   r#   r#   r$   r     s    z-PreTrainedTokenizerFast._maybe_update_backendNF)r>   r9  r?  r@  rA  rB  rC  rI   c                 C   s0  |dkrd| j k}|dkr$d| j k}|r@|jdk	r@|g|j }n|g}tt}	|D ]d}
|	d |
j |rz|	d |
j |r|	d |
j |r|	d |
j |rR|	d |
j	 qR|dk	r,|	
 D ]`\}}|dkrt rt||	|< q|dkrt rt||	|< q|dk	rtd	| q|	S )
a   Convert the encoding representation (from low-level HuggingFace tokenizer output) to a python Dict.

            Overflowing tokens are converted to additional examples (like batches) so the output values of
            the dict are lists (overflows) of lists (tokens).

            If return_tensors is not None, these lists of lists are converted to 2-D tensors
            for input_ids, token_type_ids and attention_mask.
            Output shape: (overflows, sequence length)
        Nr   r   r:  rg  Zoffset_mappingrZ  r]  r^  )r   Zoverflowingr   r   r  r3  Ztype_idsr   rg  offsetsrS   r   rZ  r_  r   rb  rc  r8   r9   r:   )rE   r>   r9  r?  r@  rA  rB  rC  rU   Zencoding_dicter   r   r#   r#   r$   _convert_encoding  s>    


z)PreTrainedTokenizerFast._convert_encoding)r  rI   c                 C   s   | j |}|d kr| jS |S r?   )r  Ztoken_to_idr   )rE   r  ru  r#   r#   r$   r2  1	  s    z;PreTrainedTokenizerFast._convert_token_to_id_with_added_vocrw  c                 C   s   | j t|S r?   )r  Zid_to_tokenr!   rx  r#   r#   r$   rt  7	  s    z,PreTrainedTokenizerFast._convert_id_to_token)rX   rs  rI   c                 C   s   | j ||S r?   )r  r  )rE   rX   rs  r#   r#   r$   rz  :	  s    z0PreTrainedTokenizerFast.convert_tokens_to_string)r  rI   c                 C   s   t |tr|g}| j|S )ax  
        Add a list of new tokens to the tokenizer class. If the new tokens are not in the
        vocabulary, they are added to it with indices starting from length of the current vocabulary.

        Args:
            new_tokens: string or list of strings or AddedTokenFast. Each string is a token to add.
            Tokens are only added if they are not already in the vocabulary. AddedTokenFast wraps a string token to let you personalize its behavior (whether this token should only match against a single word, whether this token should strip all potential whitespace on the left side, whether this token should strip all potential whitespace on the right side...).
            See details for AddedToken in the HuggingFace tokenizers library.

        Returns:
            Number of tokens added to the vocabulary.

        Examples::

            # Let's see how to increase the vocabulary of Bert model and tokenizer
            tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased')
            model = BertModel.from_pretrained('bert-base-uncased')

            num_added_toks = tokenizer.add_tokens(['new_tok1', 'my_new-tok2'])
            print('We have added', num_added_toks, 'tokens')
            model.resize_token_embeddings(len(tokenizer))  # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e. the length of the tokenizer.
        )rB   rJ   r  r  )rE   r  r#   r#   r$   r  =	  s    
z"PreTrainedTokenizerFast.add_tokens)r  rI   c                    s(   t  |}t| }| j| |S r?   )r@   r  r,   rQ   r  )rE   r  Znum_added_tokensrX   rF   r#   r$   r  X	  s    z*PreTrainedTokenizerFast.add_special_tokens)r   rI   c                 C   s   | j |S r?   )r  r   )rE   r   r#   r#   r$   r   f	  s    z1PreTrainedTokenizerFast.num_special_tokens_to_add)r  r   r  rI   c                 C   s   | j |||jS r?   )r  r=  rX   )rE   r  r   r  r#   r#   r$   r0  i	  s    z PreTrainedTokenizerFast.tokenizeTr   r6  )rO  r  r/   r0   r8  r2   r>  r9  r?  r@  rA  rB  rC  rR  rI   c                    sh  t |tstdt||p2d k	o2t|dk}|rJjd krJtdtj||||j	jj
jd	B |rHg }t|D ]\}}t |ttfstd|t|tt|dkot |d ttf}tjjj|r|d n|ddd	d
}|r&tjjjdd |d D ddd	d
}nd }j|||}|| qnft|dkrt |d ttfrjj|d d|i}njj|d |d}|g}njj||d}W 5 Q R X fdd|D }i }|d  D ]R  fdd|D }dkrtj|dd}ndkr.tj|dd}|| < qr^tdd t|D }||d< t||S )Nz2batch_text_or_text_pairs has to be a list (got {})r   zTUnable to set proper padding strategy as the tokenizer does not have a padding tokenr-   zbatch_encode_plus(..., is_pretokenized=True) requires batch_text_or_text_pairs to be either List[List[str]] or List[Tuple[List[str], List[str]]] but sample at index {} is of type {}r   Fr  TZgrowing_offsetsc                 S   s   g | ]}d |fqS  r#   )rp   sr#   r#   r$   r  	  s     z=PreTrainedTokenizerFast.batch_encode_plus.<locals>.<listcomp>r  c                    s$   g | ]}j | d qS ))r>   r9  r?  r@  rA  rB  rC  )r  )rp   r>   )r@  rC  rA  rB  r9  r?  rE   r#   r$   r  	  s   
c                    s   g | ]}|  D ]}|qqS r#   r#   )rp   rH   r  )r   r#   r$   r  	  s     
  rZ  )Zaxisr]  Zdimc                 S   s"   g | ]\}}|gt |d   qS )r:  r  )rp   r  encr#   r#   r$   r  	  s     overflow_to_sample_mapping)rB   r   rW   r:   r   r   r4   r;   r  r3   r5   r   r
  r   r   ri  rC   mergeencode_batchpost_processr  r=  rO   rZ  stackrb  r,   r<   )rE   rO  r  r/   r0   r8  r2   r>  r9  r?  r@  rA  rB  rC  rR  r   rU   r  sampleZis_pairZencodings_textZencodings_pairr>   rX   Z	sanitizedr  r  r#   )r   r@  rC  rA  rB  r9  r?  rE   r$   rY  n	  s    

 "  


z)PreTrainedTokenizerFast.batch_encode_plus)r  r7  r  r/   r2   r0   r8  r>  r9  r?  r@  rA  rB  rC  rI   c                 K   s>  |rt |trt|dkr| jj|dd}tj|dd}t |trl| jjdd |D dd}tj|dd}n&|d krzd }ntdt	|t	|| j
|||}t| j||	|
||||d	|}ntd
t	|t	|n>|r||fgn|g}| j|f|||||	|
|||||d|}|	s:tdd | D |j}|S )Nr   Fr  Tr  c                 S   s   g | ]}d |fqS r  r#   )rp   pr#   r#   r$   r  
  s     z7PreTrainedTokenizerFast.encode_plus.<locals>.<listcomp>zrencode_plus(..., is_pretokenized=True) requires text and text_pair to be List[str] but got (text={}, text_pair={}))r9  r?  r@  rA  rB  rC  zdencode_plus(..., is_pretokenized=True) requires text to be List[str] but got (text={}, text_pair={}))r  r/   r0   r8  r9  r?  r@  rA  rB  rC  r2   c                 S   s8   i | ]0\}}|t |d kr0t|d  tr0|d  n|qS )r   )r   rB   r   )rp   r   r   r#   r#   r$   rs   K
  s    z7PreTrainedTokenizerFast.encode_plus.<locals>.<dictcomp>)rB   r   r   r  r  rC   r  r   r:   r   r  r<   r  rY  rS   rU   )rE   r  r7  r  r/   r2   r0   r8  r>  r9  r?  r@  rA  rB  rC  r   r>   Zencoding_pairZbatched_outputZbatched_inputr#   r#   r$   r;  	  s~    
  	 z#PreTrainedTokenizerFast.encode_plusr{  c                 C   s(   | j ||}|r | |}|S |S d S r?   )r  r  r~  )rE   r|  rs  r}  r  r  r#   r#   r$   r  T
  s
    
zPreTrainedTokenizerFast.decode)r   rI   c                 C   sJ   t j|r| j|}n(t jt j|\}}| jj||d}t|S )N)name)r   r   r   r  saver#  abspathr   )rE   r   filesfolderfiler#   r#   r$   r   _
  s
    z'PreTrainedTokenizerFast.save_vocabulary)NNNFFF)F)F)NF)TNr   r6  FFNNNFFFF)NTNFr   r6  FNNNFFF)FT),r   r   r   r    r  rA   rt   r  DecoderFastr  ri  r   r!   r   r   r   rC   r	   r   rJ   r   r  r2  rt  r   rz  r   r   r  r	  r  r   r  r0  r  r  r  r<   rY  r;  r  r   r   ru   r#   r#   rF   r$   r    s   9
      
8     
                           
]     r  c                 C   sN   |  |jdd}|dkr*| dd|f S | dd|f |dd|f fS dS )z=Remove columns that are populated exclusively by pad_token_idr   r  N)neany)r:  r4   r   Zkeep_column_maskr#   r#   r$   
trim_batchi
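
[illustrative sketch, not part of the original file: ``trim_batch`` is the module-level helper defined just above;
it drops the columns of a padded batch that contain only the padding id (PyTorch required)]

    import torch
    from transformers.tokenization_utils import trim_batch

    input_ids = torch.tensor([[101, 2023, 102, 0, 0],
                              [101, 102, 0, 0, 0]])
    trimmed = trim_batch(input_ids, pad_token_id=0)
    # the last two columns are all padding, so trimmed has shape (2, 3)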
  s    r  )N)Fr    r   r(   r*  r   loggingr*   r   r  collectionsr   r   
contextlibr   typingr   r   r   r   r	   r
   r   r   Z
tokenizersr   r   r   rC   Ztokenizers.decodersr   r  Ztokenizers.implementationsr   r  Z
file_utilsr   r   r   r   r   r   Z
tensorflowrZ  rb  	getLoggerr   r8   r   r   r   r!   r   rm  rJ   r  r  r  r  r  r  r   r%   r,   ri  r;   r<   rv   r   r  r  r#   r#   r#   r$   <module>   s   ( 
@  7 ^           s   P 