from tokenizers import Tokenizer, Encoding, AddedToken
from tokenizers.models import TokenizedSequence, TokenizedSequenceWithOffsets

from typing import List, Union, Tuple, Optional, Dict

Offsets = Tuple[int, int]


class BaseTokenizer:
    def __init__(self, tokenizer: Tokenizer, parameters=None):
        self._tokenizer = tokenizer
        self._parameters = parameters if parameters is not None else {}

    def __repr__(self):
        return "Tokenizer(vocabulary_size={}, {})".format(
            self._tokenizer.get_vocab_size(),
            ", ".join(k + "=" + str(v) for k, v in self._parameters.items()),
        )

    def num_special_tokens_to_add(self, is_pair: bool) -> int:
        """
        Return the number of special tokens that would be added for single/pair sentences.

        :param is_pair: Boolean indicating if the input would be a single sentence or a pair
        :return: The number of special tokens that would be added
        """
        return self._tokenizer.num_special_tokens_to_add(is_pair)

    def get_vocab(self, with_added_tokens: bool = True) -> Dict[str, int]:
        """ Returns the vocabulary

        Args:
            with_added_tokens: boolean:
                Whether to include the added tokens in the vocabulary

        Returns:
            The vocabulary
        """
        return self._tokenizer.get_vocab(with_added_tokens=with_added_tokens)

    def get_vocab_size(self, with_added_tokens: bool = True) -> int:
        """ Return the size of the vocabulary, with or without added tokens.

        Args:
            with_added_tokens: (`optional`) bool:
                Whether to count the added special tokens or not

        Returns:
            Size of the vocabulary
        """
        return self._tokenizer.get_vocab_size(with_added_tokens=with_added_tokens)

    def enable_padding(
        self,
        direction: Optional[str] = "right",
        pad_id: Optional[int] = 0,
        pad_type_id: Optional[int] = 0,
        pad_token: Optional[str] = "[PAD]",
        max_length: Optional[int] = None,
    ):
        """ Change the padding strategy

        Args:
            direction: (`optional`) str:
                Can be one of: `right` or `left`

            pad_id: (`optional`) unsigned int:
                The index to be used when padding

            pad_type_id: (`optional`) unsigned int:
                The type index to be used when padding

            pad_token: (`optional`) str:
                The pad token to be used when padding

            max_length: (`optional`) unsigned int:
                If specified, the length at which to pad. If not specified,
                we pad using the size of the longest sequence in the batch.
        """
        return self._tokenizer.enable_padding(
            direction=direction,
            pad_id=pad_id,
            pad_type_id=pad_type_id,
            pad_token=pad_token,
            max_length=max_length,
        )

    def no_padding(self):
        """ Disable padding """
        return self._tokenizer.no_padding()

    def enable_truncation(
        self, max_length: int, stride: Optional[int] = 0, strategy: Optional[str] = "longest_first"
    ):
        """ Change the truncation options

        Args:
            max_length: unsigned int:
                The maximum length at which to truncate

            stride: (`optional`) unsigned int:
                The length of the previous first sequence to be included
                in the overflowing sequence

            strategy: (`optional`) str:
                Can be one of `longest_first`, `only_first` or `only_second`
        """
        return self._tokenizer.enable_truncation(max_length, stride=stride, strategy=strategy)

    def no_truncation(self):
        """ Disable truncation """
        return self._tokenizer.no_truncation()

    def add_tokens(self, tokens: List[Union[str, AddedToken]]) -> int:
        """ Add the given tokens to the vocabulary

        Args:
            tokens: List[Union[str, AddedToken]]:
                A list of tokens to add to the vocabulary. Each token can either be
                a string or an instance of AddedToken

        Returns:
            The number of tokens that were added to the vocabulary
        """
        return self._tokenizer.add_tokens(tokens)

    def add_special_tokens(self, special_tokens: List[Union[str, AddedToken]]) -> int:
        """ Add the given special tokens to the vocabulary, and treat them as special tokens.

        The special tokens will never be processed by the model, and will be
        removed while decoding.

        Args:
            special_tokens: List[Union[str, AddedToken]]:
                A list of special tokens to add to the vocabulary. Each token can either be
                a string or an instance of AddedToken

        Returns:
            The number of tokens that were added to the vocabulary
        """
        return self._tokenizer.add_special_tokens(special_tokens)

    def normalize(self, sequence: str) -> str:
        """ Normalize the given sequence

        Args:
            sequence: str:
                The sequence to normalize

        Returns:
            The normalized string
        """
        return self._tokenizer.normalize(sequence)

    def encode_tokenized(
        self, sequence: Union[TokenizedSequence, TokenizedSequenceWithOffsets], type_id: int = 0
    ) -> Encoding:
        """ Encode the given sequence, skipping the Normalizer and PreTokenizer by providing
        already tokenized substrings.

        A sequence can either be:
            - `TokenizedSequence`: (`List[str]`)
            - `TokenizedSequenceWithOffsets`: (`List[Tuple[str, Offsets]]`) where Offsets is
              a `Tuple[int, int]`

        If the Offsets are not provided, they will be generated automatically, assuming
        that all the tokens in the `TokenizedSequence` are contiguous in the original string.

        Args:
            sequence: Union[TokenizedSequence, TokenizedSequenceWithOffsets]:
                Either a TokenizedSequence or a TokenizedSequenceWithOffsets

            type_id: int:
                The type id of the given sequence

        Returns:
            An Encoding
        """
        return self._tokenizer.model.encode(sequence, type_id)

    def encode_tokenized_batch(
        self,
        sequences: Union[List[TokenizedSequence], List[TokenizedSequenceWithOffsets]],
        type_id: int = 0,
    ) -> List[Encoding]:
        """ Encode the given batch of sequences, skipping the Normalizer and PreTokenizer by
        providing already tokenized substrings.

        A sequence can either be:
            - `TokenizedSequence`: (`List[str]`)
            - `TokenizedSequenceWithOffsets`: (`List[Tuple[str, Offsets]]`) where Offsets is
              a `Tuple[int, int]`

        If the Offsets are not provided, they will be generated automatically, assuming
        that all the tokens in each `TokenizedSequence` are contiguous in the original string.

        Args:
            sequences: Union[List[TokenizedSequence], List[TokenizedSequenceWithOffsets]]:
                A list of sequences. Each sequence is either a TokenizedSequence or a
                TokenizedSequenceWithOffsets

            type_id: int:
                The type id of the given sequences

        Returns:
            A list of Encoding
        """
        return self._tokenizer.model.encode_batch(sequences, type_id)

    def encode(self, sequence: str, pair: Optional[str] = None, add_special_tokens: bool = True) -> Encoding:
        """ Encode the given sequence

        Args:
            sequence: str:
                The sequence to encode

            pair: (`optional`) Optional[str]:
                The optional pair sequence

            add_special_tokens: bool:
                Whether to add the special tokens while encoding

        Returns:
            An Encoding
        """
        if sequence is None:
            raise ValueError("None input is not valid. Should be a string.")
        return self._tokenizer.encode(sequence, pair, add_special_tokens)

    def encode_batch(
        self, sequences: List[Union[str, Tuple[str, str]]], add_special_tokens: bool = True
    ) -> List[Encoding]:
        """ Encode the given sequences or pairs of sequences

        Args:
            sequences: List[Union[str, Tuple[str, str]]]:
                A list of sequences or pairs of sequences. The list can contain both
                at the same time.

            add_special_tokens: bool:
                Whether to add the special tokens while encoding

        Returns:
            A list of Encoding
        """
        if sequences is None:
            raise ValueError(
                "None input is not valid. Should be a list of strings or a list of tuple of strings."
            )
        return self._tokenizer.encode_batch(sequences, add_special_tokens)

    def decode(self, ids: List[int], skip_special_tokens: Optional[bool] = True) -> str:
        """ Decode the given list of ids to a string sequence

        Args:
            ids: List[unsigned int]:
                A list of ids to be decoded

            skip_special_tokens: (`optional`) boolean:
                Whether to remove all the special tokens from the output string

        Returns:
            The decoded string
        """
        if ids is None:
            raise ValueError("None input is not valid. Should be a list of integers.")
        return self._tokenizer.decode(ids, skip_special_tokens=skip_special_tokens)

    def decode_batch(self, sequences: List[List[int]], skip_special_tokens: Optional[bool] = True) -> List[str]:
        """ Decode the given list of sequences to a list of string sequences

        Args:
            sequences: List[List[unsigned int]]:
                A list of sequences of ids to be decoded

            skip_special_tokens: (`optional`) boolean:
                Whether to remove all the special tokens from the output strings

        Returns:
            A list of decoded strings
        """
        if sequences is None:
            raise ValueError("None input is not valid. Should be list of list of integers.")
        return self._tokenizer.decode_batch(sequences, skip_special_tokens=skip_special_tokens)

    def token_to_id(self, token: str) -> Optional[int]:
        """ Convert the given token to its corresponding id

        Args:
            token: str:
                The token to convert

        Returns:
            The corresponding id if it exists, None otherwise
        """
        return self._tokenizer.token_to_id(token)

    def id_to_token(self, id: int) -> Optional[str]:
        """ Convert the given token id to its corresponding string

        Args:
            id: int:
                The token id to convert

        Returns:
            The corresponding string if it exists, None otherwise
        """
        return self._tokenizer.id_to_token(id)

    def save(self, directory: str, name: Optional[str] = None):
        """ Save the current model to the given directory

        Args:
            directory: str:
                A path to the destination directory

            name: (`optional`) str:
                The name of the tokenizer, to be used in the saved files
        """
        return self._tokenizer.model.save(directory, name=name)

    def post_process(
        self, encoding: Encoding, pair: Optional[Encoding] = None, add_special_tokens: bool = True
    ) -> Encoding:
        """ Apply all the post-processing steps to the given encodings.

        The various steps are:
            1. Truncate according to the global params (provided to `enable_truncation`)
            2. Apply the PostProcessor
            3. Pad according to the global params (provided to `enable_padding`)

        Args:
            encoding: Encoding:
                The main Encoding to post process

            pair: Optional[Encoding]:
                An optional pair Encoding

            add_special_tokens: bool:
                Whether to add special tokens

        Returns:
            The resulting Encoding
        """
        return self._tokenizer.post_process(encoding, pair, add_special_tokens)
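

# A minimal usage sketch of the facade above. It assumes a trained BPE model saved
# locally as "vocab.json" / "merges.txt" (hypothetical paths); on recent `tokenizers`
# releases these would instead be loaded with `BPE.from_file`. Adjust the model type
# and paths to whatever you actually use.
if __name__ == "__main__":
    from tokenizers import Tokenizer
    from tokenizers.models import BPE

    # Wrap a raw Tokenizer in the BaseTokenizer facade defined above.
    base = BaseTokenizer(Tokenizer(BPE("vocab.json", "merges.txt")))

    # Global truncation/padding parameters, applied by encode()/encode_batch().
    base.enable_truncation(max_length=128)
    base.enable_padding(pad_id=0, pad_token="[PAD]", max_length=128)

    # Encode a single sequence, a pair of sequences, and a batch mixing both.
    single = base.encode("Hello, world!")
    paired = base.encode("A question?", "An answer.")
    batch = base.encode_batch(["First sentence.", ("Question?", "Answer.")])

    # Inspect the results and decode back to text.
    print(single.tokens, single.ids)
    print(base.decode(single.ids))
    print(base.decode_batch([encoding.ids for encoding in batch]))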