Source code for paddlenlp.transformers.nezha.tokenizer

# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

from paddlenlp.transformers import (
    BasicTokenizer,
    PretrainedTokenizer,
    WordpieceTokenizer,
)

__all__ = ["NeZhaTokenizer"]

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nezha-base-chinese": 512,
    "nezha-large-chinese": 512,
    "nezha-base-wwm-chinese": 512,
    "nezha-large-wwm-chinese": 512,
}


class NeZhaTokenizer(PretrainedTokenizer):
    """
    Constructs a NeZha tokenizer. It uses a basic tokenizer to do punctuation
    splitting, lower casing and so on, and follows a WordPiece tokenizer to
    tokenize as subwords.

    This tokenizer inherits from :class:`~paddlenlp.transformers.tokenizer_utils.PretrainedTokenizer`
    which contains most of the main methods. For more information regarding those methods,
    please refer to this superclass.

    Args:
        vocab_file (str):
            The vocabulary file path (ends with '.txt') required to instantiate
            a `WordpieceTokenizer`.
        do_lower_case (bool):
            Whether or not to lowercase the input when tokenizing.
            Defaults to `True`.
        unk_token (str):
            A special token representing the *unknown (out-of-vocabulary)* token.
            An unknown token is set to be `unk_token` in order to be converted to an ID.
            Defaults to "[UNK]".
        sep_token (str):
            A special token separating two different sentences in the same input.
            Defaults to "[SEP]".
        pad_token (str):
            A special token used to make arrays of tokens the same size for batching purposes.
            Defaults to "[PAD]".
        cls_token (str):
            A special token used for sequence classification. It is the first token
            of the sequence when built with special tokens. Defaults to "[CLS]".
        mask_token (str):
            A special token representing a masked token. This is the token used in
            the masked language modeling task which the model tries to predict the
            original unmasked ones. Defaults to "[MASK]".

    Examples:
        .. code-block::

            from paddlenlp.transformers import NeZhaTokenizer
            tokenizer = NeZhaTokenizer.from_pretrained('nezha-base-chinese')

            inputs = tokenizer('欢迎使用百度飞桨!')
            print(inputs)
            '''
            {'input_ids': [101, 3614, 6816, 886, 4500, 4636, 2428, 7607, 3444, 8013, 102],
            'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]}
            '''
    """

    resource_files_names = {"vocab_file": "vocab.txt"}  # for save_pretrained
    pretrained_resource_files_map = {
        "vocab_file": {
            "nezha-base-chinese": "http://bj.bcebos.com/paddlenlp/models/transformers/nezha/nezha-chinese-vocab.txt",
            "nezha-base-wwm-chinese": "http://bj.bcebos.com/paddlenlp/models/transformers/nezha/nezha-chinese-vocab.txt",
            "nezha-large-chinese": "http://bj.bcebos.com/paddlenlp/models/transformers/nezha/nezha-chinese-vocab.txt",
            "nezha-large-wwm-chinese": "http://bj.bcebos.com/paddlenlp/models/transformers/nezha/nezha-chinese-vocab.txt",
        }
    }
    pretrained_init_configuration = {
        "nezha-base-chinese": {"do_lower_case": False},
        "nezha-base-wwm-chinese": {"do_lower_case": False},
        "nezha-large-chinese": {"do_lower_case": False},
        "nezha-large-wwm-chinese": {"do_lower_case": False},
    }
    padding_side = "right"
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs
    ):
        if not os.path.isfile(vocab_file):
            raise ValueError(
                "Can't find a vocabulary file at path '{}'. To load the "
                "vocabulary from a pretrained model please use "
                "`tokenizer = NeZhaTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file)
            )
        self.vocab = self.load_vocabulary(vocab_file, unk_token=unk_token)
        self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=unk_token)

    @property
    def vocab_size(self):
        """
        Return the size of vocabulary.

        Returns:
            int: The size of vocabulary.
        """
        return len(self.vocab)

    def _tokenize(self, text):
        """
        End-to-end tokenization for NeZha models.

        Args:
            text (str): The text to be tokenized.

        Returns:
            list: A list of string representing converted tokens.
        """
        split_tokens = []
        for token in self.basic_tokenizer.tokenize(text):
            for sub_token in self.wordpiece_tokenizer.tokenize(token):
                split_tokens.append(sub_token)
        return split_tokens
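
    # Illustration of the two-stage pipeline: the basic tokenizer separates Chinese
    # characters and punctuation, and WordPiece then looks each piece up in the vocab.
    # For the pretrained Chinese vocab this is effectively character-level (values
    # match the example in `convert_tokens_to_string` below):
    #
    #   tokenizer._tokenize('欢迎使用百度飞桨!')
    #   # -> ['欢', '迎', '使', '用', '百', '度', '飞', '桨', '!']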

    def convert_tokens_to_string(self, tokens):
        """
        Converts a sequence of tokens (list of string) to a single string. Since
        WordPiece introduces `##` to mark subwords, the `##` prefixes are also
        removed when converting.

        Args:
            tokens (list): A list of string representing tokens to be converted.

        Returns:
            str: Converted string from tokens.

        Examples:
            .. code-block::

                from paddlenlp.transformers import NeZhaTokenizer

                tokenizer = NeZhaTokenizer.from_pretrained('nezha-base-chinese')
                tokens = tokenizer.tokenize('欢迎使用百度飞桨!')
                '''
                ['欢', '迎', '使', '用', '百', '度', '飞', '桨', '!']
                '''
                strings = tokenizer.convert_tokens_to_string(tokens)
                '''
                欢 迎 使 用 百 度 飞 桨 !
                '''
        """
        out_string = " ".join(tokens).replace(" ##", "").strip()
        return out_string

    def num_special_tokens_to_add(self, pair=False):
        """
        Returns the number of added tokens when encoding a sequence with special tokens.

        Args:
            pair (bool):
                Whether the input is a sequence pair or a single sequence.
                Defaults to `False` and the input is a single sequence.

        Returns:
            int: Number of tokens added to sequences.
        """
        token_ids_0 = []
        token_ids_1 = []
        return len(self.build_inputs_with_special_tokens(token_ids_0, token_ids_1 if pair else None))
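
    # Worked example, following the formats produced by
    # `build_inputs_with_special_tokens` below:
    #
    #   tokenizer.num_special_tokens_to_add(pair=False)  # -> 2  ([CLS] ... [SEP])
    #   tokenizer.num_special_tokens_to_add(pair=True)   # -> 3  ([CLS] ... [SEP] ... [SEP])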

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """
        Build model inputs from a sequence or a pair of sequences for sequence
        classification tasks by concatenating and adding special tokens.

        A NeZha sequence has the following format:

        - single sequence:      ``[CLS] X [SEP]``
        - pair of sequences:    ``[CLS] A [SEP] B [SEP]``

        Args:
            token_ids_0 (List[int]):
                List of IDs to which the special tokens will be added.
            token_ids_1 (List[int], optional):
                Optional second list of IDs for sequence pairs. Defaults to `None`.

        Returns:
            List[int]: List of input_id with the appropriate special tokens.
        """
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + token_ids_1 + _sep
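
    # Illustration, assuming the 'nezha-base-chinese' vocab from the class docstring
    # example, where [CLS] has id 101 and [SEP] has id 102:
    #
    #   tokenizer.build_inputs_with_special_tokens([3614, 6816])
    #   # -> [101, 3614, 6816, 102]
    #   tokenizer.build_inputs_with_special_tokens([3614], [6816])
    #   # -> [101, 3614, 102, 6816, 102]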

    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        """
        Build offset map from a pair of offset maps by concatenating and adding offsets of special tokens.

        A NeZha offset_mapping has the following format:

        - single sequence:      ``(0,0) X (0,0)``
        - pair of sequences:    ``(0,0) A (0,0) B (0,0)``

        Args:
            offset_mapping_0 (List[tuple]):
                List of wordpiece offsets to which the special tokens will be added.
            offset_mapping_1 (List[tuple], optional):
                Optional second list of wordpiece offsets for offset mapping pairs.
                Defaults to `None`.

        Returns:
            List[tuple]: A list of wordpiece offsets with the appropriate offsets of special tokens.
        """
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]

        return [(0, 0)] + offset_mapping_0 + [(0, 0)] + offset_mapping_1 + [(0, 0)]
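
    # Worked example: the (0, 0) entries mark the [CLS]/[SEP] positions, which do not
    # map back to any span of the original text:
    #
    #   tokenizer.build_offset_mapping_with_special_tokens([(0, 1), (1, 2)])
    #   # -> [(0, 0), (0, 1), (1, 2), (0, 0)]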

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task.

        A NeZha sequence pair mask has the following format:
        ::

            0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
            | first sequence    | second sequence |

        If :obj:`token_ids_1` is :obj:`None`, this method only returns the first portion of the mask (0s).

        Args:
            token_ids_0 (List[int]):
                A list of `inputs_ids` for the first sequence.
            token_ids_1 (List[int], optional):
                Optional second list of IDs for sequence pairs. Defaults to `None`.

        Returns:
            List[int]: List of token_type_id according to the given sequence(s).
        """
        _sep = [self.sep_token_id]
        _cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(_cls + token_ids_0 + _sep) * [0]
        return len(_cls + token_ids_0 + _sep) * [0] + len(token_ids_1 + _sep) * [1]
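
    # Worked example: the 0s cover "[CLS] A [SEP]" and the 1s cover "B [SEP]":
    #
    #   tokenizer.create_token_type_ids_from_sequences([3614, 6816])
    #   # -> [0, 0, 0, 0]
    #   tokenizer.create_token_type_ids_from_sequences([3614], [6816])
    #   # -> [0, 0, 0, 1, 1]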

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """
        Retrieves sequence ids from a token list that has no special tokens added. This method is
        called when adding special tokens using the tokenizer ``encode`` methods.

        Args:
            token_ids_0 (List[int]):
                A list of `inputs_ids` for the first sequence.
            token_ids_1 (List[int], optional):
                Optional second list of IDs for sequence pairs. Defaults to `None`.
            already_has_special_tokens (bool, optional):
                Whether or not the token list is already formatted with special tokens for the model.
                Defaults to `False`.

        Returns:
            List[int]: A list of integers which are either 0 or 1: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0))

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
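
    # Illustration (the ids 101/102 for [CLS]/[SEP] are taken from the class
    # docstring example for 'nezha-base-chinese'):
    #
    #   tokenizer.get_special_tokens_mask([3614, 6816])
    #   # -> [1, 0, 0, 1]
    #   tokenizer.get_special_tokens_mask([3614], [6816])
    #   # -> [1, 0, 1, 0, 1]
    #   tokenizer.get_special_tokens_mask([101, 3614, 6816, 102], already_has_special_tokens=True)
    #   # -> [1, 0, 0, 1]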

    def get_vocab(self):
        """
        Returns the vocabulary as a dict mapping token (str) to index (int),
        including any tokens that have been added to the vocabulary.
        """
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab