# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from .. import BasicTokenizer, PretrainedTokenizer, WordpieceTokenizer
__all__ = [
"ElectraTokenizer",
]
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"electra-small": 512,
"electra-base": 512,
"electra-large": 512,
"chinese-electra-base": 512,
"chinese-electra-small": 512,
"ernie-health-chinese": 512,
}
class ElectraTokenizer(PretrainedTokenizer):
"""
Constructs an Electra tokenizer. It uses a basic tokenizer to do punctuation
splitting, lower casing and so on, and then uses a WordPiece tokenizer to
split tokens into subwords.
This tokenizer inherits from :class:`~paddlenlp.transformers.tokenizer_utils.PretrainedTokenizer`
which contains most of the main methods. For more information regarding those methods,
please refer to this superclass.
Args:
vocab_file (str):
The vocabulary file path (ends with '.txt') required to instantiate
a `WordpieceTokenizer`.
do_lower_case (bool):
Whether or not to lowercase the input when tokenizing.
Defaults to `True`.
unk_token (str):
A special token representing the *unknown (out-of-vocabulary)* token.
An unknown token is set to `unk_token` in order to be converted to an ID.
Defaults to "[UNK]".
sep_token (str):
A special token separating two different sentences in the same input.
Defaults to "[SEP]".
pad_token (str):
A special token used to make arrays of tokens the same size for batching purposes.
Defaults to "[PAD]".
cls_token (str):
A special token used for sequence classification. It is the first token
of the sequence when built with special tokens. Defaults to "[CLS]".
mask_token (str):
A special token representing a masked token. This is the token used
in the masked language modeling task, in which the model tries to predict the original unmasked token.
Defaults to "[MASK]".
Examples:
.. code-block::
from paddlenlp.transformers import ElectraTokenizer
tokenizer = ElectraTokenizer.from_pretrained('electra-small')
tokens = tokenizer('He was a puppeteer')
'''
{'input_ids': [101, 2002, 2001, 1037, 13997, 11510, 102],
'token_type_ids': [0, 0, 0, 0, 0, 0, 0]}
'''
"""
resource_files_names = {"vocab_file": "vocab.txt"} # for save_pretrained
pretrained_resource_files_map = {
"vocab_file": {
"electra-small": "https://bj.bcebos.com/paddlenlp/models/transformers/electra/electra-small-vocab.txt",
"electra-base": "https://bj.bcebos.com/paddlenlp/models/transformers/electra/electra-base-vocab.txt",
"electra-large": "https://bj.bcebos.com/paddlenlp/models/transformers/electra/electra-large-vocab.txt",
"chinese-electra-base": "http://bj.bcebos.com/paddlenlp/models/transformers/chinese-electra-base/vocab.txt",
"chinese-electra-small": "http://bj.bcebos.com/paddlenlp/models/transformers/chinese-electra-small/vocab.txt",
"ernie-health-chinese": "https://paddlenlp.bj.bcebos.com/models/transformers/ernie-health-chinese/vocab.txt",
}
}
pretrained_init_configuration = {
"electra-small": {"do_lower_case": True},
"electra-base": {"do_lower_case": True},
"electra-large": {"do_lower_case": True},
"chinese-electra-base": {"do_lower_case": True},
"chinese-electra-small": {"do_lower_case": True},
"ernie-health-chinese": {"do_lower_case": True},
}
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__(
self,
vocab_file,
do_lower_case=True,
do_basic_tokenize=True,
never_split=None,
unk_token="[UNK]",
sep_token="[SEP]",
pad_token="[PAD]",
cls_token="[CLS]",
mask_token="[MASK]",
tokenize_chinese_chars=True,
strip_accents=None,
**kwargs
):
if not os.path.isfile(vocab_file):
raise ValueError(
"Can't find a vocabulary file at path '{}'. To load the "
"vocabulary from a pretrained model please use "
"`tokenizer = ElectraTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file)
)
self.do_lower_case = do_lower_case
self.vocab = self.load_vocabulary(vocab_file, unk_token=unk_token)
self.do_basic_tokenize = do_basic_tokenize
if do_basic_tokenize:
self.basic_tokenizer = BasicTokenizer(
do_lower_case=do_lower_case,
never_split=never_split,
tokenize_chinese_chars=tokenize_chinese_chars,
strip_accents=strip_accents,
)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=unk_token)
@property
def vocab_size(self):
"""
Returns the size of the vocabulary.
Returns:
int: The size of the vocabulary.
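Examples:
A minimal usage sketch:
.. code-block::
from paddlenlp.transformers import ElectraTokenizer
tokenizer = ElectraTokenizer.from_pretrained('electra-small')
tokenizer.vocab_size  # the number of entries in the loaded vocab.txt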
"""
return len(self.vocab)
def get_vocab(self):
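"""
Returns the vocabulary as a dict mapping token (str) to index (int),
including tokens registered in `added_tokens_encoder`.
"""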
return dict(self.vocab._token_to_idx, **self.added_tokens_encoder)
def _tokenize(self, text):
"""
End-to-end tokenization for Electra models.
Args:
text (str): The text to be tokenized.
Returns:
list: A list of strings representing the converted tokens.
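Examples:
A rough sketch; the exact subword split depends on the loaded vocabulary.
.. code-block::
from paddlenlp.transformers import ElectraTokenizer
tokenizer = ElectraTokenizer.from_pretrained('electra-small')
tokenizer.tokenize('He was a puppeteer')
# e.g. ['he', 'was', 'a', 'puppet', '##eer']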
"""
split_tokens = []
if self.do_basic_tokenize:
for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):
# If the token is part of the never_split set
if token in self.basic_tokenizer.never_split:
split_tokens.append(token)
else:
split_tokens += self.wordpiece_tokenizer.tokenize(token)
else:
split_tokens = self.wordpiece_tokenizer.tokenize(text)
return split_tokens
def convert_tokens_to_string(self, tokens):
"""
Converts a sequence of tokens (a list of strings) into a single string. Since
WordPiece uses `##` to mark subword continuations, the `##` prefixes are
also removed when converting.
Args:
tokens (list): A list of strings representing the tokens to be converted.
Returns:
str: Converted string from tokens.
Examples:
.. code-block::
from paddlenlp.transformers import ElectraTokenizer
tokenizer = ElectraTokenizer.from_pretrained('electra-small')
tokens = tokenizer.tokenize('He was a puppeteer')
string = tokenizer.convert_tokens_to_string(tokens)
"""
out_string = " ".join(tokens).replace(" ##", "").strip()
return out_string
def num_special_tokens_to_add(self, pair=False):
"""
Returns the number of added tokens when encoding a sequence with special tokens.
Args:
pair (bool, optional): Whether to return the number of added tokens for a
sequence pair (`True`) or for a single sequence (`False`). Defaults to `False`.
Returns:
int: Number of tokens added to sequences.
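Examples:
A minimal sketch for the standard `[CLS] ... [SEP] (... [SEP])` layout:
.. code-block::
from paddlenlp.transformers import ElectraTokenizer
tokenizer = ElectraTokenizer.from_pretrained('electra-small')
tokenizer.num_special_tokens_to_add(pair=False)  # 2: [CLS] and [SEP]
tokenizer.num_special_tokens_to_add(pair=True)   # 3: [CLS], [SEP] and [SEP]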
"""
token_ids_0 = []
token_ids_1 = []
return len(self.build_inputs_with_special_tokens(token_ids_0, token_ids_1 if pair else None))
def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
"""
Builds an offset map from a pair of offset maps by concatenating them and adding the offsets of special tokens.
An ELECTRA offset_mapping has the following format:
- single sequence: ``(0,0) X (0,0)``
- pair of sequences: ``(0,0) A (0,0) B (0,0)``
Args:
offset_mapping_0 (:obj:`List[tuple]`):
List of char offsets to which the special tokens will be added.
offset_mapping_1 (:obj:`List[tuple]`, `optional`):
Optional second list of char offsets for offset mapping pairs. Defaults to `None`.
Returns:
List[tuple]: List of char offsets with the appropriate offsets of special tokens.
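Examples:
A minimal sketch; the offsets below are made up for illustration.
.. code-block::
from paddlenlp.transformers import ElectraTokenizer
tokenizer = ElectraTokenizer.from_pretrained('electra-small')
offsets = [(0, 2), (3, 6)]
tokenizer.build_offset_mapping_with_special_tokens(offsets)
# [(0, 0), (0, 2), (3, 6), (0, 0)]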
"""
if offset_mapping_1 is None:
return [(0, 0)] + offset_mapping_0 + [(0, 0)]
return [(0, 0)] + offset_mapping_0 + [(0, 0)] + offset_mapping_1 + [(0, 0)]
def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
"""
Creates a token type ID mask from the two sequences passed, to be used in a sequence-pair classification task.
An ELECTRA sequence pair mask has the following format:
::
0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
| first sequence | second sequence |
If :obj:`token_ids_1` is :obj:`None`, this method only returns the first portion of the mask (0s).
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
List[int]: List of token_type_id according to the given sequence(s).
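Examples:
A rough sketch; `ids_a` and `ids_b` stand in for any encoded sequences.
.. code-block::
from paddlenlp.transformers import ElectraTokenizer
tokenizer = ElectraTokenizer.from_pretrained('electra-small')
ids_a = tokenizer.convert_tokens_to_ids(tokenizer.tokenize('He was a puppeteer'))
ids_b = tokenizer.convert_tokens_to_ids(tokenizer.tokenize('He made puppets'))
tokenizer.create_token_type_ids_from_sequences(ids_a, ids_b)
# [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)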
"""
_sep = [self.sep_token_id]
_cls = [self.cls_token_id]
if token_ids_1 is None:
return len(_cls + token_ids_0 + _sep) * [0]
return len(_cls + token_ids_0 + _sep) * [0] + len(token_ids_1 + _sep) * [1]
def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
"""
Builds a mask identifying the special tokens in a token list. This method is called when adding
special tokens using the tokenizer ``encode`` methods.
Args:
token_ids_0 (List[int]): List of ids of the first sequence.
token_ids_1 (List[int], optional): List of ids of the second sequence.
already_has_special_tokens (bool, optional): Whether or not the token list is already
formatted with special tokens for the model. Defaults to `False`.
Returns:
List[int]: The list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
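Examples:
A minimal sketch; `ids` stands in for any encoded sequence without special tokens.
.. code-block::
from paddlenlp.transformers import ElectraTokenizer
tokenizer = ElectraTokenizer.from_pretrained('electra-small')
ids = tokenizer.convert_tokens_to_ids(tokenizer.tokenize('He was a puppeteer'))
tokenizer.get_special_tokens_mask(ids)
# [1] + [0] * len(ids) + [1], where 1 marks the added [CLS]/[SEP] positions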
"""
if already_has_special_tokens:
if token_ids_1 is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model."
)
return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0))
if token_ids_1 is not None:
return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
return [1] + ([0] * len(token_ids_0)) + [1]