Source code for paddlenlp.transformers.ernie_gram.modeling

# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Optional, Tuple

import paddle
import paddle.nn as nn
from paddle import Tensor

from .. import PretrainedModel, register_base_model
from ..ernie.modeling import ErniePooler
from ..model_outputs import (
    BaseModelOutputWithPoolingAndCrossAttentions,
    QuestionAnsweringModelOutput,
    SequenceClassifierOutput,
    TokenClassifierOutput,
    tuple_output,
)

__all__ = [
    "ErnieGramModel",
    "ErnieGramPretrainedModel",
    "ErnieGramForSequenceClassification",
    "ErnieGramForTokenClassification",
    "ErnieGramForQuestionAnswering",
]


class ErnieGramEmbeddings(nn.Layer):
    r"""
    Includes embeddings from word, position and token_type embeddings.
    """

    def __init__(
        self,
        vocab_size,
        emb_size=128,
        hidden_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        pad_token_id=0,
        rel_pos_size=None,
        num_attention_heads=None,
    ):
        super(ErnieGramEmbeddings, self).__init__()

        self.word_embeddings = nn.Embedding(vocab_size, emb_size, padding_idx=pad_token_id)
        self.position_embeddings = nn.Embedding(max_position_embeddings, emb_size)
        self.token_type_embeddings = nn.Embedding(type_vocab_size, emb_size)
        if rel_pos_size and num_attention_heads:
            self.rel_pos_embeddings = nn.Embedding(rel_pos_size, num_attention_heads)
        self.layer_norm = nn.LayerNorm(emb_size)
        self.dropout = nn.Dropout(hidden_dropout_prob)

    def forward(
        self,
        input_ids: Optional[Tensor] = None,
        token_type_ids: Optional[Tensor] = None,
        position_ids: Optional[Tensor] = None,
        inputs_embeds: Optional[Tensor] = None,
        past_key_values_length: int = 0,
    ):

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        input_shape = paddle.shape(inputs_embeds)[:-1]

        if position_ids is None:
            # may need to use the shape op to unify static and dynamic graphs
            ones = paddle.ones(input_shape, dtype="int64")
            seq_length = paddle.cumsum(ones, axis=1)
            position_ids = seq_length - ones

            if past_key_values_length > 0:
                position_ids = position_ids + past_key_values_length

            position_ids.stop_gradient = True

        if token_type_ids is None:
            token_type_ids_shape = input_shape
            token_type_ids = paddle.zeros(token_type_ids_shape, dtype="int64")

        position_embeddings = self.position_embeddings(position_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)

        embeddings = inputs_embeds + position_embeddings + token_type_embeddings
        embeddings = self.layer_norm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
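
# Note (illustrative sketch, not part of the original module): when `position_ids`
# is not provided, `ErnieGramEmbeddings.forward` derives it from the input shape via
# `cumsum`, so each token simply gets its 0-based offset, shifted by
# `past_key_values_length` when a cache is used. Conceptually:
#
#     import paddle
#     ones = paddle.ones([2, 4], dtype="int64")            # batch_size=2, seq_len=4
#     position_ids = paddle.cumsum(ones, axis=1) - ones    # [[0, 1, 2, 3], [0, 1, 2, 3]]
#     position_ids = position_ids + 3                      # with past_key_values_length=3
#
# The resulting ids index into `position_embeddings`, mirroring the logic above.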


class ErnieGramPretrainedModel(PretrainedModel):
    r"""
    An abstract class for pretrained ERNIE-Gram models. It provides ERNIE-Gram related
    `model_config_file`, `resource_files_names`, `pretrained_resource_files_map`,
    `pretrained_init_configuration`, `base_model_prefix` for downloading and
    loading pretrained models.
    See :class:`~paddlenlp.transformers.model_utils.PretrainedModel` for more details.
    """

    pretrained_init_configuration = {
        "ernie-gram-zh": {
            "attention_probs_dropout_prob": 0.1,
            "emb_size": 768,
            "hidden_act": "gelu",
            "hidden_dropout_prob": 0.1,
            "hidden_size": 768,
            "initializer_range": 0.02,
            "max_position_embeddings": 512,
            "num_attention_heads": 12,
            "num_hidden_layers": 12,
            "type_vocab_size": 2,
            "vocab_size": 18018,
        },
        "ernie-gram-zh-finetuned-dureader-robust": {
            "attention_probs_dropout_prob": 0.1,
            "emb_size": 768,
            "hidden_act": "gelu",
            "hidden_dropout_prob": 0.1,
            "hidden_size": 768,
            "initializer_range": 0.02,
            "max_position_embeddings": 512,
            "num_attention_heads": 12,
            "num_hidden_layers": 12,
            "type_vocab_size": 2,
            "vocab_size": 18018,
        },
    }
    pretrained_resource_files_map = {
        "model_state": {
            "ernie-gram-zh": "https://bj.bcebos.com/paddlenlp/models/transformers/ernie_gram_zh/ernie_gram_zh.pdparams",
            "ernie-gram-zh-finetuned-dureader-robust": "https://bj.bcebos.com/paddlenlp/models/transformers/ernie-gram-zh-finetuned-dureader-robust/model_state.pdparams",
        },
    }
    base_model_prefix = "ernie_gram"

    def init_weights(self, layer):
        """Initialization hook"""
        if isinstance(layer, (nn.Linear, nn.Embedding)):
            # only support dygraph, use truncated_normal and make it inplace
            # and configurable later
            if isinstance(layer.weight, paddle.Tensor):
                layer.weight.set_value(
                    paddle.tensor.normal(
                        mean=0.0,
                        std=self.initializer_range
                        if hasattr(self, "initializer_range")
                        else self.ernie_gram.config["initializer_range"],
                        shape=layer.weight.shape,
                    )
                )
        elif isinstance(layer, nn.LayerNorm):
            layer._epsilon = 1e-5


@register_base_model
class ErnieGramModel(ErnieGramPretrainedModel):
    r"""
    The bare ERNIE-Gram Model transformer outputting raw hidden-states.

    This model inherits from :class:`~paddlenlp.transformers.model_utils.PretrainedModel`.
    Refer to the superclass documentation for the generic methods.

    This model is also a Paddle `paddle.nn.Layer <https://www.paddlepaddle.org.cn/documentation/docs/en/api/paddle/fluid/dygraph/layers/Layer_en.html>`__ subclass.
    Use it as a regular Paddle Layer and refer to the Paddle documentation for all matters related to general usage and behavior.

    Args:
        vocab_size (int):
            Vocabulary size of the ERNIE-Gram model. Also the vocab size of the token embedding matrix.
        hidden_size (int, optional):
            Dimensionality of the embedding layer, encoder layers and pooler layer. Defaults to `768`.
        num_hidden_layers (int, optional):
            Number of hidden layers in the Transformer encoder. Defaults to `12`.
        num_attention_heads (int, optional):
            Number of attention heads for each attention layer in the Transformer encoder.
            Defaults to `12`.
        intermediate_size (int, optional):
            Dimensionality of the feed-forward (ff) layer in the encoder. Input tensors
            to ff layers are firstly projected from `hidden_size` to `intermediate_size`,
            and then projected back to `hidden_size`. Typically `intermediate_size` is larger than `hidden_size`.
            Defaults to `3072`.
        hidden_act (str, optional):
            The non-linear activation function in the feed-forward layer.
            ``"gelu"``, ``"relu"`` and any other paddle supported activation functions
            are supported. Defaults to ``"gelu"``.
        hidden_dropout_prob (float, optional):
            The dropout probability for all fully connected layers in the embeddings and encoder.
            Defaults to `0.1`.
        attention_probs_dropout_prob (float, optional):
            The dropout probability used in MultiHeadAttention in all encoder layers to drop some attention targets.
            Defaults to `0.1`.
        max_position_embeddings (int, optional):
            The maximum value of the dimensionality of position encoding, which dictates the maximum supported length of an input
            sequence. Defaults to `512`.
        type_vocab_size (int, optional):
            The vocabulary size of the `token_type_ids`.
            Defaults to `2`.
        initializer_range (float, optional):
            The standard deviation of the normal initializer for initializing all weight matrices.
            Defaults to `0.02`.

            .. note::
                A normal_initializer initializes weight matrices as normal distributions.
                See :meth:`ErnieGramPretrainedModel.init_weights()` for how weights are initialized in `ErnieGramModel`.

        rel_pos_size (int, optional):
            The relative position size just for the ERNIE-Gram English model. Defaults to `None`.
        pad_token_id (int, optional):
            The index of the padding token in the token vocabulary.
            Defaults to `0`.

    """

    def __init__(
        self,
        vocab_size,
        emb_size=768,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        pad_token_id=0,
        rel_pos_size=None,
    ):
        super(ErnieGramModel, self).__init__()
        self.pad_token_id = pad_token_id
        self.initializer_range = initializer_range
        self.embeddings = ErnieGramEmbeddings(
            vocab_size,
            emb_size,
            hidden_dropout_prob,
            max_position_embeddings,
            type_vocab_size,
            pad_token_id,
            rel_pos_size,
            num_attention_heads,
        )
        encoder_layer = nn.TransformerEncoderLayer(
            hidden_size,
            num_attention_heads,
            intermediate_size,
            dropout=hidden_dropout_prob,
            activation=hidden_act,
            attn_dropout=attention_probs_dropout_prob,
            act_dropout=0,
        )
        self.encoder = nn.TransformerEncoder(encoder_layer, num_hidden_layers)
        self.pooler = ErniePooler(hidden_size)
        self.apply(self.init_weights)

    def forward(
        self,
        input_ids: Optional[Tensor] = None,
        token_type_ids: Optional[Tensor] = None,
        position_ids: Optional[Tensor] = None,
        attention_mask: Optional[Tensor] = None,
        inputs_embeds: Optional[Tensor] = None,
        past_key_values: Optional[Tuple[Tuple[Tensor]]] = None,
        use_cache: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        r"""
        Args:
            input_ids (Tensor):
                Indices of input sequence tokens in the vocabulary. They are
                numerical representations of tokens that build the input sequence.
                Its data type should be `int64` and it has a shape of [batch_size, sequence_length].
            token_type_ids (Tensor, optional):
                Segment token indices to indicate first and second portions of the inputs.
                Indices can be either 0 or 1:

                - 0 corresponds to a **sentence A** token,
                - 1 corresponds to a **sentence B** token.

                Its data type should be `int64` and it has a shape of [batch_size, sequence_length].
                Defaults to `None`, which means no segment embeddings are added to the token embeddings.
            position_ids (Tensor, optional):
                Indices of positions of each input sequence token in the position embeddings.
                Selected in the range ``[0, config.max_position_embeddings - 1]``.
                Shape as `(batch_size, num_tokens)` and dtype as `int32` or `int64`.
                Defaults to `None`.
            attention_mask (Tensor, optional):
                Mask used in multi-head attention to avoid performing attention on some unwanted positions,
                usually the paddings or the subsequent positions.
                Its data type can be int, float and bool.
                When the data type is bool, the `masked` tokens have `False` values and the others have `True` values.
                When the data type is int, the `masked` tokens have `0` values and the others have `1` values.
                When the data type is float, the `masked` tokens have `-INF` values and the others have `0` values.
                It is a tensor with shape broadcasted to `[batch_size, num_attention_heads, sequence_length, sequence_length]`.
                For example, its shape can be [batch_size, sequence_length], [batch_size, sequence_length, sequence_length],
                [batch_size, num_attention_heads, sequence_length, sequence_length].
                We use whole-word-mask in ERNIE, so the whole word will have the same value. For example, with "使用" as a word,
                "使" and "用" will have the same value.
                Defaults to `None`, which means nothing is masked.
            inputs_embeds (Tensor, optional):
                If you want to control how to convert `input_ids` indices into associated vectors, you can
                pass an embedded representation directly instead of passing `input_ids`.
            past_key_values (tuple(tuple(Tensor)), optional):
                The length of the tuple equals the number of layers, and each inner tuple
                has 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`
                which contain precomputed key and value hidden states of the attention blocks.
                If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that
                don't have their past key value states given to this model) of shape `(batch_size, 1)`
                instead of all `input_ids` of shape `(batch_size, sequence_length)`.
            use_cache (`bool`, optional):
                If set to `True`, `past_key_values` key value states are returned.
                Defaults to `None`.
            output_hidden_states (bool, optional):
                Whether to return the hidden states of all layers.
                Defaults to `False`.
            output_attentions (bool, optional):
                Whether to return the attentions tensors of all attention layers.
                Defaults to `False`.
            return_dict (bool, optional):
                Whether to return a :class:`~paddlenlp.transformers.model_outputs.ModelOutput` object.
                If `False`, the output will be a tuple of tensors. Defaults to `False`.

        Returns:
            tuple: Returns tuple (``sequence_output``, ``pooled_output``).

            With the fields:

            - `sequence_output` (Tensor):
                Sequence of hidden-states at the last layer of the model.
                Its data type should be float32 and its shape is [batch_size, sequence_length, hidden_size].

            - `pooled_output` (Tensor):
                The output of the first token (`[CLS]`) in the sequence.
                We "pool" the model by simply taking the hidden state corresponding to the first token.
                Its data type should be float32 and its shape is [batch_size, hidden_size].

        Example:
            .. code-block::

                import paddle
                from paddlenlp.transformers import ErnieGramModel, ErnieGramTokenizer

                tokenizer = ErnieGramTokenizer.from_pretrained('ernie-gram-zh')
                model = ErnieGramModel.from_pretrained('ernie-gram-zh')

                inputs = tokenizer("欢迎使用百度飞桨!")
                inputs = {k: paddle.to_tensor([v]) for (k, v) in inputs.items()}
                sequence_output, pooled_output = model(**inputs)

        """
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time.")

        # init the default bool value
        output_attentions = output_attentions if output_attentions is not None else False
        output_hidden_states = output_hidden_states if output_hidden_states is not None else False
        return_dict = return_dict if return_dict is not None else False
        use_cache = use_cache if use_cache is not None else False
        past_key_values_length = 0
        if past_key_values is not None:
            past_key_values_length = past_key_values[0][0].shape[2]

        if attention_mask is None:
            attention_mask = paddle.unsqueeze(
                (input_ids == self.pad_token_id).astype(self.pooler.dense.weight.dtype) * -1e4, axis=[1, 2]
            )
            if past_key_values is not None:
                batch_size = past_key_values[0][0].shape[0]
                past_mask = paddle.zeros([batch_size, 1, 1, past_key_values_length], dtype=attention_mask.dtype)
                attention_mask = paddle.concat([past_mask, attention_mask], axis=-1)
        # For 2D attention_mask from tokenizer
        elif attention_mask.ndim == 2:
            attention_mask = paddle.unsqueeze(attention_mask, axis=[1, 2]).astype(paddle.get_default_dtype())
            attention_mask = (1.0 - attention_mask) * -1e4
        attention_mask.stop_gradient = True

        embedding_output = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            token_type_ids=token_type_ids,
            inputs_embeds=inputs_embeds,
            past_key_values_length=past_key_values_length,
        )

        self.encoder._use_cache = use_cache  # To be consistent with HF
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask,
            cache=past_key_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        if isinstance(encoder_outputs, type(input_ids)):
            sequence_output = encoder_outputs
            pooled_output = self.pooler(sequence_output)
            return (sequence_output, pooled_output)
        else:
            sequence_output = encoder_outputs[0]
            pooled_output = self.pooler(sequence_output)
            if not return_dict:
                return (sequence_output, pooled_output) + encoder_outputs[1:]
            return BaseModelOutputWithPoolingAndCrossAttentions(
                last_hidden_state=sequence_output,
                pooler_output=pooled_output,
                past_key_values=encoder_outputs.past_key_values,
                hidden_states=encoder_outputs.hidden_states,
                attentions=encoder_outputs.attentions,
            )
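

# Note (illustrative sketch, not part of the original module): `ErnieGramModel.forward`
# converts a 2D padding mask from the tokenizer into the additive 4D mask expected by the
# encoder, where kept positions contribute 0 and masked positions a large negative value:
#
#     import paddle
#     mask_2d = paddle.to_tensor([[1, 1, 1, 0]], dtype="float32")   # 1 = real token, 0 = pad
#     mask_4d = paddle.unsqueeze(mask_2d, axis=[1, 2])              # [batch, 1, 1, seq_len]
#     additive = (1.0 - mask_4d) * -1e4                             # [[[[0., 0., 0., -10000.]]]]
#
# The additive mask is then broadcast over heads and query positions inside attention.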


class ErnieGramForTokenClassification(ErnieGramPretrainedModel):
    r"""
    ERNIE-Gram Model with a linear layer on top of the hidden-states output layer,
    designed for token classification tasks like NER.

    Args:
        ernie_gram (`ErnieGramModel`):
            An instance of `ErnieGramModel`.
        num_classes (int, optional):
            The number of classes. Defaults to `2`.
        dropout (float, optional):
            The dropout probability for output of ERNIE-Gram.
            If None, use the same value as `hidden_dropout_prob`
            of `ErnieGramModel` instance `ernie_gram`. Defaults to `None`.
    """

    def __init__(self, ernie_gram, num_classes=2, dropout=None):
        super(ErnieGramForTokenClassification, self).__init__()
        self.num_classes = num_classes
        self.ernie_gram = ernie_gram  # allow ernie_gram to be config
        self.dropout = nn.Dropout(dropout if dropout is not None else self.ernie_gram.config["hidden_dropout_prob"])
        self.classifier = nn.Linear(
            self.ernie_gram.config["hidden_size"],
            num_classes,
            weight_attr=paddle.ParamAttr(
                initializer=nn.initializer.TruncatedNormal(std=self.ernie_gram.config["initializer_range"])
            ),
        )
        self.apply(self.init_weights)

    def forward(
        self,
        input_ids: Optional[Tensor] = None,
        token_type_ids: Optional[Tensor] = None,
        position_ids: Optional[Tensor] = None,
        attention_mask: Optional[Tensor] = None,
        inputs_embeds: Optional[Tensor] = None,
        labels: Optional[Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        r"""
        Args:
            input_ids (Tensor):
                See :class:`ErnieGramModel`.
            token_type_ids (Tensor, optional):
                See :class:`ErnieGramModel`.
            position_ids (Tensor, optional):
                See :class:`ErnieGramModel`.
            attention_mask (Tensor, optional):
                See :class:`ErnieGramModel`.
            labels (Tensor of shape `(batch_size, sequence_length)`, optional):
                Labels for computing the token classification loss. Indices should be in `[0, ..., num_classes - 1]`.
            inputs_embeds (Tensor, optional):
                See :class:`ErnieGramModel`.
            output_hidden_states (bool, optional):
                Whether to return the hidden states of all layers.
                Defaults to `False`.
            output_attentions (bool, optional):
                Whether to return the attentions tensors of all attention layers.
                Defaults to `False`.
            return_dict (bool, optional):
                Whether to return a :class:`~paddlenlp.transformers.model_outputs.TokenClassifierOutput` object.
                If `False`, the output will be a tuple of tensors. Defaults to `False`.

        Returns:
            Tensor: Returns tensor `logits`, a tensor of the input token classification logits.
            Shape as `[batch_size, sequence_length, num_classes]` and dtype as `float32`.

        Example:
            .. code-block::

                import paddle
                from paddlenlp.transformers import ErnieGramForTokenClassification, ErnieGramTokenizer

                tokenizer = ErnieGramTokenizer.from_pretrained('ernie-gram-zh')
                model = ErnieGramForTokenClassification.from_pretrained('ernie-gram-zh')

                inputs = tokenizer("欢迎使用百度飞桨!")
                inputs = {k: paddle.to_tensor([v]) for (k, v) in inputs.items()}
                logits = model(**inputs)
        """
        outputs = self.ernie_gram(
            input_ids,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            loss_fct = paddle.nn.CrossEntropyLoss()
            loss = loss_fct(logits.reshape((-1, self.num_classes)), labels.reshape((-1,)))
        if not return_dict:
            output = (logits,) + outputs[2:]
            return tuple_output(output, loss)

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
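

# Note (illustrative sketch, not part of the original module): the token-classification
# loss above flattens both logits and labels so every token position is scored independently:
#
#     import paddle
#     logits = paddle.rand([2, 5, 3])                 # [batch_size, seq_len, num_classes]
#     labels = paddle.randint(0, 3, [2, 5])           # [batch_size, seq_len]
#     loss = paddle.nn.CrossEntropyLoss()(
#         logits.reshape((-1, 3)), labels.reshape((-1,))
#     )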


class ErnieGramForQuestionAnswering(ErnieGramPretrainedModel):
    """
    ERNIE-Gram Model with a linear layer on top of the hidden-states output to
    compute `span_start_logits` and `span_end_logits`,
    designed for question-answering tasks like SQuAD.

    Args:
        ernie_gram (`ErnieGramModel`):
            An instance of `ErnieGramModel`.
    """

    def __init__(self, ernie_gram):
        super(ErnieGramForQuestionAnswering, self).__init__()
        self.ernie_gram = ernie_gram  # allow ernie_gram to be config
        self.classifier = nn.Linear(self.ernie_gram.config["hidden_size"], 2)
        self.apply(self.init_weights)

    def forward(
        self,
        input_ids: Optional[Tensor] = None,
        token_type_ids: Optional[Tensor] = None,
        position_ids: Optional[Tensor] = None,
        attention_mask: Optional[Tensor] = None,
        inputs_embeds: Optional[Tensor] = None,
        start_positions: Optional[Tensor] = None,
        end_positions: Optional[Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        r"""
        Args:
            input_ids (Tensor):
                See :class:`ErnieGramModel`.
            token_type_ids (Tensor, optional):
                See :class:`ErnieGramModel`.
            position_ids (Tensor, optional):
                See :class:`ErnieGramModel`.
            attention_mask (Tensor, optional):
                See :class:`ErnieGramModel`.
            inputs_embeds (Tensor, optional):
                See :class:`ErnieGramModel`.
            start_positions (Tensor of shape `(batch_size,)`, optional):
                Labels for position (index) of the start of the labelled span for computing the token classification loss.
                Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the
                sequence are not taken into account for computing the loss.
            end_positions (Tensor of shape `(batch_size,)`, optional):
                Labels for position (index) of the end of the labelled span for computing the token classification loss.
                Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the
                sequence are not taken into account for computing the loss.
            output_hidden_states (bool, optional):
                Whether to return the hidden states of all layers.
                Defaults to `False`.
            output_attentions (bool, optional):
                Whether to return the attentions tensors of all attention layers.
                Defaults to `False`.
            return_dict (bool, optional):
                Whether to return a :class:`~paddlenlp.transformers.model_outputs.QuestionAnsweringModelOutput` object.
                If `False`, the output will be a tuple of tensors. Defaults to `False`.

        Returns:
            tuple: Returns tuple (`start_logits`, `end_logits`).

            With the fields:

            - `start_logits` (Tensor):
                A tensor of the input token classification logits, indicates the start position of the labelled span.
                Its data type should be float32 and its shape is [batch_size, sequence_length].

            - `end_logits` (Tensor):
                A tensor of the input token classification logits, indicates the end position of the labelled span.
                Its data type should be float32 and its shape is [batch_size, sequence_length].

        Example:
            .. code-block::

                import paddle
                from paddlenlp.transformers import ErnieGramForQuestionAnswering, ErnieGramTokenizer

                tokenizer = ErnieGramTokenizer.from_pretrained('ernie-gram-zh')
                model = ErnieGramForQuestionAnswering.from_pretrained('ernie-gram-zh')

                inputs = tokenizer("欢迎使用百度飞桨!")
                inputs = {k: paddle.to_tensor([v]) for (k, v) in inputs.items()}
                start_logits, end_logits = model(**inputs)
        """
        outputs = self.ernie_gram(
            input_ids,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        logits = self.classifier(outputs[0])
        logits = paddle.transpose(logits, perm=[2, 0, 1])
        start_logits, end_logits = paddle.unstack(x=logits, axis=0)

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split adds a dimension
            if start_positions.ndim > 1:
                start_positions = start_positions.squeeze(-1)
            if end_positions.ndim > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = paddle.shape(start_logits)[1]
            start_positions = start_positions.clip(0, ignored_index)
            end_positions = end_positions.clip(0, ignored_index)

            loss_fct = paddle.nn.CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            output = (start_logits, end_logits) + outputs[2:]
            return tuple_output(output, total_loss)

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
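

# Note (illustrative sketch, not part of the original module): the span head above
# produces one tensor of shape [batch_size, seq_len, 2] and splits it into start and
# end logits by moving the last axis to the front and unstacking it:
#
#     import paddle
#     logits = paddle.rand([8, 128, 2])                           # [batch_size, seq_len, 2]
#     logits = paddle.transpose(logits, perm=[2, 0, 1])           # [2, batch_size, seq_len]
#     start_logits, end_logits = paddle.unstack(logits, axis=0)   # two [batch_size, seq_len] tensors
#
# Each tensor is scored against the gold start/end index with cross-entropy, and the
# two losses are averaged.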


class ErnieGramForSequenceClassification(ErnieGramPretrainedModel):
    r"""
    ERNIE-Gram Model with a linear layer on top of the output layer,
    designed for sequence classification/regression tasks like GLUE tasks.

    Args:
        ernie_gram (ErnieGramModel):
            An instance of `paddlenlp.transformers.ErnieGramModel`.
        num_classes (int, optional):
            The number of classes. Defaults to `2`.
        dropout (float, optional):
            The dropout probability for output of ERNIE-Gram.
            If None, use the same value as `hidden_dropout_prob`
            of the `paddlenlp.transformers.ErnieGramModel` instance. Defaults to `None`.
    """

    def __init__(self, ernie_gram, num_classes=2, dropout=None):
        super(ErnieGramForSequenceClassification, self).__init__()
        self.num_classes = num_classes
        self.ernie_gram = ernie_gram  # allow ernie_gram to be config
        self.dropout = nn.Dropout(dropout if dropout is not None else self.ernie_gram.config["hidden_dropout_prob"])
        self.classifier = nn.Linear(self.ernie_gram.config["hidden_size"], num_classes)
        self.apply(self.init_weights)

    def forward(
        self,
        input_ids: Optional[Tensor] = None,
        token_type_ids: Optional[Tensor] = None,
        position_ids: Optional[Tensor] = None,
        attention_mask: Optional[Tensor] = None,
        inputs_embeds: Optional[Tensor] = None,
        labels: Optional[Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        r"""
        Args:
            input_ids (Tensor):
                See :class:`ErnieGramModel`.
            token_type_ids (Tensor, optional):
                See :class:`ErnieGramModel`.
            position_ids (Tensor, optional):
                See :class:`ErnieGramModel`.
            attention_mask (Tensor, optional):
                See :class:`ErnieGramModel`.
            labels (Tensor of shape `(batch_size,)`, optional):
                Labels for computing the sequence classification/regression loss.
                Indices should be in `[0, ..., num_classes - 1]`. If `num_classes == 1`
                a regression loss is computed (Mean-Square loss), if `num_classes > 1`
                a classification loss is computed (Cross-Entropy).
            inputs_embeds (Tensor, optional):
                See :class:`ErnieGramModel`.
            output_hidden_states (bool, optional):
                Whether to return the hidden states of all layers.
                Defaults to `False`.
            output_attentions (bool, optional):
                Whether to return the attentions tensors of all attention layers.
                Defaults to `False`.
            return_dict (bool, optional):
                Whether to return a :class:`~paddlenlp.transformers.model_outputs.SequenceClassifierOutput` object.
                If `False`, the output will be a tuple of tensors. Defaults to `False`.

        Returns:
            Tensor: Returns tensor `logits`, a tensor of the input text classification logits.
            Shape as `[batch_size, num_classes]` and dtype as float32.

        Example:
            .. code-block::

                import paddle
                from paddlenlp.transformers import ErnieGramForSequenceClassification, ErnieGramTokenizer

                tokenizer = ErnieGramTokenizer.from_pretrained('ernie-gram-zh')
                model = ErnieGramForSequenceClassification.from_pretrained('ernie-gram-zh')

                inputs = tokenizer("欢迎使用百度飞桨!")
                inputs = {k: paddle.to_tensor([v]) for (k, v) in inputs.items()}
                logits = model(**inputs)
        """
        outputs = self.ernie_gram(
            input_ids,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled_output = self.dropout(outputs[1])
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.num_classes == 1:
                loss_fct = paddle.nn.MSELoss()
                loss = loss_fct(logits, labels)
            elif labels.dtype == paddle.int64 or labels.dtype == paddle.int32:
                loss_fct = paddle.nn.CrossEntropyLoss()
                loss = loss_fct(logits.reshape((-1, self.num_classes)), labels.reshape((-1,)))
            else:
                loss_fct = paddle.nn.BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return tuple_output(output, loss)

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
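

# Note (illustrative sketch, not part of the original module): when `labels` are passed to
# `ErnieGramForSequenceClassification.forward`, the loss is chosen from the number of classes
# and the label dtype: MSE for regression (`num_classes == 1`), cross-entropy for integer
# class ids, and BCE-with-logits for multi-label float targets. For the common single-label case:
#
#     import paddle
#     logits = paddle.to_tensor([[2.0, 0.5], [0.1, 1.5]])   # [batch_size, num_classes]
#     labels = paddle.to_tensor([0, 1], dtype="int64")
#     loss = paddle.nn.CrossEntropyLoss()(logits, labels)
#
# which matches the cross-entropy branch taken for `int64`/`int32` labels.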