# coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch OpenAI GPT-2 model."""

import os
from dataclasses import dataclass
from typing import Optional, Tuple

import torch
import torch.nn as nn
import torch.utils.checkpoint
from torch.nn import CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...file_utils import (
    ModelOutput,
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    replace_return_docstrings,
)
from ...modeling_outputs import (
    BaseModelOutputWithPastAndCrossAttentions,
    CausalLMOutputWithCrossAttentions,
    SequenceClassifierOutputWithPast,
)
from ...modeling_utils import (
    Conv1D,
    PreTrainedModel,
    SequenceSummary,
    find_pruneable_heads_and_indices,
    prune_conv1d_layer,
)
from ...utils import logging
from ...utils.model_parallel_utils import assert_device_map, get_device_map
from .configuration_gpt2 import GPT2Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "GPT2Config"
_TOKENIZER_FOR_DOC = "GPT2Tokenizer"

GPT2_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "gpt2",
    "gpt2-medium",
    "gpt2-large",
    "gpt2-xl",
    "distilgpt2",
    # See all GPT-2 models at https://huggingface.co/models?filter=gpt2
]


def load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path):
    """Load TF checkpoints in a PyTorch model."""
    try:
        import re

        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    tf_path = os.path.abspath(gpt2_checkpoint_path)
    logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        logger.info("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array.squeeze())

    for name, array in zip(names, arrays):
        name = name[6:]  # skip "model/"
        name = name.split("/")
        pointer = model
        for m_name in name:
            if re.fullmatch(r"[A-Za-z]+\d+", m_name):
                scope_names = re.split(r"(\d+)", m_name)
            else:
                scope_names = [m_name]
            if scope_names[0] == "w" or scope_names[0] == "g":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "b":
                pointer = getattr(pointer, "bias")
            elif scope_names[0] == "wpe" or scope_names[0] == "wte":
                pointer = getattr(pointer, scope_names[0])
                pointer = getattr(pointer, "weight")
            else:
                pointer = getattr(pointer, scope_names[0])
            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]
        try:
            assert (
                pointer.shape == array.shape
            ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        logger.info("Initialize PyTorch weight {}".format(name))
        pointer.data = torch.from_numpy(array)
    return model


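# A minimal sketch of how the conversion helper above is typically driven (the checkpoint
# and config paths below are placeholders, not files shipped with this module):
#
#   >>> config = GPT2Config.from_json_file("/path/to/gpt2_config.json")
#   >>> model = GPT2Model(config)
#   >>> model = load_tf_weights_in_gpt2(model, config, "/path/to/tf_checkpoint")
#
# In practice the same conversion is reached through `from_pretrained(..., from_tf=True)`,
# which calls this function via the `load_tf_weights` hook on `GPT2PreTrainedModel`.

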
class Attention(nn.Module):
    def __init__(self, nx, n_ctx, config, scale=False, is_cross_attention=False):
        super().__init__()

        n_state = nx  # in Attention: n_state=768 (nx=n_embd)
        # [switch nx => n_state from Block to Attention to keep identical to TF implem]
        assert n_state % config.n_head == 0
        self.register_buffer(
            "bias", torch.tril(torch.ones((n_ctx, n_ctx), dtype=torch.uint8)).view(1, 1, n_ctx, n_ctx)
        )
        self.register_buffer("masked_bias", torch.tensor(-1e4))
        self.n_head = config.n_head
        self.split_size = n_state
        self.scale = scale
        self.is_cross_attention = is_cross_attention
        if self.is_cross_attention:
            self.c_attn = Conv1D(2 * n_state, nx)
            self.q_attn = Conv1D(n_state, nx)
        else:
            self.c_attn = Conv1D(3 * n_state, nx)
        self.c_proj = Conv1D(n_state, nx)
        self.attn_dropout = nn.Dropout(config.attn_pdrop)
        self.resid_dropout = nn.Dropout(config.resid_pdrop)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.n_head, self.split_size // self.n_head, self.pruned_heads
        )
        index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)])

        # Prune conv1d layers
        self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
        self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)

        # Update hyper params
        self.split_size = (self.split_size // self.n_head) * (self.n_head - len(heads))
        self.n_head = self.n_head - len(heads)
        self.pruned_heads = self.pruned_heads.union(heads)

    def _attn(self, q, k, v, attention_mask=None, head_mask=None, output_attentions=False):
        w = torch.matmul(q, k)
        if self.scale:
            w = w / (float(v.size(-1)) ** 0.5)
        nd, ns = w.size(-2), w.size(-1)

        if not self.is_cross_attention:
            # only the "normal" self-attention layer implements the causal mask
            mask = self.bias[:, :, ns - nd : ns, :ns]
            w = torch.where(mask.bool(), w, self.masked_bias.to(w.dtype))

        if attention_mask is not None:
            # Apply the attention mask
            w = w + attention_mask

        w = nn.Softmax(dim=-1)(w)
        w = self.attn_dropout(w)

        # Mask heads if we want to
        if head_mask is not None:
            w = w * head_mask

        outputs = (torch.matmul(w, v),)
        if output_attentions:
            outputs += (w,)
        return outputs

    def merge_heads(self, x):
        x = x.permute(0, 2, 1, 3).contiguous()
        new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
        return x.view(*new_x_shape)  # in Tensorflow implem: fct merge_states

    def split_heads(self, x, k=False):
        new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
        x = x.view(*new_x_shape)  # in Tensorflow implem: fct split_states
        if k:
            return x.permute(0, 2, 3, 1)  # (batch, head, head_features, seq_length)
        else:
            return x.permute(0, 2, 1, 3)  # (batch, head, seq_length, head_features)

    def forward(
        self,
        hidden_states,
        layer_past=None,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        use_cache=False,
        output_attentions=False,
    ):
        if encoder_hidden_states is not None:
            assert hasattr(
                self, "q_attn"
            ), "If class is used as cross attention, the weights `q_attn` have to be defined. Please make sure to instantiate class with `Attention(..., is_cross_attention=True)`."
            query = self.q_attn(hidden_states)
            key, value = self.c_attn(encoder_hidden_states).split(self.split_size, dim=2)
            attention_mask = encoder_attention_mask
        else:
            query, key, value = self.c_attn(hidden_states).split(self.split_size, dim=2)

        query = self.split_heads(query)
        key = self.split_heads(key, k=True)
        value = self.split_heads(value)
        if layer_past is not None:
            past_key, past_value = layer_past[0].transpose(-2, -1), layer_past[1]  # transpose back cf below
            key = torch.cat((past_key, key), dim=-1)
            value = torch.cat((past_value, value), dim=-2)

        if use_cache is True:
            present = (key.transpose(-2, -1), value)  # transpose to have same shapes
        else:
            present = None

        attn_outputs = self._attn(query, key, value, attention_mask, head_mask, output_attentions)
        a = attn_outputs[0]

        a = self.merge_heads(a)
        a = self.c_proj(a)
        a = self.resid_dropout(a)

        return (a, present) + attn_outputs[1:]  # a, present, (attentions)


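# Rough shape bookkeeping for the attention module above (values for the base "gpt2"
# configuration, n_embd=768 and n_head=12, are illustrative only; the 1/sqrt(64) scaling
# applies when the module is built with ``scale=True``, as ``Block`` does):
#
#   >>> # hidden_states:            (batch, seq_len, 768)
#   >>> # c_attn -> split:          query, key, value each (batch, seq_len, 768)
#   >>> # split_heads(query)        -> (batch, 12, seq_len, 64)
#   >>> # split_heads(key, k=True)  -> (batch, 12, 64, seq_len)   # pre-transposed for matmul
#   >>> # _attn: softmax(q @ k / sqrt(64)) @ v -> (batch, 12, seq_len, 64)
#   >>> # merge_heads + c_proj      -> (batch, seq_len, 768)

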
class MLP(nn.Module):
    def __init__(self, n_state, config):  # in MLP: n_state=3072 (4 * n_embd)
        super().__init__()
        nx = config.n_embd
        self.c_fc = Conv1D(n_state, nx)
        self.c_proj = Conv1D(nx, n_state)
        self.act = ACT2FN[config.activation_function]
        self.dropout = nn.Dropout(config.resid_pdrop)

    def forward(self, x):
        h = self.act(self.c_fc(x))
        h2 = self.c_proj(h)
        return self.dropout(h2)


class Block(nn.Module):
    def __init__(self, n_ctx, config, scale=False):
        super().__init__()
        hidden_size = config.n_embd
        inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size
        self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
        self.attn = Attention(hidden_size, n_ctx, config, scale)
        self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
        if config.add_cross_attention:
            self.crossattention = Attention(hidden_size, n_ctx, config, scale, is_cross_attention=True)
            self.ln_cross_attn = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
        self.mlp = MLP(inner_dim, config)

    def forward(
        self,
        hidden_states,
        layer_past=None,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        use_cache=False,
        output_attentions=False,
    ):
        attn_outputs = self.attn(
            self.ln_1(hidden_states),
            layer_past=layer_past,
            attention_mask=attention_mask,
            head_mask=head_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
        )
        attn_output = attn_outputs[0]  # output_attn: a, present, (attentions)
        outputs = attn_outputs[1:]
        # residual connection
        hidden_states = attn_output + hidden_states

        if encoder_hidden_states is not None:
            # add one self-attention block for cross-attention
            assert hasattr(
                self, "crossattention"
            ), f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
            cross_attn_outputs = self.crossattention(
                self.ln_cross_attn(hidden_states),
                attention_mask=attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_attention_mask,
                output_attentions=output_attentions,
            )
            attn_output = cross_attn_outputs[0]
            # residual connection
            hidden_states = hidden_states + attn_output
            outputs = outputs + cross_attn_outputs[2:]  # add cross attentions if we output attention weights

        feed_forward_hidden_states = self.mlp(self.ln_2(hidden_states))
        # residual connection
        hidden_states = hidden_states + feed_forward_hidden_states

        if use_cache:
            outputs = (hidden_states,) + outputs
        else:
            outputs = (hidden_states,) + outputs[1:]

        return outputs  # hidden_states, present, (attentions, cross_attentions)


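# The block above uses the pre-LayerNorm residual layout; ignoring caching, masks and
# cross-attention, one forward pass is equivalent to the sketch below:
#
#   >>> # x = x + attn(ln_1(x))
#   >>> # x = x + mlp(ln_2(x))

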
class GPT2PreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = GPT2Config
    load_tf_weights = load_tf_weights_in_gpt2
    base_model_prefix = "transformer"
    is_parallelizable = True

    def __init__(self, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)

    def _init_weights(self, module):
        """Initialize the weights."""
        if isinstance(module, (nn.Linear, nn.Embedding, Conv1D)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if isinstance(module, (nn.Linear, Conv1D)) and module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


@dataclass
class GPT2DoubleHeadsModelOutput(ModelOutput):
    """
    Base class for outputs of models predicting if two sentences are consecutive or not.

    Args:
        loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided):
            Language modeling loss.
        mc_loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`mc_labels` is provided):
            Multiple choice classification loss.
        logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        mc_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices)`):
            Prediction scores of the multiple choice classification head (scores for each choice before SoftMax).
        past_key_values (:obj:`Tuple[Tuple[torch.Tensor]]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
            Tuple of length :obj:`config.n_layers`, containing tuples of tensors of shape :obj:`(batch_size, num_heads,
            sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see
            :obj:`past_key_values` input) to speed up sequential decoding.
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
            sequence_length, sequence_length)`.

            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    loss: Optional[torch.FloatTensor] = None
    mc_loss: Optional[torch.FloatTensor] = None
    logits: torch.FloatTensor = None
    mc_logits: torch.FloatTensor = None
    past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


GPT2_START_DOCSTRING = r"""

    This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
    methods the library implements for all its models (such as downloading or saving, resizing the input embeddings,
    pruning heads etc.)

    This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
    subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to
    general usage and behavior.

    Parameters:
        config (:class:`~transformers.GPT2Config`): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
            weights.
"""

GPT2_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, input_ids_length)`):
            :obj:`input_ids_length` = ``sequence_length`` if :obj:`past_key_values` is ``None`` else
            ``past_key_values[0][0].shape[-2]`` (``sequence_length`` of input past key value states). Indices of input
            sequence tokens in the vocabulary.

            If :obj:`past_key_values` is used, only ``input_ids`` that do not have their past calculated should be
            passed as ``input_ids``.

            Indices can be obtained using :class:`~transformers.GPT2Tokenizer`. See
            :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
            details.

            `What are input IDs? <../glossary.html#input-ids>`__
        past_key_values (:obj:`Tuple[Tuple[torch.Tensor]]` of length :obj:`config.n_layers`):
            Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see
            :obj:`past_key_values` output below). Can be used to speed up sequential decoding. The ``input_ids`` which
            have their past given to this model should not be passed as ``input_ids`` as they have already been
            computed.
        attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            `What are attention masks? <../glossary.html#attention-mask>`__
        token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, input_ids_length)`, `optional`):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
            1]``:

            - 0 corresponds to a `sentence A` token,
            - 1 corresponds to a `sentence B` token.

            `What are token type IDs? <../glossary.html#token-type-ids>`_
        position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
            config.max_position_embeddings - 1]``.

            `What are position IDs? <../glossary.html#position-ids>`_
        head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
            Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
            vectors than the model's internal embedding lookup matrix.

            If :obj:`past_key_values` is used, optionally only the last :obj:`inputs_embeds` have to be input (see
            :obj:`past_key_values`).
        use_cache (:obj:`bool`, `optional`):
            If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
            decoding (see :obj:`past_key_values`).
        output_attentions (:obj:`bool`, `optional`):
            Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
            tensors for more detail.
        output_hidden_states (:obj:`bool`, `optional`):
            Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
            more detail.
        return_dict (:obj:`bool`, `optional`):
            Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
PARALLELIZE_DOCSTRING = r"""
    This is an experimental feature and is subject to change at a moment's notice.

    Uses a device map to distribute attention modules of the model across several devices. If no device map is given,
    it will evenly distribute blocks across all devices.

    Args:
        device_map (:obj:`Dict[int, list]`, optional, defaults to None):
            A dictionary that maps attention modules to devices. Note that the embedding module and LMHead are always
            automatically mapped to the first device (for esoteric reasons). That means that the first device should
            have fewer attention modules mapped to it than other devices. For reference, the gpt2 models have the
            following number of attention modules:

                - gpt2: 12
                - gpt2-medium: 24
                - gpt2-large: 36
                - gpt2-xl: 48

    Example::

            # Here is an example of a device map on a machine with 4 GPUs using gpt2-xl, which has a total of 48 attention modules:
            model = GPT2LMHeadModel.from_pretrained('gpt2-xl')
            device_map = {0: [0, 1, 2, 3, 4, 5, 6, 7, 8],
                          1: [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21],
                          2: [22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34],
                          3: [35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47]}
            model.parallelize(device_map)
"""
DEPARALLELIZE_DOCSTRING = r"""
    Moves the model to CPU from a model parallel state.

    Example::

            # On a 4 GPU machine with gpt2-large:
            model = GPT2LMHeadModel.from_pretrained('gpt2-large')
            device_map = {0: [0, 1, 2, 3, 4, 5, 6, 7],
                          1: [8, 9, 10, 11, 12, 13, 14, 15],
                          2: [16, 17, 18, 19, 20, 21, 22, 23],
                          3: [24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35]}
            model.parallelize(device_map)  # Splits the model across several devices
            model.deparallelize()  # Puts the model back on CPU and cleans memory by calling torch.cuda.empty_cache()
"""


@add_start_docstrings(
    "The bare GPT2 Model transformer outputting raw hidden-states without any specific head on top.",
    GPT2_START_DOCSTRING,
)
class GPT2Model(GPT2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.wte = nn.Embedding(config.vocab_size, config.n_embd)
        self.wpe = nn.Embedding(config.n_positions, config.n_embd)
        self.drop = nn.Dropout(config.embd_pdrop)
        self.h = nn.ModuleList([Block(config.n_ctx, config, scale=True) for _ in range(config.n_layer)])
        self.ln_f = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)

        self.init_weights()

        # Model parallel
        self.model_parallel = False
        self.device_map = None

    @add_start_docstrings(PARALLELIZE_DOCSTRING)
    def parallelize(self, device_map=None):
        # Check validity of device_map
        self.device_map = (
            get_device_map(len(self.h), range(torch.cuda.device_count())) if device_map is None else device_map
        )
        assert_device_map(self.device_map, len(self.h))
        self.model_parallel = True
        self.first_device = "cpu" if "cpu" in self.device_map.keys() else "cuda:" + str(min(self.device_map.keys()))
        self.last_device = "cuda:" + str(max(self.device_map.keys()))
        self.wte = self.wte.to(self.first_device)
        self.wpe = self.wpe.to(self.first_device)
        # Load onto devices
        for k, v in self.device_map.items():
            for block in v:
                cuda_device = "cuda:" + str(k)
                self.h[block] = self.h[block].to(cuda_device)
        # ln_f to last
        self.ln_f = self.ln_f.to(self.last_device)

    @add_start_docstrings(DEPARALLELIZE_DOCSTRING)
    def deparallelize(self):
        self.model_parallel = False
        self.device_map = None
        self.first_device = "cpu"
        self.last_device = "cpu"
        self.wte = self.wte.to("cpu")
        self.wpe = self.wpe.to("cpu")
        for index in range(len(self.h)):
            self.h[index] = self.h[index].to("cpu")
        self.ln_f = self.ln_f.to("cpu")
        torch.cuda.empty_cache()

    def get_input_embeddings(self):
        return self.wte

    def set_input_embeddings(self, new_embeddings):
        self.wte = new_embeddings

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
        """
        for layer, heads in heads_to_prune.items():
            self.h[layer].attn.prune_heads(heads)

    @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint="gpt2",
        output_type=BaseModelOutputWithPastAndCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        past_key_values=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
            batch_size = input_ids.shape[0]
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size = inputs_embeds.shape[0]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if token_type_ids is not None:
            token_type_ids = token_type_ids.view(-1, input_shape[-1])
        if position_ids is not None:
            position_ids = position_ids.view(-1, input_shape[-1])

        if past_key_values is None:
            past_length = 0
            past_key_values = tuple([None] * len(self.h))
        else:
            past_length = past_key_values[0][0].size(-2)
        if position_ids is None:
            device = input_ids.device if input_ids is not None else inputs_embeds.device
            position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
            position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])

        # Attention mask.
        if attention_mask is not None:
            assert batch_size > 0, "batch_size has to be defined and > 0"
            attention_mask = attention_mask.view(batch_size, -1)
            # We create a 3D attention mask from a 2D tensor mask.
            # Sizes are [batch_size, 1, 1, to_seq_length]
            # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
            # this attention mask is simpler than the triangular masking of causal attention
            # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
            attention_mask = attention_mask[:, None, None, :]

            # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
            # masked positions, this operation will create a tensor which is 0.0 for
            # positions we want to attend and -10000.0 for masked positions.
            # Since we are adding it to the raw scores before the softmax, this is
            # effectively the same as removing these entirely.
            attention_mask = attention_mask.to(dtype=self.dtype)  # fp16 compatibility
            attention_mask = (1.0 - attention_mask) * -10000.0

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.add_cross_attention and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # head_mask has shape n_layer x batch x n_heads x N x N
        head_mask = self.get_head_mask(head_mask, self.config.n_layer)

        if inputs_embeds is None:
            inputs_embeds = self.wte(input_ids)
        position_embeds = self.wpe(position_ids)
        hidden_states = inputs_embeds + position_embeds

        if token_type_ids is not None:
            token_type_embeds = self.wte(token_type_ids)
            hidden_states = hidden_states + token_type_embeds

        hidden_states = self.drop(hidden_states)

        output_shape = input_shape + (hidden_states.size(-1),)

        presents = () if use_cache else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
        all_hidden_states = () if output_hidden_states else None
        for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):

            # Model parallel
            if self.model_parallel:
                torch.cuda.set_device(hidden_states.device)
                # Ensure layer_past is on same device as hidden_states (might not be correct)
                if layer_past is not None:
                    layer_past = tuple(past_state.to(hidden_states.device) for past_state in layer_past)
                # Ensure that attention_mask is always on the same device as hidden_states
                if attention_mask is not None:
                    attention_mask = attention_mask.to(hidden_states.device)
                if isinstance(head_mask, torch.Tensor):
                    head_mask = head_mask.to(hidden_states.device)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            if getattr(self.config, "gradient_checkpointing", False) and self.training:

                if use_cache:
                    logger.warn(
                        "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
                        "`use_cache=False`..."
                    )
                    use_cache = False

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        # None for past_key_value
                        return module(*inputs, use_cache, output_attentions)

                    return custom_forward

                outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(block),
                    hidden_states,
                    None,
                    attention_mask,
                    head_mask[i],
                    encoder_hidden_states,
                    encoder_attention_mask,
                )
            else:
                outputs = block(
                    hidden_states,
                    layer_past=layer_past,
                    attention_mask=attention_mask,
                    head_mask=head_mask[i],
                    encoder_hidden_states=encoder_hidden_states,
                    encoder_attention_mask=encoder_attention_mask,
                    use_cache=use_cache,
                    output_attentions=output_attentions,
                )

            hidden_states = outputs[0]
            if use_cache is True:
                presents = presents + (outputs[1],)

            if output_attentions:
                all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + (outputs[3 if use_cache else 2],)

            # Model Parallel: If it's the last layer for that device, put things on the next device
            if self.model_parallel:
                for k, v in self.device_map.items():
                    if i == v[-1] and "cuda:" + str(k) != self.last_device:
                        hidden_states = hidden_states.to("cuda:" + str(k + 1))

        hidden_states = self.ln_f(hidden_states)

        hidden_states = hidden_states.view(*output_shape)
        # Add last hidden state
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)

        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=presents,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )


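# A minimal usage sketch for the bare model above, assuming the public "gpt2" checkpoint
# (the same one referenced by the docstring decorators) is available:
#
#   >>> from transformers import GPT2Tokenizer, GPT2Model
#   >>> tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
#   >>> model = GPT2Model.from_pretrained("gpt2")
#   >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
#   >>> outputs = model(**inputs)
#   >>> outputs.last_hidden_state.shape  # (batch_size, sequence_length, hidden_size)

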
@add_start_docstrings(
    """
    The GPT2 Model transformer with a language modeling head on top (linear layer with weights tied to the input
    embeddings).
    """,
    GPT2_START_DOCSTRING,
)
class GPT2LMHeadModel(GPT2PreTrainedModel):
    _keys_to_ignore_on_load_missing = [r"h\.\d+\.attn\.masked_bias", r"lm_head\.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.transformer = GPT2Model(config)
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)

        self.init_weights()

        # Model parallel
        self.model_parallel = False
        self.device_map = None

    @add_start_docstrings(PARALLELIZE_DOCSTRING)
    def parallelize(self, device_map=None):
        self.device_map = (
            get_device_map(len(self.transformer.h), range(torch.cuda.device_count()))
            if device_map is None
            else device_map
        )
        assert_device_map(self.device_map, len(self.transformer.h))
        self.transformer.parallelize(self.device_map)
        self.lm_head = self.lm_head.to(self.transformer.first_device)
        self.model_parallel = True

    @add_start_docstrings(DEPARALLELIZE_DOCSTRING)
    def deparallelize(self):
        self.transformer.deparallelize()
        self.transformer = self.transformer.to("cpu")
        self.lm_head = self.lm_head.to("cpu")
        self.model_parallel = False
        torch.cuda.empty_cache()

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs):
        token_type_ids = kwargs.get("token_type_ids", None)
        # only keep the last token of input_ids if past is defined in kwargs
        if past:
            input_ids = input_ids[:, -1].unsqueeze(-1)
            if token_type_ids is not None:
                token_type_ids = token_type_ids[:, -1].unsqueeze(-1)

        attention_mask = kwargs.get("attention_mask", None)
        position_ids = kwargs.get("position_ids", None)

        if attention_mask is not None and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past:
                position_ids = position_ids[:, -1].unsqueeze(-1)
        else:
            position_ids = None
        return {
            "input_ids": input_ids,
            "past_key_values": past,
            "use_cache": kwargs.get("use_cache"),
            "position_ids": position_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

    @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint="gpt2",
        output_type=CausalLMOutputWithCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        past_key_values=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        labels=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            ``labels = input_ids``. Indices are selected in ``[-100, 0, ..., config.vocab_size]``. All labels set to
            ``-100`` are ignored (masked); the loss is only computed for labels in ``[0, ..., config.vocab_size]``.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.transformer(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = transformer_outputs[0]

        # Set device for model parallelism
        if self.model_parallel:
            torch.cuda.set_device(self.transformer.first_device)
            hidden_states = hidden_states.to(self.lm_head.weight.device)

        lm_logits = self.lm_head(hidden_states)

        loss = None
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))

        if not return_dict:
            output = (lm_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return CausalLMOutputWithCrossAttentions(
            loss=loss,
            logits=lm_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
            cross_attentions=transformer_outputs.cross_attentions,
        )

    @staticmethod
    def _reorder_cache(past: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor) -> Tuple[Tuple[torch.Tensor]]:
        """
        This function is used to re-order the :obj:`past_key_values` cache if
        :meth:`~transformers.PretrainedModel.beam_search` or :meth:`~transformers.PretrainedModel.beam_sample` is
        called. This is required to match :obj:`past_key_values` with the correct beam_idx at every generation step.
        """
        return tuple(
            tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
            for layer_past in past
        )


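# A minimal sketch of computing the language-modeling loss and generating with the head
# above, again assuming the public "gpt2" checkpoint:
#
#   >>> from transformers import GPT2Tokenizer, GPT2LMHeadModel
#   >>> tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
#   >>> model = GPT2LMHeadModel.from_pretrained("gpt2")
#   >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
#   >>> outputs = model(**inputs, labels=inputs["input_ids"])  # labels are shifted internally
#   >>> loss, logits = outputs.loss, outputs.logits
#   >>> generated = model.generate(inputs["input_ids"], max_length=20)

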
@add_start_docstrings(
    """
    The GPT2 Model transformer with a language modeling and a multiple-choice classification head on top e.g. for
    RocStories/SWAG tasks. The two heads are two linear layers. The language modeling head has its weights tied to the
    input embeddings; the classification head takes as input the input of a specified classification token index in the
    input sequence.
    """,
    GPT2_START_DOCSTRING,
)
class GPT2DoubleHeadsModel(GPT2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        config.num_labels = 1
        self.transformer = GPT2Model(config)
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
        self.multiple_choice_head = SequenceSummary(config)

        self.init_weights()

        # Model parallel
        self.model_parallel = False
        self.device_map = None

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs):
        token_type_ids = kwargs.get("token_type_ids", None)
        # only keep the last token of input_ids if past is defined in kwargs
        if past:
            input_ids = input_ids[:, -1].unsqueeze(-1)
            if token_type_ids is not None:
                token_type_ids = token_type_ids[:, -1].unsqueeze(-1)

        attention_mask = kwargs.get("attention_mask", None)
        position_ids = kwargs.get("position_ids", None)

        if attention_mask is not None and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past:
                position_ids = position_ids[:, -1].unsqueeze(-1)
        else:
            position_ids = None

        return {
            "input_ids": input_ids,
            "past_key_values": past,
            "use_cache": kwargs.get("use_cache"),
            "position_ids": position_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

    @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=GPT2DoubleHeadsModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids=None,
        past_key_values=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        mc_token_ids=None,
        labels=None,
        mc_labels=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        **kwargs,
    ):
        r"""
        mc_token_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, num_choices)`, `optional`, defaults to index of the last token of the input):
            Index of the classification token in each input sequence. Selected in the range ``[0, input_ids.size(-1) -
            1]``.
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            ``labels = input_ids``. Indices are selected in ``[-100, 0, ..., config.vocab_size - 1]``. All labels set to
            ``-100`` are ignored (masked); the loss is only computed for labels in ``[0, ..., config.vocab_size - 1]``.
        mc_labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size)`, `optional`):
            Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
            num_choices - 1]`` where `num_choices` is the size of the second dimension of the input tensors. (see
            `input_ids` above)

        Return:

        Example::

            >>> import torch
            >>> from transformers import GPT2Tokenizer, GPT2DoubleHeadsModel

            >>> tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
            >>> model = GPT2DoubleHeadsModel.from_pretrained('gpt2')

            >>> # Add a [CLS] to the vocabulary (we should train it also!)
            >>> num_added_tokens = tokenizer.add_special_tokens({'cls_token': '[CLS]'})

            >>> embedding_layer = model.resize_token_embeddings(len(tokenizer))  # Update the model embeddings with the new vocabulary size

            >>> choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
            >>> encoded_choices = [tokenizer.encode(s) for s in choices]
            >>> cls_token_location = [tokens.index(tokenizer.cls_token_id) for tokens in encoded_choices]

            >>> input_ids = torch.tensor(encoded_choices).unsqueeze(0)  # Batch size: 1, number of choices: 2
            >>> mc_token_ids = torch.tensor([cls_token_location])  # Batch size: 1

            >>> outputs = model(input_ids, mc_token_ids=mc_token_ids)
            >>> lm_logits = outputs.logits
            >>> mc_logits = outputs.mc_logits

        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.transformer(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = transformer_outputs[0]

        lm_logits = self.lm_head(hidden_states)
        mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids).squeeze(-1)

        mc_loss = None
        if mc_labels is not None:
            loss_fct = CrossEntropyLoss()
            mc_loss = loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1))
        lm_loss = None
        if labels is not None:
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            loss_fct = CrossEntropyLoss()
            lm_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))

        if not return_dict:
            output = (lm_logits, mc_logits) + transformer_outputs[1:]
            if mc_loss is not None:
                output = (mc_loss,) + output
            return ((lm_loss,) + output) if lm_loss is not None else output

        return GPT2DoubleHeadsModelOutput(
            loss=lm_loss,
            mc_loss=mc_loss,
            logits=lm_logits,
            mc_logits=mc_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )

    @staticmethod
    def _reorder_cache(past: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor) -> Tuple[Tuple[torch.Tensor]]:
        """
        This function is used to re-order the :obj:`past_key_values` cache if
        :meth:`~transformers.PretrainedModel.beam_search` or :meth:`~transformers.PretrainedModel.beam_sample` is
        called. This is required to match :obj:`past_key_values` with the correct beam_idx at every generation step.
        """
        return tuple(
            tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
            for layer_past in past
        )


@add_start_docstrings(
    """
    The GPT2 Model transformer with a sequence classification head on top (linear layer).

    :class:`~transformers.GPT2ForSequenceClassification` uses the last token in order to do the classification, as
    other causal models (e.g. GPT-1) do.

    Since it does classification on the last token, it needs to know the position of the last token. If a
    :obj:`pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each
    row. If no :obj:`pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot
    guess the padding tokens when :obj:`inputs_embeds` are passed instead of :obj:`input_ids`, it does the same (take
    the last value in each row of the batch).
    """,
    GPT2_START_DOCSTRING,
)
class GPT2ForSequenceClassification(GPT2PreTrainedModel):
    _keys_to_ignore_on_load_missing = [r"h\.\d+\.attn\.masked_bias", r"lm_head\.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.transformer = GPT2Model(config)
        self.score = nn.Linear(config.n_embd, self.num_labels, bias=False)

        self.init_weights()

        # Model parallel
        self.model_parallel = False
        self.device_map = None

    @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint="microsoft/dialogrpt",
        output_type=SequenceClassifierOutputWithPast,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        past_key_values=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
            config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss).
            If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.transformer(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = transformer_outputs[0]
        logits = self.score(hidden_states)

        if input_ids is not None:
            batch_size, sequence_length = input_ids.shape[:2]
        else:
            batch_size, sequence_length = inputs_embeds.shape[:2]

        assert (
            self.config.pad_token_id is not None or batch_size == 1
        ), "Cannot handle batch sizes > 1 if no padding token is defined."
        if self.config.pad_token_id is None:
            sequence_lengths = -1
        else:
            if input_ids is not None:
                sequence_lengths = torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1
            else:
                sequence_lengths = -1
                logger.warning(
                    f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
                    "unexpected if using padding tokens in conjunction with `inputs_embeds`."
                )

        pooled_logits = logits[range(batch_size), sequence_lengths]

        loss = None
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(pooled_logits.view(-1), labels.to(self.dtype).view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (pooled_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )
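

# A minimal usage sketch for the sequence-classification head above, using the
# "microsoft/dialogrpt" checkpoint referenced by the docstring decorator (any GPT-2 style
# checkpoint works, but batch sizes > 1 require a pad_token_id in the config):
#
#   >>> from transformers import GPT2Tokenizer, GPT2ForSequenceClassification
#   >>> tokenizer = GPT2Tokenizer.from_pretrained("microsoft/dialogrpt")
#   >>> model = GPT2ForSequenceClassification.from_pretrained("microsoft/dialogrpt")
#   >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
#   >>> logits = model(**inputs).logits
#   >>> predicted_class_id = int(logits.argmax(-1))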