Mirror of https://github.com/huggingface/transformers.git (synced 2025-07-31 02:02:21 +06:00)

Commit: working on automodels
Parent: 58830807d1
Commit: b90e29d52c

docs/source/model_doc/auto.rst (new file, 26 lines)
@@ -0,0 +1,26 @@
AutoModel, AutoConfig and AutoTokenizer - Standard derived classes
------------------------------------------------------------------

In many cases, the architecture you want to use can be guessed from the name or the path of the pretrained model you are supplying to the ``from_pretrained`` method.

Auto classes are here to do this job for you so that you automatically retrieve the relevant model given the name/path of the pretrained weights/config/vocabulary.

``AutoConfig``
~~~~~~~~~~~~~~

.. autoclass:: pytorch_transformers.AutoConfig
    :members:


``AutoModel``
~~~~~~~~~~~~~

.. autoclass:: pytorch_transformers.AutoModel
    :members:


``AutoTokenizer``
~~~~~~~~~~~~~~~~~

.. autoclass:: pytorch_transformers.AutoTokenizer
    :members:
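A quick usage sketch of the three Auto classes documented above. The shortcut name is illustrative; any name or path containing one of the recognized patterns ('bert', 'openai-gpt', 'gpt2', 'transfo-xl', 'xlnet', 'xlm') resolves the same way::

    from pytorch_transformers import AutoConfig, AutoModel, AutoTokenizer

    # 'bert-base-uncased' contains the pattern 'bert', so all three calls
    # resolve to the BERT classes (BertConfig, BertModel, BertTokenizer)
    config = AutoConfig.from_pretrained('bert-base-uncased')
    tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
    model = AutoModel.from_pretrained('bert-base-uncased')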
@@ -134,7 +134,7 @@ def train(args, train_dataset, model, tokenizer):
                       'end_positions': batch[4]}
         if args.model_type in ['xlnet', 'xlm']:
             inputs.update({'cls_index': batch[5],
-                           'p_mask': batch[6]})
+                           'p_mask': batch[6]})
         outputs = model(**inputs)
         loss = outputs[0]  # model outputs are always tuple in pytorch-transformers (see doc)
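For context, a sketch of the `inputs` construction this hunk belongs to. Only indices 3-6 of `batch` are visible in the hunk; the first three entries are an assumption based on the usual question-answering setup::

    # assumed layout of `batch`; only indices 3-6 appear in the hunk above
    inputs = {'input_ids':       batch[0],
              'attention_mask':  batch[1],
              'token_type_ids':  batch[2],
              'start_positions': batch[3],
              'end_positions':   batch[4]}
    if args.model_type in ['xlnet', 'xlm']:
        # XLNet/XLM additionally consume the classification token position
        # and a padding mask
        inputs.update({'cls_index': batch[5],
                       'p_mask':    batch[6]})
    outputs = model(**inputs)
    loss = outputs[0]  # models return tuples; the loss comes first when labels are given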
@@ -18,6 +18,11 @@ from __future__ import absolute_import, division, print_function, unicode_literals

 import logging

+import torch
+import torch.nn as nn
+from torch.nn import CrossEntropyLoss, MSELoss
+from torch.nn.parameter import Parameter
+
 from .modeling_bert import BertConfig, BertModel
 from .modeling_openai import OpenAIGPTConfig, OpenAIGPTModel
 from .modeling_gpt2 import GPT2Config, GPT2Model
@@ -25,6 +30,8 @@ from .modeling_transfo_xl import TransfoXLConfig, TransfoXLModel
 from .modeling_xlnet import XLNetConfig, XLNetModel
 from .modeling_xlm import XLMConfig, XLMModel

+from .modeling_utils import PreTrainedModel, SequenceSummary
+
 logger = logging.getLogger(__name__)

 class AutoConfig(object):
@@ -228,3 +235,245 @@ class AutoModel(object):
                              "'bert', 'openai-gpt', 'gpt2', 'transfo-xl', 'xlnet', "
                              "'xlm'".format(pretrained_model_name_or_path))


class DerivedAutoModel(PreTrainedModel):
    r"""
        :class:`~pytorch_transformers.DerivedAutoModel` is a base class for building
        standardized derived models on top of :class:`~pytorch_transformers.AutoModel` by adding heads.

        The `from_pretrained()` method takes care of instantiating the correct base model class
        using pattern matching on the `pretrained_model_name_or_path` string.

        The base model class to instantiate is selected as the first pattern matching
        in the `pretrained_model_name_or_path` string (in the following order):
            - contains `bert`: BertConfig (Bert model)
            - contains `openai-gpt`: OpenAIGPTConfig (OpenAI GPT model)
            - contains `gpt2`: GPT2Config (OpenAI GPT-2 model)
            - contains `transfo-xl`: TransfoXLConfig (Transformer-XL model)
            - contains `xlnet`: XLNetConfig (XLNet model)
            - contains `xlm`: XLMConfig (XLM model)

        This class should usually not be instantiated using `__init__()` but `from_pretrained()`.
    """
    config_class = None
    pretrained_model_archive_map = {}
    load_tf_weights = lambda model, config, path: None
    base_model_prefix = "transformer"

    def __init__(self, base_model):
        super(DerivedAutoModel, self).__init__(base_model.config)
        self.transformer = base_model

    def init_weights(self, module):
        """ Initialize the weights. Use the base model initialization function.
        """
        self.transformer.init_weights(module)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        r""" Instantiate a :class:`~pytorch_transformers.DerivedAutoModel` with one of the base model classes of the library
        from a pre-trained model configuration.

        The base model class to instantiate is selected as the first pattern matching
        in the `pretrained_model_name_or_path` string (in the following order):
            - contains `bert`: BertConfig (Bert model)
            - contains `openai-gpt`: OpenAIGPTConfig (OpenAI GPT model)
            - contains `gpt2`: GPT2Config (OpenAI GPT-2 model)
            - contains `transfo-xl`: TransfoXLConfig (Transformer-XL model)
            - contains `xlnet`: XLNetConfig (XLNet model)
            - contains `xlm`: XLMConfig (XLM model)

        The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated).
        To train the model, you should first set it back in training mode with `model.train()`.

        Params:
            **pretrained_model_name_or_path**: either:
                - a string with the `shortcut name` of a pre-trained model to load from cache
                  or download and cache if not already stored in cache (e.g. 'bert-base-uncased').
                - a path to a `directory` containing a configuration file saved
                  using the `save_pretrained(save_directory)` method.
                - a path or url to a tensorflow index checkpoint `file` (e.g. `./tf_model/model.ckpt.index`).
                  In this case, ``from_tf`` should be set to True and a configuration object should be
                  provided as the `config` argument. This loading option is slower than converting the TensorFlow
                  checkpoint to a PyTorch model using the provided conversion scripts and loading
                  the PyTorch model afterwards.
            **model_args**: (`optional`) Sequence:
                All remaining positional arguments will be passed to the underlying model's __init__ function.
            **config**: an optional configuration for the model to use instead of an automatically loaded configuration.
                Configuration can be automatically loaded when:
                - the model is a model provided by the library (loaded with a `shortcut name` of a pre-trained model), or
                - the model was saved using `save_pretrained(save_directory)` (loaded by supplying the save directory).
            **state_dict**: an optional state dictionary for the model to use instead of a state dictionary loaded
                from a saved weights file.
                This option can be used if you want to create a model from a pretrained configuration but load your own weights.
                In this case though, you should check if using `save_pretrained(dir)` and `from_pretrained(save_directory)` is not
                a simpler option.
            **cache_dir**: (`optional`) string:
                Path to a directory in which a downloaded pre-trained model
                configuration should be cached if the standard cache should not be used.
            **output_loading_info**: (`optional`) boolean:
                Set to ``True`` to also return a dictionary containing missing keys, unexpected keys and error messages.
            **kwargs**: (`optional`) dict:
                Dictionary of key/value pairs with which to update the configuration object after loading.
                Can be used to override selected configuration parameters. E.g. ``output_attention=True``.

                - If a configuration is provided with `config`, **kwargs will be directly passed
                  to the underlying model's __init__ method.
                - If a configuration is not provided, **kwargs will be first passed to the pretrained
                  model configuration class loading function (`PretrainedConfig.from_pretrained`).
                  Each key of **kwargs that corresponds to a configuration attribute
                  will be used to override said attribute with the supplied **kwargs value.
                  Remaining keys that do not correspond to any configuration attribute will
                  be passed to the underlying model's __init__ function.

        Examples::

            model = AutoModel.from_pretrained('bert-base-uncased')    # Download model and configuration from S3 and cache.
            model = AutoModel.from_pretrained('./test/bert_model/')   # E.g. model was saved using `save_pretrained('./test/saved_model/')`
            model = AutoModel.from_pretrained('bert-base-uncased', output_attention=True)  # Update configuration during loading
            assert model.config.output_attention == True
            # Loading from a TF checkpoint file instead of a PyTorch model (slower)
            config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
            model = AutoModel.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)

        """
        if 'bert' in pretrained_model_name_or_path:
            base_model_class = BertModel
        elif 'openai-gpt' in pretrained_model_name_or_path:
            base_model_class = OpenAIGPTModel
        elif 'gpt2' in pretrained_model_name_or_path:
            base_model_class = GPT2Model
        elif 'transfo-xl' in pretrained_model_name_or_path:
            base_model_class = TransfoXLModel
        elif 'xlnet' in pretrained_model_name_or_path:
            base_model_class = XLNetModel
        elif 'xlm' in pretrained_model_name_or_path:
            base_model_class = XLMModel
        else:
            raise ValueError("Unrecognized model identifier in {}. Should contain one of "
                             "'bert', 'openai-gpt', 'gpt2', 'transfo-xl', 'xlnet', "
                             "'xlm'".format(pretrained_model_name_or_path))

        # Get a pretrained base_model
        base_model = base_model_class.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)

        # Create our derived model
        model = cls(base_model)

        # Set up class attributes from the base model class
        model.config_class = base_model.config_class
        model.pretrained_model_archive_map = base_model.pretrained_model_archive_map
        model.load_tf_weights = base_model.load_tf_weights

        return model


class AutoModelWithLMHead(DerivedAutoModel):
    r"""
        :class:`~pytorch_transformers.AutoModelWithLMHead` is a base class for language modeling
        that contains

            - a base model instantiated as one of the base model classes of the library when created with the `AutoModel.from_pretrained(pretrained_model_name_or_path)` class method, and
            - a language modeling head on top of the base model.

        The `from_pretrained()` method takes care of instantiating the correct base model class
        using pattern matching on the `pretrained_model_name_or_path` string.

        The base model class to instantiate is selected as the first pattern matching
        in the `pretrained_model_name_or_path` string (in the following order):
            - contains `bert`: BertConfig (Bert model)
            - contains `openai-gpt`: OpenAIGPTConfig (OpenAI GPT model)
            - contains `gpt2`: GPT2Config (OpenAI GPT-2 model)
            - contains `transfo-xl`: TransfoXLConfig (Transformer-XL model)
            - contains `xlnet`: XLNetConfig (XLNet model)
            - contains `xlm`: XLMConfig (XLM model)

        This class should usually not be instantiated using `__init__()` but `from_pretrained()`.
    """

    def __init__(self, base_model):
        super(AutoModelWithLMHead, self).__init__(base_model)
        # self.config is set from base_model.config in DerivedAutoModel.__init__
        self.lm_head = nn.Linear(self.config.hidden_size, self.config.vocab_size, bias=False)

        self.apply(self.init_weights)
        self.tie_weights()

    def tie_weights(self):
        """ Make sure we are sharing the input and output embeddings.
            Export to TorchScript can't handle parameter sharing so we are cloning them instead.
        """
        # get the input embeddings - whatever the base model is
        input_embeddings = self.transformer.resize_token_embeddings(new_num_tokens=None)

        # tie or clone (for TorchScript) the embeddings
        self._tie_or_clone_weights(self.lm_head, input_embeddings)

    def forward(self, input_ids, **kwargs):
        labels = kwargs.pop('labels', None)  # Python 2 compatibility...

        transformer_outputs = self.transformer(input_ids, **kwargs)
        hidden_states = transformer_outputs[0]

        lm_logits = self.lm_head(hidden_states)

        outputs = (lm_logits,) + transformer_outputs[1:]
        if labels is not None:
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)),
                            labels.view(-1))
            outputs = (loss,) + outputs

        return outputs  # (loss), lm_logits, presents, (all hidden_states), (attentions)


class AutoModelForSequenceClassification(DerivedAutoModel):
    r"""
        :class:`~pytorch_transformers.AutoModelForSequenceClassification` is a class for sequence classification
        that contains

            - a base model instantiated as one of the base model classes of the library when created with the `AutoModel.from_pretrained(pretrained_model_name_or_path)` class method, and
            - a classification head on top of the base model.

        The `from_pretrained()` method takes care of instantiating the correct base model class
        using pattern matching on the `pretrained_model_name_or_path` string.

        The base model class to instantiate is selected as the first pattern matching
        in the `pretrained_model_name_or_path` string (in the following order):
            - contains `bert`: BertConfig (Bert model)
            - contains `openai-gpt`: OpenAIGPTConfig (OpenAI GPT model)
            - contains `gpt2`: GPT2Config (OpenAI GPT-2 model)
            - contains `transfo-xl`: TransfoXLConfig (Transformer-XL model)
            - contains `xlnet`: XLNetConfig (XLNet model)
            - contains `xlm`: XLMConfig (XLM model)

        This class should usually not be instantiated using `__init__()` but `from_pretrained()`.
    """

    def __init__(self, base_model):
        super(AutoModelForSequenceClassification, self).__init__(base_model)
        self.num_labels = base_model.config.num_labels
        self.sequence_summary = SequenceSummary(base_model.config)

        self.apply(self.init_weights)

    def forward(self, input_ids, cls_index, **kwargs):
        labels = kwargs.pop('labels', None)  # Python 2 compatibility...

        transformer_outputs = self.transformer(input_ids, **kwargs)

        output = transformer_outputs[0]
        logits = self.sequence_summary(output, cls_index=cls_index)

        outputs = (logits,) + transformer_outputs[1:]  # Keep new_mems and attention/hidden states if they are here

        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            outputs = (loss,) + outputs

        return outputs
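A hedged end-to-end sketch of the two derived heads added above. The import path assumes they are exposed from the modeling_auto module as the diff defines them; whether they are re-exported at the package top level is not shown, and the shortcut name and shapes are illustrative::

    import torch
    from pytorch_transformers import AutoTokenizer
    from pytorch_transformers.modeling_auto import (AutoModelWithLMHead,
                                                    AutoModelForSequenceClassification)

    tokenizer = AutoTokenizer.from_pretrained('gpt2')
    input_ids = torch.tensor([tokenizer.encode("Hello, my dog is cute")])

    # LM head: returns (loss, lm_logits, ...) when labels are supplied
    lm_model = AutoModelWithLMHead.from_pretrained('gpt2')
    loss, lm_logits = lm_model(input_ids, labels=input_ids)[:2]

    # Classification head: SequenceSummary pools the hidden state at cls_index
    clf_model = AutoModelForSequenceClassification.from_pretrained('gpt2')
    cls_index = torch.tensor([input_ids.size(1) - 1])  # use the last token
    logits = clf_model(input_ids, cls_index)[0]        # shape (1, config.num_labels)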
@@ -137,7 +137,7 @@ class GPT2Config(PretrainedConfig):
                  initializer_range=0.02,

                  num_labels=1,
-                 summary_type='token_ids',
+                 summary_type='cls_index',
                  summary_use_proj=True,
                  summary_activation=None,
                  summary_proj_to_labels=True,
@@ -171,7 +171,7 @@ class OpenAIGPTConfig(PretrainedConfig):
                  predict_special_tokens=True,

                  num_labels=1,
-                 summary_type='token_ids',
+                 summary_type='cls_index',
                  summary_use_proj=True,
                  summary_activation=None,
                  summary_proj_to_labels=True,
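Both config changes rename the default pooling strategy consumed by SequenceSummary below. A small sketch of overriding these fields at construction time (argument values are illustrative)::

    from pytorch_transformers import GPT2Config

    # 'cls_index' replaces the old 'token_ids' name for the same strategy
    config = GPT2Config(num_labels=2,
                        summary_type='cls_index',
                        summary_use_proj=True,
                        summary_proj_to_labels=True)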
@@ -765,7 +765,7 @@ class SequenceSummary(nn.Module):
             - 'last' => [default] take the last token hidden state (like XLNet)
             - 'first' => take the first token hidden state (like Bert)
             - 'mean' => take the mean of all tokens hidden states
-            - 'token_ids' => supply a Tensor of classification token indices (GPT/GPT-2)
+            - 'cls_index' => supply a Tensor of classification token position (GPT/GPT-2)
             - 'attn' => Not implemented now, use multi-head attention
         summary_use_proj: Add a projection after the vector extraction
         summary_proj_to_labels: If True, the projection outputs to config.num_labels classes (otherwise to hidden_size). Default: False.
@@ -803,11 +803,11 @@ class SequenceSummary(nn.Module):
         if hasattr(config, 'summary_last_dropout') and config.summary_last_dropout > 0:
             self.last_dropout = nn.Dropout(config.summary_last_dropout)

-    def forward(self, hidden_states, token_ids=None):
+    def forward(self, hidden_states, cls_index=None):
         """ hidden_states: float Tensor in shape [bsz, seq_len, hidden_size], the hidden-states of the last layer.
-            token_ids: [optional] index of the classification token if summary_type == 'token_ids',
+            cls_index: [optional] position of the classification token if summary_type == 'cls_index',
                 shape (bsz,) or more generally (bsz, ...) where ... are optional leading dimensions of hidden_states.
-            if summary_type == 'token_ids' and token_ids is None:
+            if summary_type == 'cls_index' and cls_index is None:
                 we take the last token of the sequence as classification token
         """
         if self.summary_type == 'last':
@@ -816,14 +816,14 @@ class SequenceSummary(nn.Module):
             output = hidden_states[:, 0]
         elif self.summary_type == 'mean':
             output = hidden_states.mean(dim=1)
-        elif self.summary_type == 'token_ids':
-            if token_ids is None:
-                token_ids = torch.full_like(hidden_states[..., :1, :], hidden_states.shape[-2]-1, dtype=torch.long)
+        elif self.summary_type == 'cls_index':
+            if cls_index is None:
+                cls_index = torch.full_like(hidden_states[..., :1, :], hidden_states.shape[-2]-1, dtype=torch.long)
             else:
-                token_ids = token_ids.unsqueeze(-1).unsqueeze(-1)
-                token_ids = token_ids.expand((-1,) * (token_ids.dim()-1) + (hidden_states.size(-1),))
-            # shape of token_ids: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states
-            output = hidden_states.gather(-2, token_ids).squeeze(-2)  # shape (bsz, XX, hidden_size)
+                cls_index = cls_index.unsqueeze(-1).unsqueeze(-1)
+                cls_index = cls_index.expand((-1,) * (cls_index.dim()-1) + (hidden_states.size(-1),))
+            # shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states
+            output = hidden_states.gather(-2, cls_index).squeeze(-2)  # shape (bsz, XX, hidden_size)
         elif self.summary_type == 'attn':
             raise NotImplementedError
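A self-contained sketch of the gather logic in the 'cls_index' branch above, showing how a (bsz,) position tensor is expanded so that one hidden state per sequence is selected (tensor sizes are illustrative)::

    import torch

    bsz, seq_len, hidden_size = 2, 5, 8
    hidden_states = torch.randn(bsz, seq_len, hidden_size)
    cls_index = torch.tensor([4, 2])  # classification token position per sequence

    # (bsz,) -> (bsz, 1, 1) -> (bsz, 1, hidden_size) so gather selects along seq_len
    idx = cls_index.unsqueeze(-1).unsqueeze(-1)
    idx = idx.expand((-1,) * (idx.dim() - 1) + (hidden_states.size(-1),))
    output = hidden_states.gather(-2, idx).squeeze(-2)  # shape (bsz, hidden_size)

    assert torch.equal(output[0], hidden_states[0, 4])
    assert torch.equal(output[1], hidden_states[1, 2])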