# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import csv
import json
import logging

from ...file_utils import is_tf_available, is_torch_available


logger = logging.getLogger(__name__)

class InputExample(object):
    """
    A single training/test example for simple sequence classification.

    Args:
        guid: Unique id for the example.
        text_a: string. The untokenized text of the first sequence. For single
            sequence tasks, only this sequence must be specified.
        text_b: (Optional) string. The untokenized text of the second sequence.
            Must only be specified for sequence pair tasks.
        label: (Optional) string. The label of the example. This should be
            specified for train and dev examples, but not for test examples.
    """

    def __init__(self, guid, text_a, text_b=None, label=None):
        self.guid = guid
        self.text_a = text_a
        self.text_b = text_b
        self.label = label

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        return output

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
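
# Minimal usage sketch (illustrative values, kept as comments so importing this
# module has no side effects):
#
#     example = InputExample(guid="train-1", text_a="This movie was great!", label="pos")
#     print(example)  # pretty-printed JSON via __repr__/to_json_string
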

class InputFeatures(object):
    """
    A single set of features of data.

    Args:
        input_ids: Indices of input sequence tokens in the vocabulary.
        attention_mask: Mask to avoid performing attention on padding token indices.
            Mask values selected in ``[0, 1]``: usually ``1`` for tokens that are
            NOT MASKED, ``0`` for MASKED (padded) tokens.
        token_type_ids: Segment token indices to indicate first and second portions of the inputs.
        label: Label corresponding to the input.
    """

    def __init__(self, input_ids, attention_mask=None, token_type_ids=None, label=None):
        self.input_ids = input_ids
        self.attention_mask = attention_mask
        self.token_type_ids = token_type_ids
        self.label = label

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        return output

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
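
# Illustrative sketch of a feature for a 3-token input padded to length 5,
# assuming BERT-style ids (101/102 for [CLS]/[SEP], 0 for padding) and the
# default ``mask_padding_with_zero=True`` convention:
#
#     feature = InputFeatures(input_ids=[101, 7592, 102, 0, 0],
#                             attention_mask=[1, 1, 1, 0, 0],
#                             label=0)
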

class DataProcessor(object):
    """Base class for data converters for sequence classification data sets."""

    @classmethod
    def _read_tsv(cls, input_file, quotechar=None):
        """Reads a tab separated value file."""
        with open(input_file, "r", encoding="utf-8-sig") as f:
            reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
            return list(reader)
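
# For illustration, given a hypothetical tab-separated file ``train.tsv`` with
# label<TAB>text rows such as:
#
#     pos<TAB>This movie was great!
#     neg<TAB>Utterly boring.
#
# ``_read_tsv("train.tsv")`` returns
# ``[["pos", "This movie was great!"], ["neg", "Utterly boring."]]``.
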

class SingleSentenceClassificationProcessor(DataProcessor):
    """Generic processor for a single sentence classification data set."""

    def __init__(self, labels=None, examples=None, mode='classification', verbose=False):
        self.labels = [] if labels is None else labels
        self.examples = [] if examples is None else examples
        self.mode = mode
        self.verbose = verbose

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, idx):
        if isinstance(idx, slice):
            # Propagate mode and verbose so a sliced processor behaves like its parent.
            return SingleSentenceClassificationProcessor(labels=self.labels,
                                                         examples=self.examples[idx],
                                                         mode=self.mode,
                                                         verbose=self.verbose)
        return self.examples[idx]
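
    # ``__getitem__`` supports both integer indexing and slicing; a slice returns
    # a new processor restricted to the selected examples (hypothetical usage):
    #
    #     first_example = processor[0]   # an InputExample
    #     dev_split = processor[:100]    # a SingleSentenceClassificationProcessor
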
    @classmethod
    def create_from_csv(cls, file_name, **kwargs):
        processor = cls(**kwargs)
        processor.add_examples_from_csv(file_name)
        return processor

    def add_examples_from_csv(self, file_name, split_name='', column_label=0, column_text=1, column_id=None,
                              overwrite_labels=False, overwrite_examples=False):
        lines = self._read_tsv(file_name)
        texts = []
        labels = []
        ids = []
        for (i, line) in enumerate(lines):
            texts.append(line[column_text])
            labels.append(line[column_label])
            if column_id is not None:
                ids.append(line[column_id])
            else:
                guid = "%s-%s" % (split_name, i) if split_name else "%s" % i
                ids.append(guid)

        return self.add_examples(texts, labels, ids,
                                 overwrite_labels=overwrite_labels,
                                 overwrite_examples=overwrite_examples)
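
    # Hypothetical usage sketch: note that ``create_from_csv`` forwards ``**kwargs``
    # to the constructor only, so column options go through ``add_examples_from_csv``:
    #
    #     processor = SingleSentenceClassificationProcessor(mode="classification")
    #     processor.add_examples_from_csv("train.tsv", split_name="train",
    #                                     column_label=0, column_text=1)
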
    def add_examples(self, texts, labels, ids=None, overwrite_labels=False, overwrite_examples=False):
        if ids is None:
            ids = [None] * len(texts)
        assert len(texts) == len(labels)
        assert len(texts) == len(ids)

        examples = []
        added_labels = set()
        for (text, label, guid) in zip(texts, labels, ids):
            added_labels.add(label)
            examples.append(InputExample(guid=guid, text_a=text, text_b=None, label=label))

        # Update examples
        if overwrite_examples:
            self.examples = examples
        else:
            self.examples.extend(examples)

        # Update labels
        if overwrite_labels:
            self.labels = list(added_labels)
        else:
            self.labels = list(set(self.labels).union(added_labels))

        return self.examples

    @classmethod
    def create_from_examples(cls, texts, labels, **kwargs):
        processor = cls(**kwargs)
        processor.add_examples(texts, labels)
        return processor
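
    # Hypothetical usage sketch: build a processor directly from in-memory data:
    #
    #     processor = SingleSentenceClassificationProcessor.create_from_examples(
    #         ["This movie was great!", "Utterly boring."], ["pos", "neg"])
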
    def get_features(self,
                     tokenizer,
                     max_length=None,
                     pad_on_left=False,
                     pad_token=0,
                     mask_padding_with_zero=True,
                     return_tensors=None):
        """
        Convert the examples of this processor into a list of ``InputFeatures``.

        Args:
            tokenizer: Instance of a tokenizer that will tokenize the examples.
            max_length: Maximum example length. Defaults to the tokenizer's maximum length.
            pad_on_left: If set to ``True``, the examples will be padded on the left rather than on the right (default).
            pad_token: Padding token id.
            mask_padding_with_zero: If set to ``True``, the attention mask will be filled by ``1`` for actual values
                and by ``0`` for padded values. If set to ``False``, inverts it (``1`` for padded values, ``0`` for
                actual values).
            return_tensors: If set to ``'tf'`` or ``'pt'``, returns a ``tf.data.Dataset`` or a PyTorch
                ``TensorDataset`` instead of a list of ``InputFeatures``.

        Returns:
            A list of ``InputFeatures`` which can be fed to the model, or a ``tf.data.Dataset``
            (``return_tensors='tf'``) / PyTorch ``TensorDataset`` (``return_tensors='pt'``).
        """
        # ``min(None, ...)`` raises a TypeError in Python 3, so resolve the
        # default maximum length up front.
        if max_length is None:
            max_length = tokenizer.max_len

        label_map = {label: i for i, label in enumerate(self.labels)}

        all_input_ids = []
        for (ex_index, example) in enumerate(self.examples):
            if ex_index % 10000 == 0:
                logger.info("Tokenizing example %d", ex_index)

            input_ids = tokenizer.encode(
                example.text_a,
                add_special_tokens=True,
                max_length=min(max_length, tokenizer.max_len),
            )
            all_input_ids.append(input_ids)

        # Pad every example to the length of the longest sequence in the set.
        batch_length = max(len(input_ids) for input_ids in all_input_ids)

        features = []
        for (ex_index, (input_ids, example)) in enumerate(zip(all_input_ids, self.examples)):
            if ex_index % 10000 == 0:
                logger.info("Writing example %d", ex_index)
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = batch_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask
            else:
                input_ids = input_ids + ([pad_token] * padding_length)
                attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)

            assert len(input_ids) == batch_length, "Error with input length {} vs {}".format(len(input_ids), batch_length)
            assert len(attention_mask) == batch_length, "Error with input length {} vs {}".format(len(attention_mask), batch_length)

            if self.mode == "classification":
                label = label_map[example.label]
            elif self.mode == "regression":
                label = float(example.label)
            else:
                raise ValueError(self.mode)

            if ex_index < 5 and self.verbose:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("input_ids: %s", " ".join(str(x) for x in input_ids))
                logger.info("attention_mask: %s", " ".join(str(x) for x in attention_mask))
                logger.info("label: %s (id = %d)", example.label, label)

            features.append(
                InputFeatures(input_ids=input_ids,
                              attention_mask=attention_mask,
                              label=label))

        if return_tensors is None:
            return features
        elif return_tensors == 'tf':
            if not is_tf_available():
                raise ImportError("return_tensors set to 'tf' but TensorFlow 2.0 can't be imported")
            import tensorflow as tf

            def gen():
                for ex in features:
                    yield ({'input_ids': ex.input_ids,
                            'attention_mask': ex.attention_mask},
                           ex.label)

            dataset = tf.data.Dataset.from_generator(gen,
                                                     ({'input_ids': tf.int32,
                                                       'attention_mask': tf.int32},
                                                      tf.int64),
                                                     ({'input_ids': tf.TensorShape([None]),
                                                       'attention_mask': tf.TensorShape([None])},
                                                      tf.TensorShape([])))
            return dataset
        elif return_tensors == 'pt':
            if not is_torch_available():
                raise ImportError("return_tensors set to 'pt' but PyTorch can't be imported")
            import torch
            from torch.utils.data import TensorDataset

            all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
            all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
            if self.mode == "classification":
                all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
            elif self.mode == "regression":
                all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

            dataset = TensorDataset(all_input_ids, all_attention_mask, all_labels)
            return dataset
        else:
            raise ValueError("return_tensors should be one of 'tf' or 'pt'")
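
# End-to-end usage sketch (illustrative; assumes a pretrained tokenizer, e.g.
# ``BertTokenizer.from_pretrained("bert-base-uncased")``, is available as ``tokenizer``):
#
#     processor = SingleSentenceClassificationProcessor.create_from_examples(
#         ["This movie was great!", "Utterly boring."], ["pos", "neg"])
#     dataset = processor.get_features(tokenizer, max_length=128,
#                                      pad_token=tokenizer.pad_token_id,
#                                      return_tensors="pt")
#     # dataset is a TensorDataset of (input_ids, attention_mask, labels)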