Merge pull request #1337 from mgrankin/fastdataset

faster dataset building
Commit d83d295763, authored by Thomas Wolf on 2019-09-27 10:35:12 +02:00 (committed via GitHub)


@@ -74,9 +74,8 @@ class TextDataset(Dataset):
             tokenized_text = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text))
-            while len(tokenized_text) >= block_size:  # Truncate in blocks of block_size
-                self.examples.append(tokenizer.add_special_tokens_single_sequence(tokenized_text[:block_size]))
-                tokenized_text = tokenized_text[block_size:]
+            for i in range(0, len(tokenized_text)-block_size+1, block_size):  # Truncate in blocks of block_size
+                self.examples.append(tokenizer.add_special_tokens_single_sentence(tokenized_text[i:i+block_size]))
             # Note that we are losing the last truncated example here for the sake of simplicity (no padding)
             # If your dataset is small, first you should look for a bigger one :-) and second you
             # can change this behavior by adding (model specific) padding.
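
Why the new loop is faster: the old while version rebinds tokenized_text to a fresh slice on every pass, so each iteration copies the entire remaining tail and the loop is quadratic in the number of tokens; the for version slices each block out by index, copying every token exactly once. A minimal, self-contained sketch of the two strategies follows; the ids list and the timing harness are illustrative stand-ins, not part of the PR:

import timeit

def truncate_while(tokenized_text, block_size):
    # Old strategy: repeatedly chop the head off the list.
    examples = []
    while len(tokenized_text) >= block_size:
        examples.append(tokenized_text[:block_size])
        # This slice copies the whole remaining tail each pass -> O(n^2) overall.
        tokenized_text = tokenized_text[block_size:]
    return examples

def truncate_for(tokenized_text, block_size):
    # New strategy: slice each block out by index -> O(n) overall.
    examples = []
    for i in range(0, len(tokenized_text) - block_size + 1, block_size):
        examples.append(tokenized_text[i:i + block_size])
    return examples

ids = list(range(1_000_000))  # stand-in for a long tokenized corpus
assert truncate_while(ids, 512) == truncate_for(ids, 512)
print("while:", timeit.timeit(lambda: truncate_while(ids, 512), number=1))
print("for:  ", timeit.timeit(lambda: truncate_for(ids, 512), number=1))

Both versions drop the final partial block; that is the "last truncated example" the trailing comment refers to.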