Ensure fast tokenizer can construct tensor without pad token if only one sample is provided. (#4201)

Commit 026097b9ee (parent 0a6cbea0a5)
Funtowicz Morgan, 2020-05-07 14:02:53 +00:00, committed by GitHub

@@ -2435,7 +2435,7 @@ class PreTrainedTokenizerFast(PreTrainedTokenizer):
         )
         # Needed if we have to return a tensor
-        pad_to_max_length = pad_to_max_length or (return_tensors is not None)
+        pad_to_max_length = pad_to_max_length or (return_tensors is not None and len(batch_text_or_text_pairs) > 1)
         # Throw an error if we can pad because there is no padding token
         if pad_to_max_length and self.pad_token_id is None:
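
For context, a minimal standalone sketch of the padding decision this one-line change adjusts. It is illustrative only: needs_padding is a hypothetical helper, not a transformers API, though its body mirrors the expression in the diff.

    # Sketch of the padding decision (illustrative; needs_padding is a
    # hypothetical helper, not part of the transformers library).
    def needs_padding(batch_text_or_text_pairs, pad_to_max_length, return_tensors):
        # After this commit: a tensor request alone no longer forces padding
        # when the batch holds a single sample, since one sequence can be
        # converted to a tensor as-is, without needing a pad token.
        return pad_to_max_length or (
            return_tensors is not None and len(batch_text_or_text_pairs) > 1
        )

    # One sample with tensors requested: no padding, so no pad token required.
    assert not needs_padding(["a single sentence"], False, "pt")
    # Two samples must be padded to a common length before stacking into a tensor.
    assert needs_padding(["short", "a much longer sentence"], False, "pt")

This keeps the subsequent pad-token check from raising for tokenizers (such as GPT-2's) that define no pad token, as long as only one sample is encoded.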