Mirror of https://github.com/huggingface/transformers.git (synced 2025-08-01 02:31:11 +06:00)
Ensure fast tokenizer can construct tensor without pad token if only one sample is provided. (#4201)
commit 026097b9ee
parent 0a6cbea0a5
@@ -2435,7 +2435,7 @@ class PreTrainedTokenizerFast(PreTrainedTokenizer):
         )
 
         # Needed if we have to return a tensor
-        pad_to_max_length = pad_to_max_length or (return_tensors is not None)
+        pad_to_max_length = pad_to_max_length or (return_tensors is not None and len(batch_text_or_text_pairs) > 1)
 
         # Throw an error if we can't pad because there is no padding token
         if pad_to_max_length and self.pad_token_id is None:
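For context, a minimal sketch of the behavior this change enables, assuming a fast tokenizer that defines no pad token (GPT-2's is used here for illustration; the "gpt2" checkpoint and the calls below are illustrative assumptions, not part of the commit):

from transformers import GPT2TokenizerFast

# GPT-2's tokenizer has no pad token by default (pad_token_id is None).
tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")

# A batch of one sample needs no padding, so a tensor can be built even
# without a pad token. Before this fix, return_tensors unconditionally
# forced pad_to_max_length=True, which raised a ValueError here.
batch = tokenizer.batch_encode_plus(["Hello world"], return_tensors="pt")
print(batch["input_ids"].shape)  # torch.Size([1, 2])

# Two samples of unequal length still require padding, so a call like
# the following would still raise a ValueError about the missing pad token:
# tokenizer.batch_encode_plus(["Hi", "Hello world"], return_tensors="pt")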