Mirror of https://github.com/huggingface/transformers.git, synced 2025-08-01 02:31:11 +06:00.
fix(type): padding_side type should be Optional[str] (#36326)
This commit is contained in:
parent f4684a6eb2
commit 18276b03f7
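padding_side selects the side on which padding tokens are inserted and takes the string values "right" or "left", so annotating it as Optional[bool] was incorrect; this commit switches every affected signature to Optional[str]. A minimal sketch of the intended usage (the checkpoint name and example sentences are illustrative, not part of this commit, and it assumes a transformers release that already exposes the per-call padding_side argument shown in the hunks below):

    from transformers import AutoTokenizer

    # padding_side is a string ("left" or "right"), matching the corrected
    # Optional[str] annotation; it is not a boolean flag.
    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")  # illustrative checkpoint
    batch = tokenizer(
        ["a short sentence", "a somewhat longer example sentence"],
        padding=True,
        padding_side="left",  # pad on the left instead of the default right side
    )
    print(batch["input_ids"])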
@@ -414,7 +414,7 @@ class LayoutLMv2Tokenizer(PreTrainedTokenizer):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -570,7 +570,7 @@ class LayoutLMv2Tokenizer(PreTrainedTokenizer):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -630,7 +630,7 @@ class LayoutLMv2Tokenizer(PreTrainedTokenizer):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -684,7 +684,7 @@ class LayoutLMv2Tokenizer(PreTrainedTokenizer):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[str] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -758,7 +758,7 @@ class LayoutLMv2Tokenizer(PreTrainedTokenizer):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -807,7 +807,7 @@ class LayoutLMv2Tokenizer(PreTrainedTokenizer):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -875,7 +875,7 @@ class LayoutLMv2Tokenizer(PreTrainedTokenizer):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -930,7 +930,7 @@ class LayoutLMv2Tokenizer(PreTrainedTokenizer):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -1261,7 +1261,7 @@ class LayoutLMv2Tokenizer(PreTrainedTokenizer):
         max_length: Optional[int] = None,
         padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_attention_mask: Optional[bool] = None,
     ) -> dict:
         """

@@ -165,7 +165,7 @@ class LayoutLMv2TokenizerFast(PreTrainedTokenizerFast):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -321,7 +321,7 @@ class LayoutLMv2TokenizerFast(PreTrainedTokenizerFast):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -386,7 +386,7 @@ class LayoutLMv2TokenizerFast(PreTrainedTokenizerFast):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -458,7 +458,7 @@ class LayoutLMv2TokenizerFast(PreTrainedTokenizerFast):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[str] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -612,7 +612,7 @@ class LayoutLMv2TokenizerFast(PreTrainedTokenizerFast):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[bool] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -674,7 +674,7 @@ class LayoutLMv2TokenizerFast(PreTrainedTokenizerFast):
         max_length: Optional[int] = None,
         padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_attention_mask: Optional[bool] = None,
     ) -> dict:
         """

@@ -543,7 +543,7 @@ class LayoutLMv3Tokenizer(PreTrainedTokenizer):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -700,7 +700,7 @@ class LayoutLMv3Tokenizer(PreTrainedTokenizer):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -761,7 +761,7 @@ class LayoutLMv3Tokenizer(PreTrainedTokenizer):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -816,7 +816,7 @@ class LayoutLMv3Tokenizer(PreTrainedTokenizer):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[str] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -891,7 +891,7 @@ class LayoutLMv3Tokenizer(PreTrainedTokenizer):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -941,7 +941,7 @@ class LayoutLMv3Tokenizer(PreTrainedTokenizer):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -1010,7 +1010,7 @@ class LayoutLMv3Tokenizer(PreTrainedTokenizer):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -1065,7 +1065,7 @@ class LayoutLMv3Tokenizer(PreTrainedTokenizer):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -1400,7 +1400,7 @@ class LayoutLMv3Tokenizer(PreTrainedTokenizer):
         max_length: Optional[int] = None,
         padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_attention_mask: Optional[bool] = None,
     ) -> dict:
         """

@@ -209,7 +209,7 @@ class LayoutLMv3TokenizerFast(PreTrainedTokenizerFast):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -366,7 +366,7 @@ class LayoutLMv3TokenizerFast(PreTrainedTokenizerFast):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -433,7 +433,7 @@ class LayoutLMv3TokenizerFast(PreTrainedTokenizerFast):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -505,7 +505,7 @@ class LayoutLMv3TokenizerFast(PreTrainedTokenizerFast):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[str] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -665,7 +665,7 @@ class LayoutLMv3TokenizerFast(PreTrainedTokenizerFast):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[bool] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -728,7 +728,7 @@ class LayoutLMv3TokenizerFast(PreTrainedTokenizerFast):
         max_length: Optional[int] = None,
         padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_attention_mask: Optional[bool] = None,
     ) -> dict:
         """

@@ -447,7 +447,7 @@ class LayoutXLMTokenizer(PreTrainedTokenizer):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -602,7 +602,7 @@ class LayoutXLMTokenizer(PreTrainedTokenizer):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -656,7 +656,7 @@ class LayoutXLMTokenizer(PreTrainedTokenizer):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[str] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -729,7 +729,7 @@ class LayoutXLMTokenizer(PreTrainedTokenizer):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -784,7 +784,7 @@ class LayoutXLMTokenizer(PreTrainedTokenizer):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -1102,7 +1102,7 @@ class LayoutXLMTokenizer(PreTrainedTokenizer):
         max_length: Optional[int] = None,
         padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_attention_mask: Optional[bool] = None,
     ) -> dict:
         """

@@ -277,7 +277,7 @@ class LayoutXLMTokenizerFast(PreTrainedTokenizerFast):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -445,7 +445,7 @@ class LayoutXLMTokenizerFast(PreTrainedTokenizerFast):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[str] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -600,7 +600,7 @@ class LayoutXLMTokenizerFast(PreTrainedTokenizerFast):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[bool] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -662,7 +662,7 @@ class LayoutXLMTokenizerFast(PreTrainedTokenizerFast):
         max_length: Optional[int] = None,
         padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_attention_mask: Optional[bool] = None,
     ) -> dict:
         """

@@ -412,7 +412,7 @@ class LEDTokenizer(PreTrainedTokenizer):
         max_length: Optional[int] = None,
         padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_attention_mask: Optional[bool] = None,
     ) -> dict:
         encoded_inputs = super()._pad(

@@ -280,7 +280,7 @@ class LEDTokenizerFast(PreTrainedTokenizerFast):
         max_length: Optional[int] = None,
         padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_attention_mask: Optional[bool] = None,
     ) -> dict:
         encoded_inputs = super()._pad(

@@ -570,7 +570,7 @@ class LukeTokenizer(PreTrainedTokenizer):
         stride: int = 0,
         is_split_into_words: Optional[bool] = False,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -718,7 +718,7 @@ class LukeTokenizer(PreTrainedTokenizer):
         stride: int = 0,
         is_split_into_words: Optional[bool] = False,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -801,7 +801,7 @@ class LukeTokenizer(PreTrainedTokenizer):
         stride: int = 0,
         is_split_into_words: Optional[bool] = False,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -1077,7 +1077,7 @@ class LukeTokenizer(PreTrainedTokenizer):
         max_entity_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[str] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -1165,7 +1165,7 @@ class LukeTokenizer(PreTrainedTokenizer):
         max_entity_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -1394,7 +1394,7 @@ class LukeTokenizer(PreTrainedTokenizer):
         max_length: Optional[int] = None,
         max_entity_length: Optional[int] = None,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_attention_mask: Optional[bool] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         verbose: bool = True,
@@ -1554,7 +1554,7 @@ class LukeTokenizer(PreTrainedTokenizer):
         max_entity_length: Optional[int] = None,
         padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_attention_mask: Optional[bool] = None,
     ) -> dict:
         """

@@ -503,7 +503,7 @@ class MarkupLMTokenizer(PreTrainedTokenizer):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -655,7 +655,7 @@ class MarkupLMTokenizer(PreTrainedTokenizer):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -715,7 +715,7 @@ class MarkupLMTokenizer(PreTrainedTokenizer):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -769,7 +769,7 @@ class MarkupLMTokenizer(PreTrainedTokenizer):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[str] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -843,7 +843,7 @@ class MarkupLMTokenizer(PreTrainedTokenizer):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -892,7 +892,7 @@ class MarkupLMTokenizer(PreTrainedTokenizer):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -960,7 +960,7 @@ class MarkupLMTokenizer(PreTrainedTokenizer):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -1015,7 +1015,7 @@ class MarkupLMTokenizer(PreTrainedTokenizer):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -1375,7 +1375,7 @@ class MarkupLMTokenizer(PreTrainedTokenizer):
         max_length: Optional[int] = None,
         padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_attention_mask: Optional[bool] = None,
     ) -> dict:
         """

@@ -278,7 +278,7 @@ class MarkupLMTokenizerFast(PreTrainedTokenizerFast):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -430,7 +430,7 @@ class MarkupLMTokenizerFast(PreTrainedTokenizerFast):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -495,7 +495,7 @@ class MarkupLMTokenizerFast(PreTrainedTokenizerFast):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -567,7 +567,7 @@ class MarkupLMTokenizerFast(PreTrainedTokenizerFast):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[str] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -722,7 +722,7 @@ class MarkupLMTokenizerFast(PreTrainedTokenizerFast):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[bool] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -784,7 +784,7 @@ class MarkupLMTokenizerFast(PreTrainedTokenizerFast):
         max_length: Optional[int] = None,
         padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_attention_mask: Optional[bool] = None,
     ) -> dict:
         """

@@ -399,7 +399,7 @@ class MLukeTokenizer(PreTrainedTokenizer):
         stride: int = 0,
         is_split_into_words: Optional[bool] = False,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -548,7 +548,7 @@ class MLukeTokenizer(PreTrainedTokenizer):
         stride: int = 0,
         is_split_into_words: Optional[bool] = False,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -632,7 +632,7 @@ class MLukeTokenizer(PreTrainedTokenizer):
         stride: int = 0,
         is_split_into_words: Optional[bool] = False,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -911,7 +911,7 @@ class MLukeTokenizer(PreTrainedTokenizer):
         max_entity_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[str] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -1000,7 +1000,7 @@ class MLukeTokenizer(PreTrainedTokenizer):
         max_entity_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -1230,7 +1230,7 @@ class MLukeTokenizer(PreTrainedTokenizer):
         max_length: Optional[int] = None,
         max_entity_length: Optional[int] = None,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_attention_mask: Optional[bool] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         verbose: bool = True,
@@ -1391,7 +1391,7 @@ class MLukeTokenizer(PreTrainedTokenizer):
         max_entity_length: Optional[int] = None,
         padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_attention_mask: Optional[bool] = None,
     ) -> dict:
         """

@@ -210,7 +210,7 @@ class RoCBertTokenizer(PreTrainedTokenizer):
         stride: int = 0,
         is_split_into_words: bool = False,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -310,7 +310,7 @@ class RoCBertTokenizer(PreTrainedTokenizer):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -484,7 +484,7 @@ class RoCBertTokenizer(PreTrainedTokenizer):
         max_length: Optional[int] = None,
         padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_attention_mask: Optional[bool] = None,
     ) -> dict:
         # Load from model defaults
@@ -557,7 +557,7 @@ class RoCBertTokenizer(PreTrainedTokenizer):
         stride: int = 0,
         is_split_into_words: bool = False,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -658,7 +658,7 @@ class RoCBertTokenizer(PreTrainedTokenizer):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[str] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,

@@ -522,7 +522,7 @@ class TapasTokenizer(PreTrainedTokenizer):
         truncation: Union[bool, str, TapasTruncationStrategy] = False,
         max_length: Optional[int] = None,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -639,7 +639,7 @@ class TapasTokenizer(PreTrainedTokenizer):
         truncation: Union[bool, str, TapasTruncationStrategy] = False,
         max_length: Optional[int] = None,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -748,7 +748,7 @@ class TapasTokenizer(PreTrainedTokenizer):
         truncation: Union[bool, str, TapasTruncationStrategy] = False,
         max_length: Optional[int] = None,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = True,
         return_attention_mask: Optional[bool] = None,
@@ -809,7 +809,7 @@ class TapasTokenizer(PreTrainedTokenizer):
         truncation: Union[bool, str, TapasTruncationStrategy] = False,
         max_length: Optional[int] = None,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = True,
         return_attention_mask: Optional[bool] = True,
@@ -927,7 +927,7 @@ class TapasTokenizer(PreTrainedTokenizer):
         truncation: Union[bool, str, TapasTruncationStrategy] = False,
         max_length: Optional[int] = None,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -1010,7 +1010,7 @@ class TapasTokenizer(PreTrainedTokenizer):
         truncation: Union[bool, str, TapasTruncationStrategy] = False,
         max_length: Optional[int] = None,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = True,
         return_attention_mask: Optional[bool] = True,
@@ -1070,7 +1070,7 @@ class TapasTokenizer(PreTrainedTokenizer):
         truncation: Union[bool, str, TapasTruncationStrategy] = False,
         max_length: Optional[int] = None,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = True,
         return_attention_mask: Optional[bool] = True,
@@ -1775,7 +1775,7 @@ class TapasTokenizer(PreTrainedTokenizer):
         max_length: Optional[int] = None,
         padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_attention_mask: Optional[bool] = None,
     ) -> dict:
         """

@@ -551,7 +551,7 @@ class UdopTokenizer(PreTrainedTokenizer):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -707,7 +707,7 @@ class UdopTokenizer(PreTrainedTokenizer):
         stride: int = 0,
         is_split_into_words: bool = False,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -818,7 +818,7 @@ class UdopTokenizer(PreTrainedTokenizer):
         stride: int = 0,
         is_split_into_words: bool = False,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -899,7 +899,7 @@ class UdopTokenizer(PreTrainedTokenizer):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -953,7 +953,7 @@ class UdopTokenizer(PreTrainedTokenizer):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[str] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -1026,7 +1026,7 @@ class UdopTokenizer(PreTrainedTokenizer):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -1081,7 +1081,7 @@ class UdopTokenizer(PreTrainedTokenizer):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -1401,7 +1401,7 @@ class UdopTokenizer(PreTrainedTokenizer):
         max_length: Optional[int] = None,
         padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_attention_mask: Optional[bool] = None,
     ) -> dict:
         """

@@ -286,7 +286,7 @@ class UdopTokenizerFast(PreTrainedTokenizerFast):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -456,7 +456,7 @@ class UdopTokenizerFast(PreTrainedTokenizerFast):
         stride: int = 0,
         is_split_into_words: bool = False,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -533,7 +533,7 @@ class UdopTokenizerFast(PreTrainedTokenizerFast):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[str] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -691,7 +691,7 @@ class UdopTokenizerFast(PreTrainedTokenizerFast):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[bool] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -803,7 +803,7 @@ class UdopTokenizerFast(PreTrainedTokenizerFast):
         stride: int = 0,
         is_split_into_words: bool = False,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -875,7 +875,7 @@ class UdopTokenizerFast(PreTrainedTokenizerFast):
         max_length: Optional[int] = None,
         padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_attention_mask: Optional[bool] = None,
     ) -> dict:
         """

@@ -781,7 +781,7 @@ class Wav2Vec2Tokenizer(PreTrainedTokenizer):
         padding: Union[bool, str, PaddingStrategy] = False,
         max_length: Optional[int] = None,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         verbose: bool = True,
         **kwargs,

@@ -752,7 +752,7 @@ class PreTrainedTokenizer(PreTrainedTokenizerBase):
         stride: int = 0,
         is_split_into_words: bool = False,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -838,7 +838,7 @@ class PreTrainedTokenizer(PreTrainedTokenizerBase):
         stride: int = 0,
         is_split_into_words: bool = False,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -920,7 +920,7 @@ class PreTrainedTokenizer(PreTrainedTokenizerBase):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[str] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,

@@ -2622,7 +2622,7 @@ class PreTrainedTokenizerBase(SpecialTokensMixin, PushToHubMixin):
         truncation: Union[bool, str, TruncationStrategy] = None,
         max_length: Optional[int] = None,
         stride: int = 0,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         **kwargs,
     ) -> List[int]:
@@ -2813,7 +2813,7 @@ class PreTrainedTokenizerBase(SpecialTokensMixin, PushToHubMixin):
         stride: int = 0,
         is_split_into_words: bool = False,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -2900,7 +2900,7 @@ class PreTrainedTokenizerBase(SpecialTokensMixin, PushToHubMixin):
         stride: int = 0,
         is_split_into_words: bool = False,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -3019,7 +3019,7 @@ class PreTrainedTokenizerBase(SpecialTokensMixin, PushToHubMixin):
         stride: int = 0,
         is_split_into_words: bool = False,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -3094,7 +3094,7 @@ class PreTrainedTokenizerBase(SpecialTokensMixin, PushToHubMixin):
         stride: int = 0,
         is_split_into_words: bool = False,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -3126,7 +3126,7 @@ class PreTrainedTokenizerBase(SpecialTokensMixin, PushToHubMixin):
         stride: int = 0,
         is_split_into_words: bool = False,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -3203,7 +3203,7 @@ class PreTrainedTokenizerBase(SpecialTokensMixin, PushToHubMixin):
         stride: int = 0,
         is_split_into_words: bool = False,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -3229,7 +3229,7 @@ class PreTrainedTokenizerBase(SpecialTokensMixin, PushToHubMixin):
         padding: Union[bool, str, PaddingStrategy] = True,
         max_length: Optional[int] = None,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_attention_mask: Optional[bool] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         verbose: bool = True,
@@ -3447,7 +3447,7 @@ class PreTrainedTokenizerBase(SpecialTokensMixin, PushToHubMixin):
         max_length: Optional[int] = None,
         stride: int = 0,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[Union[str, TensorType]] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -3704,7 +3704,7 @@ class PreTrainedTokenizerBase(SpecialTokensMixin, PushToHubMixin):
         max_length: Optional[int] = None,
         padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_attention_mask: Optional[bool] = None,
     ) -> dict:
         """

@@ -427,7 +427,7 @@ class PreTrainedTokenizerFast(PreTrainedTokenizerBase):
         max_length: int,
         stride: int,
         pad_to_multiple_of: Optional[int],
-        padding_side: Optional[bool],
+        padding_side: Optional[str],
     ):
         """
         Define the truncation and the padding strategies for fast tokenizers (provided by HuggingFace tokenizers
@@ -507,7 +507,7 @@ class PreTrainedTokenizerFast(PreTrainedTokenizerBase):
         stride: int = 0,
         is_split_into_words: bool = False,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[str] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,
@@ -597,7 +597,7 @@ class PreTrainedTokenizerFast(PreTrainedTokenizerBase):
         stride: int = 0,
         is_split_into_words: bool = False,
         pad_to_multiple_of: Optional[int] = None,
-        padding_side: Optional[bool] = None,
+        padding_side: Optional[str] = None,
         return_tensors: Optional[bool] = None,
         return_token_type_ids: Optional[bool] = None,
         return_attention_mask: Optional[bool] = None,

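The `_pad` overrides changed above all sit behind the public `pad()` collation helper, whose signature in the base class (hunk @ -3229 above) carries the same argument. A small sketch of that path, again with an illustrative checkpoint and assuming a release where `pad()` already forwards `padding_side` as this diff shows:

    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")  # illustrative checkpoint
    encoded = [tokenizer("short"), tokenizer("a much longer input sentence")]
    # pad() hands padding_side down to _pad(); with the corrected annotation the
    # expected values are the strings "left" and "right", not True/False.
    batch = tokenizer.pad(encoded, padding=True, padding_side="left")
    print(batch["attention_mask"])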