Mirror of https://github.com/huggingface/transformers.git (synced 2025-07-31 02:02:21 +06:00)
Removed some duplicated code (#35637)

* Removed duplicate class field definition.
* Removed duplicate code in try-except block.

Co-authored-by: Pablo Montalvo <39954772+molbap@users.noreply.github.com>
parent b8c34d97fc
commit 91f14f1fc4
@@ -196,7 +196,6 @@ class PaliGemmaPreTrainedModel(PreTrainedModel):
     _supports_cache_class = True
     _supports_quantized_cache = True
     _supports_static_cache = True
-    _supports_cache_class = True
     _supports_flash_attn_2 = True
     _supports_sdpa = True

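Editor's note, not part of the commit: when a Python class body assigns the same attribute twice, the later assignment silently rebinds the name, so the duplicate `_supports_cache_class = True` was redundant rather than harmful. A minimal sketch with a hypothetical class name:

    class Example:
        # The second assignment simply rebinds the name; only one class
        # attribute exists afterwards, so deleting the duplicate changes nothing.
        flag = True
        flag = True  # duplicate: harmless but dead weight

    print(Example.flag)  # True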
@@ -2292,13 +2292,6 @@ class PreTrainedTokenizerBase(SpecialTokensMixin, PushToHubMixin):
                 "Unable to load vocabulary from file. "
                 "Please check that the provided vocabulary is accessible and not corrupted."
             )
-        except RuntimeError as e:
-            if "sentencepiece_processor.cc" in str(e):
-                logger.info(
-                    "Unable to load tokenizer model from SPM, loading from TikToken will be attempted instead."
-                    "(SentencePiece RuntimeError: Tried to load SPM model with non-SPM vocab file).",
-                )
-            return False

         if added_tokens_decoder != {} and max(list(added_tokens_decoder.keys())[-1], 0) > tokenizer.vocab_size:
             logger.info(
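Editor's note, not part of the commit: Python dispatches to the first `except` clause whose type matches, so a second handler for an exception type that an earlier clause already covers is unreachable. A minimal sketch, assuming the removed block duplicated an earlier `except RuntimeError` handler in the same try statement (hypothetical function, not the library's code):

    def try_load_spm():
        try:
            raise RuntimeError("sentencepiece_processor.cc: bad vocab file")
        except RuntimeError as e:
            # The first matching clause wins.
            print("handled:", e)
            return False
        except RuntimeError:
            # Unreachable: the clause above already catches RuntimeError,
            # which is why the duplicate block could be deleted safely.
            return True

    print(try_load_spm())  # prints "handled: ..." then False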