diff --git a/src/transformers/tokenization_utils_base.py b/src/transformers/tokenization_utils_base.py
index 83d5c649b5c..37c57ef65ff 100644
--- a/src/transformers/tokenization_utils_base.py
+++ b/src/transformers/tokenization_utils_base.py
@@ -2012,7 +2012,6 @@ class PreTrainedTokenizerBase(SpecialTokensMixin, PushToHubMixin):
 
         # Get files from url, cache, or disk depending on the case
         resolved_vocab_files = {}
-        unresolved_files = []
         for file_id, file_path in vocab_files.items():
             if file_path is None:
                 resolved_vocab_files[file_id] = None
@@ -2041,12 +2040,6 @@ class PreTrainedTokenizerBase(SpecialTokensMixin, PushToHubMixin):
             )
             commit_hash = extract_commit_hash(resolved_vocab_files[file_id], commit_hash)
 
-        if len(unresolved_files) > 0:
-            logger.info(
-                f"Can't load following files from cache: {unresolved_files} and cannot check if these "
-                "files are necessary for the tokenizer to operate."
-            )
-
         # If one passes a GGUF file path to `gguf_file` there is no need for this check as the tokenizer will be
         # loaded directly from the GGUF file.
         if all(full_file_name is None for full_file_name in resolved_vocab_files.values()) and not gguf_file: