diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py
index 9e167cfdeeb..f879aabc262 100644
--- a/src/transformers/modeling_utils.py
+++ b/src/transformers/modeling_utils.py
@@ -2853,7 +2853,7 @@ class PreTrainedModel(nn.Module, ModuleUtilsMixin, GenerationMixin, PushToHubMix
             if "special_dtypes" in inspect.signature(infer_auto_device_map).parameters:
                 kwargs["special_dtypes"] = special_dtypes
             elif len(special_dtypes) > 0:
-                logger.warn(
+                logger.warning(
                     "This model has some weights that should be kept in higher precision, you need to upgrade "
                     "`accelerate` to properly deal with them (`pip install --upgrade accelerate`)."
                 )
@@ -3359,7 +3359,7 @@ class PreTrainedModel(nn.Module, ModuleUtilsMixin, GenerationMixin, PushToHubMix
 
         if len(unexpected_keys) > 0:
             archs = [] if model.config.architectures is None else model.config.architectures
-            warner = logger.warn if model.__class__.__name__ in archs else logger.info
+            warner = logger.warning if model.__class__.__name__ in archs else logger.info
             warner(
                 f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when"
                 f" initializing {model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are"
diff --git a/src/transformers/models/blip/modeling_blip_text.py b/src/transformers/models/blip/modeling_blip_text.py
index 444a7a22b6b..61e983bb2db 100644
--- a/src/transformers/models/blip/modeling_blip_text.py
+++ b/src/transformers/models/blip/modeling_blip_text.py
@@ -403,7 +403,7 @@ class BlipTextEncoder(nn.Module):
     ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
         if self.gradient_checkpointing and self.training:
             if use_cache:
-                logger.warn(
+                logger.warning(
                     "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                 )
                 use_cache = False
diff --git a/src/transformers/models/blip_2/modeling_blip_2.py b/src/transformers/models/blip_2/modeling_blip_2.py
index a54aa9f633c..611a95bb1c1 100644
--- a/src/transformers/models/blip_2/modeling_blip_2.py
+++ b/src/transformers/models/blip_2/modeling_blip_2.py
@@ -940,7 +940,7 @@ class Blip2QFormerEncoder(nn.Module):
 
             if getattr(self.config, "gradient_checkpointing", False) and self.training:
                 if use_cache:
-                    logger.warn(
+                    logger.warning(
                         "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                     )
                     use_cache = False
diff --git a/src/transformers/models/deprecated/open_llama/modeling_open_llama.py b/src/transformers/models/deprecated/open_llama/modeling_open_llama.py
index f02c64502c0..0a7f0f8e985 100644
--- a/src/transformers/models/deprecated/open_llama/modeling_open_llama.py
+++ b/src/transformers/models/deprecated/open_llama/modeling_open_llama.py
@@ -39,7 +39,7 @@ try:
     from xformers import ops as xops
 except ImportError:
     xops = None
-    logger.warn(
+    logger.warning(
         "Xformers is not installed correctly. If you want to use memory_efficient_attention to accelerate training use the following command to install Xformers\npip install xformers."
     )
 
diff --git a/src/transformers/models/instructblip/modeling_instructblip.py b/src/transformers/models/instructblip/modeling_instructblip.py
index 48961998ed1..b532d78b445 100644
--- a/src/transformers/models/instructblip/modeling_instructblip.py
+++ b/src/transformers/models/instructblip/modeling_instructblip.py
@@ -930,7 +930,7 @@ class InstructBlipQFormerEncoder(nn.Module):
 
             if getattr(self.config, "gradient_checkpointing", False) and self.training:
                 if use_cache:
-                    logger.warn(
+                    logger.warning(
                         "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                     )
                     use_cache = False
diff --git a/src/transformers/models/wav2vec2/modeling_wav2vec2.py b/src/transformers/models/wav2vec2/modeling_wav2vec2.py
index 88d666cd534..95f465262c4 100755
--- a/src/transformers/models/wav2vec2/modeling_wav2vec2.py
+++ b/src/transformers/models/wav2vec2/modeling_wav2vec2.py
@@ -1273,7 +1273,7 @@ class Wav2Vec2PreTrainedModel(PreTrainedModel):
             raise ValueError(f"Cannot load_adapter for {target_lang} if `config.adapter_attn_dim` is not defined.")
 
         if target_lang == self.target_lang and not force_load:
-            logger.warn(f"Adapter weights are already set to {target_lang}.")
+            logger.warning(f"Adapter weights are already set to {target_lang}.")
             return
 
         cache_dir = kwargs.pop("cache_dir", None)
diff --git a/src/transformers/tools/agents.py b/src/transformers/tools/agents.py
index ec4e0c1cc36..c63aa9a063c 100644
--- a/src/transformers/tools/agents.py
+++ b/src/transformers/tools/agents.py
@@ -224,12 +224,12 @@ class Agent:
             self._toolbox.update(additional_tools)
             if len(replacements) > 1:
                 names = "\n".join([f"- {n}: {t}" for n, t in replacements.items()])
-                logger.warn(
+                logger.warning(
                     f"The following tools have been replaced by the ones provided in `additional_tools`:\n{names}."
                 )
             elif len(replacements) == 1:
                 name = list(replacements.keys())[0]
-                logger.warn(f"{name} has been replaced by {replacements[name]} as provided in `additional_tools`.")
+                logger.warning(f"{name} has been replaced by {replacements[name]} as provided in `additional_tools`.")
 
         self.prepare_for_new_chat()
 
diff --git a/src/transformers/tools/base.py b/src/transformers/tools/base.py
index f4a0b31da43..bf2dd8f1605 100644
--- a/src/transformers/tools/base.py
+++ b/src/transformers/tools/base.py
@@ -264,7 +264,7 @@ class Tool:
         if len(tool_class.name) == 0:
             tool_class.name = custom_tool["name"]
         if tool_class.name != custom_tool["name"]:
-            logger.warn(
+            logger.warning(
                 f"{tool_class.__name__} implements a different name in its configuration and class. Using the tool "
                 "configuration name."
             )
@@ -273,7 +273,7 @@ class Tool:
         if len(tool_class.description) == 0:
            tool_class.description = custom_tool["description"]
         if tool_class.description != custom_tool["description"]:
-            logger.warn(
+            logger.warning(
                 f"{tool_class.__name__} implements a different description in its configuration and class. Using the "
                 "tool configuration description."
            )
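
Background for the rename, as a minimal sketch rather than part of the patch: `logging.Logger.warn` is only a backward-compatibility alias of `Logger.warning`, and the snippet below (the `"demo"` logger name is illustrative; transformers obtains its loggers through `transformers.utils.logging.get_logger`, which returns a standard `logging.Logger`) shows the preferred call and, where the alias still exists, the `DeprecationWarning` it raises.

    import logging
    import warnings

    # Illustrative logger; real library code would use get_logger(__name__).
    logger = logging.getLogger("demo")
    logging.basicConfig(level=logging.WARNING)

    logger.warning("preferred spelling")  # the documented API

    # Logger.warn emits a DeprecationWarning where it still exists and has been
    # removed in newer Python releases, so guard the call before demonstrating it.
    if hasattr(logger, "warn"):
        with warnings.catch_warnings(record=True) as caught:
            warnings.simplefilter("always")
            logger.warn("old spelling")  # same log record, plus a DeprecationWarning
        print([str(w.message) for w in caught])

Both calls produce the same log record; only the deprecated alias adds the warning, which is why the patch is a pure rename with no behavior change.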