diff --git a/pyproject.toml b/pyproject.toml
index 1a488dbba9a..dfb41c24d5d 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ target-version = ['py37']
 
 [tool.ruff]
 # Never enforce `E501` (line length violations).
-ignore = ["E501", "E741", "W605"]
+ignore = ["C901", "E501", "E741", "W605"]
 select = ["C", "E", "F", "I", "W"]
 line-length = 119
 
diff --git a/src/transformers/models/efficientnet/convert_efficientnet_to_pytorch.py b/src/transformers/models/efficientnet/convert_efficientnet_to_pytorch.py
index dc7b592bfb5..9813ccb9b80 100644
--- a/src/transformers/models/efficientnet/convert_efficientnet_to_pytorch.py
+++ b/src/transformers/models/efficientnet/convert_efficientnet_to_pytorch.py
@@ -160,7 +160,7 @@ def convert_image_processor(model_name):
 # here we list all keys to be renamed (original name on the left, our name on the right)
 def rename_keys(original_param_names):
     block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
-    block_names = sorted(list(set(block_names)))
+    block_names = sorted(set(block_names))
     num_blocks = len(block_names)
     block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}
 
@@ -267,7 +267,7 @@ def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_m
     tf_params = {param.name: param.numpy() for param in tf_params}
     for param in tf_non_train_params:
         tf_params[param.name] = param.numpy()
-    tf_param_names = [k for k in tf_params.keys()]
+    tf_param_names = list(tf_params.keys())
 
     # Load HuggingFace model
     config = get_efficientnet_config(model_name)
diff --git a/src/transformers/models/gptsan_japanese/modeling_gptsan_japanese.py b/src/transformers/models/gptsan_japanese/modeling_gptsan_japanese.py
index cb78f5473a7..302719b15f4 100644
--- a/src/transformers/models/gptsan_japanese/modeling_gptsan_japanese.py
+++ b/src/transformers/models/gptsan_japanese/modeling_gptsan_japanese.py
@@ -1029,7 +1029,7 @@ class GPTSanJapaneseModel(GPTSanJapanesePreTrainedModel):
            )  # n_layer x batch x n_heads x N x N
 
        # outputs
-       present_key_value_states = tuple() if self.config.use_cache or use_cache else None
+       present_key_value_states = () if self.config.use_cache or use_cache else None
        all_hidden_states = () if self.config.output_hidden_states or output_hidden_states else None
        all_attentions = () if self.config.output_attentions or output_attentions else None
        all_router_probs = () if self.config.output_router_logits or output_router_logits else None