[Style] fix E721 warnings (#36474)

* fix E721 warnings

* config.hidden_size is not a tuple

* fix copies

* fix-copies

* not a tuple

* undo

* undo
Author: Kashif Rasul (committed via GitHub)
Date:   2025-03-03 19:03:42 +01:00
Commit: 9fe82793ee (parent 1975be4d97)
25 changed files with 37 additions and 37 deletions
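For context, E721 is the lint rule "do not compare types"; it flags exact type comparisons written with == and suggests isinstance() for instance checks. A minimal sketch of the pattern applied throughout this commit (illustrative only, assuming torch is installed; not code taken from the diff):

import torch

param = torch.zeros(3)

# Old style, flagged by E721: compares the exact type with ==
if type(param) == torch.Tensor:  # noqa: E721
    print("exact type match")

# Style applied across this commit: isinstance, which also accepts
# subclasses such as torch.nn.Parameter
if isinstance(param, torch.Tensor):
    print("isinstance match")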

@@ -41,7 +41,7 @@
 "from scipy import sparse\n",
 "from torch import nn\n",
 "\n",
-"from transformers import *\n",
+"from transformers import BertForQuestionAnswering\n",
 "\n",
 "\n",
 "os.chdir(\"../../\")"
@@ -307,7 +307,7 @@
 "            print(f\"Skip {name}\")\n",
 "            continue\n",
 "\n",
-"        if type(param) == torch.Tensor:\n",
+"        if isinstance(param, torch.Tensor):\n",
 "            if param.numel() == 1:\n",
 "                # module scale\n",
 "                # module zero_point\n",
@@ -319,13 +319,13 @@
 "                param = param.detach().numpy()\n",
 "                hf.create_dataset(name, data=param, compression=\"gzip\", compression_opts=9)\n",
 "\n",
-"        elif type(param) == float or type(param) == int or type(param) == tuple:\n",
+"        elif isinstance(param, (float, int, tuple)):\n",
 "            # float - tensor _packed_params.weight.scale\n",
 "            # int - tensor _packed_params.weight.zero_point\n",
 "            # tuple - tensor _packed_params.weight.shape\n",
 "            hf.attrs[name] = param\n",
 "\n",
-"        elif type(param) == torch.dtype:\n",
+"        elif isinstance(param, torch.dtype):\n",
 "            # dtype - tensor _packed_params.dtype\n",
 "            hf.attrs[name] = dtype_2_str[param]\n",
 "\n",
@@ -370,7 +370,7 @@
 "            # print(f\"Skip {name}\")\n",
 "            # continue\n",
 "\n",
-"        if type(param) == torch.Tensor:\n",
+"        if isinstance(param, torch.Tensor):\n",
 "            if param.numel() == 1:\n",
 "                # module scale\n",
 "                # module zero_point\n",
@@ -382,13 +382,13 @@
 "                param = param.detach().numpy()\n",
 "                hf.create_dataset(name, data=param, compression=\"gzip\", compression_opts=9)\n",
 "\n",
-"        elif type(param) == float or type(param) == int or type(param) == tuple:\n",
+"        elif isinstance(param, (float, int, tuple)):\n",
 "            # float - tensor _packed_params.weight.scale\n",
 "            # int - tensor _packed_params.weight.zero_point\n",
 "            # tuple - tensor _packed_params.weight.shape\n",
 "            hf.attrs[name] = param\n",
 "\n",
-"        elif type(param) == torch.dtype:\n",
+"        elif isinstance(param, torch.dtype):\n",
 "            # dtype - tensor _packed_params.dtype\n",
 "            hf.attrs[name] = dtype_2_str[param]\n",
 "\n",
@@ -471,10 +471,10 @@
 "    assert name in reconstructed_elementary_qtz_st, name\n",
 "\n",
 "for name, param in reconstructed_elementary_qtz_st.items():\n",
-"    assert type(param) == type(elementary_qtz_st[name]), name\n",
-"    if type(param) == torch.Tensor:\n",
+"    assert isinstance(param, type(elementary_qtz_st[name])), name\n",
+"    if isinstance(param, torch.Tensor):\n",
 "        assert torch.all(torch.eq(param, elementary_qtz_st[name])), name\n",
-"    elif type(param) == np.ndarray:\n",
+"    elif isinstance(param, np.ndarray):\n",
 "        assert (param == elementary_qtz_st[name]).all(), name\n",
 "    else:\n",
 "        assert param == elementary_qtz_st[name], name"
@@ -532,10 +532,10 @@
 "    assert name in reconstructed_qtz_st, name\n",
 "\n",
 "for name, param in reconstructed_qtz_st.items():\n",
-"    assert type(param) == type(qtz_st[name]), name\n",
-"    if type(param) == torch.Tensor:\n",
+"    assert isinstance(param, type(qtz_st[name])), name\n",
+"    if isinstance(param, torch.Tensor):\n",
 "        assert torch.all(torch.eq(param, qtz_st[name])), name\n",
-"    elif type(param) == np.ndarray:\n",
+"    elif isinstance(param, np.ndarray):\n",
 "        assert (param == qtz_st[name]).all(), name\n",
 "    else:\n",
 "        assert param == qtz_st[name], name"

@@ -114,7 +114,7 @@ class ASTSelfAttention(nn.Module):
         super().__init__()
         if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
             raise ValueError(
-                f"The hidden size {config.hidden_size,} is not a multiple of the number of attention "
+                f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
                 f"heads {config.num_attention_heads}."
             )
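The remaining model files all receive the same f-string fix: a stray comma after the placeholder expression turns the value into a one-element tuple, so the error message would read "(768,)" instead of "768". A quick illustration with a hypothetical value:

hidden_size = 768  # hypothetical value

# Before: the trailing comma makes the expression a one-element tuple,
# so this prints "The hidden size (768,) is not a multiple of ..."
print(f"The hidden size {hidden_size,} is not a multiple of the number of attention heads")

# After: the plain value is formatted as expected ("The hidden size 768 is ...")
print(f"The hidden size {hidden_size} is not a multiple of the number of attention heads")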

@@ -270,7 +270,7 @@ class BeitSelfAttention(nn.Module):
         self.config = config
         if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
             raise ValueError(
-                f"The hidden size {(config.hidden_size,)} is not a multiple of the number of attention "
+                f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
                 f"heads {config.num_attention_heads}."
             )

@@ -271,7 +271,7 @@ class FlaxBeitSelfAttention(nn.Module):
             self.config, "embedding_size"
         ):
             raise ValueError(
-                f"The hidden size {self.config.hidden_size,} is not a multiple of the number of attention "
+                f"The hidden size {self.config.hidden_size} is not a multiple of the number of attention "
                 f"heads {self.config.num_attention_heads}."
             )

@@ -271,7 +271,7 @@ class Data2VecVisionSelfAttention(nn.Module):
         self.config = config
         if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
             raise ValueError(
-                f"The hidden size {(config.hidden_size,)} is not a multiple of the number of attention "
+                f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
                 f"heads {config.num_attention_heads}."
             )

@@ -186,7 +186,7 @@ class DeiTSelfAttention(nn.Module):
         super().__init__()
         if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
             raise ValueError(
-                f"The hidden size {config.hidden_size,} is not a multiple of the number of attention "
+                f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
                 f"heads {config.num_attention_heads}."
             )

@@ -345,7 +345,7 @@ class TvltSelfAttention(nn.Module):
         super().__init__()
         if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
             raise ValueError(
-                f"The hidden size {config.hidden_size,} is not a multiple of the number of attention "
+                f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
                 f"heads {config.num_attention_heads}."
             )

@@ -204,7 +204,7 @@ class ViTHybridSelfAttention(nn.Module):
         super().__init__()
         if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
             raise ValueError(
-                f"The hidden size {config.hidden_size,} is not a multiple of the number of attention "
+                f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
                 f"heads {config.num_attention_heads}."
             )

@@ -178,7 +178,7 @@ class Dinov2SelfAttention(nn.Module):
         super().__init__()
         if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
             raise ValueError(
-                f"The hidden size {config.hidden_size,} is not a multiple of the number of attention "
+                f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
                 f"heads {config.num_attention_heads}."
             )

@@ -190,7 +190,7 @@ class Dinov2WithRegistersSelfAttention(nn.Module):
         super().__init__()
         if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
             raise ValueError(
-                f"The hidden size {config.hidden_size,} is not a multiple of the number of attention "
+                f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
                 f"heads {config.num_attention_heads}."
             )

@@ -301,7 +301,7 @@ class DPTViTSelfAttention(nn.Module):
         super().__init__()
         if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
             raise ValueError(
-                f"The hidden size {config.hidden_size,} is not a multiple of the number of attention "
+                f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
                 f"heads {config.num_attention_heads}."
             )

@@ -438,7 +438,7 @@ class FlavaSelfAttention(nn.Module):
         super().__init__()
         if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
             raise ValueError(
-                f"The hidden size {config.hidden_size,} is not a multiple of the number of attention "
+                f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
                 f"heads {config.num_attention_heads}."
             )

@@ -194,7 +194,7 @@ class IJepaSelfAttention(nn.Module):
         super().__init__()
         if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
             raise ValueError(
-                f"The hidden size {config.hidden_size,} is not a multiple of the number of attention "
+                f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
                 f"heads {config.num_attention_heads}."
             )

@@ -501,7 +501,7 @@ class LukeSelfAttention(nn.Module):
         super().__init__()
         if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
             raise ValueError(
-                f"The hidden size {config.hidden_size,} is not a multiple of the number of attention "
+                f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
                 f"heads {config.num_attention_heads}."
             )

@@ -215,7 +215,7 @@ class MobileViTSelfAttention(nn.Module):
         if hidden_size % config.num_attention_heads != 0:
             raise ValueError(
-                f"The hidden size {hidden_size,} is not a multiple of the number of attention "
+                f"The hidden size {hidden_size} is not a multiple of the number of attention "
                 f"heads {config.num_attention_heads}."
             )

@@ -262,7 +262,7 @@ class TFMobileViTSelfAttention(keras.layers.Layer):
         if hidden_size % config.num_attention_heads != 0:
             raise ValueError(
-                f"The hidden size {hidden_size,} is not a multiple of the number of attention "
+                f"The hidden size {hidden_size} is not a multiple of the number of attention "
                 f"heads {config.num_attention_heads}."
             )

@@ -112,7 +112,7 @@ class Qwen2AudioProcessor(ProcessorMixin):
             # ensure we have as much audios as audio tokens
             num_audio_tokens = sum(sample.count(self.audio_token) for sample in text)
-            num_audios = 1 if type(audios) == np.ndarray else len(audios)
+            num_audios = 1 if isinstance(audios, np.ndarray) else len(audios)
             if num_audio_tokens != num_audios:
                 raise ValueError(
                     f"Found {num_audio_tokens} {self.audio_token} token{'s' if num_audio_tokens > 1 else ''} in provided text but received {num_audios} audio{'s' if num_audios > 1 else ''}"

@@ -201,7 +201,7 @@ class VideoMAESelfAttention(nn.Module):
         super().__init__()
         if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
             raise ValueError(
-                f"The hidden size {config.hidden_size,} is not a multiple of the number of attention "
+                f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
                 f"heads {config.num_attention_heads}."
             )

@@ -322,7 +322,7 @@ class ViltSelfAttention(nn.Module):
         super().__init__()
         if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
             raise ValueError(
-                f"The hidden size {config.hidden_size,} is not a multiple of the number of attention "
+                f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
                 f"heads {config.num_attention_heads}."
             )

@@ -189,7 +189,7 @@ class ViTSelfAttention(nn.Module):
         super().__init__()
         if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
             raise ValueError(
-                f"The hidden size {config.hidden_size,} is not a multiple of the number of attention "
+                f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
                 f"heads {config.num_attention_heads}."
             )

@@ -362,7 +362,7 @@ class ViTMAESelfAttention(nn.Module):
         super().__init__()
         if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
             raise ValueError(
-                f"The hidden size {config.hidden_size,} is not a multiple of the number of attention "
+                f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
                 f"heads {config.num_attention_heads}."
            )

@@ -179,7 +179,7 @@ class ViTMSNSelfAttention(nn.Module):
         super().__init__()
         if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
             raise ValueError(
-                f"The hidden size {config.hidden_size,} is not a multiple of the number of attention "
+                f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
                 f"heads {config.num_attention_heads}."
             )

@@ -109,7 +109,7 @@ class VitPoseBackboneSelfAttention(nn.Module):
         super().__init__()
         if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
             raise ValueError(
-                f"The hidden size {config.hidden_size,} is not a multiple of the number of attention "
+                f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
                 f"heads {config.num_attention_heads}."
             )

@@ -172,7 +172,7 @@ class VivitSelfAttention(nn.Module):
         super().__init__()
         if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
             raise ValueError(
-                f"The hidden size {config.hidden_size,} is not a multiple of the number of attention "
+                f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
                 f"heads {config.num_attention_heads}."
             )

@@ -237,7 +237,7 @@ class YolosSelfAttention(nn.Module):
         super().__init__()
         if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
             raise ValueError(
-                f"The hidden size {config.hidden_size,} is not a multiple of the number of attention "
+                f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
                 f"heads {config.num_attention_heads}."
             )