Fix typo in EETQ Tests (#35160)

fix
This commit is contained in:
Mohamed Mekkouri 2024-12-09 14:13:36 +01:00 committed by GitHub
parent de8a0b7547
commit 7238387f67
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

View File

@@ -119,7 +119,7 @@ class EetqTest(unittest.TestCase):
         self.assertEqual(nb_linears - 1, nb_eetq_linear)
-        # Try with `linear_weights_not_to_quantize`
+        # Try with `modules_to_not_convert`
         with init_empty_weights():
             model = OPTForCausalLM(config)
         quantization_config = EetqConfig(modules_to_not_convert=["fc1"])
@@ -128,7 +128,7 @@ class EetqTest(unittest.TestCase):
         for module in model.modules():
             if isinstance(module, EetqLinear):
                 nb_eetq_linear += 1
         # 25 corresponds to the lm_head along with 24 fc1 layers.
         self.assertEqual(nb_linears - 25, nb_eetq_linear)

     def test_quantized_model(self):