From 92429057d9cb0581ec53e6208fc6e63cdf0a01ab Mon Sep 17 00:00:00 2001
From: Mohamed Mekkouri <93391238+MekkCyber@users.noreply.github.com>
Date: Thu, 27 Mar 2025 12:38:37 +0100
Subject: [PATCH] Skip FP8 linear tests for device capability < 9.0 (#37008)

* skip fp8 linear

* add capability check

* format
---
 tests/quantization/finegrained_fp8/test_fp8.py | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/tests/quantization/finegrained_fp8/test_fp8.py b/tests/quantization/finegrained_fp8/test_fp8.py
index e59c2068cde..69881b4cbbf 100644
--- a/tests/quantization/finegrained_fp8/test_fp8.py
+++ b/tests/quantization/finegrained_fp8/test_fp8.py
@@ -250,6 +250,10 @@ class FP8QuantizerTest(unittest.TestCase):
 class FP8LinearTest(unittest.TestCase):
     device = "cuda"
 
+    @unittest.skipIf(
+        torch.cuda.is_available() and torch.cuda.get_device_capability()[0] < 9,
+        "Skipping FP8LinearTest because it is not supported on GPU with capability < 9.0",
+    )
     def test_linear_preserves_shape(self):
         """
         Test that FP8Linear preserves shape when in_features == out_features.
@@ -262,6 +266,10 @@ class FP8LinearTest(unittest.TestCase):
         x_ = linear(x)
         self.assertEqual(x_.shape, x.shape)
 
+    @unittest.skipIf(
+        torch.cuda.is_available() and torch.cuda.get_device_capability()[0] < 9,
+        "Skipping FP8LinearTest because it is not supported on GPU with capability < 9.0",
+    )
     def test_linear_with_diff_feature_size_preserves_shape(self):
         """
         Test that FP8Linear generates the correct shape when in_features != out_features.