Add require_read_token to fp8 tests (#36189)

Fix: the FP8 quantization tests load `meta-llama/Llama-3.2-1B`, a gated checkpoint on the Hugging Face Hub, so they need to be gated behind `require_read_token` to run with an authenticated read token.
Mohamed Mekkouri authored on 2025-02-14 12:27:35 +01:00; committed by GitHub
parent 5f726f8b8e
commit cb586a3999

@@ -20,6 +20,7 @@ import unittest
 from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, FineGrainedFP8Config, OPTForCausalLM
 from transformers.testing_utils import (
     require_accelerate,
+    require_read_token,
     require_torch_gpu,
     require_torch_multi_gpu,
     slow,
@@ -59,6 +60,7 @@ class FineGrainedFP8ConfigTest(unittest.TestCase):
 @slow
 @require_accelerate
+@require_read_token
 @require_torch_gpu
 class FP8QuantizerTest(unittest.TestCase):
     model_name = "meta-llama/Llama-3.2-1B"
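
For context, a decorator like `require_read_token` gates a test (or test class) on the availability of a Hub read token. Below is a minimal sketch of that pattern; it is not the actual `transformers.testing_utils` implementation, and the `HF_HUB_READ_TOKEN` / `HF_TOKEN` environment variable names are assumptions here.

import os
import unittest

def require_read_token(test_case):
    # Sketch only: skip the decorated test or test class unless a
    # Hugging Face read token is available in the environment.
    # Env var names are assumptions; the real helper may resolve the
    # token differently (e.g. via huggingface_hub's stored credentials).
    token = os.environ.get("HF_HUB_READ_TOKEN") or os.environ.get("HF_TOKEN")
    return unittest.skipUnless(token, "test requires a Hugging Face read token")(test_case)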