Mirror of https://github.com/huggingface/transformers.git (synced 2025-07-31 02:02:21 +06:00)
chore: fix typos in tests directory (#36785)
* chore: fix typos in tests directory
* chore: fix typos in tests directory
* chore: fix typos in tests directory
* chore: fix typos in tests directory
* chore: fix typos in tests directory
* chore: fix typos in tests directory
* chore: fix typos in tests directory
parent 7f5077e536
commit 19b9d8ae13
@@ -43,7 +43,7 @@ class ConstraintTest(unittest.TestCase):
             DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

     def test_check_illegal_input(self):
-        # We can't have constraints that are complete subsets of another. This leads to a preverse
+        # We can't have constraints that are complete subsets of another. This leads to a perverse
         # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
         # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
         # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
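For readers skimming the diff, the comment being fixed here carries the actual reasoning: nested disjunctive constraints make "fulfillment" ambiguous. A minimal sketch of that behaviour, assuming (as the test name `test_check_illegal_input` suggests) that the constructor raises `ValueError` for such inputs:

```python
from transformers.generation import DisjunctiveConstraint

# [1, 2] is a complete subset of [1, 2, 3, 4]: generating [1, 2] would already
# "fulfill" the constraint while also being a prefix of [1, 2, 3, 4], so the
# constructor is expected to reject this combination outright.
try:
    DisjunctiveConstraint([[1, 2], [1, 2, 3, 4]])
except ValueError as err:
    print(f"rejected as expected: {err}")
```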
@@ -495,7 +495,7 @@ class AlignModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     def test_model_get_set_embeddings(self):
         pass

-    # override as the `temperature` parameter initilization is different for ALIGN
+    # override as the `temperature` parameter initialization is different for ALIGN
     def test_initialization(self):
         config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
@@ -504,7 +504,7 @@ class AlignModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
             model = model_class(config=configs_no_init)
             for name, param in model.named_parameters():
                 if param.requires_grad:
-                    # check if `temperature` is initilized as per the original implementation
+                    # check if `temperature` is initialized as per the original implementation
                     if name == "temperature":
                         self.assertAlmostEqual(
                             param.data.item(),
@@ -482,7 +482,7 @@ class AltCLIPModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase)
     def test_model_get_set_embeddings(self):
         pass

-    # override as the `logit_scale` parameter initilization is different for AltCLIP
+    # override as the `logit_scale` parameter initialization is different for AltCLIP
     def test_initialization(self):
         config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
         configs_no_init = _config_zero_init(config)
@@ -490,7 +490,7 @@ class AltCLIPModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase)
             model = model_class(config=configs_no_init)
             for name, param in model.named_parameters():
                 if param.requires_grad:
-                    # check if `logit_scale` is initilized as per the original implementation
+                    # check if `logit_scale` is initialized as per the original implementation
                     if name == "logit_scale":
                         self.assertAlmostEqual(
                             param.data.item(),
@@ -186,7 +186,7 @@ class AutoFeatureExtractorTest(unittest.TestCase):
             model_config.save_pretrained(tmpdirname)
             # copy relevant files
             copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))
-            # create emtpy sample processor
+            # create empty sample processor
             with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                 f.write("{}")
@@ -613,7 +613,7 @@ class PipelineUtilsTest(unittest.TestCase):
         set_seed_fn = lambda: torch.manual_seed(0)  # noqa: E731
         for task in SUPPORTED_TASKS.keys():
             if task == "table-question-answering":
-                # test table in seperate test due to more dependencies
+                # test table in separate test due to more dependencies
                 continue

             self.check_default_pipeline(task, "pt", set_seed_fn, self.check_models_equal_pt)
@@ -631,7 +631,7 @@ class PipelineUtilsTest(unittest.TestCase):
         set_seed_fn = lambda: keras.utils.set_random_seed(0)  # noqa: E731
         for task in SUPPORTED_TASKS.keys():
             if task == "table-question-answering":
-                # test table in seperate test due to more dependencies
+                # test table in separate test due to more dependencies
                 continue

             self.check_default_pipeline(task, "tf", set_seed_fn, self.check_models_equal_tf)
@@ -778,7 +778,7 @@ class TokenClassificationPipelineTests(unittest.TestCase):
     @require_tf
     def test_tf_only(self):
         model_name = "hf-internal-testing/tiny-random-bert-tf-only"  # This model only has a TensorFlow version
-        # We test that if we don't specificy framework='tf', it gets detected automatically
+        # We test that if we don't specify framework='tf', it gets detected automatically
         token_classifier = pipeline(task="ner", model=model_name)
         self.assertEqual(token_classifier.framework, "tf")
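For context on the behaviour being exercised: when `framework` is not passed, `pipeline()` infers it from the weights available for the checkpoint. A rough usage sketch (the checkpoint is the one named in the test; the printed value reflects the expected detection, not a verified run):

```python
from transformers import pipeline

# this test checkpoint only ships TensorFlow weights, so the framework should be
# detected as "tf" even though framework="tf" is never passed explicitly
token_classifier = pipeline(task="ner", model="hf-internal-testing/tiny-random-bert-tf-only")
print(token_classifier.framework)  # expected: "tf"
```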
@@ -13,7 +13,7 @@ The following is the recipe on how to effectively debug `bitsandbytes` integration

 The following instructions are tested with 2 NVIDIA-Tesla T4 GPUs. To run successfully `bitsandbytes` you would need a 8-bit core tensor supported GPU. Note that Turing, Ampere or newer architectures - e.g. T4, RTX20s RTX30s, A40-A100, A6000 should be supported.

-## Virutal envs
+## Virtual envs

 ```bash
 conda create --name int8-testing python==3.8
@@ -61,7 +61,7 @@ This happens when some Linear weights are set to the CPU when using `accelerate`

 Use the latest version of `accelerate` with a command such as: `pip install -U accelerate` and the problem should be solved.

-### `Parameter has no attribue .CB`
+### `Parameter has no attribute .CB`

 Same solution as above.
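As a quick aside, one generic way to confirm which `accelerate` version is actually active after the upgrade (a standard Python check, not taken from the guide itself):

```python
import accelerate

# should report the freshly upgraded version after `pip install -U accelerate`
print(accelerate.__version__)
```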
@@ -71,7 +71,7 @@ Run your script by pre-pending `CUDA_LAUNCH_BLOCKING=1` and you should observe a

 ### `CUDA illegal memory error: an illegal memory access at line...`:

-Check the CUDA verisons with:
+Check the CUDA versions with:
 ```bash
 nvcc --version
 ```
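Besides `nvcc`, it is often useful to compare against the CUDA version PyTorch itself was built with, since a mismatch between the two is a common source of such illegal-memory errors. A small sketch using standard PyTorch attributes:

```python
import torch

# CUDA version PyTorch was compiled against; compare with the output of `nvcc --version`
print(torch.version.cuda)
# sanity check that a CUDA device is visible at all
print(torch.cuda.is_available())
```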
@@ -179,7 +179,7 @@ class Bnb4BitTest(Base4bitTest):

     def test_original_dtype(self):
         r"""
-        A simple test to check if the model succesfully stores the original dtype
+        A simple test to check if the model successfully stores the original dtype
         """
         self.assertTrue(hasattr(self.model_4bit.config, "_pre_quantization_dtype"))
         self.assertFalse(hasattr(self.model_fp16.config, "_pre_quantization_dtype"))
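For illustration, a hedged sketch of what `_pre_quantization_dtype` refers to; the checkpoint and loading arguments are assumptions for the example, not the test's actual setup:

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

model_id = "facebook/opt-350m"  # illustrative small model, not the test checkpoint

# load in 4-bit via bitsandbytes; the quantizer records the dtype the weights
# had before quantization on the model config
model_4bit = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=BitsAndBytesConfig(load_in_4bit=True),
    device_map="auto",
)
print(model_4bit.config._pre_quantization_dtype)  # e.g. torch.float16

# a model loaded without quantization should not carry the attribute at all
model_fp16 = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16)
print(hasattr(model_fp16.config, "_pre_quantization_dtype"))  # False
```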
@@ -496,8 +496,8 @@ class Pipeline4BitTest(Base4bitTest):
     def test_pipeline(self):
         r"""
         The aim of this test is to verify that the mixed 4bit is compatible with `pipeline` from transformers. Since
-        we used pipline for inference speed benchmarking we want to make sure that this feature does not break anything
-        on pipline.
+        we used pipeline for inference speed benchmarking we want to make sure that this feature does not break anything
+        on pipeline.
         """
         # self._clear_cuda_cache()
         self.pipe = pipeline(
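A minimal sketch of the pattern this docstring describes, i.e. handing an already-quantized model to `pipeline` (checkpoint, prompt, and generation settings are illustrative assumptions):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, pipeline

model_id = "facebook/opt-350m"  # illustrative small model
tokenizer = AutoTokenizer.from_pretrained(model_id)
model_4bit = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=BitsAndBytesConfig(load_in_4bit=True),
    device_map="auto",
)

# the 4-bit model plugs into pipeline() like any other model instance
pipe = pipeline("text-generation", model=model_4bit, tokenizer=tokenizer, max_new_tokens=10)
print(pipe("Hello, my name is")[0]["generated_text"])
```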
@@ -213,7 +213,7 @@ class MixedInt8Test(BaseMixedInt8Test):

     def test_original_dtype(self):
         r"""
-        A simple test to check if the model succesfully stores the original dtype
+        A simple test to check if the model successfully stores the original dtype
         """
         self.assertTrue(hasattr(self.model_8bit.config, "_pre_quantization_dtype"))
         self.assertFalse(hasattr(self.model_fp16.config, "_pre_quantization_dtype"))
@@ -655,8 +655,8 @@ class MixedInt8TestPipeline(BaseMixedInt8Test):
     def test_pipeline(self):
         r"""
         The aim of this test is to verify that the mixed int8 is compatible with `pipeline` from transformers. Since
-        we used pipline for inference speed benchmarking we want to make sure that this feature does not break anything
-        on pipline.
+        we used pipeline for inference speed benchmarking we want to make sure that this feature does not break anything
+        on pipeline.
         """
         # self._clear_cuda_cache()
         self.pipe = pipeline(
@@ -167,7 +167,7 @@ class GPTQTest(unittest.TestCase):

     def test_original_dtype(self):
         r"""
-        A simple test to check if the model succesfully stores the original dtype
+        A simple test to check if the model successfully stores the original dtype
         """
         self.assertTrue(hasattr(self.quantized_model.config, "_pre_quantization_dtype"))
         self.assertFalse(hasattr(self.model_fp16.config, "_pre_quantization_dtype"))
@@ -261,7 +261,7 @@ class GPTQTest(unittest.TestCase):
         if self.device_map == "cpu":
             quant_type = "ipex" if is_ipex_available() else "torch"
         else:
-            # We expecte tritonv2 to be used here, because exllama backend doesn't support packing https://github.com/ModelCloud/GPTQModel/issues/1354
+            # We expect tritonv2 to be used here, because exllama backend doesn't support packing https://github.com/ModelCloud/GPTQModel/issues/1354
             # TODO: Remove this once GPTQModel exllama kernels supports packing
             quant_type = "tritonv2"
         quantized_model_from_saved = AutoModelForCausalLM.from_pretrained(
@@ -433,7 +433,7 @@ class GPTQTestExllamaV2(unittest.TestCase):
                 "exllamav2",
             )
         else:
-            # We expecte tritonv2 to be used here, because exllama backend doesn't support packing https://github.com/ModelCloud/GPTQModel/issues/1354
+            # We expect tritonv2 to be used here, because exllama backend doesn't support packing https://github.com/ModelCloud/GPTQModel/issues/1354
             # TODO: Remove this once GPTQModel exllama kernels supports packing
             self.assertEqual(
                 self.quantized_model.model.layers[0].self_attn.k_proj.QUANT_TYPE,
@@ -458,7 +458,7 @@ class GPTQTestExllamaV2(unittest.TestCase):

     def test_generate_quality(self):
         """
-        Simple test to check the quality of the model by comapring the the generated tokens with the expected tokens
+        Simple test to check the quality of the model by comparing the the generated tokens with the expected tokens
         """
         self.check_inference_correctness(self.quantized_model)
@@ -184,7 +184,7 @@ class HiggsTest(unittest.TestCase):
         output = model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
         self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)

-    @unittest.skip("This will almost surely OOM. Enable when swithed to a smaller model")
+    @unittest.skip("This will almost surely OOM. Enable when switched to a smaller model")
     def test_dequantize(self):
         """
         Test the ability to dequantize a model
@@ -202,7 +202,7 @@ class TorchAoGPUTest(TorchAoTest):

     def test_int4wo_offload(self):
         """
-        Simple test that checks if the quantized model int4 wieght only is working properly with cpu/disk offload
+        Simple test that checks if the quantized model int4 weight only is working properly with cpu/disk offload
         """

         device_map_offload = {
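For orientation, a hedged sketch of int4 weight-only quantization via torchao combined with CPU offload; the checkpoint, group size, and memory limits are illustrative assumptions rather than the test's configuration:

```python
import torch
from transformers import AutoModelForCausalLM, TorchAoConfig

# torchao int4 weight-only quantization (group_size chosen for the example)
quant_config = TorchAoConfig("int4_weight_only", group_size=128)

# capping GPU memory forces accelerate to offload the remaining weights to CPU
model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-350m",  # illustrative checkpoint, not the test's
    torch_dtype=torch.bfloat16,
    quantization_config=quant_config,
    device_map="auto",
    max_memory={0: "1GiB", "cpu": "10GiB"},
)
```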
@@ -254,7 +254,7 @@ class TorchAoGPUTest(TorchAoTest):
     @require_torch_multi_gpu
     def test_int4wo_quant_multi_gpu(self):
         """
-        Simple test that checks if the quantized model int4 wieght only is working properly with multiple GPUs
+        Simple test that checks if the quantized model int4 weight only is working properly with multiple GPUs
         set CUDA_VISIBLE_DEVICES=0,1 if you have more than 2 GPUS
         """