Mirror of https://github.com/huggingface/transformers.git
Synced 2025-07-03 12:50:06 +06:00
fix spelling errors (#38608)
* fix errors test_modeling_mllama.py
* fix error test_modeling_video_llava.py
* fix errors test_processing_common.py
This commit is contained in:
parent 0f833528c9
commit fa921ad854
test_modeling_mllama.py
@@ -366,15 +366,15 @@ class MllamaForConditionalGenerationModelTest(ModelTesterMixin, GenerationTester
     def test_assisted_decoding_with_num_logits_to_keep(self):
         pass
 
-    @unittest.skip(reason="Mllama uses self.weights dirrectly causing device mismatch when offloading`")
+    @unittest.skip(reason="Mllama uses self.weights directly causing device mismatch when offloading`")
     def test_cpu_offload(self):
         pass
 
-    @unittest.skip(reason="Mllama uses self.weights dirrectly causing device mismatch when offloading`")
+    @unittest.skip(reason="Mllama uses self.weights directly causing device mismatch when offloading`")
     def test_disk_offload_bin(self):
         pass
 
-    @unittest.skip(reason="Mllama uses self.weights dirrectly causing device mismatch when offloading`")
+    @unittest.skip(reason="Mllama uses self.weights directly causing device mismatch when offloading`")
     def test_disk_offload_safetensors(self):
         pass
 
test_modeling_video_llava.py
@@ -399,7 +399,7 @@ class VideoLlavaForConditionalGenerationModelTest(ModelTesterMixin, GenerationTe
         for model_class in self.all_model_classes:
             model = model_class(config).to(torch_device)
             curr_input_dict = copy.deepcopy(input_dict)
-            _ = model(**curr_input_dict) # successfull forward with no modifications
+            _ = model(**curr_input_dict) # successful forward with no modifications
 
             # remove one image but leave the image token in text
             curr_input_dict["pixel_values_images"] = curr_input_dict["pixel_values_images"][-1:, ...]
test_processing_common.py
@@ -915,7 +915,7 @@ class ProcessorTesterMixin:
         )
 
     @require_av
-    @parameterized.expand([(1, "pt"), (2, "pt")]) # video processor suports only torchvision
+    @parameterized.expand([(1, "pt"), (2, "pt")]) # video processor supports only torchvision
     def test_apply_chat_template_video(self, batch_size: int, return_tensors: str):
         self._test_apply_chat_template(
             "video", batch_size, return_tensors, "videos_input_name", "video_processor", MODALITY_INPUT_DATA["videos"]