Fix from_args_and_dict ProcessorMixin (#38296)

* fix-from-args-and-dict-processormixin

* change used_kwargs to valid_kwargs

* remove manual valid_kwargs

* fix copies

* fix modular aria
Yoni Gozlan 2025-05-28 11:46:33 -04:00 committed by GitHub
parent f844733568
commit 21b10d9aa4
37 changed files with 39 additions and 181 deletions
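
In short: each processor used to declare a hand-maintained valid_kwargs class attribute, and from_args_and_dict validated the loaded processor config against that list. After this change the accepted keys are read from the processor's own __init__ signature, which is why the per-model lists in the hunks below can simply be deleted. A rough sketch with a hypothetical processor (class name, attributes, and signature are illustrative, not taken from this diff):

    from transformers.processing_utils import ProcessorMixin

    class MyProcessor(ProcessorMixin):
        attributes = ["image_processor", "tokenizer"]
        image_processor_class = "AutoImageProcessor"
        tokenizer_class = "AutoTokenizer"

        # Before this change a manual list was required here, e.g.
        #     valid_kwargs = ["chat_template", "image_seq_length"]
        # Now from_args_and_dict derives the accepted keys from __init__ below.
        def __init__(self, image_processor=None, tokenizer=None, chat_template=None, image_seq_length=64):
            self.image_seq_length = image_seq_length
            super().__init__(image_processor, tokenizer, chat_template=chat_template)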


@@ -936,7 +936,6 @@ class AriaProcessor(ProcessorMixin):
"""
attributes = ["image_processor", "tokenizer"]
valid_kwargs = ["chat_template", "size_conversion"]
image_processor_class = "AriaImageProcessor"
tokenizer_class = "AutoTokenizer"


@@ -60,7 +60,6 @@ class AriaProcessor(ProcessorMixin):
"""
attributes = ["image_processor", "tokenizer"]
valid_kwargs = ["chat_template", "size_conversion"]
image_processor_class = "AriaImageProcessor"
tokenizer_class = "AutoTokenizer"


@@ -18,17 +18,8 @@ from typing import List, Optional, Union
import numpy as np
from ...image_processing_utils import BatchFeature
from ...image_utils import (
ImageInput,
make_flat_list_of_images,
)
from ...processing_utils import (
ImagesKwargs,
MultiModalData,
ProcessingKwargs,
ProcessorMixin,
Unpack,
)
from ...image_utils import ImageInput, make_flat_list_of_images
from ...processing_utils import ImagesKwargs, MultiModalData, ProcessingKwargs, ProcessorMixin, Unpack
from ...tokenization_utils_base import PreTokenizedInput, TextInput
@@ -87,19 +78,6 @@ class AyaVisionProcessor(ProcessorMixin):
"""
attributes = ["image_processor", "tokenizer"]
valid_kwargs = [
"chat_template",
"image_token",
"patch_size",
"img_size",
"downsample_factor",
"start_of_img_token",
"end_of_img_token",
"img_patch_token",
"img_line_break_token",
"tile_token",
"tile_global_token",
]
image_processor_class = "AutoImageProcessor"
tokenizer_class = "AutoTokenizer"


@@ -55,7 +55,6 @@ class BlipProcessor(ProcessorMixin):
"""
attributes = ["image_processor", "tokenizer"]
valid_kwargs = []
image_processor_class = ("BlipImageProcessor", "BlipImageProcessorFast")
tokenizer_class = ("BertTokenizer", "BertTokenizerFast")


@@ -21,12 +21,7 @@ from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessingKwargs, ProcessorMixin, Unpack
from ...tokenization_utils_base import (
AddedToken,
BatchEncoding,
PreTokenizedInput,
TextInput,
)
from ...tokenization_utils_base import AddedToken, BatchEncoding, PreTokenizedInput, TextInput
from ...utils import logging
@@ -67,7 +62,6 @@ class Blip2Processor(ProcessorMixin):
"""
attributes = ["image_processor", "tokenizer"]
valid_kwargs = ["num_query_tokens"]
image_processor_class = ("BlipImageProcessor", "BlipImageProcessorFast")
tokenizer_class = "AutoTokenizer"


@@ -72,7 +72,6 @@ class ChameleonProcessor(ProcessorMixin):
attributes = ["image_processor", "tokenizer"]
tokenizer_class = ("LlamaTokenizer", "LlamaTokenizerFast")
valid_kwargs = ["image_seq_length", "image_token"]
image_processor_class = "ChameleonImageProcessor"
def __init__(self, image_processor, tokenizer, image_seq_length: int = 1024, image_token: str = "<image>"):


@@ -90,7 +90,6 @@ class ColPaliProcessor(ProcessorMixin):
"""
attributes = ["image_processor", "tokenizer"]
valid_kwargs = ["chat_template"]
image_processor_class = ("SiglipImageProcessor", "SiglipImageProcessorFast")
tokenizer_class = ("GemmaTokenizer", "GemmaTokenizerFast")


@@ -31,10 +31,7 @@ if is_soundfile_available():
from ...audio_utils import AudioInput, make_list_of_audio
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import AudioKwargs, ProcessingKwargs, ProcessorMixin, Unpack
from ...tokenization_utils_base import (
PreTokenizedInput,
TextInput,
)
from ...tokenization_utils_base import PreTokenizedInput, TextInput
class CsmAudioKwargs(AudioKwargs, total=False):
@@ -99,7 +96,6 @@ class CsmProcessor(ProcessorMixin):
"""
attributes = ["feature_extractor", "tokenizer"]
valid_kwargs = ["chat_template"]
feature_extractor_class = "EncodecFeatureExtractor"
tokenizer_class = "PreTrainedTokenizerFast"


@@ -71,7 +71,6 @@ class Emu3Processor(ProcessorMixin):
"""
attributes = ["image_processor", "tokenizer"]
valid_kwargs = ["chat_template"]
tokenizer_class = ("GPT2Tokenizer", "GPT2TokenizerFast")
image_processor_class = "Emu3ImageProcessor"


@@ -350,7 +350,6 @@ class FuyuProcessor(ProcessorMixin):
"""
attributes = ["image_processor", "tokenizer"]
valid_kwargs = []
image_processor_class = "FuyuImageProcessor"
tokenizer_class = "AutoTokenizer"


@@ -51,7 +51,6 @@ class Gemma3ProcessorKwargs(ProcessingKwargs, total=False):
class Gemma3Processor(ProcessorMixin):
attributes = ["image_processor", "tokenizer"]
valid_kwargs = ["chat_template", "image_seq_length"]
image_processor_class = "AutoImageProcessor"
tokenizer_class = "AutoTokenizer"


@@ -95,7 +95,6 @@ class GotOcr2Processor(ProcessorMixin):
"""
attributes = ["image_processor", "tokenizer"]
valid_kwargs = ["chat_template"]
image_processor_class = "AutoImageProcessor"
tokenizer_class = "PreTrainedTokenizerFast"


@@ -31,8 +31,6 @@ logger = logging.get_logger(__name__)
class GraniteSpeechProcessor(ProcessorMixin):
attributes = ["audio_processor", "tokenizer"]
valid_kwargs = ["audio_token"]
audio_processor_class = "GraniteSpeechFeatureExtractor"
tokenizer_class = "AutoTokenizer"


@@ -211,7 +211,6 @@ class IdeficsProcessor(ProcessorMixin):
"""
attributes = ["image_processor", "tokenizer"]
valid_kwargs = ["image_size", "add_end_of_utterance_token"]
image_processor_class = "IdeficsImageProcessor"
tokenizer_class = "LlamaTokenizerFast"


@@ -85,7 +85,6 @@ class Idefics2Processor(ProcessorMixin):
"""
attributes = ["image_processor", "tokenizer"]
valid_kwargs = ["image_seq_len", "chat_template"]
image_processor_class = "Idefics2ImageProcessor"
tokenizer_class = "AutoTokenizer"


@@ -133,7 +133,6 @@ class Idefics3Processor(ProcessorMixin):
"""
attributes = ["image_processor", "tokenizer"]
valid_kwargs = ["image_seq_len", "chat_template"]
image_processor_class = "Idefics3ImageProcessor"
tokenizer_class = "AutoTokenizer"


@@ -22,12 +22,7 @@ from typing import List, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessingKwargs, ProcessorMixin, Unpack
from ...tokenization_utils_base import (
AddedToken,
BatchEncoding,
PreTokenizedInput,
TextInput,
)
from ...tokenization_utils_base import AddedToken, BatchEncoding, PreTokenizedInput, TextInput
from ...utils import logging
from ..auto import AutoTokenizer
@@ -72,7 +67,6 @@ class InstructBlipProcessor(ProcessorMixin):
"""
attributes = ["image_processor", "tokenizer", "qformer_tokenizer"]
valid_kwargs = ["num_query_tokens"]
image_processor_class = ("BlipImageProcessor", "BlipImageProcessorFast")
tokenizer_class = "AutoTokenizer"
qformer_tokenizer_class = "AutoTokenizer"


@@ -57,7 +57,6 @@ class InstructBlipVideoProcessor(ProcessorMixin):
"""
attributes = ["video_processor", "tokenizer", "qformer_tokenizer"]
valid_kwargs = ["num_query_tokens"]
video_processor_class = "AutoVideoProcessor"
tokenizer_class = "AutoTokenizer"
qformer_tokenizer_class = "AutoTokenizer"


@@ -18,18 +18,8 @@ from typing import List, Optional, Union
import numpy as np
from ...image_processing_utils import BatchFeature
from ...image_utils import (
ImageInput,
concatenate_list,
make_flat_list_of_images,
)
from ...processing_utils import (
ImagesKwargs,
MultiModalData,
ProcessingKwargs,
ProcessorMixin,
Unpack,
)
from ...image_utils import ImageInput, concatenate_list, make_flat_list_of_images
from ...processing_utils import ImagesKwargs, MultiModalData, ProcessingKwargs, ProcessorMixin, Unpack
from ...tokenization_utils_base import PreTokenizedInput, TextInput
from ...video_utils import VideoInput, VideoMetadata, load_video, make_batched_videos
@@ -74,10 +64,6 @@ class InternVLProcessor(ProcessorMixin):
"""
attributes = ["image_processor", "tokenizer", "video_processor"]
valid_kwargs = [
"chat_template",
"image_seq_length",
]
image_processor_class = "AutoImageProcessor"
video_processor_class = "AutoVideoProcessor"
tokenizer_class = "AutoTokenizer"


@@ -21,10 +21,7 @@ from typing import List, Union
from ...feature_extraction_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessingKwargs, ProcessorMixin, TextKwargs, Unpack
from ...tokenization_utils_base import (
PreTokenizedInput,
TextInput,
)
from ...tokenization_utils_base import PreTokenizedInput, TextInput
from ...utils import logging
@@ -68,7 +65,6 @@ class JanusProcessor(ProcessorMixin):
"""
attributes = ["image_processor", "tokenizer"]
valid_kwargs = ["chat_template", "use_default_system_prompt"]
image_processor_class = "JanusImageProcessor"
tokenizer_class = "LlamaTokenizerFast"


@@ -84,7 +84,6 @@ class Kosmos2Processor(ProcessorMixin):
"""
attributes = ["image_processor", "tokenizer"]
valid_kwargs = ["num_patch_index_tokens"]
image_processor_class = ("CLIPImageProcessor", "CLIPImageProcessorFast")
tokenizer_class = "AutoTokenizer"


@@ -16,19 +16,11 @@
from typing import List, Optional, Union
from transformers.processing_utils import (
ImagesKwargs,
ProcessingKwargs,
ProcessorMixin,
Unpack,
)
from transformers.processing_utils import ImagesKwargs, ProcessingKwargs, ProcessorMixin, Unpack
from transformers.tokenization_utils_base import PreTokenizedInput, TextInput
from ...image_processing_utils import BatchFeature
from ...image_utils import (
ImageInput,
make_flat_list_of_images,
)
from ...image_utils import ImageInput, make_flat_list_of_images
class Llama4ImagesKwargs(ImagesKwargs, total=False):
@@ -83,19 +75,6 @@ class Llama4Processor(ProcessorMixin):
"""
attributes = ["image_processor", "tokenizer"]
valid_kwargs = [
"chat_template",
"image_token",
"patch_size",
"img_size",
"downsample_factor",
"start_of_img_token",
"end_of_img_token",
"img_patch_token",
"img_line_break_token",
"tile_token",
"tile_global_token",
]
image_processor_class = "AutoImageProcessor"
tokenizer_class = "AutoTokenizer"


@@ -70,13 +70,6 @@ class LlavaProcessor(ProcessorMixin):
"""
attributes = ["image_processor", "tokenizer"]
valid_kwargs = [
"chat_template",
"patch_size",
"vision_feature_select_strategy",
"image_token",
"num_additional_image_tokens",
]
image_processor_class = "AutoImageProcessor"
tokenizer_class = "AutoTokenizer"


@@ -76,13 +76,6 @@ class LlavaNextProcessor(ProcessorMixin):
"""
attributes = ["image_processor", "tokenizer"]
valid_kwargs = [
"chat_template",
"patch_size",
"vision_feature_select_strategy",
"image_token",
"num_additional_image_tokens",
]
image_processor_class = "AutoImageProcessor"
tokenizer_class = "AutoTokenizer"


@@ -78,14 +78,6 @@ class LlavaNextVideoProcessor(ProcessorMixin):
# video and image processor share same args, but have different processing logic
# only image processor config is saved in the hub
attributes = ["video_processor", "image_processor", "tokenizer"]
valid_kwargs = [
"chat_template",
"patch_size",
"vision_feature_select_strategy",
"image_token",
"video_token",
"num_additional_image_tokens",
]
image_processor_class = ("LlavaNextImageProcessor", "LlavaNextImageProcessorFast")
video_processor_class = "AutoVideoProcessor"
tokenizer_class = ("LlamaTokenizer", "LlamaTokenizerFast")


@@ -75,14 +75,6 @@ class LlavaOnevisionProcessor(ProcessorMixin):
"""
attributes = ["image_processor", "tokenizer", "video_processor"]
valid_kwargs = [
"chat_template",
"num_image_tokens",
"vision_feature_select_strategy",
"image_token",
"video_token",
"vision_aspect_ratio",
]
image_processor_class = "AutoImageProcessor"
tokenizer_class = "AutoTokenizer"
video_processor_class = "AutoVideoProcessor"


@@ -22,10 +22,7 @@ import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...image_utils import ImageInput, make_nested_list_of_images
from ...processing_utils import ImagesKwargs, ProcessingKwargs, ProcessorMixin, Unpack
from ...tokenization_utils_base import (
PreTokenizedInput,
TextInput,
)
from ...tokenization_utils_base import PreTokenizedInput, TextInput
class MllamaImagesKwargs(ImagesKwargs, total=False):
@@ -208,7 +205,6 @@ class MllamaProcessor(ProcessorMixin):
"""
attributes = ["image_processor", "tokenizer"]
valid_kwargs = ["chat_template"]
image_processor_class = "MllamaImageProcessor"
tokenizer_class = "PreTrainedTokenizerFast"


@@ -31,11 +31,7 @@ from ...processing_utils import (
Unpack,
_validate_images_text_input_order,
)
from ...tokenization_utils_base import (
AddedToken,
PreTokenizedInput,
TextInput,
)
from ...tokenization_utils_base import AddedToken, PreTokenizedInput, TextInput
from ...utils import logging
@@ -120,7 +116,6 @@ class PaliGemmaProcessor(ProcessorMixin):
"""
attributes = ["image_processor", "tokenizer"]
valid_kwargs = ["chat_template"]
image_processor_class = ("SiglipImageProcessor", "SiglipImageProcessorFast")
tokenizer_class = ("GemmaTokenizer", "GemmaTokenizerFast")


@@ -62,7 +62,6 @@ class Phi4MultimodalProcessor(ProcessorMixin):
tokenizer_class = "GPT2TokenizerFast"
image_processor_class = "Phi4MultimodalImageProcessorFast"
audio_processor_class = "Phi4MultimodalFeatureExtractor"
valid_kwargs = ["chat_template"]
def __init__(
self,


@@ -90,14 +90,6 @@ class PixtralProcessor(ProcessorMixin):
"""
attributes = ["image_processor", "tokenizer"]
valid_kwargs = [
"chat_template",
"patch_size",
"spatial_merge_size",
"image_token",
"image_break_token",
"image_end_token",
]
image_processor_class = "AutoImageProcessor"
tokenizer_class = "AutoTokenizer"


@@ -97,7 +97,6 @@ class Qwen2_5OmniProcessor(ProcessorMixin):
video_processor_class = "Qwen2VLVideoProcessor"
feature_extractor_class = "WhisperFeatureExtractor"
tokenizer_class = ("Qwen2Tokenizer", "Qwen2TokenizerFast")
valid_kwargs = ["chat_template"]
def __init__(
self, image_processor=None, video_processor=None, feature_extractor=None, tokenizer=None, chat_template=None


@@ -75,7 +75,6 @@ class Qwen2_5_VLProcessor(ProcessorMixin):
"""
attributes = ["image_processor", "tokenizer", "video_processor"]
valid_kwargs = ["chat_template"]
image_processor_class = "AutoImageProcessor"
video_processor_class = "AutoVideoProcessor"


@@ -60,7 +60,6 @@ class Qwen2AudioProcessor(ProcessorMixin):
"""
attributes = ["feature_extractor", "tokenizer"]
valid_kwargs = ["chat_template", "audio_token", "audio_bos_token", "audio_eos_token"]
feature_extractor_class = "WhisperFeatureExtractor"
tokenizer_class = "AutoTokenizer"


@@ -71,7 +71,6 @@ class Qwen2VLProcessor(ProcessorMixin):
"""
attributes = ["image_processor", "tokenizer", "video_processor"]
valid_kwargs = ["chat_template"]
image_processor_class = "AutoImageProcessor"
video_processor_class = "AutoVideoProcessor"
tokenizer_class = ("Qwen2Tokenizer", "Qwen2TokenizerFast")


@@ -139,7 +139,6 @@ class SmolVLMProcessor(ProcessorMixin):
"""
attributes = ["image_processor", "tokenizer", "video_processor"]
valid_kwargs = ["image_seq_len", "chat_template"]
image_processor_class = "SmolVLMImageProcessor"
video_processor_class = (
"SmolVLMImageProcessor" # TODO: raushan should be VideoProcessor when LANCZOS resizing is settled


@@ -61,14 +61,6 @@ class VideoLlavaProcessor(ProcessorMixin):
"""
attributes = ["image_processor", "video_processor", "tokenizer"]
valid_kwargs = [
"chat_template",
"patch_size",
"vision_feature_select_strategy",
"image_token",
"video_token",
"num_additional_image_tokens",
]
image_processor_class = "VideoLlavaImageProcessor"
video_processor_class = "AutoVideoProcessor"
tokenizer_class = "AutoTokenizer"


@@ -497,7 +497,6 @@ class ProcessorMixin(PushToHubMixin):
feature_extractor_class = None
tokenizer_class = None
_auto_class = None
valid_kwargs: list[str] = []
# args have to match the attributes class attribute
def __init__(self, *args, **kwargs):
@@ -996,18 +995,27 @@ class ProcessorMixin(PushToHubMixin):
         if "auto_map" in processor_dict:
             del processor_dict["auto_map"]

-        unused_kwargs = cls.validate_init_kwargs(processor_config=processor_dict, valid_kwargs=cls.valid_kwargs)
-        processor = cls(*args, **processor_dict)
+        # override processor_dict with given kwargs
+        processor_dict.update(kwargs)

-        # Update processor with kwargs if needed
-        for key in set(kwargs.keys()):
-            if hasattr(processor, key):
-                setattr(processor, key, kwargs.pop(key))
+        # check if there is an overlap between args and processor_dict
+        accepted_args_and_kwargs = cls.__init__.__code__.co_varnames[: cls.__init__.__code__.co_argcount][1:]
+
+        # validate both processor_dict and given kwargs
+        unused_kwargs, valid_kwargs = cls.validate_init_kwargs(
+            processor_config=processor_dict, valid_kwargs=accepted_args_and_kwargs
+        )
+
+        # remove args that are in processor_dict to avoid duplicate arguments
+        args_to_remove = [i for i, arg in enumerate(accepted_args_and_kwargs) if arg in processor_dict]
+        args = [arg for i, arg in enumerate(args) if i not in args_to_remove]
+
+        # instantiate processor with used (and valid) kwargs only
+        processor = cls(*args, **valid_kwargs)

-        kwargs.update(unused_kwargs)
         logger.info(f"Processor {processor}")
         if return_unused_kwargs:
-            return processor, kwargs
+            return processor, unused_kwargs
         else:
             return processor
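
To make the new resolution path concrete, here is a small standalone sketch of the introspection it relies on (toy class and values, not the transformers code path itself):

    class DummyProcessor:
        def __init__(self, image_processor=None, tokenizer=None, chat_template=None):
            pass

    processor_dict = {"chat_template": "{{ messages }}", "some_legacy_option": 1}

    # Positional/keyword parameter names of __init__, minus self -- same expression as above.
    accepted = DummyProcessor.__init__.__code__.co_varnames[: DummyProcessor.__init__.__code__.co_argcount][1:]
    # accepted == ('image_processor', 'tokenizer', 'chat_template')

    valid = {k: v for k, v in processor_dict.items() if k in accepted}       # passed to cls(...)
    unused = {k: v for k, v in processor_dict.items() if k not in accepted}  # surfaced when return_unused_kwargs=True
    # valid  -> {'chat_template': '{{ messages }}'}
    # unused -> {'some_legacy_option': 1}
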
@@ -1294,12 +1302,16 @@ class ProcessorMixin(PushToHubMixin):
     @staticmethod
     def validate_init_kwargs(processor_config, valid_kwargs):
-        kwargs_from_config = processor_config.keys()
-        unused_kwargs = {}
-        unused_keys = set(kwargs_from_config) - set(valid_kwargs)
-        if unused_keys:
-            unused_kwargs = {k: processor_config[k] for k in unused_keys}
-        return unused_kwargs
+        kwargs_from_config = set(processor_config.keys())
+        valid_kwargs_set = set(valid_kwargs)
+
+        unused_keys = kwargs_from_config - valid_kwargs_set
+        valid_keys = kwargs_from_config & valid_kwargs_set
+
+        unused_kwargs = {k: processor_config[k] for k in unused_keys} if unused_keys else {}
+        valid_kwargs = {k: processor_config[k] for k in valid_keys} if valid_keys else {}
+
+        return unused_kwargs, valid_kwargs

     def prepare_and_validate_optional_call_args(self, *args):
         """