diff --git a/tests/models/efficientnet/test_modeling_efficientnet.py b/tests/models/efficientnet/test_modeling_efficientnet.py
index 023325ce424..4162e189140 100644
--- a/tests/models/efficientnet/test_modeling_efficientnet.py
+++ b/tests/models/efficientnet/test_modeling_efficientnet.py
@@ -83,6 +83,7 @@ class EfficientNetModelTester:

     def get_config(self):
         return EfficientNetConfig(
+            image_size=self.image_size,
             num_channels=self.num_channels,
             kernel_sizes=self.kernel_sizes,
             in_channels=self.in_channels,
diff --git a/tests/utils/tiny_model_summary.json b/tests/utils/tiny_model_summary.json
index 7d9140f379a..911783bc5cf 100644
--- a/tests/utils/tiny_model_summary.json
+++ b/tests/utils/tiny_model_summary.json
@@ -1718,7 +1718,7 @@
         "model_classes": [
             "EfficientNetForImageClassification"
         ],
-        "sha": "6ed195ee636d2c0b885139da8c7b45d57ebaeee0"
+        "sha": "993d088cf937b8a90b61f68677cd8f261321c745"
     },
     "EfficientNetModel": {
         "tokenizer_classes": [],
@@ -7243,4 +7243,4 @@
         ],
         "sha": "e144d9f1fe39c21eda1177702640e126892605ce"
     }
-}
\ No newline at end of file
+}
diff --git a/utils/create_dummy_models.py b/utils/create_dummy_models.py
index daa330839ca..e151b37d52b 100644
--- a/utils/create_dummy_models.py
+++ b/utils/create_dummy_models.py
@@ -504,6 +504,27 @@ def convert_feature_extractor(feature_extractor, tiny_config):
     if to_convert:
         feature_extractor = feature_extractor.__class__(**kwargs)

+    # Sanity check: on tiny image feature extractors, a large image size results in slow CI -- up to the point where it
+    # can result in timeout issues.
+    if (
+        isinstance(feature_extractor, BaseImageProcessor)
+        and hasattr(feature_extractor, "size")
+        and isinstance(feature_extractor.size, dict)
+    ):
+        largest_image_size = max(feature_extractor.size.values())
+        if largest_image_size > 64:
+            # hardcoded exceptions
+            models_with_large_image_size = ("deformable_detr", "flava", "grounding_dino", "mgp_str", "swiftformer")
+            if any(model_name in tiny_config.model_type for model_name in models_with_large_image_size):
+                pass
+            else:
+                raise ValueError(
+                    f"Image size of {tiny_config.model_type} is too large ({feature_extractor.size}). "
+                    "Please reduce it to 64 or less on each dimension. The following steps are usually the "
+                    "easiest solution: 1) confirm that you're setting `image_size` in your ModelTester class; "
+                    "2) ensure that it gets passed to the tester config init, `get_config()`."
+                )
+
     return feature_extractor
@@ -526,14 +547,14 @@ def convert_processors(processors, tiny_config, output_folder, result):
     # sanity check 1: fast and slow tokenizers should be compatible (vocab_size)
     if fast_tokenizer is not None and slow_tokenizer is not None:
         if fast_tokenizer.vocab_size != slow_tokenizer.vocab_size:
-            warning_messagae = (
+            warning_message = (
                 "The fast/slow tokenizers "
                 f"({fast_tokenizer.__class__.__name__}/{slow_tokenizer.__class__.__name__}) have different "
                 "vocabulary size: "
                 f"fast_tokenizer.vocab_size = {fast_tokenizer.vocab_size} and "
                 f"slow_tokenizer.vocab_size = {slow_tokenizer.vocab_size}."
             )
-            result["warnings"].append(warning_messagae)
+            result["warnings"].append(warning_message)
             if not keep_fast_tokenizer:
                 fast_tokenizer = None
             slow_tokenizer = None
@@ -541,12 +562,12 @@ def convert_processors(processors, tiny_config, output_folder, result):
     # sanity check 2: fast and slow tokenizers should be compatible (length)
     if fast_tokenizer is not None and slow_tokenizer is not None:
         if len(fast_tokenizer) != len(slow_tokenizer):
-            warning_messagae = (
+            warning_message = (
                 f"The fast/slow tokenizers () have different length: "
                 f"len(fast_tokenizer) = {len(fast_tokenizer)} and "
                 f"len(slow_tokenizer) = {len(slow_tokenizer)}."
             )
-            result["warnings"].append(warning_messagae)
+            result["warnings"].append(warning_message)
             if not keep_fast_tokenizer:
                 fast_tokenizer = None
             slow_tokenizer = None
@@ -1395,7 +1416,7 @@ def create_tiny_models(
         raise ValueError(f"This script should be run from the root of the clone of `transformers` {clone_path}")

     report_path = os.path.join(output_path, "reports")
-    os.makedirs(report_path)
+    os.makedirs(report_path, exist_ok=True)

     _pytorch_arch_mappings = [
         x
diff --git a/utils/get_test_info.py b/utils/get_test_info.py
index d6b451e71f3..3c376bdbdaa 100644
--- a/utils/get_test_info.py
+++ b/utils/get_test_info.py
@@ -53,7 +53,15 @@ def get_module_path(test_file):
 def get_test_module(test_file):
     """Get the module of a model test file."""
     test_module_path = get_module_path(test_file)
-    test_module = importlib.import_module(test_module_path)
+    try:
+        test_module = importlib.import_module(test_module_path)
+    except AttributeError as exc:
+        # e.g. if you have a `tests` folder in `site-packages`, created by another package, when trying to import
+        # `tests.models...`
+        raise ValueError(
+            f"Could not import module {test_module_path}. Confirm that you don't have a package with the same root "
+            "name installed or in your environment's `site-packages`."
+        ) from exc
     return test_module