diff --git a/src/transformers/models/beit/image_processing_beit.py b/src/transformers/models/beit/image_processing_beit.py
index 0e81cb9c446..10a6bfad22b 100644
--- a/src/transformers/models/beit/image_processing_beit.py
+++ b/src/transformers/models/beit/image_processing_beit.py
@@ -131,6 +131,15 @@ class BeitImageProcessor(BaseImageProcessor):
         self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
         self.do_reduce_labels = do_reduce_labels
 
+    @property
+    def reduce_labels(self) -> bool:
+        warnings.warn(
+            "The `reduce_labels` property is deprecated and will be removed in v4.27. Please use"
+            " `do_reduce_labels` instead.",
+            FutureWarning,
+        )
+        return self.do_reduce_labels
+
     @classmethod
     def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs):
         """
diff --git a/src/transformers/models/conditional_detr/image_processing_conditional_detr.py b/src/transformers/models/conditional_detr/image_processing_conditional_detr.py
index b5f4c639f7e..aed1ee65c23 100644
--- a/src/transformers/models/conditional_detr/image_processing_conditional_detr.py
+++ b/src/transformers/models/conditional_detr/image_processing_conditional_detr.py
@@ -815,6 +815,16 @@ class ConditionalDetrImageProcessor(BaseImageProcessor):
         self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
         self.do_pad = do_pad
 
+    @property
+    # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.max_size
+    def max_size(self):
+        warnings.warn(
+            "The `max_size` parameter is deprecated and will be removed in v4.27. "
+            "Please specify in `size['longest_edge']` instead.",
+            FutureWarning,
+        )
+        return self.size["longest_edge"]
+
     @classmethod
     # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.from_dict with Detr->ConditionalDetr
     def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs):
diff --git a/src/transformers/models/deformable_detr/image_processing_deformable_detr.py b/src/transformers/models/deformable_detr/image_processing_deformable_detr.py
index 499313dd529..12a32ac5939 100644
--- a/src/transformers/models/deformable_detr/image_processing_deformable_detr.py
+++ b/src/transformers/models/deformable_detr/image_processing_deformable_detr.py
@@ -813,6 +813,16 @@ class DeformableDetrImageProcessor(BaseImageProcessor):
         self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
         self.do_pad = do_pad
 
+    @property
+    # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.max_size
+    def max_size(self):
+        warnings.warn(
+            "The `max_size` parameter is deprecated and will be removed in v4.27. "
+            "Please specify in `size['longest_edge']` instead.",
+            FutureWarning,
+        )
+        return self.size["longest_edge"]
+
     @classmethod
     # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.from_dict with Detr->DeformableDetr
     def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs):
diff --git a/src/transformers/models/detr/image_processing_detr.py b/src/transformers/models/detr/image_processing_detr.py
index 957360a96ca..0e22eeb8932 100644
--- a/src/transformers/models/detr/image_processing_detr.py
+++ b/src/transformers/models/detr/image_processing_detr.py
@@ -797,6 +797,15 @@ class DetrImageProcessor(BaseImageProcessor):
         self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
         self.do_pad = do_pad
 
+    @property
+    def max_size(self):
+        warnings.warn(
+            "The `max_size` parameter is deprecated and will be removed in v4.27. "
+            "Please specify in `size['longest_edge']` instead.",
+            FutureWarning,
+        )
+        return self.size["longest_edge"]
+
     @classmethod
     def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs):
         """
diff --git a/src/transformers/models/segformer/image_processing_segformer.py b/src/transformers/models/segformer/image_processing_segformer.py
index 5e592cb042d..f8122e38c16 100644
--- a/src/transformers/models/segformer/image_processing_segformer.py
+++ b/src/transformers/models/segformer/image_processing_segformer.py
@@ -119,6 +119,15 @@ class SegformerImageProcessor(BaseImageProcessor):
         self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
         self.do_reduce_labels = do_reduce_labels
 
+    @property
+    def reduce_labels(self):
+        warnings.warn(
+            "The `reduce_labels` property is deprecated and will be removed in v4.27. Please use "
+            "`do_reduce_labels` instead.",
+            FutureWarning,
+        )
+        return self.do_reduce_labels
+
     @classmethod
     def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs):
         """
diff --git a/src/transformers/models/yolos/image_processing_yolos.py b/src/transformers/models/yolos/image_processing_yolos.py
index ff0cd23caa5..f21b38283dd 100644
--- a/src/transformers/models/yolos/image_processing_yolos.py
+++ b/src/transformers/models/yolos/image_processing_yolos.py
@@ -725,6 +725,16 @@ class YolosImageProcessor(BaseImageProcessor):
         self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
         self.do_pad = do_pad
 
+    @property
+    # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.max_size
+    def max_size(self):
+        warnings.warn(
+            "The `max_size` parameter is deprecated and will be removed in v4.27. "
+            "Please specify in `size['longest_edge']` instead.",
+            FutureWarning,
+        )
+        return self.size["longest_edge"]
+
     @classmethod
     # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.from_dict with Detr->Yolos
     def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs):
diff --git a/tests/models/beit/test_image_processing_beit.py b/tests/models/beit/test_image_processing_beit.py
index 95348bfe639..b9b809108f3 100644
--- a/tests/models/beit/test_image_processing_beit.py
+++ b/tests/models/beit/test_image_processing_beit.py
@@ -353,7 +353,7 @@ class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase)
         self.assertTrue(encoding["labels"].min().item() >= 0)
         self.assertTrue(encoding["labels"].max().item() <= 150)
 
-        image_processing.reduce_labels = True
+        image_processing.do_reduce_labels = True
         encoding = image_processing(image, map, return_tensors="pt")
         self.assertTrue(encoding["labels"].min().item() >= 0)
         self.assertTrue(encoding["labels"].max().item() <= 255)
diff --git a/tests/models/segformer/test_image_processing_segformer.py b/tests/models/segformer/test_image_processing_segformer.py
index a05b3349e80..2be0228d65a 100644
--- a/tests/models/segformer/test_image_processing_segformer.py
+++ b/tests/models/segformer/test_image_processing_segformer.py
@@ -339,7 +339,7 @@ class SegformerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.Test
         self.assertTrue(encoding["labels"].min().item() >= 0)
         self.assertTrue(encoding["labels"].max().item() <= 150)
 
-        image_processing.reduce_labels = True
+        image_processing.do_reduce_labels = True
         encoding = image_processing(image, map, return_tensors="pt")
         self.assertTrue(encoding["labels"].min().item() >= 0)
         self.assertTrue(encoding["labels"].max().item() <= 255)