Fix pad image transform for batched inputs (#37544)

* fix

* add batch dimension to expected output
Sebastiaan Vermeulen 2025-05-08 11:51:15 +02:00 committed by GitHub
parent 5c47d08b0d
commit 015b6dfbf8
2 changed files with 20 additions and 1 deletion


@@ -751,7 +751,7 @@ def pad(
         values = ((0, 0), *values) if input_data_format == ChannelDimension.FIRST else (*values, (0, 0))
 
         # Add additional padding if there's a batch dimension
-        values = (0, *values) if image.ndim == 4 else values
+        values = ((0, 0), *values) if image.ndim == 4 else values
         return values
 
     padding = _expand_for_data_format(padding)

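For reference, np.pad expects one (before, after) pair per array dimension. The old line prepended a bare 0 for the batch axis, which turns pad_width into a ragged tuple such as (0, (0, 1), (1, 0), (0, 0)) that NumPy rejects; the fix prepends (0, 0) so the batch axis is simply left unpadded. A minimal standalone sketch of the difference (plain NumPy, independent of the pad helper above; the exact exception type varies with the NumPy version):

import numpy as np

# A batched channels-last image: (batch, height, width, channels).
batch = np.zeros((1, 2, 2, 3))

# Correct form: one (before, after) pair per axis. Batch and channel axes get
# (0, 0); the height gains one row below and the width one column on the left.
padded = np.pad(batch, ((0, 0), (0, 1), (1, 0), (0, 0)), mode="constant")
print(padded.shape)  # (1, 3, 3, 3)

# Buggy form: a bare scalar mixed with pairs makes pad_width ragged, so
# np.pad raises instead of padding the batched input.
try:
    np.pad(batch, (0, (0, 1), (1, 0), (0, 0)), mode="constant")
except (TypeError, ValueError) as err:
    print(f"np.pad rejected the ragged pad_width: {err}")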

@@ -578,6 +578,25 @@ class ImageTransformsTester(unittest.TestCase):
             )
         )
+        # Test that padding works on batched images
+        # fmt: off
+        image = np.array(
+            [
+                [[0, 1], [2, 3]],
+            ]
+        )[None, ...]
+        expected_image = np.array(
+            [
+                [[0, 0], [0, 1], [2, 3]],
+                [[0, 0], [0, 0], [0, 0]],
+            ]
+        )[None, ...]
+        # fmt: on
+        self.assertTrue(
+            np.allclose(
+                expected_image, pad(image, ((0, 1), (1, 0)), mode="constant", input_data_format="channels_last")
+            )
+        )
 
     @require_vision
     def test_convert_to_rgb(self):
         # Test that an RGBA image is converted to RGB
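As a quick sanity check on the shapes the new test asserts (a sketch, not part of the commit, and assuming the helper is importable as transformers.image_transforms.pad): the batched input is channels-last with shape (1, 1, 2, 2), so padding the height by (0, 1) and the width by (1, 0) should produce shape (1, 2, 3, 2).

import numpy as np
from transformers.image_transforms import pad  # assumed import path

# Batched channels-last image: (batch=1, height=1, width=2, channels=2).
image = np.array([[[0, 1], [2, 3]]])[None, ...]

# Pad the height by (0, 1) and the width by (1, 0); batch and channels are untouched.
padded = pad(image, ((0, 1), (1, 0)), mode="constant", input_data_format="channels_last")
print(padded.shape)  # (1, 2, 3, 2)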