Mirror of https://github.com/huggingface/transformers.git, synced 2025-07-03 12:50:06 +06:00
Update expected values (after switching to A10) - part 3 (#39179)
* fix * fix * fix * fix * fix * fix * fix * fix * fix * fix * fix * fix

---------

Co-authored-by: ydshieh <ydshieh@users.noreply.github.com>
This commit is contained in:
parent 9326fc332d
commit 37a239ca50
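Each hunk below swaps a single hard-coded expected tensor for an Expectations mapping keyed by (device type, major compute capability) and relaxes the assertion tolerances from 1e-4 to 2e-4, so one test can carry both the original reference values and the ones re-measured on the new A10 (compute capability 8.x) runners. As a rough, self-contained sketch of that selection pattern (this is not the actual transformers.testing_utils.Expectations implementation; pick_expectation is an illustrative stand-in):

# Illustrative sketch only -- assumes selection is driven by the CUDA compute
# capability of the current GPU, with (None, None) as the generic fallback.
import torch


def pick_expectation(expectations):
    """Return the expected values that best match the current runtime."""
    if torch.cuda.is_available():
        device_type = "cuda"
        major, _minor = torch.cuda.get_device_capability()  # an A10 reports (8, 6)
    else:
        device_type, major = None, None

    # Prefer an exact (device, capability) match, then a device-only entry,
    # then the generic default.
    for key in ((device_type, major), (device_type, None), (None, None)):
        if key in expectations:
            return expectations[key]
    raise KeyError("no matching expectation")


expected_slice = pick_expectation(
    {
        (None, None): [-0.6113, 0.1685, -0.0492],
        ("cuda", 8): [-0.6112, 0.1690, -0.0481],  # values re-measured on an A10 runner
    }
)

Read this way, the (None, None) rows in the hunks keep the historical values, the ("cuda", 8) rows carry the A10 values, and the widened rtol/atol absorb the remaining per-device jitter.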
@@ -18,7 +18,7 @@ import unittest
 from transformers import DPTConfig
 from transformers.file_utils import is_torch_available, is_vision_available
 from transformers.pytorch_utils import is_torch_greater_or_equal_than_2_4
-from transformers.testing_utils import require_torch, require_vision, slow, torch_device
+from transformers.testing_utils import Expectations, require_torch, require_vision, slow, torch_device

 from ...test_configuration_common import ConfigTester
 from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
@@ -342,11 +342,15 @@ class DPTModelIntegrationTest(unittest.TestCase):
         expected_shape = torch.Size((1, 384, 384))
         self.assertEqual(predicted_depth.shape, expected_shape)

-        expected_slice = torch.tensor(
-            [[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]]
-        ).to(torch_device)
+        expectations = Expectations(
+            {
+                (None, None): [[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]],
+                ("cuda", 8): [[6.3215, 6.3635, 6.4155], [6.3863, 6.3622, 6.4174], [6.3530, 6.3184, 6.3583]],
+            }
+        )
+        expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device)

-        torch.testing.assert_close(outputs.predicted_depth[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
+        torch.testing.assert_close(outputs.predicted_depth[0, :3, :3], expected_slice, rtol=2e-4, atol=2e-4)

     def test_inference_semantic_segmentation(self):
         image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-large-ade")
@@ -17,7 +17,7 @@ import unittest

 from transformers import Dinov2Config, DPTConfig
 from transformers.file_utils import is_torch_available, is_vision_available
-from transformers.testing_utils import require_torch, require_vision, slow, torch_device
+from transformers.testing_utils import Expectations, require_torch, require_vision, slow, torch_device
 from transformers.utils.import_utils import get_torch_major_and_minor_version

 from ...test_configuration_common import ConfigTester
@@ -267,11 +267,15 @@ class DPTModelIntegrationTest(unittest.TestCase):
         expected_shape = torch.Size((1, 576, 736))
         self.assertEqual(predicted_depth.shape, expected_shape)

-        expected_slice = torch.tensor(
-            [[6.0336, 7.1502, 7.4130], [6.8977, 7.2383, 7.2268], [7.9180, 8.0525, 8.0134]]
-        ).to(torch_device)
+        expectations = Expectations(
+            {
+                (None, None): [[6.0336, 7.1502, 7.4130], [6.8977, 7.2383, 7.2268], [7.9180, 8.0525, 8.0134]],
+                ("cuda", 8): [[6.0350, 7.1518, 7.4144], [6.8992, 7.2396, 7.2280], [7.9194, 8.0538, 8.0145]],
+            }
+        )
+        expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device)

-        torch.testing.assert_close(outputs.predicted_depth[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
+        torch.testing.assert_close(outputs.predicted_depth[0, :3, :3], expected_slice, rtol=2e-4, atol=2e-4)

     def test_inference_depth_estimation_beit(self):
         image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-beit-base-384")
@@ -289,11 +293,23 @@ class DPTModelIntegrationTest(unittest.TestCase):
         expected_shape = torch.Size((1, 384, 384))
         self.assertEqual(predicted_depth.shape, expected_shape)

-        expected_slice = torch.tensor(
-            [[2669.7061, 2663.7144, 2674.9399], [2633.9326, 2650.9092, 2665.4270], [2621.8271, 2632.0129, 2637.2290]]
-        ).to(torch_device)
+        expectations = Expectations(
+            {
+                (None, None): [
+                    [2669.7061, 2663.7144, 2674.9399],
+                    [2633.9326, 2650.9092, 2665.4270],
+                    [2621.8271, 2632.0129, 2637.2290],
+                ],
+                ("cuda", 8): [
+                    [2669.4292, 2663.4121, 2674.6233],
+                    [2633.7400, 2650.7026, 2665.2085],
+                    [2621.6572, 2631.8452, 2637.0525],
+                ],
+            }
+        )
+        expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device)

-        torch.testing.assert_close(outputs.predicted_depth[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
+        torch.testing.assert_close(outputs.predicted_depth[0, :3, :3], expected_slice, rtol=2e-4, atol=2e-4)

     def test_inference_depth_estimation_swinv2(self):
         image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-swinv2-tiny-256")
@@ -311,8 +327,20 @@ class DPTModelIntegrationTest(unittest.TestCase):
         expected_shape = torch.Size((1, 256, 256))
         self.assertEqual(predicted_depth.shape, expected_shape)

-        expected_slice = torch.tensor(
-            [[1032.7719, 1025.1886, 1030.2661], [1023.7619, 1021.0075, 1024.9121], [1022.5667, 1018.8522, 1021.4145]]
-        ).to(torch_device)
+        expectations = Expectations(
+            {
+                (None, None): [
+                    [1032.7719, 1025.1886, 1030.2661],
+                    [1023.7619, 1021.0075, 1024.9121],
+                    [1022.5667, 1018.8522, 1021.4145],
+                ],
+                ("cuda", 8): [
+                    [1032.7170, 1025.0629, 1030.1941],
+                    [1023.7309, 1020.9786, 1024.8594],
+                    [1022.5233, 1018.8235, 1021.3312],
+                ],
+            }
+        )
+        expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device)

-        torch.testing.assert_close(outputs.predicted_depth[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
+        torch.testing.assert_close(outputs.predicted_depth[0, :3, :3], expected_slice, rtol=2e-4, atol=2e-4)
@@ -194,6 +194,9 @@ class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     def test_config(self):
         self.config_tester.run_common_tests()

+    def test_batching_equivalence(self, atol=2e-5, rtol=2e-5):
+        super().test_batching_equivalence(atol=atol, rtol=rtol)
+
     @unittest.skip(reason="DPT does not use inputs_embeds")
     def test_inputs_embeds(self):
         pass
@@ -21,6 +21,7 @@ import numpy as np
 from tests.test_modeling_common import floats_tensor
 from transformers import AutoModelForImageClassification, OneFormerConfig, is_torch_available, is_vision_available
 from transformers.testing_utils import (
+    Expectations,
     is_flaky,
     require_timm,
     require_torch,
@@ -528,7 +529,7 @@ class OneFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCas
         self.assertEqual(model.model.pixel_level_module.encoder.out_indices, [1, 2, 3])


-TOLERANCE = 1e-4
+TOLERANCE = 2e-4


 # We will verify our results on an image of cute cats
@@ -574,12 +575,15 @@ class OneFormerModelIntegrationTest(unittest.TestCase):
         slice_hidden_state = outputs.pixel_decoder_hidden_states[0][0, 0, :3, :3]
         torch.testing.assert_close(slice_hidden_state, expected_slice_hidden_state, atol=TOLERANCE, rtol=TOLERANCE)

-        # fmt: off
-        expected_slice_hidden_state = [[3.0668, -1.1833, -5.1103], [3.344, -3.362, -5.1101], [2.6017, -4.3613, -4.1444]]
-        expected_slice_hidden_state = torch.tensor(expected_slice_hidden_state).to(torch_device)
+        expectations = Expectations(
+            {
+                (None, None): [[3.0668, -1.1833, -5.1103], [3.344, -3.362, -5.1101], [2.6017, -4.3613, -4.1444]],
+                ("cuda", 8): [[3.0590, -1.1903, -5.1119], [3.3919, -3.3547, -5.1469], [2.6041, -4.3592, -4.1406]],
+            }
+        )
+        expected_slice_hidden_state = torch.tensor(expectations.get_expectation()).to(torch_device)
         slice_hidden_state = outputs.transformer_decoder_class_predictions[0, :3, :3]
         torch.testing.assert_close(slice_hidden_state, expected_slice_hidden_state, atol=TOLERANCE, rtol=TOLERANCE)
-        # fmt: on

     def test_inference_universal_segmentation_head(self):
         model = OneFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
@@ -599,8 +603,13 @@ class OneFormerModelIntegrationTest(unittest.TestCase):
             masks_queries_logits.shape,
             (1, model.config.num_queries, inputs_shape[-2] // 4, (inputs_shape[-1] + 2) // 4),
         )
-        expected_slice = [[3.1848, 4.2141, 4.1993], [2.9000, 3.5721, 3.6603], [2.5358, 3.0883, 3.6168]]
-        expected_slice = torch.tensor(expected_slice).to(torch_device)
+        expectations = Expectations(
+            {
+                (None, None): [[3.1848, 4.2141, 4.1993], [2.9000, 3.5721, 3.6603], [2.5358, 3.0883, 3.6168]],
+                ("cuda", 8): [[3.1687, 4.1893, 4.1742], [2.8768, 3.5380, 3.6257], [2.5121, 3.0552, 3.5822]],
+            }
+        )
+        expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device)
         torch.testing.assert_close(masks_queries_logits[0, 0, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE)

         # class_queries_logits
@@ -609,8 +618,13 @@ class OneFormerModelIntegrationTest(unittest.TestCase):
             class_queries_logits.shape,
             (1, model.config.num_queries, model.config.num_labels + 1),
         )
-        expected_slice = [[3.0668, -1.1833, -5.1103], [3.3440, -3.3620, -5.1101], [2.6017, -4.3613, -4.1444]]
-        expected_slice = torch.tensor(expected_slice).to(torch_device)
+        expectations = Expectations(
+            {
+                (None, None): [[3.0668, -1.1833, -5.1103], [3.3440, -3.3620, -5.1101], [2.6017, -4.3613, -4.1444]],
+                ("cuda", 8): [[3.0590, -1.1903, -5.1119], [3.3919, -3.3547, -5.1469], [2.6041, -4.3592, -4.1406]],
+            }
+        )
+        expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device)
         torch.testing.assert_close(class_queries_logits[0, :3, :3], expected_slice, rtol=TOLERANCE, atol=TOLERANCE)

     @require_torch_accelerator
@@ -17,7 +17,7 @@ import unittest

 from transformers import is_torch_available, is_vision_available
 from transformers.models.auto import get_values
-from transformers.testing_utils import require_torch, slow, torch_device
+from transformers.testing_utils import Expectations, require_torch, slow, torch_device

 from ...test_configuration_common import ConfigTester
 from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
@@ -144,6 +144,9 @@ class PoolFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCa
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         self.model_tester.create_and_check_model(*config_and_inputs)

+    def test_batching_equivalence(self, atol=2e-4, rtol=2e-4):
+        super().test_batching_equivalence(atol=atol, rtol=rtol)
+
     @unittest.skip(reason="PoolFormer does not use inputs_embeds")
     def test_inputs_embeds(self):
         pass
@@ -235,5 +238,11 @@ class PoolFormerModelIntegrationTest(unittest.TestCase):
         expected_shape = torch.Size((1, 1000))
         self.assertEqual(outputs.logits.shape, expected_shape)

-        expected_slice = torch.tensor([-0.6113, 0.1685, -0.0492]).to(torch_device)
-        torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
+        expectations = Expectations(
+            {
+                (None, None): [-0.6113, 0.1685, -0.0492],
+                ("cuda", 8): [-0.6112, 0.1690, -0.0481],
+            }
+        )
+        expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device)
+        torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=2e-4, atol=2e-4)
@@ -17,6 +17,7 @@ import unittest

 from transformers import is_torch_available, is_vision_available
 from transformers.testing_utils import (
+    Expectations,
     require_accelerate,
     require_torch,
     require_torch_accelerator,
@@ -153,6 +154,9 @@ class PvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
         self.model_tester = PvtModelTester(self)
         self.config_tester = PvtConfigTester(self, config_class=PvtConfig)

+    def test_batching_equivalence(self, atol=1e-4, rtol=1e-4):
+        super().test_batching_equivalence(atol=atol, rtol=rtol)
+
     def test_config(self):
         self.config_tester.run_common_tests()

@@ -257,9 +261,15 @@ class PvtModelIntegrationTest(unittest.TestCase):
         expected_shape = torch.Size((1, model.config.num_labels))
         self.assertEqual(outputs.logits.shape, expected_shape)

-        expected_slice = torch.tensor([-1.4192, -1.9158, -0.9702]).to(torch_device)
+        expectations = Expectations(
+            {
+                (None, None): [-1.4192, -1.9158, -0.9702],
+                ("cuda", 8): [-1.4194, -1.9161, -0.9705],
+            }
+        )
+        expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device)

-        torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
+        torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=2e-4, atol=2e-4)

     @slow
     def test_inference_model(self):
@@ -278,11 +288,15 @@ class PvtModelIntegrationTest(unittest.TestCase):
         expected_shape = torch.Size((1, 50, 512))
         self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

-        expected_slice = torch.tensor(
-            [[-0.3086, 1.0402, 1.1816], [-0.2880, 0.5781, 0.6124], [0.1480, 0.6129, -0.0590]]
-        ).to(torch_device)
+        expectations = Expectations(
+            {
+                (None, None): [[-0.3086, 1.0402, 1.1816], [-0.2880, 0.5781, 0.6124], [0.1480, 0.6129, -0.0590]],
+                ("cuda", 8): [[-0.3084, 1.0402, 1.1816], [-0.2883, 0.5781, 0.6123], [0.1487, 0.6119, -0.0584]],
+            }
+        )
+        expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device)

-        torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
+        torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], expected_slice, rtol=2e-4, atol=2e-4)

     @slow
     @require_accelerate
@@ -167,6 +167,9 @@ class PvtV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         self.model_tester.create_and_check_model(*config_and_inputs)

+    def test_batching_equivalence(self, atol=5e-4, rtol=5e-4):
+        super().test_batching_equivalence(atol=atol, rtol=rtol)
+
     @unittest.skip(reason="Pvt-V2 does not use inputs_embeds")
     def test_inputs_embeds(self):
         pass
@@ -17,7 +17,7 @@ import unittest

 from transformers import RegNetConfig
 from transformers.file_utils import cached_property, is_torch_available, is_vision_available
-from transformers.testing_utils import require_torch, require_vision, slow, torch_device
+from transformers.testing_utils import Expectations, require_torch, require_vision, slow, torch_device

 from ...test_configuration_common import ConfigTester
 from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
@@ -146,6 +146,9 @@ class RegNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     def test_config(self):
         self.config_tester.run_common_tests()

+    def test_batching_equivalence(self, atol=3e-5, rtol=3e-5):
+        super().test_batching_equivalence(atol=atol, rtol=rtol)
+
     @unittest.skip(reason="RegNet does not use inputs_embeds")
     def test_inputs_embeds(self):
         pass
@@ -248,6 +251,11 @@ class RegNetModelIntegrationTest(unittest.TestCase):
         expected_shape = torch.Size((1, 1000))
         self.assertEqual(outputs.logits.shape, expected_shape)

-        expected_slice = torch.tensor([-0.4180, -1.5051, -3.4836]).to(torch_device)
-
-        torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
+        expectations = Expectations(
+            {
+                (None, None): [-0.4180, -1.5051, -3.4836],
+                ("cuda", 8): [-0.4168, -1.5056, -3.4836],
+            }
+        )
+        expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device)
+        torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=2e-4, atol=2e-4)
@@ -16,7 +16,7 @@
 import unittest

 from transformers import ResNetConfig
-from transformers.testing_utils import require_torch, require_vision, slow, torch_device
+from transformers.testing_utils import Expectations, require_torch, require_vision, slow, torch_device
 from transformers.utils import cached_property, is_torch_available, is_vision_available

 from ...test_backbone_common import BackboneTesterMixin
@@ -301,9 +301,14 @@ class ResNetModelIntegrationTest(unittest.TestCase):
         expected_shape = torch.Size((1, 1000))
         self.assertEqual(outputs.logits.shape, expected_shape)

-        expected_slice = torch.tensor([-11.1069, -9.7877, -8.3777]).to(torch_device)
-
-        torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
+        expectations = Expectations(
+            {
+                (None, None): [-11.1069, -9.7877, -8.3777],
+                ("cuda", 8): [-11.1112, -9.7916, -8.3788],
+            }
+        )
+        expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device)
+        torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=2e-4, atol=2e-4)


 @require_torch
@@ -21,6 +21,7 @@ from datasets import load_dataset

 from transformers import SegGptConfig
 from transformers.testing_utils import (
+    Expectations,
     require_torch,
     require_vision,
     slow,
@@ -379,15 +380,23 @@ class SegGptModelIntegrationTest(unittest.TestCase):
         expected_shape = torch.Size((1, 3, 896, 448))
         self.assertEqual(outputs.pred_masks.shape, expected_shape)

-        expected_slice = torch.tensor(
-            [
+        expectations = Expectations(
+            {
+                (None, None): [
                 [[-2.1208, -2.1190, -2.1198], [-2.1237, -2.1228, -2.1227], [-2.1232, -2.1226, -2.1228]],
                 [[-2.0405, -2.0396, -2.0403], [-2.0434, -2.0434, -2.0433], [-2.0428, -2.0432, -2.0434]],
                 [[-1.8102, -1.8088, -1.8099], [-1.8131, -1.8126, -1.8129], [-1.8130, -1.8128, -1.8131]],
-            ]
-        ).to(torch_device)
+                ],
+                ("cuda", 8): [
+                    [[-2.1208, -2.1189, -2.1198], [-2.1236, -2.1229, -2.1230], [-2.1233, -2.1227, -2.1228]],
+                    [[-2.0408, -2.0398, -2.0405], [-2.0435, -2.0437, -2.0438], [-2.0431, -2.0435, -2.0436]],
+                    [[-1.8101, -1.8086, -1.8098], [-1.8129, -1.8126, -1.8130], [-1.8128, -1.8128, -1.8130]],
+                ],
+            }
+        )
+        expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device)

-        torch.testing.assert_close(outputs.pred_masks[0, :, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
+        torch.testing.assert_close(outputs.pred_masks[0, :, :3, :3], expected_slice, rtol=2e-4, atol=2e-4)

         result = image_processor.post_process_semantic_segmentation(outputs, [input_image.size[::-1]])[0]

@@ -16,7 +16,7 @@
 import unittest

 from transformers import Swin2SRConfig
-from transformers.testing_utils import require_torch, require_vision, slow, torch_device
+from transformers.testing_utils import Expectations, require_torch, require_vision, slow, torch_device
 from transformers.utils import is_torch_available, is_vision_available

 from ...test_configuration_common import ConfigTester
@@ -360,7 +360,12 @@ class Swin2SRModelIntegrationTest(unittest.TestCase):
         # verify the logits
         expected_shape = torch.Size([1, 3, 976, 1296])
         self.assertEqual(outputs.reconstruction.shape, expected_shape)
-        expected_slice = torch.tensor(
-            [[0.5454, 0.5542, 0.5640], [0.5518, 0.5562, 0.5649], [0.5391, 0.5425, 0.5620]], dtype=model.dtype
-        ).to(torch_device)
-        torch.testing.assert_close(outputs.reconstruction[0, 0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
+
+        expectations = Expectations(
+            {
+                (None, None): [[0.5454, 0.5542, 0.5640], [0.5518, 0.5562, 0.5649], [0.5391, 0.5425, 0.5620]],
+                ("cuda", 8): [[0.5454, 0.5547, 0.5640], [0.5522, 0.5562, 0.5649], [0.5391, 0.5425, 0.5620]],
+            }
+        )
+        expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device, dtype=model.dtype)
+        torch.testing.assert_close(outputs.reconstruction[0, 0, :3, :3], expected_slice, rtol=2e-4, atol=2e-4)
@@ -19,6 +19,7 @@ import unittest

 from transformers import SwitchTransformersConfig, is_torch_available
 from transformers.testing_utils import (
+    Expectations,
     require_tokenizers,
     require_torch,
     require_torch_accelerator,
@@ -1035,18 +1036,28 @@ class SwitchTransformerModelIntegrationTests(unittest.TestCase):
         decoder_input_ids = torch.ones((32, 64), dtype=torch.long).to(torch_device)

         # fmt: off
-        EXPECTED_MEAN_LOGITS = torch.Tensor(
-            [
+        expectations = Expectations(
+            {
+                (None, None): [
                 -0.204102, -0.193359, 0.523438, -0.296875, 0.108887,
                 0.0211182, 0.605469, -0.100586, -0.0551758, 0.296875,
                 0.0090332, 0.174805, 0.139648, -0.170898, -0.0981445,
                 0.0245361, 0.0373535, 0.050293, -0.212891, 0.129883,
                 0.390625, -0.203125, -0.122559, -0.180664, 0.0437012,
                 -0.349609, -0.0250244, -0.104004, -0.15918, -0.133789
-            ]
-        ).to(torch.bfloat16)
+                ],
+                ("cuda", 8): [
+                    -0.2051, -0.1914, 0.5352, -0.2988, 0.1108, 0.0200, 0.6094, -0.1025,
+                    -0.0549, 0.2988, -0.0018, 0.1758, 0.1348, -0.1689, -0.1035, 0.0266,
+                    0.0383, 0.0493, -0.2119, 0.1328, 0.3906, -0.2041, -0.1240, -0.1836,
+                    0.0454, -0.3477, -0.0256, -0.1050, -0.1572, -0.1338
+                ],
+            }
+        )
+        EXPECTED_MEAN_LOGITS = torch.tensor(expectations.get_expectation()).to(torch_device, dtype=torch.bfloat16)
         # fmt: on
-        hf_logits = model(input_ids, decoder_input_ids=decoder_input_ids).last_hidden_state.cpu()
+
+        hf_logits = model(input_ids, decoder_input_ids=decoder_input_ids).last_hidden_state
         hf_logits = hf_logits[0, 0, :30]

         torch.testing.assert_close(hf_logits, EXPECTED_MEAN_LOGITS, rtol=6e-3, atol=9e-3)