VLM Generate: tag test_static_cache_matches_dynamic as flaky (#33630)

flaky
This commit is contained in:
Joao Gante 2024-10-03 12:27:02 +01:00 committed by GitHub
parent f1a5f81296
commit 6f0ce52760
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

View File

@ -4714,6 +4714,7 @@ class ModelTesterMixin:
normalized_1 = F.softmax(out_shared_prefix_last_tokens)
torch.testing.assert_close(normalized_0, normalized_1, rtol=1e-3, atol=1e-4)
@is_flaky(max_attempts=10) # TODO @raushan: this test is VERY flaky on some VLMs, like paligemma
def test_static_cache_matches_dynamic(self):
"""
        Tests that generating with static cache gives almost the same results as with dynamic cache.
@ -4749,7 +4750,7 @@ class ModelTesterMixin:
output_logits=True,
return_dict_in_generate=True,
)
self.assertTrue(torch.allclose(dynamic_out.logits[0], static_out.logits[0], rtol=1e-3, atol=1e-3))
self.assertTrue(torch.allclose(dynamic_out.logits[0], static_out.logits[0], rtol=1e-3, atol=1e-4))
# For now, Let's focus only on GPU for `torch.compile`
@slow