VLM Generate: tag test_static_cache_matches_dynamic as flaky (#33630)
This commit is contained in:
parent: f1a5f81296
commit: 6f0ce52760
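The decorator this commit adds, `is_flaky`, comes from `transformers.testing_utils` and reruns a failing test a bounded number of times before reporting failure. As a rough mental model only (a minimal sketch, not the library's actual implementation; the `wait_before_retry` parameter here is illustrative):

import functools
import time

def is_flaky(max_attempts=5, wait_before_retry=None):
    # Sketch of a flaky-test decorator: the test passes if any attempt passes.
    def decorator(test_fn):
        @functools.wraps(test_fn)
        def wrapper(*args, **kwargs):
            for attempt in range(max_attempts):
                try:
                    return test_fn(*args, **kwargs)
                except Exception:
                    if attempt == max_attempts - 1:
                        raise  # retries exhausted: surface the final failure
                    if wait_before_retry is not None:
                        time.sleep(wait_before_retry)
        return wrapper
    return decorator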
@@ -4714,6 +4714,7 @@ class ModelTesterMixin:
         normalized_1 = F.softmax(out_shared_prefix_last_tokens)
         torch.testing.assert_close(normalized_0, normalized_1, rtol=1e-3, atol=1e-4)
 
+    @is_flaky(max_attempts=10)  # TODO @raushan: this test is VERY flaky on some VLMs, like paligemma
     def test_static_cache_matches_dynamic(self):
         """
         Tests that generating with static cache give almost same results as with dynamic cache.
@@ -4749,7 +4750,7 @@ class ModelTesterMixin:
             output_logits=True,
             return_dict_in_generate=True,
         )
-        self.assertTrue(torch.allclose(dynamic_out.logits[0], static_out.logits[0], rtol=1e-3, atol=1e-3))
+        self.assertTrue(torch.allclose(dynamic_out.logits[0], static_out.logits[0], rtol=1e-3, atol=1e-4))
 
     # For now, Let's focus only on GPU for `torch.compile`
     @slow
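For context, the test this commit touches generates once with the default dynamic cache and once with a static cache, then asserts the logits agree within tolerance. A minimal sketch of that comparison outside the test harness (the checkpoint name is a placeholder; `cache_implementation="static"` is the documented way to request a static cache in recent transformers releases):

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")  # placeholder checkpoint
model = AutoModelForCausalLM.from_pretrained("gpt2")
inputs = tokenizer("The quick brown fox", return_tensors="pt")

common = dict(max_new_tokens=5, do_sample=False, output_logits=True, return_dict_in_generate=True)
dynamic_out = model.generate(**inputs, **common)  # default dynamic (growing) cache
static_out = model.generate(**inputs, **common, cache_implementation="static")  # pre-allocated cache

# torch.allclose checks |a - b| <= atol + rtol * |b| elementwise, so the commit's
# move from atol=1e-3 to atol=1e-4 mainly tightens the check on near-zero logits.
print(torch.allclose(dynamic_out.logits[0], static_out.logits[0], rtol=1e-3, atol=1e-4))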