Fix flaky test for log level (#21776)

* Fix flaky test for log level

* Fix other flaky test
Sylvain Gugger 2023-02-28 22:24:14 +01:00 committed by GitHub
parent acfb714bdf
commit b29e2dcaff
2 changed files with 7 additions and 4 deletions


@@ -1093,18 +1093,20 @@ class TrainerIntegrationTest(TestCasePlus, TrainerIntegrationCommon):
         self.assertTrue(np.array_equal(2 * expected + 1, seen[: expected.shape[0]]))
         self.assertTrue(np.all(seen[expected.shape[0] :] == -100))
 
-    # FIXME: sgugger
-    @unittest.skip(reason="might be flaky after PR #21700. Skip for now.")
     def test_log_level(self):
         # testing only --log_level (--log_level_replica requires multiple gpus and DDP and is tested elsewhere)
         logger = logging.get_logger()
         log_info_string = "Running training"
 
-        # test with the default log_level - should be warning and thus not log on the main process
+        # test with the default log_level - should be the same as before and thus we test depending on is_info
+        is_info = logging.get_verbosity() <= 20
         with CaptureLogger(logger) as cl:
             trainer = get_regression_trainer()
             trainer.train()
-        self.assertNotIn(log_info_string, cl.out)
+        if is_info:
+            self.assertIn(log_info_string, cl.out)
+        else:
+            self.assertNotIn(log_info_string, cl.out)
 
         # test with low log_level - lower than info
         with CaptureLogger(logger) as cl:
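
Note on the fix above (not part of the diff): transformers.utils.logging uses the stdlib integer levels, so INFO is 20 and get_verbosity() <= 20 holds exactly when the effective verbosity is INFO or more verbose (DEBUG). A minimal sketch of the check, assuming only the public get_verbosity / set_verbosity_* helpers:

# Minimal sketch of the is_info branch (stdlib-style levels: DEBUG=10,
# INFO=20, WARNING=30).
from transformers.utils import logging

logging.set_verbosity_info()
assert logging.get_verbosity() <= 20   # is_info True: "Running training" is logged

logging.set_verbosity_warning()
assert logging.get_verbosity() > 20    # is_info False: info logs are suppressed

The test was flaky precisely because it assumed the default WARNING verbosity, while another test run earlier in the same process could leave the shared library verbosity at INFO; branching on the current verbosity makes the assertion hold either way.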


@@ -109,6 +109,7 @@ class HfArgumentParserTest(unittest.TestCase):
     def test_advisory_warnings(self):
         # testing `logger.warning_advice()`
+        transformers.utils.logging._reset_library_root_logger()
         logger = logging.get_logger("transformers.models.bart.tokenization_bart")
         msg = "Testing 1, 2, 3"