Mirror of https://github.com/huggingface/transformers.git (synced 2025-08-02 19:21:31 +06:00)
Fix flaky test for log level (#21776)

* Fix flaky test for log level
* Fix other flaky test
parent acfb714bdf
commit b29e2dcaff
@@ -1093,18 +1093,20 @@ class TrainerIntegrationTest(TestCasePlus, TrainerIntegrationCommon):
         self.assertTrue(np.array_equal(2 * expected + 1, seen[: expected.shape[0]]))
         self.assertTrue(np.all(seen[expected.shape[0] :] == -100))
 
-    # FIXME: sgugger
-    @unittest.skip(reason="might be flaky after PR #21700. Skip for now.")
     def test_log_level(self):
         # testing only --log_level (--log_level_replica requires multiple gpus and DDP and is tested elsewhere)
         logger = logging.get_logger()
         log_info_string = "Running training"
 
-        # test with the default log_level - should be warning and thus not log on the main process
+        # test with the default log_level - should be the same as before and thus we test depending on is_info
+        is_info = logging.get_verbosity() <= 20
         with CaptureLogger(logger) as cl:
             trainer = get_regression_trainer()
             trainer.train()
-        self.assertNotIn(log_info_string, cl.out)
+        if is_info:
+            self.assertIn(log_info_string, cl.out)
+        else:
+            self.assertNotIn(log_info_string, cl.out)
 
         # test with low log_level - lower than info
         with CaptureLogger(logger) as cl:
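Note on the new is_info check: transformers.utils.logging reuses the stdlib numeric levels (DEBUG=10, INFO=20, WARNING=30, ERROR=40), and the library default is WARNING. Because other tests may have changed the verbosity before this one runs, the assertion is made conditional on the current level instead of hard-coding the WARNING default. A minimal standalone sketch of what the test relies on:

    from transformers.utils import logging

    # The library default verbosity is WARNING (30), so this check is True
    # only when verbosity was lowered to INFO or DEBUG, e.g. by setting
    # TRANSFORMERS_VERBOSITY=info before transformers is imported.
    is_info = logging.get_verbosity() <= 20  # 20 == logging.INFO
    print(is_info)  # False under the default WARNING level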
@@ -109,6 +109,7 @@ class HfArgumentParserTest(unittest.TestCase):
 
     def test_advisory_warnings(self):
         # testing `logger.warning_advice()`
+        transformers.utils.logging._reset_library_root_logger()
 
         logger = logging.get_logger("transformers.models.bart.tokenization_bart")
         msg = "Testing 1, 2, 3"
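For context on the second hunk: warning_advice is the advisory-warning variant of logger.warning that transformers patches onto logging.Logger; it is silenced when the TRANSFORMERS_NO_ADVISORY_WARNINGS environment variable is set. Calling _reset_library_root_logger() first puts the library root logger back into its default state, so configuration leaked from earlier tests cannot affect the capture. A minimal sketch of the behavior being tested, assuming CaptureLogger from transformers.testing_utils:

    import os

    from transformers.testing_utils import CaptureLogger
    from transformers.utils import logging

    # Start from a clean library root logger so state left over from other
    # tests (the source of the flakiness this commit fixes) cannot leak in.
    logging._reset_library_root_logger()

    logger = logging.get_logger("transformers.models.bart.tokenization_bart")
    msg = "Testing 1, 2, 3"

    # By default, warning_advice behaves like logger.warning.
    with CaptureLogger(logger) as cl:
        logger.warning_advice(msg)
    assert msg in cl.out

    # With advisory warnings disabled, the call becomes a no-op.
    os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "1"
    try:
        with CaptureLogger(logger) as cl:
            logger.warning_advice(msg)
        assert msg not in cl.out
    finally:
        del os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"]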