mirror of https://github.com/huggingface/transformers.git

* add a multi-gpu job for all example tests
* run only ported tests
* rename
* explain why env is re-activated on each step
* mark all unported/checked tests with @require_torch_non_multigpu_but_fix_me
* style
* Apply suggestions from code review

Co-authored-by: Sam Shleifer <sshleifer@gmail.com>
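The commit message above describes marking unported tests with @require_torch_non_multigpu_but_fix_me. As a rough illustration of what such a marker decorator can look like, here is a minimal sketch that skips a test whenever more than one CUDA device is visible. This is an assumption for illustration only, not the actual transformers.testing_utils implementation, and it assumes torch is installed.

# Illustrative sketch only -- NOT the transformers.testing_utils implementation.
# Skips a test when more than one CUDA device is visible.
import unittest

import torch


def require_torch_non_multigpu_but_fix_me(test_case):
    """Skip `test_case` on multi-GPU machines until it has been ported to multi-GPU runs."""
    if torch.cuda.device_count() > 1:
        return unittest.skip("test not yet ported to multi-GPU")(test_case)
    return test_case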
27 lines · 892 B · Python
import os
import tempfile
import unittest

from transformers.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch_non_multigpu_but_fix_me, slow


@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    @require_torch_non_multigpu_but_fix_me
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    @require_torch_non_multigpu_but_fix_me
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
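For reference, a hedged sketch of calling TatoebaConverter directly, based only on the calls exercised by the test above (the save_dir constructor argument, convert_models, and write_model_card with dry_run=True). It assumes the Tatoeba-Challenge checkout at DEFAULT_REPO exists, which is the same precondition the skipUnless guard checks.

# Sketch of direct usage, mirroring the test above; assumes the Tatoeba
# repository checkout that the skipUnless guard requires is present.
import tempfile

from transformers.convert_marian_tatoeba_to_pytorch import TatoebaConverter

save_dir = tempfile.mkdtemp()
converter = TatoebaConverter(save_dir=save_dir)

# Convert the same language pair the test converts.
converter.convert_models(["heb-eng"])

# Render a model card without writing files, as in test_model_card.
content, mmeta = converter.write_model_card("opus-mt-he-en", dry_run=True)
assert mmeta["long_pair"] == "heb-eng"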