From 4eb61f8e88fafc07b9fa55069616a5fb38e49012 Mon Sep 17 00:00:00 2001
From: Stas Bekman
Date: Mon, 19 Oct 2020 04:08:34 -0700
Subject: [PATCH] remove USE_CUDA (#7861)

---
 .github/workflows/self-push.yml      |  2 --
 .github/workflows/self-scheduled.yml |  4 ----
 docs/source/testing.rst              |  8 ++++----
 scripts/fsmt/tests-to-run.sh         |  4 ++--
 src/transformers/testing_utils.py    | 10 ++++++----
 tests/test_skip_decorators.py        |  8 ++++----
 6 files changed, 16 insertions(+), 20 deletions(-)

diff --git a/.github/workflows/self-push.yml b/.github/workflows/self-push.yml
index eabd6b9e1c7..0fcafcbc312 100644
--- a/.github/workflows/self-push.yml
+++ b/.github/workflows/self-push.yml
@@ -59,7 +59,6 @@ jobs:
         TF_FORCE_GPU_ALLOW_GROWTH: "true"
         # TF_GPU_MEMORY_LIMIT: 4096
         OMP_NUM_THREADS: 1
-        USE_CUDA: yes
       run: |
         source .env/bin/activate
         python -m pytest -n 2 --dist=loadfile -s ./tests/
@@ -110,7 +109,6 @@ jobs:
         TF_FORCE_GPU_ALLOW_GROWTH: "true"
         # TF_GPU_MEMORY_LIMIT: 4096
         OMP_NUM_THREADS: 1
-        USE_CUDA: yes
       run: |
         source .env/bin/activate
         python -m pytest -n 2 --dist=loadfile -s ./tests/
diff --git a/.github/workflows/self-scheduled.yml b/.github/workflows/self-scheduled.yml
index 4a02f4a2858..dff1b5b6679 100644
--- a/.github/workflows/self-scheduled.yml
+++ b/.github/workflows/self-scheduled.yml
@@ -57,7 +57,6 @@ jobs:
         TF_FORCE_GPU_ALLOW_GROWTH: "true"
         OMP_NUM_THREADS: 1
         RUN_SLOW: yes
-        USE_CUDA: yes
       run: |
         source .env/bin/activate
         python -m pytest -n 1 --dist=loadfile -s ./tests/
@@ -67,7 +66,6 @@
         TF_FORCE_GPU_ALLOW_GROWTH: "true"
         OMP_NUM_THREADS: 1
         RUN_SLOW: yes
-        USE_CUDA: yes
       run: |
         source .env/bin/activate
         pip install -r examples/requirements.txt
@@ -120,7 +118,6 @@ jobs:
         TF_FORCE_GPU_ALLOW_GROWTH: "true"
         OMP_NUM_THREADS: 1
         RUN_SLOW: yes
-        USE_CUDA: yes
       run: |
         source .env/bin/activate
         python -m pytest -n 1 --dist=loadfile -s ./tests/
@@ -130,7 +127,6 @@
         TF_FORCE_GPU_ALLOW_GROWTH: "true"
         OMP_NUM_THREADS: 1
         RUN_SLOW: yes
-        USE_CUDA: yes
       run: |
         source .env/bin/activate
         pip install -r examples/requirements.txt
diff --git a/docs/source/testing.rst b/docs/source/testing.rst
index 82b88edaf21..ea2cf51b09b 100644
--- a/docs/source/testing.rst
+++ b/docs/source/testing.rst
@@ -22,12 +22,12 @@ How transformers are tested
   * `self-hosted (push) `__: runs fast tests on GPU only on commits on ``master``. It only runs if a commit on
     ``master`` has updated the code in one of the following folders: ``src``, ``tests``, ``.github`` (to prevent
     running on added model cards, notebooks, etc.)
 
-  * `self-hosted runner `__: runs slow tests on ``tests`` and ``examples``:
+  * `self-hosted runner `__: runs normal and slow tests on GPU in ``tests`` and ``examples``:
 
   .. code-block:: bash
 
-     RUN_SLOW=1 USE_CUDA=1 pytest tests/
-     RUN_SLOW=1 USE_CUDA=1 pytest examples/
+     RUN_SLOW=1 pytest tests/
+     RUN_SLOW=1 pytest examples/
 
   The results can be observed `here `__.
@@ -393,7 +393,7 @@ On a GPU-enabled setup, to test in CPU-only mode add ``CUDA_VISIBLE_DEVICES=""``
 
    CUDA_VISIBLE_DEVICES="" pytest tests/test_logging.py
 
-or if you have multiple gpus, you can tell which one to use in this test session, e.g. to use only the second gpu if you have gpus ``0`` and ``1``, you can run:
+or if you have multiple gpus, you can specify which one is to be used by ``pytest``. For example, to use only the second gpu if you have gpus ``0`` and ``1``, you can run:
 
 .. code-block:: bash
 
diff --git a/scripts/fsmt/tests-to-run.sh b/scripts/fsmt/tests-to-run.sh
index d3a74fd761c..e76ecd0aeef 100755
--- a/scripts/fsmt/tests-to-run.sh
+++ b/scripts/fsmt/tests-to-run.sh
@@ -2,5 +2,5 @@
 
 # these scripts need to be run before any changes to FSMT-related code - it should cover all bases
 
-USE_CUDA=0 RUN_SLOW=1 pytest --disable-warnings tests/test_tokenization_fsmt.py tests/test_configuration_auto.py tests/test_modeling_fsmt.py examples/seq2seq/test_fsmt_bleu_score.py
-USE_CUDA=1 RUN_SLOW=1 pytest --disable-warnings tests/test_tokenization_fsmt.py tests/test_configuration_auto.py tests/test_modeling_fsmt.py examples/seq2seq/test_fsmt_bleu_score.py
+CUDA_VISIBLE_DEVICES="" RUN_SLOW=1 pytest --disable-warnings tests/test_tokenization_fsmt.py tests/test_configuration_auto.py tests/test_modeling_fsmt.py examples/seq2seq/test_fsmt_bleu_score.py
+RUN_SLOW=1 pytest --disable-warnings tests/test_tokenization_fsmt.py tests/test_configuration_auto.py tests/test_modeling_fsmt.py examples/seq2seq/test_fsmt_bleu_score.py
diff --git a/src/transformers/testing_utils.py b/src/transformers/testing_utils.py
index 96635ec25e3..b842150a5ed 100644
--- a/src/transformers/testing_utils.py
+++ b/src/transformers/testing_utils.py
@@ -187,8 +187,10 @@ def require_torch_tpu(test_case):
 
 
 if _torch_available:
-    # Set the USE_CUDA environment variable to select a GPU.
-    torch_device = "cuda" if parse_flag_from_env("USE_CUDA") else "cpu"
+    # Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
+    import torch
+
+    torch_device = "cuda" if torch.cuda.is_available() else "cpu"
 else:
     torch_device = None
 
@@ -485,9 +487,9 @@ class TestCasePlus(unittest.TestCase):
 def mockenv(**kwargs):
     """this is a convenience wrapper, that allows this:
 
-    @mockenv(USE_CUDA=True, USE_TF=False)
+    @mockenv(RUN_SLOW=True, USE_TF=False)
     def test_something():
-        use_cuda = os.getenv("USE_CUDA", False)
+        run_slow = os.getenv("RUN_SLOW", False)
         use_tf = os.getenv("USE_TF", False)
     """
     return unittest.mock.patch.dict(os.environ, kwargs)
diff --git a/tests/test_skip_decorators.py b/tests/test_skip_decorators.py
index 20a8541b88f..3aac3e9b3d1 100644
--- a/tests/test_skip_decorators.py
+++ b/tests/test_skip_decorators.py
@@ -23,10 +23,10 @@
 # the following 4 should be run. But since we have different CI jobs running
 # different configs, all combinations should get covered
 #
-# USE_CUDA=1 RUN_SLOW=1 pytest -rA tests/test_skip_decorators.py
-# USE_CUDA=0 RUN_SLOW=1 pytest -rA tests/test_skip_decorators.py
-# USE_CUDA=0 RUN_SLOW=0 pytest -rA tests/test_skip_decorators.py
-# USE_CUDA=1 RUN_SLOW=0 pytest -rA tests/test_skip_decorators.py
+# RUN_SLOW=1 pytest -rA tests/test_skip_decorators.py
+# RUN_SLOW=1 CUDA_VISIBLE_DEVICES="" pytest -rA tests/test_skip_decorators.py
+# RUN_SLOW=0 pytest -rA tests/test_skip_decorators.py
+# RUN_SLOW=0 CUDA_VISIBLE_DEVICES="" pytest -rA tests/test_skip_decorators.py
 
 import os
 import unittest
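
With this patch the test device is derived from torch.cuda.is_available() instead of a USE_CUDA flag, so CPU-only runs are forced by hiding the GPUs with CUDA_VISIBLE_DEVICES="". The following is a minimal sketch, not part of the patch, showing that interaction; it only assumes a standard PyTorch install:

    # Sketch only: how the new device selection in testing_utils.py reacts to
    # CUDA_VISIBLE_DEVICES. The variable must be set before CUDA is initialized,
    # which is why the CI jobs and the docs set it in the shell environment.
    import os

    os.environ["CUDA_VISIBLE_DEVICES"] = ""  # hide all GPUs from this process

    import torch

    # With no visible devices, is_available() returns False and the tests run on
    # CPU; on a GPU machine without the override it returns True and "cuda" is used.
    torch_device = "cuda" if torch.cuda.is_available() else "cpu"
    print(torch_device)  # prints "cpu" here, even on a GPU machine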
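
The updated mockenv docstring can also be exercised standalone: the helper is a thin wrapper around unittest.mock.patch.dict, so the patched variables exist only while the decorated test runs. A self-contained sketch (values are strings, since os.environ only accepts strings):

    # Sketch only: standalone copy of the mockenv helper from testing_utils.py.
    import os
    import unittest.mock


    def mockenv(**kwargs):
        # patch.dict restores os.environ to its previous state when the test exits
        return unittest.mock.patch.dict(os.environ, kwargs)


    @mockenv(RUN_SLOW="1", USE_TF="0")
    def test_something():
        run_slow = os.getenv("RUN_SLOW", False)
        use_tf = os.getenv("USE_TF", False)
        assert run_slow == "1" and use_tf == "0"


    test_something()  # passes; RUN_SLOW/USE_TF are unset again afterwards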