Rework pipeline tests (#19366)

* Rework pipeline tests

* Try to fix Flax tests

* Try to put it before

* Use a new decorator instead

* Remove ignore marker since it doesn't work

* Filter pipeline tests

* Woopsie

* Use the filtered list

* Clean up and fake modif

* Remove init

* Revert fake modif
Sylvain Gugger 2022-10-07 18:01:58 -04:00 committed by GitHub
parent 983451a13e
commit 9ac586b3c8
27 changed files with 95 additions and 149 deletions

View File

@ -79,10 +79,19 @@ jobs:
path: ~/transformers/tests_fetched_summary.txt
- run: |
if [ -f test_list.txt ]; then
mv test_list.txt test_preparation/test_list.txt
cp test_list.txt test_preparation/test_list.txt
else
touch test_preparation/test_list.txt
fi
- run: python utils/tests_fetcher.py --filter_pipeline_tests
- run: |
if [ -f test_list.txt ]; then
mv test_list.txt test_preparation/filtered_test_list.txt
else
touch test_preparation/filtered_test_list.txt
fi
- store_artifacts:
path: ~/transformers/test_preparation/filtered_test_list.txt
- run: python utils/tests_fetcher.py --filters tests examples | tee examples_tests_fetched_summary.txt
- store_artifacts:
path: ~/transformers/examples_tests_fetched_summary.txt
@ -97,6 +106,7 @@ jobs:
root: test_preparation/
paths:
test_list.txt
filtered_test_list.txt
examples_test_list.txt
# To run all tests for the nightly build
@ -110,6 +120,8 @@ jobs:
mkdir test_preparation
echo "tests" > test_preparation/test_list.txt
echo "tests" > test_preparation/examples_test_list.txt
- run: python utils/tests_fetcher.py --filter_pipeline_tests
- run: mv test_list.txt test_preparation/filtered_test_list.txt
- persist_to_workspace:
root: test_preparation/
@ -132,7 +144,7 @@ jobs:
- attach_workspace:
at: ~/transformers/test_preparation
- run: |
if [ ! -s test_preparation/test_list.txt ]; then
if [ ! -s test_preparation/filtered_test_list.txt ]; then
echo "No tests to run, exiting early!"
circleci-agent step halt
fi
@ -152,7 +164,7 @@ jobs:
key: v0.5-torch_and_tf-{{ checksum "setup.py" }}
paths:
- '~/.cache/pip'
- run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_torch_and_tf $(cat test_preparation/test_list.txt) -m is_pt_tf_cross_test --durations=0 | tee tests_output.txt
- run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_torch_and_tf $(cat test_preparation/filtered_test_list.txt) -m is_pt_tf_cross_test --durations=0 | tee tests_output.txt
- store_artifacts:
path: ~/transformers/tests_output.txt
- store_artifacts:
@ -174,7 +186,7 @@ jobs:
- attach_workspace:
at: ~/transformers/test_preparation
- run: |
if [ ! -s test_preparation/test_list.txt ]; then
if [ ! -s test_preparation/filtered_test_list.txt ]; then
echo "No tests to run, exiting early!"
circleci-agent step halt
fi
@ -192,7 +204,7 @@ jobs:
key: v0.5-torch_and_flax-{{ checksum "setup.py" }}
paths:
- '~/.cache/pip'
- run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_torch_and_flax $(cat test_preparation/test_list.txt) -m is_pt_flax_cross_test --durations=0 | tee tests_output.txt
- run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_torch_and_flax $(cat test_preparation/filtered_test_list.txt) -m is_pt_flax_cross_test --durations=0 | tee tests_output.txt
- store_artifacts:
path: ~/transformers/tests_output.txt
- store_artifacts:
@ -213,7 +225,7 @@ jobs:
- attach_workspace:
at: ~/transformers/test_preparation
- run: |
if [ ! -s test_preparation/test_list.txt ]; then
if [ ! -s test_preparation/filtered_test_list.txt ]; then
echo "No tests to run, exiting early!"
circleci-agent step halt
fi
@ -231,7 +243,7 @@ jobs:
key: v0.5-torch-{{ checksum "setup.py" }}
paths:
- '~/.cache/pip'
- run: python -m pytest -n 3 --max-worker-restart=0 --dist=loadfile -s --make-reports=tests_torch $(cat test_preparation/test_list.txt) | tee tests_output.txt
- run: python -m pytest -n 3 --max-worker-restart=0 --dist=loadfile -s --make-reports=tests_torch $(cat test_preparation/filtered_test_list.txt) | tee tests_output.txt
- store_artifacts:
path: ~/transformers/tests_output.txt
- store_artifacts:
@ -252,7 +264,7 @@ jobs:
- attach_workspace:
at: ~/transformers/test_preparation
- run: |
if [ ! -s test_preparation/test_list.txt ]; then
if [ ! -s test_preparation/filtered_test_list.txt ]; then
echo "No tests to run, exiting early!"
circleci-agent step halt
fi
@ -269,7 +281,7 @@ jobs:
key: v0.5-tf-{{ checksum "setup.py" }}
paths:
- '~/.cache/pip'
- run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_tf $(cat test_preparation/test_list.txt) | tee tests_output.txt
- run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_tf $(cat test_preparation/filtered_test_list.txt) | tee tests_output.txt
- store_artifacts:
path: ~/transformers/tests_output.txt
- store_artifacts:
@ -290,7 +302,7 @@ jobs:
- attach_workspace:
at: ~/transformers/test_preparation
- run: |
if [ ! -s test_preparation/test_list.txt ]; then
if [ ! -s test_preparation/filtered_test_list.txt ]; then
echo "No tests to run, exiting early!"
circleci-agent step halt
fi
@ -306,7 +318,7 @@ jobs:
key: v0.5-flax-{{ checksum "setup.py" }}
paths:
- '~/.cache/pip'
- run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_flax $(cat test_preparation/test_list.txt) | tee tests_output.txt
- run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_flax $(cat test_preparation/filtered_test_list.txt) | tee tests_output.txt
- store_artifacts:
path: ~/transformers/tests_output.txt
- store_artifacts:
@ -318,7 +330,6 @@ jobs:
- image: cimg/python:3.7.12
environment:
OMP_NUM_THREADS: 1
RUN_PIPELINE_TESTS: yes
TRANSFORMERS_IS_CI: yes
PYTEST_TIMEOUT: 120
resource_class: xlarge
@ -345,7 +356,7 @@ jobs:
key: v0.5-torch-{{ checksum "setup.py" }}
paths:
- '~/.cache/pip'
- run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_pipelines_torch -m is_pipeline_test $(cat test_preparation/test_list.txt) | tee tests_output.txt
- run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_pipelines_torch tests/pipelines | tee tests_output.txt
- store_artifacts:
path: ~/transformers/tests_output.txt
- store_artifacts:
@ -357,7 +368,6 @@ jobs:
- image: cimg/python:3.7.12
environment:
OMP_NUM_THREADS: 1
RUN_PIPELINE_TESTS: yes
TRANSFORMERS_IS_CI: yes
PYTEST_TIMEOUT: 120
resource_class: xlarge
@ -382,7 +392,7 @@ jobs:
key: v0.5-tf-{{ checksum "setup.py" }}
paths:
- '~/.cache/pip'
- run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_pipelines_tf $(cat test_preparation/test_list.txt) -m is_pipeline_test | tee tests_output.txt
- run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_pipelines_tf tests/pipelines | tee tests_output.txt
- store_artifacts:
path: ~/transformers/tests_output.txt
- store_artifacts:
@ -401,7 +411,7 @@ jobs:
- attach_workspace:
at: ~/transformers/test_preparation
- run: |
if [ ! -s test_preparation/test_list.txt ]; then
if [ ! -s test_preparation/filtered_test_list.txt ]; then
echo "No tests to run, exiting early!"
circleci-agent step halt
fi
@ -557,7 +567,7 @@ jobs:
- attach_workspace:
at: ~/transformers/test_preparation
- run: |
if [ ! -s test_preparation/test_list.txt ]; then
if [ ! -s test_preparation/filtered_test_list.txt ]; then
echo "No tests to run, exiting early!"
circleci-agent step halt
fi
@ -575,7 +585,7 @@ jobs:
key: v0.5-hub-{{ checksum "setup.py" }}
paths:
- '~/.cache/pip'
- run: python -m pytest --max-worker-restart=0 -sv --make-reports=tests_hub $(cat test_preparation/test_list.txt) -m is_staging_test | tee tests_output.txt
- run: python -m pytest --max-worker-restart=0 -sv --make-reports=tests_hub $(cat test_preparation/filtered_test_list.txt) -m is_staging_test | tee tests_output.txt
- store_artifacts:
path: ~/transformers/tests_output.txt
- store_artifacts:
@ -596,7 +606,7 @@ jobs:
- attach_workspace:
at: ~/transformers/test_preparation
- run: |
if [ ! -s test_preparation/test_list.txt ]; then
if [ ! -s test_preparation/filtered_test_list.txt ]; then
echo "No tests to run, exiting early!"
circleci-agent step halt
fi
@ -610,7 +620,7 @@ jobs:
key: v0.5-onnx-{{ checksum "setup.py" }}
paths:
- '~/.cache/pip'
- run: python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s --make-reports=tests_onnx $(cat test_preparation/test_list.txt) -k onnx | tee tests_output.txt
- run: python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s --make-reports=tests_onnx $(cat test_preparation/filtered_test_list.txt) -k onnx | tee tests_output.txt
- store_artifacts:
path: ~/transformers/tests_output.txt
@ -690,7 +700,7 @@ jobs:
steps:
- checkout
- attach_workspace:
at: ~/transformers/test_preparation
at: ~/transformers/filtered_test_list.txt
- run: |
if [ ! -s test_preparation/test_list.txt ]; then
echo "No tests to run, exiting early!"

View File

@ -256,10 +256,8 @@ jobs:
- name: Run all pipeline tests on GPU
working-directory: /transformers
env:
RUN_PIPELINE_TESTS: yes
run: |
python3 -m pytest -n 1 -v --dist=loadfile -m is_pipeline_test --make-reports=${{ matrix.machine_type }}_tests_torch_pipeline_gpu tests
python3 -m pytest -n 1 -v --dist=loadfile --make-reports=${{ matrix.machine_type }}_tests_torch_pipeline_gpu tests/pipelines
- name: Failure short reports
if: ${{ failure() }}
@ -301,10 +299,8 @@ jobs:
- name: Run all pipeline tests on GPU
working-directory: /transformers
env:
RUN_PIPELINE_TESTS: yes
run: |
python3 -m pytest -n 1 -v --dist=loadfile -m is_pipeline_test --make-reports=${{ matrix.machine_type }}_tests_tf_pipeline_gpu tests
python3 -m pytest -n 1 -v --dist=loadfile --make-reports=${{ matrix.machine_type }}_tests_tf_pipeline_gpu tests/pipelines
- name: Failure short reports
if: ${{ always() }}

View File

@ -32,7 +32,6 @@ warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_configure(config):
config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipeline are tested")
config.addinivalue_line(
"markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
)

View File

@ -133,7 +133,6 @@ _run_pt_tf_cross_tests = parse_flag_from_env("RUN_PT_TF_CROSS_TESTS", default=Fa
_run_pt_flax_cross_tests = parse_flag_from_env("RUN_PT_FLAX_CROSS_TESTS", default=False)
_run_custom_tokenizers = parse_flag_from_env("RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=False)
_run_git_lfs_tests = parse_flag_from_env("RUN_GIT_LFS_TESTS", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
@ -176,25 +175,6 @@ def is_pt_flax_cross_test(test_case):
return pytest.mark.is_pt_flax_cross_test()(test_case)
def is_pipeline_test(test_case):
"""
Decorator marking a test as a pipeline test.
Pipeline tests are skipped by default and we can run only them by setting RUN_PIPELINE_TESTS environment variable
to a truthy value and selecting the is_pipeline_test pytest mark.
"""
if not _run_pipeline_tests:
return unittest.skip("test is pipeline test")(test_case)
else:
try:
import pytest # We don't need a hard dependency on pytest in the main library
except ImportError:
return test_case
else:
return pytest.mark.is_pipeline_test()(test_case)
def is_staging_test(test_case):
"""
Decorator marking a test as a staging test.
@ -309,6 +289,18 @@ def require_torch(test_case):
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_torch_or_tf(test_case):
"""
Decorator marking a test that requires PyTorch or TensorFlow.
These tests are skipped when neither PyTorch nor TensorFlow is installed.
"""
return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
test_case
)
def require_intel_extension_for_pytorch(test_case):
"""
Decorator marking a test that requires Intel Extension for PyTorch.

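A minimal usage sketch of the new require_torch_or_tf decorator added above, assuming it is imported from transformers.testing_utils as in the diff; the class and test names here are illustrative and not part of the commit:

import unittest

from transformers.testing_utils import require_torch_or_tf


# Hypothetical test case: skipped automatically when neither PyTorch nor
# TensorFlow is installed, mirroring how the rewritten pipeline tests use it.
@require_torch_or_tf
class ExampleBackendAgnosticTest(unittest.TestCase):
    def test_runs_with_either_backend(self):
        self.assertTrue(True)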
View File

@ -18,19 +18,11 @@ import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from transformers.testing_utils import nested_simplify, require_tf, require_torch, require_torchaudio, slow
from .test_pipelines_common import ANY, PipelineTestCaseMeta
@is_pipeline_test
@require_torch
class AudioClassificationPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta):
model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING

View File

@ -31,7 +31,6 @@ from transformers.pipelines import AutomaticSpeechRecognitionPipeline, pipeline
from transformers.pipelines.audio_utils import chunk_bytes_iter
from transformers.pipelines.automatic_speech_recognition import chunk_iter
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_pyctcdecode,
@ -52,7 +51,6 @@ if is_torch_available():
# from .test_pipelines_common import CustomInputPipelineCommonMixin
@is_pipeline_test
class AutomaticSpeechRecognitionPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta):
model_mapping = {
k: v

View File

@ -48,13 +48,13 @@ from transformers.testing_utils import (
USER,
CaptureLogger,
RequestCounter,
is_pipeline_test,
is_staging_test,
nested_simplify,
require_scatter,
require_tensorflow_probability,
require_tf,
require_torch,
require_torch_or_tf,
slow,
)
from transformers.utils import is_tf_available, is_torch_available
@ -307,7 +307,6 @@ class PipelineTestCaseMeta(type):
return type.__new__(mcs, name, bases, dct)
@is_pipeline_test
class CommonPipelineTest(unittest.TestCase):
@require_torch
def test_pipeline_iteration(self):
@ -416,7 +415,6 @@ class CommonPipelineTest(unittest.TestCase):
self.assertEqual(len(outputs), 20)
@is_pipeline_test
class PipelinePadTest(unittest.TestCase):
@require_torch
def test_pipeline_padding(self):
@ -498,7 +496,6 @@ class PipelinePadTest(unittest.TestCase):
)
@is_pipeline_test
class PipelineUtilsTest(unittest.TestCase):
@require_torch
def test_pipeline_dataset(self):
@ -795,7 +792,6 @@ class CustomPipeline(Pipeline):
return model_outputs["logits"].softmax(-1).numpy()
@is_pipeline_test
class CustomPipelineTest(unittest.TestCase):
def test_warning_logs(self):
transformers_logging.set_verbosity_debug()
@ -835,6 +831,7 @@ class CustomPipelineTest(unittest.TestCase):
# Clean registry for next tests.
del PIPELINE_REGISTRY.supported_tasks["custom-text-classification"]
@require_torch_or_tf
def test_dynamic_pipeline(self):
PIPELINE_REGISTRY.register_pipeline(
"pair-classification",
@ -886,6 +883,7 @@ class CustomPipelineTest(unittest.TestCase):
[{"label": "LABEL_0", "score": 0.505}],
)
@require_torch_or_tf
def test_cached_pipeline_has_minimum_calls_to_head(self):
# Make sure we have cached the pipeline.
_ = pipeline("text-classification", model="hf-internal-testing/tiny-random-bert")

View File

@ -29,7 +29,7 @@ from transformers import (
TFAutoModelForCausalLM,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch, slow, torch_device
from transformers.testing_utils import require_tf, require_torch, slow, torch_device
from .test_pipelines_common import ANY, PipelineTestCaseMeta
@ -37,7 +37,6 @@ from .test_pipelines_common import ANY, PipelineTestCaseMeta
DEFAULT_DEVICE_NUM = -1 if torch_device == "cpu" else 0
@is_pipeline_test
class ConversationalPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta):
model_mapping = dict(
list(MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.items())

View File

@ -18,7 +18,6 @@ from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoToke
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectron2,
require_pytesseract,
@ -53,7 +52,6 @@ INVOICE_URL = (
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta):

View File

@ -22,12 +22,11 @@ from transformers import (
LxmertConfig,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch
from transformers.testing_utils import nested_simplify, require_tf, require_torch
from .test_pipelines_common import PipelineTestCaseMeta
@is_pipeline_test
class FeatureExtractionPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta):
model_mapping = MODEL_MAPPING
tf_model_mapping = TF_MODEL_MAPPING

View File

@ -16,19 +16,11 @@ import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from transformers.testing_utils import nested_simplify, require_tf, require_torch, require_torch_gpu, slow
from .test_pipelines_common import ANY, PipelineTestCaseMeta
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta):
model_mapping = MODEL_FOR_MASKED_LM_MAPPING
tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING

View File

@ -22,10 +22,10 @@ from transformers import (
)
from transformers.pipelines import ImageClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
slow,
)
@ -43,7 +43,7 @@ else:
pass
@is_pipeline_test
@require_torch_or_tf
@require_vision
class ImageClassificationPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta):
model_mapping = MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING

View File

@ -31,15 +31,7 @@ from transformers import (
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from transformers.testing_utils import nested_simplify, require_tf, require_timm, require_torch, require_vision, slow
from .test_pipelines_common import ANY, PipelineTestCaseMeta
@ -62,7 +54,6 @@ def hashimage(image: Image) -> str:
@require_vision
@require_timm
@require_torch
@is_pipeline_test
class ImageSegmentationPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta):
model_mapping = {
k: v

View File

@ -16,7 +16,7 @@ import unittest
from transformers import MODEL_FOR_VISION_2_SEQ_MAPPING, TF_MODEL_FOR_VISION_2_SEQ_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch, require_vision, slow
from transformers.testing_utils import require_tf, require_torch, require_vision, slow
from .test_pipelines_common import ANY, PipelineTestCaseMeta
@ -31,7 +31,6 @@ else:
pass
@is_pipeline_test
@require_vision
class ImageToTextPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta):
model_mapping = MODEL_FOR_VISION_2_SEQ_MAPPING

View File

@ -22,15 +22,7 @@ from transformers import (
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from transformers.testing_utils import nested_simplify, require_tf, require_timm, require_torch, require_vision, slow
from .test_pipelines_common import ANY, PipelineTestCaseMeta
@ -48,7 +40,6 @@ else:
@require_vision
@require_timm
@require_torch
@is_pipeline_test
class ObjectDetectionPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta):
model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

View File

@ -22,12 +22,11 @@ from transformers import (
)
from transformers.data.processors.squad import SquadExample
from transformers.pipelines import QuestionAnsweringArgumentHandler, pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from transformers.testing_utils import nested_simplify, require_tf, require_torch, require_torch_or_tf, slow
from .test_pipelines_common import ANY, PipelineTestCaseMeta
@is_pipeline_test
class QAPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta):
model_mapping = MODEL_FOR_QUESTION_ANSWERING_MAPPING
tf_model_mapping = TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING
@ -345,7 +344,7 @@ between them. It's straightforward to train your models with one before loading
self.assertEqual(nested_simplify(outputs), {"score": 0.979, "start": 27, "end": 32, "answer": "Paris"})
@is_pipeline_test
@require_torch_or_tf
class QuestionAnsweringArgumentHandlerTests(unittest.TestCase):
def test_argument_handler(self):
qa = QuestionAnsweringArgumentHandler()

View File

@ -23,7 +23,7 @@ from transformers import (
T5Config,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch, slow, torch_device
from transformers.testing_utils import require_tf, require_torch, slow, torch_device
from transformers.tokenization_utils import TruncationStrategy
from .test_pipelines_common import ANY, PipelineTestCaseMeta
@ -32,7 +32,6 @@ from .test_pipelines_common import ANY, PipelineTestCaseMeta
DEFAULT_DEVICE_NUM = -1 if torch_device == "cpu" else 0
@is_pipeline_test
class SummarizationPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta):
model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

View File

@ -23,7 +23,6 @@ from transformers import (
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
require_pandas,
require_tensorflow_probability,
require_tf,
@ -35,7 +34,6 @@ from transformers.testing_utils import (
from .test_pipelines_common import PipelineTestCaseMeta
@is_pipeline_test
class TQAPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta):
# Putting it there for consistency, but TQA do not have fast tokenizer
# which are needed to generate automatic tests

View File

@ -20,7 +20,7 @@ from transformers import (
Text2TextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.testing_utils import require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY, PipelineTestCaseMeta
@ -30,7 +30,6 @@ if is_torch_available():
import torch
@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta):
model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

View File

@ -20,12 +20,11 @@ from transformers import (
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from transformers.testing_utils import nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY, PipelineTestCaseMeta
@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta):
model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

View File

@ -16,17 +16,17 @@ import unittest
from transformers import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TextGenerationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY, PipelineTestCaseMeta
@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta):
model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING

View File

@ -25,14 +25,7 @@ from transformers import (
pipeline,
)
from transformers.pipelines import AggregationStrategy, TokenClassificationArgumentHandler
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from transformers.testing_utils import nested_simplify, require_tf, require_torch, require_torch_gpu, slow
from .test_pipelines_common import ANY, PipelineTestCaseMeta
@ -40,7 +33,6 @@ from .test_pipelines_common import ANY, PipelineTestCaseMeta
VALID_INPUTS = ["A simple string", ["list of strings", "A simple string that is quite a bit longer"]]
@is_pipeline_test
class TokenClassificationPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta):
model_mapping = MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
tf_model_mapping = TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
@ -770,7 +762,6 @@ class TokenClassificationPipelineTests(unittest.TestCase, metaclass=PipelineTest
)
@is_pipeline_test
class TokenClassificationArgumentHandlerTestCase(unittest.TestCase):
def setUp(self):
self.args_parser = TokenClassificationArgumentHandler()

View File

@ -25,12 +25,11 @@ from transformers import (
TranslationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch, slow
from transformers.testing_utils import require_tf, require_torch, slow
from .test_pipelines_common import ANY, PipelineTestCaseMeta
@is_pipeline_test
class TranslationPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta):
model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
@ -118,7 +117,6 @@ class TranslationPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta
)
@is_pipeline_test
class TranslationNewFormatPipelineTests(unittest.TestCase):
@require_torch
@slow

View File

@ -16,14 +16,7 @@ import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from transformers.testing_utils import nested_simplify, require_tf, require_torch, require_vision, slow
from .test_pipelines_common import ANY, PipelineTestCaseMeta
@ -38,7 +31,6 @@ else:
pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta):

View File

@ -21,12 +21,11 @@ from transformers import (
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from transformers.testing_utils import nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY, PipelineTestCaseMeta
@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta):
model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

View File

@ -16,14 +16,7 @@ import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from transformers.testing_utils import nested_simplify, require_tf, require_torch, require_vision, slow
from .test_pipelines_common import ANY, PipelineTestCaseMeta
@ -39,7 +32,6 @@ else:
@require_vision
@is_pipeline_test
class ZeroShotImageClassificationPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta):
# Deactivating auto tests since we don't have a good MODEL_FOR_XX mapping,
# and only CLIP would be there for now.

View File

@ -619,6 +619,25 @@ def infer_tests_to_run(output_file, diff_with_last_commit=False, filters=None, j
json.dump(test_map, fp, ensure_ascii=False)
def filter_pipeline_tests(output_file):
if not os.path.isfile(output_file):
print("No test file found.")
return
with open(output_file, "r", encoding="utf-8") as f:
test_files = f.read().split(" ")
if len(test_files) == 0:
print("No tests to filter.")
return
if test_files == ["tests"]:
test_files = [os.path.join("tests", f) for f in os.listdir("tests") if f not in ["__init__.py", "pipelines"]]
else:
test_files = [f for f in test_files if not f.startswith(os.path.join("tests", "pipelines"))]
with open(output_file, "w", encoding="utf-8") as f:
f.write(" ".join(test_files))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
@ -645,6 +664,11 @@ if __name__ == "__main__":
default=["tests"],
help="Only keep the test files matching one of those filters.",
)
parser.add_argument(
"--filter_pipeline_tests",
action="store_true",
help="Will filter the pipeline tests outside of the generated list of tests.",
)
parser.add_argument(
"--print_dependencies_of",
type=str,
@ -656,6 +680,8 @@ if __name__ == "__main__":
print_tree_deps_of(args.print_dependencies_of)
elif args.sanity_check:
sanity_check()
elif args.filter_pipeline_tests:
filter_pipeline_tests(args.output_file)
else:
repo = Repo(PATH_TO_TRANFORMERS)
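For illustration, a standalone sketch of the filtering rule that filter_pipeline_tests applies above; the sample paths are hypothetical, and the real function reads and rewrites the fetched test list file in place rather than taking a Python list:

import os


def drop_pipeline_tests(test_files):
    # Remove anything under tests/pipelines from a fetched test list,
    # matching the prefix check used in utils/tests_fetcher.py above.
    pipeline_prefix = os.path.join("tests", "pipelines")
    return [f for f in test_files if not f.startswith(pipeline_prefix)]


fetched = [
    "tests/models/bert/test_modeling_bert.py",
    "tests/pipelines/test_pipelines_text_classification.py",
    "tests/utils/test_file_utils.py",
]
print(drop_pipeline_tests(fetched))
# ['tests/models/bert/test_modeling_bert.py', 'tests/utils/test_file_utils.py']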