Mirror of https://github.com/huggingface/transformers.git, synced 2025-07-08 07:10:06 +06:00.

* WIP refactoring pipeline tests - switching to fast tokenizers * fix dialog pipeline and fill-mask * refactoring pipeline tests backbone * make large tests slow * fix tests (tf Bart inactive for now) * fix doc... * clean up for merge * fixing tests - remove bart from summarization until there is TF * fix quality and RAG * Add new translation pipeline tests - fix JAX tests * only slow for dialog * Fixing the missing TF-BART imports in modeling_tf_auto * spin out pipeline tests in separate CI job * adding pipeline test to CI YAML * add slow pipeline tests * speed up tf and pt join test to avoid redoing all the standalone pt and tf tests * Update src/transformers/tokenization_utils_base.py Co-authored-by: Sam Shleifer <sshleifer@gmail.com> * Update src/transformers/pipelines.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update src/transformers/pipelines.py Co-authored-by: Lysandre Debut <lysandre@huggingface.co> * Update src/transformers/testing_utils.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * add require_torch and require_tf in is_pt_tf_cross_test Co-authored-by: Sam Shleifer <sshleifer@gmail.com> Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Co-authored-by: Lysandre Debut <lysandre@huggingface.co>
48 lines
1.9 KiB
Python
48 lines
1.9 KiB
Python
import unittest
|
|
|
|
from transformers.pipelines import Pipeline
|
|
|
|
from .test_pipelines_common import CustomInputPipelineCommonMixin
|
|
|
|
|
|
class QAPipelineTests(CustomInputPipelineCommonMixin, unittest.TestCase):
    """Pipeline tests for the question-answering task.

    Relies on the shared ``CustomInputPipelineCommonMixin`` machinery, which
    instantiates each model listed below and feeds it to ``_test_pipeline``.
    """

    pipeline_task = "question-answering"
    # Models tested without the @slow decorator
    small_models = ["sshleifer/tiny-distilbert-base-cased-distilled-squad"]
    # Models tested with the @slow decorator
    large_models = []

    def _test_pipeline(self, nlp: Pipeline):
        """Exercise *nlp* on valid and invalid QA inputs.

        Checks that a single input yields a dict with the expected keys, a
        batch of inputs yields a list of such dicts, and malformed inputs
        (empty or missing question/context) raise.
        """
        expected_keys = {"score", "answer", "start", "end"}

        valid_inputs = [
            {"question": "Where was HuggingFace founded ?", "context": "HuggingFace was founded in Paris."},
            {
                "question": "In what field is HuggingFace working ?",
                "context": "HuggingFace is a startup based in New-York founded in Paris which is trying to solve NLP.",
            },
        ]
        invalid_inputs = [
            {"question": "", "context": "This is a test to try empty question edge case"},
            {"question": None, "context": "This is a test to try empty question edge case"},
            {"question": "What is does with empty context ?", "context": ""},
            {"question": "What is does with empty context ?", "context": None},
        ]

        self.assertIsNotNone(nlp)

        # Single-sample call returns one answer dict.
        single_result = nlp(valid_inputs[0])
        self.assertIsInstance(single_result, dict)
        for expected_key in expected_keys:
            self.assertIn(expected_key, single_result)

        # Batched call returns one answer dict per input.
        batch_result = nlp(valid_inputs)
        self.assertIsInstance(batch_result, list)
        self.assertIsInstance(batch_result[0], dict)
        for answer in batch_result:
            for expected_key in expected_keys:
                self.assertIn(expected_key, answer)

        # Malformed inputs must raise, both individually and as a batch.
        for bad_input in invalid_inputs:
            self.assertRaises(Exception, nlp, bad_input)
        self.assertRaises(Exception, nlp, invalid_inputs)