Mirror of https://github.com/huggingface/transformers.git, synced 2025-07-16 19:18:24 +06:00
fix mpt test for different outputs from cuda (#37691)
* fix mpt test
* fix mpt tests with Expectations
* fix typo
* fix output
* fix format

Signed-off-by: jiqing-feng <jiqing.feng@intel.com>
This commit is contained in:
parent 0cfbf9c95b
commit 555693fbfa
tests/models/mpt/test_modeling_mpt.py

@@ -20,6 +20,7 @@ from transformers import MptConfig, is_torch_available
 from transformers.testing_utils import (
     Expectations,
     require_bitsandbytes,
+    require_deterministic_for_xpu,
     require_torch,
     require_torch_accelerator,
     slow,
@@ -483,6 +484,7 @@ class MptIntegrationTests(unittest.TestCase):
         decoded_output = tokenizer.decode(outputs[0], skip_special_tokens=True)
         self.assertEqual(decoded_output, expected_output)
 
+    @require_deterministic_for_xpu
     def test_generation_batched(self):
         model_id = "mosaicml/mpt-7b"
         tokenizer = AutoTokenizer.from_pretrained(model_id)
@@ -498,10 +500,19 @@ class MptIntegrationTests(unittest.TestCase):
 
         inputs = tokenizer(input_texts, return_tensors="pt", padding=True).to(torch_device)
 
-        expected_output = [
-            "Hello my name is Tiffany and I am a mother of two beautiful children. I have been a nanny for the",
-            "Today I am going at the gym and then I am going to go to the grocery store. I am going to buy some food and some",
-        ]
+        expected_outputs = Expectations(
+            {
+                ("xpu", 3): [
+                    "Hello my name is Tiffany. I am a mother of two beautiful children. I have been a nanny for over",
+                    "Today I am going at the gym and then I am going to go to the mall with my mom. I am going to go to the",
+                ],
+                ("cuda", 7): [
+                    "Hello my name is Tiffany and I am a mother of two beautiful children. I have been a nanny for the",
+                    "Today I am going at the gym and then I am going to go to the grocery store. I am going to buy some food and some",
+                ],
+            }
+        )
+        expected_output = expected_outputs.get_expectation()
         outputs = model.generate(**inputs, max_new_tokens=20)
 
         decoded_outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True)
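The new @require_deterministic_for_xpu gate on test_generation_batched follows the usual testing_utils decorator pattern. Below is a hypothetical sketch of such a gate, assuming it skips the test on XPU unless torch's deterministic algorithms are enabled; the real decorator's exact condition may differ.

# Hypothetical sketch of an XPU determinism gate; the real
# transformers.testing_utils.require_deterministic_for_xpu may differ.
import unittest

import torch

def require_deterministic_for_xpu_sketch(test_case):
    # Assumption: on XPU, non-deterministic kernels can change generated text,
    # so only run the test when deterministic algorithms are enabled.
    if hasattr(torch, "xpu") and torch.xpu.is_available():
        return unittest.skipUnless(
            torch.are_deterministic_algorithms_enabled(),
            "test requires deterministic algorithms on XPU",
        )(test_case)
    return test_case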