mirror of https://github.com/huggingface/transformers.git
update gemma tests (#38384)
* update

* update

* update

* update

---------

Co-authored-by: ydshieh <ydshieh@users.noreply.github.com>
parent cd0f3ce73b
commit 07848a8405
@@ -30,7 +30,6 @@ from transformers.testing_utils import (
     require_torch,
     require_torch_accelerator,
     require_torch_gpu,
-    require_torch_sdpa,
     slow,
     torch_device,
 )
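The dropped require_torch_sdpa import goes with the deletion of the dedicated test_model_2b_sdpa test in the last hunk below, presumably because SDPA computes the same attention as the eager path, so the separate test mostly duplicated the eager one. A minimal sketch of that numerical equivalence (illustrative only, not from this commit):

import torch
import torch.nn.functional as F

# Random (batch, heads, seq_len, head_dim) tensors standing in for
# projected queries, keys and values.
torch.manual_seed(0)
q, k, v = (torch.randn(1, 8, 16, 64) for _ in range(3))

# PyTorch's fused SDPA kernel ...
sdpa_out = F.scaled_dot_product_attention(q, k, v)

# ... against the "eager" formulation softmax(QK^T / sqrt(d)) V.
scores = q @ k.transpose(-2, -1) / (q.size(-1) ** 0.5)
eager_out = F.softmax(scores, dim=-1) @ v

print(torch.allclose(sdpa_out, eager_out, atol=1e-6))  # True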
@@ -147,7 +146,7 @@ class GemmaIntegrationTest(unittest.TestCase):
 
         EXPECTED_TEXTS = [
             "Hello I am doing a project on the 1990s and I need to know what the most popular music",
-            "Hi today I am going to share with you a very easy and simple recipe of <strong><em>Khichdi",
+            "Hi today I am going to share with you a very easy and simple recipe of <strong><em>Kaju Kat",
         ]
 
         model = AutoModelForCausalLM.from_pretrained(model_id, low_cpu_mem_usage=True, torch_dtype=torch.bfloat16).to(
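Expected strings like these are refreshed by rerunning the very decode the test performs. A hedged sketch of that loop, mirroring the test body shown in the next hunk (the prompts are assumptions, since self.input_text is not visible on this page; a CUDA device is also assumed):

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "google/gemma-2b"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, low_cpu_mem_usage=True, torch_dtype=torch.bfloat16
).to("cuda")

# Assumed stand-ins for self.input_text, chosen to match the expected
# continuations asserted above.
prompts = ["Hello I am doing", "Hi today"]
inputs = tokenizer(prompts, return_tensors="pt", padding=True).to("cuda")

# Greedy decoding (do_sample=False) is deterministic for a fixed model,
# dtype and device, which is what makes exact-match asserts viable.
output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
print(tokenizer.batch_decode(output, skip_special_tokens=True))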
@@ -168,34 +167,12 @@ class GemmaIntegrationTest(unittest.TestCase):
 
         EXPECTED_TEXTS = [
             "Hello I am doing a project on the 1990s and I need to know what the most popular music",
-            "Hi today I am going to share with you a very easy and simple recipe of <strong><em>Khichdi",
+            "Hi today I am going to share with you a very easy and simple recipe of <strong><em>Kaju Kat",
         ]
 
+        # bfloat16 gives strange values, likely due to it has lower precision + very short prompts
         model = AutoModelForCausalLM.from_pretrained(
-            model_id, low_cpu_mem_usage=True, torch_dtype=torch.bfloat16, attn_implementation="eager"
-        )
-        model.to(torch_device)
-
-        tokenizer = AutoTokenizer.from_pretrained(model_id)
-        inputs = tokenizer(self.input_text, return_tensors="pt", padding=True).to(torch_device)
-
-        output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
-        output_text = tokenizer.batch_decode(output, skip_special_tokens=True)
-
-        self.assertEqual(output_text, EXPECTED_TEXTS)
-
-    @require_torch_sdpa
-    @require_read_token
-    def test_model_2b_sdpa(self):
-        model_id = "google/gemma-2b"
-
-        EXPECTED_TEXTS = [
-            "Hello I am doing a project on the 1990s and I need to know what the most popular music",
-            "Hi today I am going to share with you a very easy and simple recipe of <strong><em>Khichdi",
-        ]
-
-        model = AutoModelForCausalLM.from_pretrained(
-            model_id, low_cpu_mem_usage=True, torch_dtype=torch.bfloat16, attn_implementation="sdpa"
+            model_id, low_cpu_mem_usage=True, torch_dtype=torch.float16, attn_implementation="eager"
         )
         model.to(torch_device)
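The bfloat16 to float16 switch matches the new comment: bfloat16 spends its bits on exponent range (8 exponent, 7 mantissa bits) while float16 keeps 10 mantissa bits, so bfloat16 logits sit on a grid roughly 8x coarser and a near-tie between top tokens can flip the greedy argmax, changing the generated text. A small self-contained illustration of that coarseness:

import torch

# Near 1.0, the bfloat16 grid spacing is 2**-7 (~0.0078) versus
# 2**-10 (~0.00098) for float16.
x = 1.0 + 2**-9  # exactly representable in float16, not in bfloat16

print(torch.tensor(x, dtype=torch.float16).item())   # 1.001953125
print(torch.tensor(x, dtype=torch.bfloat16).item())  # 1.0 (rounded away)

# Logit differences of this size are enough to swap the top-1 token
# during greedy decoding, which is how the expected strings drift.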
|