Fix MistralIntegrationTest OOM (#26754)

* fix

* fix

* fix

---------

Co-authored-by: ydshieh <ydshieh@users.noreply.github.com>
Author: Yih-Dar
Date: 2023-10-12 12:31:11 +02:00
Committed by: GitHub
parent 72256bc72a
commit db5e0c3292

@@ -15,6 +15,7 @@
 """ Testing suite for the PyTorch Mistral model. """
+import gc
 import tempfile
 import unittest
@@ -447,17 +448,23 @@ class MistralIntegrationTest(unittest.TestCase):
         print(out[0, 0, :30])
         torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-4, rtol=1e-4)
+
+        del model
+        torch.cuda.empty_cache()
+        gc.collect()
 
     @slow
     def test_model_7b_generation(self):
-        EXPECTED_TEXT_COMPLETION = (
-            """My favourite condiment is mayonnaise. I love it on sandwiches, in salads, on burgers"""
-        )
+        EXPECTED_TEXT_COMPLETION = """My favourite condiment is 100% ketchup. I love it on everything. I'm not a big"""
         prompt = "My favourite condiment is "
         tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1", use_fast=False)
         input_ids = tokenizer.encode(prompt, return_tensors="pt").to(torch_device)
-        model = MistralForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1").to(torch_device)
+        model = MistralForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1", device_map="auto")
 
         # greedy generation outputs
         generated_ids = model.generate(input_ids, max_new_tokens=20, temperature=0)
         text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
         self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
+
+        del model
+        torch.cuda.empty_cache()
+        gc.collect()
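
Taken together, the two changes form one pattern for keeping a multi-test CUDA process under its memory limit: load the 7B checkpoint with device_map="auto" so accelerate can spread the weights across the available devices, and explicitly release the model at the end of each test before the next one allocates. A minimal sketch of that pattern outside the test class follows; the helper name generate_once and its prompt handling are illustrative, not part of this commit, and it assumes accelerate is installed and the checkpoint is accessible.

import gc

import torch
from transformers import AutoTokenizer, MistralForCausalLM


def generate_once(prompt: str) -> str:
    # device_map="auto" lets accelerate shard the checkpoint across the
    # available GPUs (and CPU, if necessary) instead of forcing the whole
    # model onto a single device with .to(device).
    tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1", use_fast=False)
    model = MistralForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1", device_map="auto")

    input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.device)
    generated_ids = model.generate(input_ids, max_new_tokens=20)
    text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)

    # Same release sequence as the diff: drop the last Python reference,
    # return cached CUDA blocks to the allocator, then collect garbage.
    del model
    torch.cuda.empty_cache()
    gc.collect()
    return text

In the test suite itself these three release lines run at the end of each test method; an equivalent home for them would be a tearDown override, which unittest runs after every test regardless of outcome.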