Albert is ExecuTorch compatible (#34476)

commit f339042b0b (parent 34620e8f0a)
Author: Guang Yang, committed by GitHub
Date: 2024-10-29 08:22:13 -07:00
Co-authored-by: Guang Yang <guangyang@fb.com>

@@ -16,7 +16,9 @@
 import unittest
 
-from transformers import AlbertConfig, is_torch_available
+from packaging import version
+
+from transformers import AlbertConfig, AutoTokenizer, is_torch_available
 from transformers.models.auto import get_values
 from transformers.testing_utils import require_torch, slow, torch_device
@@ -342,3 +344,45 @@ class AlbertModelIntegrationTest(unittest.TestCase):
         )
         self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
+
+    @slow
+    def test_export(self):
+        if version.parse(torch.__version__) < version.parse("2.4.0"):
+            self.skipTest(reason="This test requires torch >= 2.4 to run.")
+
+        albert_model = "albert/albert-base-v2"
+        device = "cpu"
+        attn_implementation = "sdpa"
+        max_length = 64
+
+        tokenizer = AutoTokenizer.from_pretrained(albert_model)
+        inputs = tokenizer(
+            f"Paris is the {tokenizer.mask_token} of France.",
+            return_tensors="pt",
+            padding="max_length",
+            max_length=max_length,
+        )
+
+        model = AlbertForMaskedLM.from_pretrained(
+            albert_model,
+            device_map=device,
+            attn_implementation=attn_implementation,
+        )
+
+        # Eager-mode reference: decode the top-5 predictions for the masked token.
+        logits = model(**inputs).logits
+        eg_predicted_mask = tokenizer.decode(logits[0, 4].topk(5).indices)
+        self.assertEqual(
+            eg_predicted_mask.split(),
+            ["capital", "capitol", "comune", "arrondissement", "bastille"],
+        )
+
+        # Export with torch.export and verify the exported program matches eager mode.
+        exported_program = torch.export.export(
+            model,
+            args=(inputs["input_ids"],),
+            kwargs={"attention_mask": inputs["attention_mask"]},
+            strict=True,
+        )
+
+        result = exported_program.module().forward(inputs["input_ids"], inputs["attention_mask"])
+        ep_predicted_mask = tokenizer.decode(result.logits[0, 4].topk(5).indices)
+        self.assertEqual(eg_predicted_mask, ep_predicted_mask)
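
Beyond the eager-vs-exported parity check above, the usual next step for ExecuTorch compatibility is lowering the exported program to a .pte artifact. The following is a minimal sketch, not part of this commit: it assumes the executorch package is installed, reuses model and inputs from the test, uses to_edge/to_executorch from executorch.exir as the lowering entry points, and writes to an illustrative filename.

import torch
from executorch.exir import to_edge

# Re-export the eager model (same call as in the test above).
exported_program = torch.export.export(
    model,
    args=(inputs["input_ids"],),
    kwargs={"attention_mask": inputs["attention_mask"]},
    strict=True,
)

# Lower ATen -> Edge dialect, then Edge -> an ExecuTorch program.
edge_program = to_edge(exported_program)
et_program = edge_program.to_executorch()

# Serialize the program for on-device runtimes ("albert.pte" is illustrative).
with open("albert.pte", "wb") as f:
    f.write(et_program.buffer)

The resulting file can then be loaded by an ExecuTorch runtime; the test itself stops at torch.export parity, which is the property this commit guarantees.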