mirror of
https://github.com/huggingface/transformers.git
synced 2025-08-02 03:01:07 +06:00
try fixes
This commit is contained in:
parent
3f936df662
commit
a8ad75ef69
@@ -281,7 +281,7 @@ class OPTModelIntegrationTests(unittest.TestCase):
         attention_mask = input_ids.ne(model.config.pad_token_id)
         with torch.no_grad():
             output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
-        expected_shape = torch.Size((1, 11, 1024))
+        expected_shape = torch.Size((1, 11, 512))
         self.assertEqual(output.shape, expected_shape)
         expected_slice = torch.tensor(
             [[0.7144, 0.8143, -1.2813], [0.7144, 0.8143, -1.2813], [-0.0467, 2.5911, -2.1845]], device=torch_device
@@ -348,8 +348,8 @@ class OPTGenerationTest(unittest.TestCase):
         GEN_OUTPUT = []

         tokenizer = GPT2Tokenizer.from_pretrained("patrickvonplaten/opt_gpt2_tokenizer")
-        for model in self.all_model_path:
-            model = OPTForCausalLM.from_pretrained(self.path_model)
+        for path_model in self.all_model_path:
+            model = OPTForCausalLM.from_pretrained(path_model)
             model = model.eval()
             model.config.eos_token_id = tokenizer.eos_token_id

|
Loading…
Reference in New Issue
Block a user