mirror of
https://github.com/huggingface/transformers.git
synced 2025-07-31 02:02:21 +06:00
[T5 failing CI] Fix generate test (#11770)
* fix_torch_device_generate_test * remove @
This commit is contained in:
parent
680d181ce8
commit
43891be19b
@@ -1084,9 +1084,13 @@ class GenerationTesterMixin:
|
||||
continue
|
||||
|
||||
head_masking = {
|
||||
"head_mask": torch.zeros(config.encoder_layers, config.encoder_attention_heads),
|
||||
"decoder_head_mask": torch.zeros(config.decoder_layers, config.decoder_attention_heads),
|
||||
"cross_attn_head_mask": torch.zeros(config.decoder_layers, config.decoder_attention_heads),
|
||||
"head_mask": torch.zeros(config.encoder_layers, config.encoder_attention_heads, device=torch_device),
|
||||
"decoder_head_mask": torch.zeros(
|
||||
config.decoder_layers, config.decoder_attention_heads, device=torch_device
|
||||
),
|
||||
"cross_attn_head_mask": torch.zeros(
|
||||
config.decoder_layers, config.decoder_attention_heads, device=torch_device
|
||||
),
|
||||
}
|
||||
|
||||
signature = inspect.signature(model.forward)
|
||||
|
@@ -605,19 +605,22 @@ class T5ModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
|
||||
config_and_inputs = self.model_tester.prepare_config_and_inputs()
|
||||
config = config_and_inputs[0]
|
||||
max_length = config_and_inputs[1].shape[-1] + 3
|
||||
model = T5ForConditionalGeneration(config)
|
||||
model = T5ForConditionalGeneration(config).eval()
|
||||
model.to(torch_device)
|
||||
|
||||
head_masking = {
|
||||
"head_mask": torch.zeros(config.num_layers, config.num_heads),
|
||||
"decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads),
|
||||
"cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads),
|
||||
"head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
|
||||
"decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
|
||||
"cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
|
||||
}
|
||||
|
||||
for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
|
||||
head_masks = {name: mask}
|
||||
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
|
||||
if name == "head_mask":
|
||||
head_masks["decoder_head_mask"] = torch.ones(config.num_decoder_layers, config.num_heads)
|
||||
head_masks["decoder_head_mask"] = torch.ones(
|
||||
config.num_decoder_layers, config.num_heads, device=torch_device
|
||||
)
|
||||
|
||||
out = model.generate(
|
||||
config_and_inputs[1],
|
||||
|
Loading…
Reference in New Issue
Block a user