Use attention_mask everywhere.

This commit is contained in:
Morgan Funtowicz 2019-12-09 14:13:17 +01:00
parent 348e19aa21
commit fe0f552e00

View File

@ -154,9 +154,6 @@ class QuestionAnsweringPipeline(Pipeline):
return_attention_masks=True, return_input_lengths=False
)
# TODO : Harmonize model arguments across all models
inputs['attention_mask'] = inputs.pop('encoder_attention_mask')
if is_tf_available():
# TODO trace model
start, end = self.model(inputs)