From 4f90fc1db84761f094cfda4b03f99cba8b10a8b7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Wang=20Ran=20=28=E6=B1=AA=E7=84=B6=29?=
Date: Tue, 1 Nov 2022 21:04:53 +0800
Subject: [PATCH] typo (#20001)

---
 .../models/encoder_decoder/modeling_encoder_decoder.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/transformers/models/encoder_decoder/modeling_encoder_decoder.py b/src/transformers/models/encoder_decoder/modeling_encoder_decoder.py
index ed41e6a14e8..730f6430fc8 100644
--- a/src/transformers/models/encoder_decoder/modeling_encoder_decoder.py
+++ b/src/transformers/models/encoder_decoder/modeling_encoder_decoder.py
@@ -568,7 +568,7 @@ class EncoderDecoderModel(PreTrainedModel):
 
         >>> input_ids = tokenizer("This is a really long text", return_tensors="pt").input_ids
         >>> labels = tokenizer("This is the corresponding summary", return_tensors="pt").input_ids
-        >>> outputs = model(input_ids=input_ids, labels=input_ids)
+        >>> outputs = model(input_ids=input_ids, labels=labels)
         >>> loss, logits = outputs.loss, outputs.logits
 
         >>> # save and load from pretrained
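
Note: for readers who want to try the corrected docstring example outside of the patch, below is a minimal, self-contained sketch. The "bert-base-uncased" checkpoint name and the config setup lines are assumptions added for illustration; they are not part of this patch, which only changes the labels argument.

    from transformers import AutoTokenizer, EncoderDecoderModel

    # Assumed checkpoint; any encoder/decoder pair supported by EncoderDecoderModel works.
    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    model = EncoderDecoderModel.from_encoder_decoder_pretrained("bert-base-uncased", "bert-base-uncased")

    # The decoder needs these token ids set before a loss over labels can be computed.
    model.config.decoder_start_token_id = tokenizer.cls_token_id
    model.config.pad_token_id = tokenizer.pad_token_id

    input_ids = tokenizer("This is a really long text", return_tensors="pt").input_ids
    labels = tokenizer("This is the corresponding summary", return_tensors="pt").input_ids

    # The point of the fix: the loss is computed against the summary ids (labels),
    # not against the encoder input ids.
    outputs = model(input_ids=input_ids, labels=labels)
    loss, logits = outputs.loss, outputs.logits

Passing labels=input_ids, as the docstring previously showed, would train the model to reconstruct its own input rather than to produce the summary, which is why this one-token change matters.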