[Generation] fix docs for decoder_input_ids (#5306)

* fix docs

* Update src/transformers/modeling_utils.py

* Update src/transformers/modeling_tf_utils.py

* Update src/transformers/modeling_tf_utils.py

* Update src/transformers/modeling_utils.py

* Update src/transformers/modeling_tf_utils.py

* Update src/transformers/modeling_utils.py
Patrick von Platen, 2020-06-26 16:58:11 +02:00, committed by GitHub
parent 79a82cc06a
commit 08c9607c3d
2 changed files with 6 additions and 4 deletions

src/transformers/modeling_tf_utils.py

@@ -642,8 +642,9 @@ class TFPreTrainedModel(tf.keras.Model, TFModelUtilsMixin):
             `What are attention masks? <../glossary.html#attention-mask>`__
         decoder_start_token_id=None: (`optional`) int
-            If an encoder-decoder model starts decoding with a different token than BOS.
-            Defaults to `None` and is changed to `BOS` later.
+            Start token id for the decoder. Defaults to ``decoder_start_token_id`` as defined in the model's config or to the ``bos_token_id``
+            if no ``decoder_start_token_id`` is found in the config.
+            This is only relevant for encoder-decoder models.
         use_cache: (`optional`) bool
             If `use_cache` is True, past key values are used to speed up decoding if applicable to the model. Defaults to `True`.

src/transformers/modeling_utils.py

@@ -962,8 +962,9 @@ class PreTrainedModel(nn.Module, ModuleUtilsMixin):
             `What are attention masks? <../glossary.html#attention-mask>`__
         decoder_start_token_id=None: (`optional`) int
-            If an encoder-decoder model starts decoding with a different token than BOS.
-            Defaults to `None` and is changed to `BOS` later.
+            Start token id for the decoder. Defaults to ``decoder_start_token_id`` as defined in the model's config or to the ``bos_token_id``
+            if no ``decoder_start_token_id`` is found in the config.
+            This is only relevant for encoder-decoder models.
         use_cache: (`optional`) bool
             If `use_cache` is True, past key values are used to speed up decoding if applicable to the model. Defaults to `True`.
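For reference, a minimal sketch of the resolution order the updated docstring describes. The helper name `_resolve_decoder_start_token_id` is hypothetical, not a transformers API; only the fallback order (explicit argument, then config's `decoder_start_token_id`, then config's `bos_token_id`) comes from the docstring above.

# Hypothetical helper sketching the fallback order from the docstring above;
# not part of the transformers API.
from transformers import PretrainedConfig

def _resolve_decoder_start_token_id(config, decoder_start_token_id=None):
    # 1. An explicit `decoder_start_token_id` argument wins.
    if decoder_start_token_id is not None:
        return decoder_start_token_id
    # 2. Otherwise use `decoder_start_token_id` from the model's config.
    if getattr(config, "decoder_start_token_id", None) is not None:
        return config.decoder_start_token_id
    # 3. Otherwise fall back to `bos_token_id` from the config.
    if getattr(config, "bos_token_id", None) is not None:
        return config.bos_token_id
    raise ValueError("Define `decoder_start_token_id` or `bos_token_id` to start decoding.")

print(_resolve_decoder_start_token_id(PretrainedConfig(decoder_start_token_id=2, bos_token_id=0)))  # 2
print(_resolve_decoder_start_token_id(PretrainedConfig(bos_token_id=0)))  # 0

Note the `is not None` checks rather than truthiness: token id 0 is a valid start token (T5, for example, starts decoding with the pad token, id 0).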