chore: Fix typo s/exclusivelly/exclusively/ (#28361)

hugo-syn 2024-01-05 22:19:15 +01:00 committed by GitHub
parent 7226f3d2b0
commit 4ab5fb8941
9 changed files with 12 additions and 12 deletions


@@ -317,7 +317,7 @@ generation.
## StoppingCriteria
-A [`StoppingCriteria`] can be used to change when to stop generation (other than EOS token). Please note that this is exclusivelly available to our PyTorch implementations.
+A [`StoppingCriteria`] can be used to change when to stop generation (other than EOS token). Please note that this is exclusively available to our PyTorch implementations.
[[autodoc]] StoppingCriteria
- __call__
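
A minimal sketch of a custom criterion, assuming the `__call__` signature the PyTorch implementation exposes (`input_ids`, `scores`, returning a plain `bool`); the class name `StopOnToken` and the token id are illustrative:

```python
import torch
from transformers import StoppingCriteria, StoppingCriteriaList

class StopOnToken(StoppingCriteria):
    """Stop as soon as every sequence in the batch just produced `stop_token_id`."""

    def __init__(self, stop_token_id: int):
        self.stop_token_id = stop_token_id

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        # input_ids holds the full sequences generated so far; check the last token.
        return bool((input_ids[:, -1] == self.stop_token_id).all())

# Illustrative usage (model/tokenizer setup omitted):
# model.generate(**inputs, stopping_criteria=StoppingCriteriaList([StopOnToken(13)]))
```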
@@ -333,7 +333,7 @@ A [`StoppingCriteria`] can be used to change when to stop generation (other than
## Constraints
-A [`Constraint`] can be used to force the generation to include specific tokens or sequences in the output. Please note that this is exclusivelly available to our PyTorch implementations.
+A [`Constraint`] can be used to force the generation to include specific tokens or sequences in the output. Please note that this is exclusively available to our PyTorch implementations.
[[autodoc]] Constraint
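
For instance, [`PhrasalConstraint`] forces an exact token sequence to appear somewhere in the output; constrained generation requires beam search (`num_beams > 1`). A sketch, with the model choice purely illustrative:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, PhrasalConstraint

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

# Force the phrase "machine learning" to appear in the generated text.
phrase_ids = tokenizer("machine learning", add_special_tokens=False).input_ids
constraint = PhrasalConstraint(phrase_ids)

inputs = tokenizer("The most exciting research area today is", return_tensors="pt")
out = model.generate(**inputs, constraints=[constraint], num_beams=4, max_new_tokens=30)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```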


@@ -1889,7 +1889,7 @@ class ClassifierFreeGuidanceLogitsProcessor(LogitsProcessor):
<Tip warning={true}>
-This logits processor is exclusivelly compatible with
+This logits processor is exclusively compatible with
[MusicGen](https://huggingface.co/docs/transformers/main/en/model_doc/musicgen)
</Tip>
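
The guidance step itself reduces to one line; a standalone sketch of the formula, assuming the batch layout MusicGen uses (conditional logits in the first half of the batch, unconditional in the second):

```python
import torch

def cfg_rescale(scores: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # Split the doubled batch back into its conditional/unconditional halves
    # and push the conditional logits away from the unconditional ones.
    cond, uncond = scores.split(scores.shape[0] // 2, dim=0)
    return uncond + guidance_scale * (cond - uncond)
```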
@@ -1948,7 +1948,7 @@ class AlternatingCodebooksLogitsProcessor(LogitsProcessor):
<Tip warning={true}>
-This logits processor is exclusivelly compatible with
+This logits processor is exclusively compatible with
[Bark](https://huggingface.co/docs/transformers/en/model_doc/bark)'s fine submodel. See the model documentation
for examples.
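
Conceptually (a standalone sketch, not the transformers API), the processor masks logits so decoding alternates between the two codebook ranges of the fine submodel; the helper name and vocabulary layout below are assumptions:

```python
import torch

def mask_alternating_codebooks(scores, step, semantic_len, codebook_size):
    # Even steps may only sample from the first codebook's slice of the
    # vocabulary, odd steps only from the second; everything else gets -inf.
    which = (step - semantic_len) % 2
    lo = semantic_len + which * codebook_size
    masked = torch.full_like(scores, float("-inf"))
    masked[:, lo:lo + codebook_size] = scores[:, lo:lo + codebook_size]
    return masked
```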
@@ -2109,7 +2109,7 @@ class BarkEosPrioritizerLogitsProcessor(LogitsProcessor):
<Tip warning={true}>
-This logits processor is exclusivelly compatible with
+This logits processor is exclusively compatible with
[Bark](https://huggingface.co/docs/transformers/en/model_doc/bark). See the model documentation for examples.
</Tip>
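
The idea, as a hedged standalone sketch (parameter names assumed): once the model assigns the EOS token a probability above a threshold, every other token is masked so generation stops early:

```python
import torch
import torch.nn.functional as F

def prioritize_eos(scores, eos_token_id, min_eos_p):
    # If P(EOS) crosses the threshold, keep only the EOS logit for that row.
    probs = F.softmax(scores, dim=-1)
    stop_now = probs[:, eos_token_id] > min_eos_p
    eos_only = torch.full_like(scores, float("-inf"))
    eos_only[:, eos_token_id] = scores[:, eos_token_id]
    return torch.where(stop_now.unsqueeze(-1), eos_only, scores)
```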


@@ -1240,7 +1240,7 @@ class LlamaForCausalLM(LlamaPreTrainedModel):
# Keep only the unprocessed tokens:
# 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
-# some of the inputs are exclusivelly passed as part of the cache (e.g. when passing input_embeds as
+# some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
# input)
if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
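
The same slicing recurs in every model touched below; a toy example of what it does, with made-up shapes (five positions already live in the cache, the attention mask covers eight, so only the three genuinely new tokens survive):

```python
import torch

past_length = 5                           # tokens already processed into the cache
attention_mask = torch.ones(1, 8)         # covers cached + new positions
input_ids = torch.arange(6).unsqueeze(0)  # shorter than the mask: some inputs are cache-only

if attention_mask.shape[1] > input_ids.shape[1]:
    input_ids = input_ids[:, -(attention_mask.shape[1] - past_length):]

print(input_ids)  # tensor([[3, 4, 5]]): only the unprocessed tail is kept
```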


@@ -504,7 +504,7 @@ class LlavaForConditionalGeneration(LlavaPreTrainedModel):
# Keep only the unprocessed tokens:
# 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
-# some of the inputs are exclusivelly passed as part of the cache (e.g. when passing input_embeds as
+# some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
# input)
if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]


@@ -1207,7 +1207,7 @@ class MistralForCausalLM(MistralPreTrainedModel):
# Keep only the unprocessed tokens:
# 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
-# some of the inputs are exclusivelly passed as part of the cache (e.g. when passing input_embeds as
+# some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
# input)
if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]


@@ -1387,7 +1387,7 @@ class MixtralForCausalLM(MixtralPreTrainedModel):
# Keep only the unprocessed tokens:
# 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
-# some of the inputs are exclusivelly passed as part of the cache (e.g. when passing input_embeds as
+# some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
# input)
if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]


@@ -838,7 +838,7 @@ class PersimmonForCausalLM(PersimmonPreTrainedModel):
# Keep only the unprocessed tokens:
# 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
-# some of the inputs are exclusivelly passed as part of the cache (e.g. when passing input_embeds as
+# some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
# input)
if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]


@@ -1095,7 +1095,7 @@ class PhiForCausalLM(PhiPreTrainedModel):
# Keep only the unprocessed tokens:
# 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
-# some of the inputs are exclusivelly passed as part of the cache (e.g. when passing input_embeds as
+# some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
# input)
if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]


@@ -503,7 +503,7 @@ class VipLlavaForConditionalGeneration(VipLlavaPreTrainedModel):
# Keep only the unprocessed tokens:
# 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
-# some of the inputs are exclusivelly passed as part of the cache (e.g. when passing input_embeds as
+# some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
# input)
if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]