fix typos
omahs 2025-05-06 15:45:20 +02:00 committed by GitHub
parent 057ae00504
commit 274e79b326
13 changed files with 25 additions and 25 deletions


@@ -108,7 +108,7 @@ If in doubt about what args/kwargs a given model sends to the attention function
 ## Accessing current available implementations
 Most of the time, you will simply need to `register` a new function. If, however, you need to access an existing one,
-and/or perform a few checks, the prefered way is to use the global `ALL_ATTENTION_FUNCTIONS`. It behaves the same way you
+and/or perform a few checks, the preferred way is to use the global `ALL_ATTENTION_FUNCTIONS`. It behaves the same way you
 would expect from a usual Python dictionary:
 ```python


@@ -19,7 +19,7 @@ Questa guida si concentra su come addestrare in maniera efficiente grandi modell
 ## Mixed precision con IPEX
-IPEX è ottimizzato per CPU con AVX-512 o superiore, e funziona per le CPU con solo AVX2. Pertanto, si prevede che le prestazioni saranno più vantaggiose per le le CPU Intel con AVX-512 o superiori, mentre le CPU con solo AVX2 (ad esempio, le CPU AMD o le CPU Intel più vecchie) potrebbero ottenere prestazioni migliori con IPEX, ma non sono garantite. IPEX offre ottimizzazioni delle prestazioni per l'addestramento della CPU sia con Float32 che con BFloat16. L'uso di BFloat16 è l'argomento principale delle seguenti sezioni.
+IPEX è ottimizzato per CPU con AVX-512 o superiore, e funziona per le CPU con solo AVX2. Pertanto, si prevede che le prestazioni saranno più vantaggiose per le CPU Intel con AVX-512 o superiori, mentre le CPU con solo AVX2 (ad esempio, le CPU AMD o le CPU Intel più vecchie) potrebbero ottenere prestazioni migliori con IPEX, ma non sono garantite. IPEX offre ottimizzazioni delle prestazioni per l'addestramento della CPU sia con Float32 che con BFloat16. L'uso di BFloat16 è l'argomento principale delle seguenti sezioni.
 Il tipo di dati a bassa precisione BFloat16 è stato supportato in modo nativo su 3rd Generation Xeon® Scalable Processors (aka Cooper Lake) con AVX512 e sarà supportata dalla prossima generazione di Intel® Xeon® Scalable Processors con Intel® Advanced Matrix Extensions (Intel® AMX) instruction set con prestazioni ulteriormente migliorate. L'Auto Mixed Precision per il backende della CPU è stato abilitato da PyTorch-1.10. allo stesso tempo, il supporto di Auto Mixed Precision con BFloat16 per CPU e l'ottimizzazione degli operatori BFloat16 è stata abilitata in modo massiccio in Intel® Extension per PyTorch, and parzialmente aggiornato al branch master di PyTorch. Gli utenti possono ottenere prestazioni migliori ed users experience con IPEX Auto Mixed Precision..
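The passage above describes IPEX's BFloat16 auto mixed precision on CPU. A minimal sketch of how that is typically enabled (assuming the `intel_extension_for_pytorch` package is installed; exact API details may vary by version):

```python
import torch
import intel_extension_for_pytorch as ipex  # assumed installed

model = torch.nn.Linear(16, 4)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

# ipex.optimize returns the optimized (model, optimizer) pair;
# dtype=torch.bfloat16 turns on the BF16 optimizations discussed above.
model, optimizer = ipex.optimize(model, optimizer=optimizer, dtype=torch.bfloat16)

x = torch.randn(8, 16)
# CPU auto mixed precision: eligible ops run in BFloat16.
with torch.cpu.amp.autocast():
    loss = model(x).pow(2).mean()
loss.backward()
optimizer.step()
```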


@@ -2277,7 +2277,7 @@ class MoshiForConditionalGeneration(MoshiPreTrainedModel, GenerationMixin):
 generation_config, kwargs = self._prepare_generation_config(kwargs.pop("generation_config", None), **kwargs)
 input_ids, user_audio_codes, moshi_audio_codes, concat_unconditional_inputs = (
-self._check_and_maybe_initalize_inputs(
+self._check_and_maybe_initialize_inputs(
 input_ids=input_ids,
 user_input_values=user_input_values,
 user_audio_codes=user_audio_codes,
@@ -2707,7 +2707,7 @@ class MoshiForConditionalGeneration(MoshiPreTrainedModel, GenerationMixin):
 attention_mask=attention_mask,
 )
-def _check_and_maybe_initalize_inputs(
+def _check_and_maybe_initialize_inputs(
 self,
 input_ids=None,
 user_input_values=None,


@@ -593,8 +593,8 @@ class RagModel(RagPreTrainedModel):
 context_input_ids,
 context_attention_mask,
 retrieved_doc_embeds,
-retrived_doc_input_ids,
-retrived_doc_attention_mask,
+retrieved_doc_input_ids,
+retrieved_doc_attention_mask,
 retrieved_doc_ids,
 ) = (
 retriever_outputs["context_input_ids"],
@@ -608,10 +608,10 @@ class RagModel(RagPreTrainedModel):
 context_input_ids = context_input_ids.to(input_ids)
 context_attention_mask = context_attention_mask.to(input_ids)
-retrived_doc_input_ids = retrived_doc_input_ids.to(input_ids)
-retrived_doc_attention_mask = retrived_doc_attention_mask.to(input_ids)
+retrieved_doc_input_ids = retrieved_doc_input_ids.to(input_ids)
+retrieved_doc_attention_mask = retrieved_doc_attention_mask.to(input_ids)
 retrieved_doc_embeds = self.ctx_encoder(
-retrived_doc_input_ids, attention_mask=retrived_doc_attention_mask, return_dict=True
+retrieved_doc_input_ids, attention_mask=retrieved_doc_attention_mask, return_dict=True
 ).pooler_output
 retrieved_doc_embeds = retrieved_doc_embeds.view(
 -1, n_docs, question_encoder_last_hidden_state.shape[1]
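The renamed `retrieved_doc_*` tensors are part of `RagModel`'s retrieve-then-encode step. A hedged end-to-end sketch of exercising that path (the checkpoint name and dummy-index flags follow the usual documented pattern, not this diff):

```python
import torch
from transformers import RagTokenizer, RagRetriever, RagModel

tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-base")
# use_dummy_dataset avoids downloading the full wiki index; fine for a smoke test.
retriever = RagRetriever.from_pretrained(
    "facebook/rag-token-base", index_name="exact", use_dummy_dataset=True
)
model = RagModel.from_pretrained("facebook/rag-token-base", retriever=retriever)

inputs = tokenizer("who holds the record in 100m freestyle", return_tensors="pt")
with torch.no_grad():
    # Internally: retrieve documents, embed them (the retrieved_doc_* tensors
    # above), then attend over the question plus retrieved contexts.
    outputs = model(input_ids=inputs["input_ids"])
```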


@@ -3391,7 +3391,7 @@ class SeamlessM4TForTextToSpeech(SeamlessM4TPreTrainedModel, GenerationMixin):
 `Union[SeamlessM4TGenerationOutput, Tuple[Tensor]]`:
 - If `return_intermediate_token_ids`, returns [`SeamlessM4TGenerationOutput`].
 - If not `return_intermediate_token_ids`, returns a tuple composed of waveforms of shape `(batch_size,
-sequence_length)`and and `waveform_lengths` which gives the length of each sample.
+sequence_length)` and `waveform_lengths` which gives the length of each sample.
 """
 batch_size = len(input_ids) if input_ids is not None else len(kwargs.get("inputs_embeds"))
@@ -3721,7 +3721,7 @@ class SeamlessM4TForSpeechToSpeech(SeamlessM4TPreTrainedModel, GenerationMixin):
 `Union[SeamlessM4TGenerationOutput, Tuple[Tensor]]`:
 - If `return_intermediate_token_ids`, returns [`SeamlessM4TGenerationOutput`].
 - If not `return_intermediate_token_ids`, returns a tuple composed of waveforms of shape `(batch_size,
-sequence_length)`and and `waveform_lengths` which gives the length of each sample.
+sequence_length)` and `waveform_lengths` which gives the length of each sample.
 """
 batch_size = len(input_features) if input_features is not None else len(kwargs.get("inputs_embeds"))
@@ -4132,7 +4132,7 @@ class SeamlessM4TModel(SeamlessM4TPreTrainedModel, GenerationMixin):
 `Union[SeamlessM4TGenerationOutput, Tuple[Tensor], ModelOutput]`:
 - If `generate_speech` and `return_intermediate_token_ids`, returns [`SeamlessM4TGenerationOutput`].
 - If `generate_speech` and not `return_intermediate_token_ids`, returns a tuple composed of waveforms of
-shape `(batch_size, sequence_length)`and and `waveform_lengths` which gives the length of each sample.
+shape `(batch_size, sequence_length)` and `waveform_lengths` which gives the length of each sample.
 - If `generate_speech=False`, it will returns `ModelOutput`.
 """
 if input_ids is None and input_features is None and kwargs.get("inputs_embeds", None) is None:
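As a usage-level illustration of the return convention these docstrings describe, here is a hedged sketch (the checkpoint name and processor flow are the usual documented pattern, not part of this commit):

```python
from transformers import AutoProcessor, SeamlessM4TForTextToSpeech

processor = AutoProcessor.from_pretrained("facebook/hf-seamless-m4t-medium")
model = SeamlessM4TForTextToSpeech.from_pretrained("facebook/hf-seamless-m4t-medium")

inputs = processor(text="Hello, world!", src_lang="eng", return_tensors="pt")

# With return_intermediate_token_ids unset, generate returns the tuple described
# above: waveforms of shape (batch_size, sequence_length) and waveform_lengths.
waveforms, waveform_lengths = model.generate(**inputs, tgt_lang="fra")
```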


@@ -3691,7 +3691,7 @@ class SeamlessM4Tv2ForTextToSpeech(SeamlessM4Tv2PreTrainedModel, GenerationMixin
 `Union[SeamlessM4Tv2GenerationOutput, Tuple[Tensor]]`:
 - If `return_intermediate_token_ids`, returns [`SeamlessM4Tv2GenerationOutput`].
 - If not `return_intermediate_token_ids`, returns a tuple composed of waveforms of shape `(batch_size,
-sequence_length)`and and `waveform_lengths` which gives the length of each sample.
+sequence_length)` and `waveform_lengths` which gives the length of each sample.
 """
 batch_size = len(input_ids) if input_ids is not None else len(kwargs.get("inputs_embeds"))
@@ -4062,7 +4062,7 @@ class SeamlessM4Tv2ForSpeechToSpeech(SeamlessM4Tv2PreTrainedModel, GenerationMix
 `Union[SeamlessM4Tv2GenerationOutput, Tuple[Tensor]]`:
 - If `return_intermediate_token_ids`, returns [`SeamlessM4Tv2GenerationOutput`].
 - If not `return_intermediate_token_ids`, returns a tuple composed of waveforms of shape `(batch_size,
-sequence_length)`and and `waveform_lengths` which gives the length of each sample.
+sequence_length)` and `waveform_lengths` which gives the length of each sample.
 """
 batch_size = len(input_features) if input_features is not None else len(kwargs.get("inputs_embeds"))
@@ -4514,7 +4514,7 @@ class SeamlessM4Tv2Model(SeamlessM4Tv2PreTrainedModel, GenerationMixin):
 `Union[SeamlessM4Tv2GenerationOutput, Tuple[Tensor], ModelOutput]`:
 - If `generate_speech` and `return_intermediate_token_ids`, returns [`SeamlessM4Tv2GenerationOutput`].
 - If `generate_speech` and not `return_intermediate_token_ids`, returns a tuple composed of waveforms of
-shape `(batch_size, sequence_length)`and and `waveform_lengths` which gives the length of each sample.
+shape `(batch_size, sequence_length)` and `waveform_lengths` which gives the length of each sample.
 - If `generate_speech=False`, it will returns `ModelOutput`.
 """
 if input_ids is None and input_features is None and kwargs.get("inputs_embeds", None) is None:


@@ -275,7 +275,7 @@ class ColPaliForRetrievalModelTest(ModelTesterMixin, unittest.TestCase):
 pass
 @unittest.skip(
-reason="PaliGemmma's SigLip encoder uses the same initialization scheme as the Flax original implementation"
+reason="PaliGemma's SigLip encoder uses the same initialization scheme as the Flax original implementation"
 )
 def test_initialization(self):
 pass


@@ -431,7 +431,7 @@ class DeepseekV3ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTeste
 def test_past_key_values_format(self):
 """
-Overwritting to pass the expected cache shapes (Deepseek-V3 uses MLA so the cache shapes are non-standard)
+Overwriting to pass the expected cache shapes (Deepseek-V3 uses MLA so the cache shapes are non-standard)
 """
 config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
 batch_size, seq_length = inputs["input_ids"].shape
@@ -451,7 +451,7 @@ class DeepseekV3ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTeste
 @slow
 def test_eager_matches_sdpa_generate(self):
 """
-Overwritting the common test as the test is flaky on tiny models
+Overwriting the common test as the test is flaky on tiny models
 """
 max_new_tokens = 30


@@ -136,7 +136,7 @@ class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
 decode_kwargs={"use_source_tokenizer": True},
 )
-def test_tokenizer_integration_seperate_vocabs(self):
+def test_tokenizer_integration_separate_vocabs(self):
 tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")
 source_text = "Tämä on testi"
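For reference, the renamed test exercises a checkpoint with separate source and target vocabularies. A hedged sketch of that behavior, built only from names visible in this diff:

```python
from transformers import MarianTokenizer

# Checkpoint from the test above: it ships distinct source/target vocabularies.
tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

ids = tokenizer("Tämä on testi").input_ids
# use_source_tokenizer=True decodes with the source-side vocabulary
# (the same kwarg that appears in decode_kwargs above).
text = tokenizer.decode(ids, use_source_tokenizer=True, skip_special_tokens=True)
print(text)
```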


@@ -69,7 +69,7 @@ class FlaxOPTModelTester:
 embed_dim=16,
 word_embed_proj_dim=16,
 initializer_range=0.02,
-attn_implemetation="eager",
+attn_implementation="eager",
 ):
 self.parent = parent
 self.batch_size = batch_size
@@ -92,7 +92,7 @@ class FlaxOPTModelTester:
 self.word_embed_proj_dim = word_embed_proj_dim
 self.initializer_range = initializer_range
 self.is_encoder_decoder = False
-self.attn_implementation = attn_implemetation
+self.attn_implementation = attn_implementation
 def prepare_config_and_inputs(self):
 input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
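The `attn_implementation` knob being fixed here is the same one exposed at model-loading time. A minimal sketch on the PyTorch side (the model name is an example, not part of this diff):

```python
from transformers import AutoModelForCausalLM

# "eager" selects the plain attention implementation, matching the tester's default.
model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-125m", attn_implementation="eager"
)
```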


@@ -297,7 +297,7 @@ class PaliGemmaForConditionalGenerationModelTest(ModelTesterMixin, GenerationTes
 pass
 @unittest.skip(
-reason="PaliGemmma's SigLip encoder uses the same initialization scheme as the Flax original implementation"
+reason="PaliGemma's SigLip encoder uses the same initialization scheme as the Flax original implementation"
 )
 def test_initialization(self):
 pass


@@ -294,7 +294,7 @@ class PaliGemma2ForConditionalGenerationModelTest(ModelTesterMixin, GenerationTe
 pass
 @unittest.skip(
-reason="PaliGemmma's SigLip encoder uses the same initialization scheme as the Flax original implementation"
+reason="PaliGemma's SigLip encoder uses the same initialization scheme as the Flax original implementation"
 )
 def test_initialization(self):
 pass


@@ -18,7 +18,7 @@ Utility that checks whether the copies defined in the library match the original
 - The list of models in the main README.md matches the ones in the localized READMEs,
 - Files that are registered as full copies of one another in the `FULL_COPIES` constant of this script.
-This also checks the list of models in the README is complete (has all models) and add a line to complete if there is
+This also checks the list of models in the README is complete (has all models) and adds a line to complete if there is
 a model missing.
 Use from the root of the repo with:
@@ -420,7 +420,7 @@ def find_code_in_transformers(
 # Detail: the `Copied from` statement is originally designed to work with the last part of `TRANSFORMERS_PATH`,
 # (which is `transformers`). The same should be applied for `MODEL_TEST_PATH`. However, its last part is `models`
-# (to only check and search in it) which is a bit confusing. So we keep the copied statement staring with
+# (to only check and search in it) which is a bit confusing. So we keep the copied statement starting with
 # `tests.models.` and change it to `tests` here.
 if base_path == MODEL_TEST_PATH:
 base_path = "tests"