chore: fix typos (#26756)

Heinz-Alexander Fuetterer 2023-10-12 18:00:27 +02:00 committed by GitHub
parent a243cdca2a
commit 883ed4b344
7 changed files with 8 additions and 8 deletions

View File

@@ -20,7 +20,7 @@ rendered properly in your Markdown viewer.
🤗 Transformers has integrated `optimum` API to perform GPTQ quantization on language models. You can load and quantize your model in 8, 4, 3 or even 2 bits without a big drop of performance and faster inference speed! This is supported by most GPU hardwares.
-To learn more about the the quantization model, check out:
+To learn more about the quantization model, check out:
- the [GPTQ](https://arxiv.org/pdf/2210.17323.pdf) paper
- the `optimum` [guide](https://huggingface.co/docs/optimum/llm_quantization/usage_guides/quantization) on GPTQ quantization
- the [`AutoGPTQ`](https://github.com/PanQiWei/AutoGPTQ) library used as the backend
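
As a quick orientation for the docs section touched here, a minimal sketch of the integration it describes, assuming the `transformers` GPTQ API; the model id and calibration dataset below are illustrative, not taken from the docs page:

```py
from transformers import AutoModelForCausalLM, AutoTokenizer, GPTQConfig

model_id = "facebook/opt-125m"  # illustrative checkpoint
tokenizer = AutoTokenizer.from_pretrained(model_id)

# Quantize to 4 bits with the "c4" calibration set; 8-, 3-, or 2-bit
# quantization follows the same pattern by changing `bits`.
gptq_config = GPTQConfig(bits=4, dataset="c4", tokenizer=tokenizer)
model = AutoModelForCausalLM.from_pretrained(
    model_id, device_map="auto", quantization_config=gptq_config
)
```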

View File

@@ -306,7 +306,7 @@ Create a function to preprocess the dataset so the audio samples are the same le
... return inputs
```
-Apply the `preprocess_function` to the the first few examples in the dataset:
+Apply the `preprocess_function` to the first few examples in the dataset:
```py
>>> processed_dataset = preprocess_function(dataset[:5])
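# A hedged sanity check (sketch), kept as code since the fence above is still
# open in this excerpt: five examples in should yield five processed samples.
# The "input_values" key is an assumption, typical of audio feature extractors.
>>> len(processed_dataset["input_values"])
5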

View File

@@ -315,7 +315,7 @@ class GenerationConfig(PushToHubMixin):
# Wild card
self.generation_kwargs = kwargs.pop("generation_kwargs", {})
-# The remaining attributes do not parametrize `.generate()`, but are informative and/or used by the the hub
+# The remaining attributes do not parametrize `.generate()`, but are informative and/or used by the hub
# interface.
self._from_model_config = kwargs.pop("_from_model_config", False)
self._commit_hash = kwargs.pop("_commit_hash", None)
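
To see these informative attributes from the user side, a minimal sketch; the repo id is just an example, and `_from_model_config` is a private attribute shown only to illustrate the defaults set above:

```py
from transformers import GenerationConfig

# Loading from the Hub populates hub-related metadata such as `_commit_hash`;
# `_from_model_config` stays at its False default unless set explicitly.
generation_config = GenerationConfig.from_pretrained("gpt2")
print(generation_config._from_model_config)  # False
```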

View File

@@ -787,7 +787,7 @@ class TFDeiTForMaskedImageModeling(TFDeiTPreTrainedModel):
# Reconstruct pixel values
reconstructed_pixel_values = self.decoder(sequence_output, training=training)
# TF 2.0 image layers can't use NCHW format when running on CPU, so intermediate layers use NHWC,
-# including the The decoder. We transpose to compute the loss against the pixel values
+# including the decoder. We transpose to compute the loss against the pixel values
# (batch_size, height, width, num_channels) -> (batch_size, num_channels, height, width)
reconstructed_pixel_values = tf.transpose(reconstructed_pixel_values, (0, 3, 1, 2))
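
To make the layout conversion concrete, a standalone sketch of the same permutation with illustrative shapes:

```py
import tensorflow as tf

# NHWC, as TF image layers produce on CPU: (batch, height, width, channels)
nhwc = tf.zeros((2, 224, 224, 3))

# The same permutation as above: NHWC -> NCHW (batch, channels, height, width)
nchw = tf.transpose(nhwc, (0, 3, 1, 2))
print(nchw.shape)  # (2, 3, 224, 224)
```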

View File

@@ -200,7 +200,7 @@ EVALUATION_TASKS = [
task=[
"Provide me the summary of the `text`, then read it to me before transcribing it and translating it in French.",
"Summarize `text`, read it out loud then transcribe the audio and translate it in French.",
"Read me a summary of the the `text` out loud. Transcribe this and translate it in French.",
"Read me a summary of the `text` out loud. Transcribe this and translate it in French.",
],
inputs=["text"],
answer="translator(transcriber(text_reader(summarizer(text))), src_lang='English', tgt_lang='French')",

View File

@@ -39,7 +39,7 @@ def find_adapter_config_file(
_commit_hash: Optional[str] = None,
) -> Optional[str]:
r"""
-Simply checks if the model stored on the Hub or locally is an adapter model or not, return the path the the adapter
+Simply checks if the model stored on the Hub or locally is an adapter model or not, return the path of the adapter
config file if it is, None otherwise.
Args:
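
A hedged usage sketch of the behavior this docstring describes; the import path is an assumption about where the helper is exposed, and "gpt2" is simply a checkpoint that has no adapter config:

```py
from transformers.utils import find_adapter_config_file

# A plain (non-adapter) checkpoint has no adapter_config.json, so this
# returns None; for a PEFT adapter repo it would return the file's path.
print(find_adapter_config_file("gpt2"))  # None
```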

View File

@@ -178,7 +178,7 @@ class GPTQTest(unittest.TestCase):
def test_generate_quality(self):
"""
-Simple test to check the quality of the model by comapring the the generated tokens with the expected tokens
+Simple test to check the quality of the model by comparing the generated tokens with the expected tokens
"""
if self.device_map is None:
self.check_inference_correctness(self.quantized_model.to(0))
@@ -290,7 +290,7 @@ class GPTQTestActOrderExllama(unittest.TestCase):
def test_generate_quality(self):
"""
-Simple test to check the quality of the model by comapring the the generated tokens with the expected tokens
+Simple test to check the quality of the model by comparing the generated tokens with the expected tokens
"""
self.check_inference_correctness(self.quantized_model)
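
For reference, a minimal sketch of what a `check_inference_correctness` helper along these lines might look like; attribute names such as `input_text` and `EXPECTED_OUTPUTS` are assumptions, not necessarily the test file's exact definitions:

```py
def check_inference_correctness(self, model):
    # Encode a fixed prompt, greedily generate a short continuation, and
    # compare the decoded text against a set of accepted outputs.
    encoded = self.tokenizer(self.input_text, return_tensors="pt").to(model.device)
    output = model.generate(**encoded, max_new_tokens=10, do_sample=False)
    decoded = self.tokenizer.decode(output[0], skip_special_tokens=True)
    self.assertIn(decoded, self.EXPECTED_OUTPUTS)
```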