Super tiny fix 12 typos about "with with" (#29926)

* with with

* style
fzyzcjy 2024-03-29 22:31:31 +08:00 committed by GitHub
parent 43d17c1836
commit 5ad7f17002
12 changed files with 13 additions and 21 deletions


@@ -24,7 +24,7 @@ This model was contributed by [Connor Henderson](https://huggingface.co/connor-h
 ## 🤗 Model Architecture
-FastSpeech2's general structure with a Mel-spectrogram decoder was implemented, and the traditional transformer blocks were replaced with with conformer blocks as done in the ESPnet library.
+FastSpeech2's general structure with a Mel-spectrogram decoder was implemented, and the traditional transformer blocks were replaced with conformer blocks as done in the ESPnet library.
 #### FastSpeech2 Model Architecture
 ![FastSpeech2 Model Architecture](https://www.microsoft.com/en-us/research/uploads/prod/2021/04/fastspeech2-1.png)


@@ -400,7 +400,7 @@ class AlignVisionExpansionLayer(nn.Module):
         return hidden_states
-# Copied from transformers.models.efficientnet.modeling_efficientnet.EfficientNetDepthwiseLayer with with EfficientNet->AlignVision
+# Copied from transformers.models.efficientnet.modeling_efficientnet.EfficientNetDepthwiseLayer with EfficientNet->AlignVision
 class AlignVisionDepthwiseLayer(nn.Module):
     r"""
     This corresponds to the depthwise convolution phase of each block in the original implementation.
@@ -440,7 +440,7 @@ class AlignVisionDepthwiseLayer(nn.Module):
         return hidden_states
-# Copied from transformers.models.efficientnet.modeling_efficientnet.EfficientNetSqueezeExciteLayer with with EfficientNet->AlignVision
+# Copied from transformers.models.efficientnet.modeling_efficientnet.EfficientNetSqueezeExciteLayer with EfficientNet->AlignVision
 class AlignVisionSqueezeExciteLayer(nn.Module):
     r"""
     This corresponds to the Squeeze and Excitement phase of each block in the original implementation.


@@ -2093,7 +2093,7 @@ class BartDecoderWrapper(BartPreTrainedModel):
 @add_start_docstrings(
     """
-    BART decoder with with a language modeling head on top (linear layer with weights tied to the input embeddings).
+    BART decoder with a language modeling head on top (linear layer with weights tied to the input embeddings).
     """,
     BART_START_DOCSTRING,
 )
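The "linear layer with weights tied to the input embeddings" phrasing in this docstring (and in the matching Whisper docstring further down) refers to standard weight tying between the LM head and the token embedding matrix. A minimal sketch in plain PyTorch, not the BART source itself; the sizes below are illustrative:

```python
import torch
import torch.nn as nn

vocab_size, d_model = 50265, 1024  # illustrative sizes, not read from any BART config

embed_tokens = nn.Embedding(vocab_size, d_model)      # input embeddings
lm_head = nn.Linear(d_model, vocab_size, bias=False)  # output projection ("LM head")

# Weight tying: both modules share a single Parameter, so gradients accumulate in one
# place and the checkpoint stores the matrix only once.
lm_head.weight = embed_tokens.weight

hidden_states = torch.randn(1, 4, d_model)  # (batch, seq_len, hidden_size)
logits = lm_head(hidden_states)             # (batch, seq_len, vocab_size)
assert lm_head.weight.data_ptr() == embed_tokens.weight.data_ptr()
```

In Transformers itself this tying is performed by `PreTrainedModel.tie_weights()` when the config sets `tie_word_embeddings=True`, rather than by a manual assignment like the one above.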


@@ -154,8 +154,7 @@ class OpenLlamaConfig(PretrainedConfig):
         if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
             raise ValueError(
-                "`rope_scaling` must be a dictionary with with two fields, `type` and `factor`, "
-                f"got {self.rope_scaling}"
+                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, " f"got {self.rope_scaling}"
             )
         rope_scaling_type = self.rope_scaling.get("type", None)
         rope_scaling_factor = self.rope_scaling.get("factor", None)
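The same two-field `rope_scaling` check recurs in the Falcon, Fuyu, GPT-NeoX, Llama, Persimmon, Phi, and StableLm configs below. A minimal sketch of what this validation accepts and rejects, as of the Transformers version this commit targets (the choice of `LlamaConfig` and the scaling factor are illustrative, not taken from the commit):

```python
from transformers import LlamaConfig

# Passes: exactly the two keys `type` and `factor`.
config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})

# Fails the `len(self.rope_scaling) != 2` check and raises the ValueError shown in the diff above.
try:
    LlamaConfig(rope_scaling={"factor": 2.0})
except ValueError as err:
    print(err)
```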


@@ -177,8 +177,7 @@ class FalconConfig(PretrainedConfig):
         if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
             raise ValueError(
-                "`rope_scaling` must be a dictionary with with two fields, `type` and `factor`, "
-                f"got {self.rope_scaling}"
+                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, " f"got {self.rope_scaling}"
             )
         rope_scaling_type = self.rope_scaling.get("type", None)
         rope_scaling_factor = self.rope_scaling.get("factor", None)


@@ -199,8 +199,7 @@ class FuyuConfig(PretrainedConfig):
         if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
             raise ValueError(
-                "`rope_scaling` must be a dictionary with with two fields, `type` and `factor`, "
-                f"got {self.rope_scaling}"
+                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, " f"got {self.rope_scaling}"
             )
         rope_scaling_type = self.rope_scaling.get("type", None)
         rope_scaling_factor = self.rope_scaling.get("factor", None)


@@ -167,8 +167,7 @@ class GPTNeoXConfig(PretrainedConfig):
         if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
             raise ValueError(
-                "`rope_scaling` must be a dictionary with with two fields, `type` and `factor`, "
-                f"got {self.rope_scaling}"
+                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, " f"got {self.rope_scaling}"
             )
         rope_scaling_type = self.rope_scaling.get("type", None)
         rope_scaling_factor = self.rope_scaling.get("factor", None)


@@ -179,8 +179,7 @@ class LlamaConfig(PretrainedConfig):
         if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
             raise ValueError(
-                "`rope_scaling` must be a dictionary with with two fields, `type` and `factor`, "
-                f"got {self.rope_scaling}"
+                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, " f"got {self.rope_scaling}"
             )
         rope_scaling_type = self.rope_scaling.get("type", None)
         rope_scaling_factor = self.rope_scaling.get("factor", None)


@@ -151,8 +151,7 @@ class PersimmonConfig(PretrainedConfig):
         if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
             raise ValueError(
-                "`rope_scaling` must be a dictionary with with two fields, `type` and `factor`, "
-                f"got {self.rope_scaling}"
+                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, " f"got {self.rope_scaling}"
             )
         rope_scaling_type = self.rope_scaling.get("type", None)
         rope_scaling_factor = self.rope_scaling.get("factor", None)


@@ -179,8 +179,7 @@ class PhiConfig(PretrainedConfig):
         if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
             raise ValueError(
-                "`rope_scaling` must be a dictionary with with two fields, `type` and `factor`, "
-                f"got {self.rope_scaling}"
+                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, " f"got {self.rope_scaling}"
             )
         rope_scaling_type = self.rope_scaling.get("type", None)
         rope_scaling_factor = self.rope_scaling.get("factor", None)


@@ -168,8 +168,7 @@ class StableLmConfig(PretrainedConfig):
         if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
             raise ValueError(
-                "`rope_scaling` must be a dictionary with with two fields, `type` and `factor`, "
-                f"got {self.rope_scaling}"
+                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, " f"got {self.rope_scaling}"
             )
         rope_scaling_type = self.rope_scaling.get("type", None)
         rope_scaling_factor = self.rope_scaling.get("factor", None)


@@ -1861,7 +1861,7 @@ class WhisperDecoderWrapper(WhisperPreTrainedModel):
 @add_start_docstrings(
     """
-    Whisper decoder with with a language modeling head on top (linear layer with weights tied to the input embeddings).
+    Whisper decoder with a language modeling head on top (linear layer with weights tied to the input embeddings).
     """,
     WHISPER_START_DOCSTRING,
 )