Mirror of https://github.com/huggingface/transformers.git (synced 2025-08-01 02:31:11 +06:00)
chore: fix typos (#31559)
Signed-off-by: snoppy <michaleli@foxmail.com>
This commit is contained in:
parent dce253f645, commit 0dd65a0319
````diff
@@ -123,7 +123,7 @@ caption = image_captioner(image)
 ```<end_action>
 
 ---
-Above example were using tools that might not exist for you. You only have acces to those Tools:
+Above example were using tools that might not exist for you. You only have access to those Tools:
 <<tool_names>>
 
 Remember to make sure that variables you use are all defined.
````
```diff
@@ -145,7 +145,7 @@ The $ACTION_JSON_BLOB should only contain a SINGLE action, do NOT return a list
   "action_input": $INPUT
 }<end_action>
 
-Make sure to have the $INPUT as a dictionnary in the right format for the tool you are using, and do not put variable names as input if you can find the right values.
+Make sure to have the $INPUT as a dictionary in the right format for the tool you are using, and do not put variable names as input if you can find the right values.
 
 You should ALWAYS use the following format:
 
```
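For context, the prompt being fixed here asks the model to emit exactly one action as a JSON blob terminated by `<end_action>`, with `action_input` given as a dictionary of concrete values rather than variable names. A minimal sketch of a well-formed blob, assuming a hypothetical `image_captioner` tool that takes an `image` argument (the tool name is borrowed from the hunk context above; the input schema is invented for illustration):

```python
import json

# Hypothetical single-action blob in the shape the prompt describes:
# one action, with "action_input" as a dictionary of concrete values.
action_blob = {
    "action": "image_captioner",             # assumed tool name, for illustration only
    "action_input": {"image": "photo.png"},  # a dictionary, not a bare variable name
}

print(json.dumps(action_blob, indent=2) + "<end_action>")
```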
```diff
@@ -250,7 +250,7 @@ Action:
 }<end_action>
 
 
-Above example were using notional tools that might not exist for you. You only have acces to those tools:
+Above example were using notional tools that might not exist for you. You only have access to those tools:
 <<tool_descriptions>>
 
 Here are the rules you should always follow to solve your task:
```
```diff
@@ -628,7 +628,7 @@ def evaluate_ast(
 
     Args:
         expression (`ast.AST`):
-            The code to evaluate, as an abastract syntax tree.
+            The code to evaluate, as an abstract syntax tree.
         state (`Dict[str, Any]`):
             A dictionary mapping variable names to values. The `state` is updated if need be when the evaluation
             encounters assignements.
```
```diff
@@ -640,7 +640,7 @@ def evaluate_ast(
     Add more at your own risk!
     """
     if isinstance(expression, ast.Assign):
-        # Assignement -> we evaluate the assignement which should update the state
+        # Assignement -> we evaluate the assignment which should update the state
         # We return the variable assigned as it may be used to determine the final result.
         return evaluate_assign(expression, state, tools)
     elif isinstance(expression, ast.AugAssign):
```
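The two hunks above touch `evaluate_ast`, which walks a parsed Python expression and updates a `state` dictionary whenever it meets an assignment. A deliberately simplified sketch of that `ast.Assign` branch (this is not the library's `evaluate_assign`, which also handles tools, tuple targets, and arbitrary sub-expressions):

```python
import ast

def evaluate_assign_sketch(node: ast.Assign, state: dict):
    """Toy stand-in: evaluate a literal assignment and record it in `state`."""
    value = ast.literal_eval(node.value)  # the real interpreter evaluates arbitrary sub-expressions
    for target in node.targets:
        if isinstance(target, ast.Name):
            state[target.id] = value      # the assignment updates the state
    return value                          # returned, as it may determine the final result

state = {}
node = ast.parse("x = 3").body[0]
print(evaluate_assign_sketch(node, state), state)  # 3 {'x': 3}
```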
```diff
@@ -1074,7 +1074,7 @@ def stft(frames: np.array, windowing_function: np.array, fft_window_size: int =
         frames (`np.array` of dimension `(num_frames, fft_window_size)`):
             A framed audio signal obtained using `audio_utils.fram_wav`.
         windowing_function (`np.array` of dimension `(nb_frequency_bins, nb_mel_filters)`:
-            A array reprensenting the function that will be used to reduces the amplitude of the discontinuities at the
+            A array representing the function that will be used to reduces the amplitude of the discontinuities at the
             boundaries of each frame when computing the STFT. Each frame will be multiplied by the windowing_function.
             For more information on the discontinuities, called *Spectral leakage*, refer to [this
             tutorial]https://download.ni.com/evaluation/pxi/Understanding%20FFTs%20and%20Windowing.pdf
```
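The `stft` docstring above describes multiplying every frame by a windowing function before the FFT so that discontinuities at the frame boundaries (spectral leakage) are damped. A minimal NumPy sketch of that step on synthetic frames (shapes and the Hann window are chosen for illustration; this is not the transformers implementation):

```python
import numpy as np

num_frames, fft_window_size = 4, 8

# Synthetic framed signal, standing in for the output of a framing helper.
frames = np.random.randn(num_frames, fft_window_size)

# A Hann window tapers each frame towards zero at its boundaries,
# reducing the spectral leakage mentioned in the docstring.
window = np.hanning(fft_window_size)

# Window each frame, then take the real FFT along the last axis.
spectrogram = np.fft.rfft(frames * window, axis=-1)
print(spectrogram.shape)  # (4, 5) == (num_frames, fft_window_size // 2 + 1)
```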
```diff
@@ -214,7 +214,7 @@ class QuantizedCacheConfig(CacheConfig):
         compute_dtype (`torch.dtype`, *optional*, defaults to `torch.float16`):
             The defualt dtype used for computations in the model. Keys and Values will be cast to this dtype after dequantization.
         device (`str`, *optional*, defaults to `"cpu"`):
-            Device on which to peform computations, should be same as the model's device.
+            Device on which to perform computations, should be same as the model's device.
     """
 
     def __init__(
```
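The `QuantizedCacheConfig` docstring above documents `compute_dtype` and `device` among the cache options. A hedged construction sketch, assuming the class lives in `transformers.cache_utils` and accepts these documented keywords (other parameters such as the quantization backend are left at their defaults):

```python
import torch
from transformers.cache_utils import QuantizedCacheConfig  # import path assumed

cache_config = QuantizedCacheConfig(
    compute_dtype=torch.float16,  # dtype keys/values are cast to after dequantization
    device="cpu",                 # should match the model's device
)
```

At generation time such a config is typically paired with a quantized cache implementation; the exact wiring depends on the transformers version, so only the documented constructor arguments are shown here.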
```diff
@@ -476,7 +476,7 @@ class FlaxNoRepeatNGramLogitsProcessor(FlaxLogitsProcessor):
     def get_previous_ngrams(self, input_ids: jnp.ndarray, vocab_size: int, cur_len: int):
         """
         get a matrix of size (batch_size,) + (vocab_size,)*n (for n-grams) that
-        represent the n-grams that occured previously.
+        represent the n-grams that occurred previously.
         The BCOO representation allow to store only the few non-zero entries, instead of the full (huge) matrix
         """
         batch_size, seq_len = input_ids.shape
```
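`get_previous_ngrams` above records which n-grams have already occurred as a sparse BCOO array, so only the few non-zero entries are stored instead of the full `(batch_size,) + (vocab_size,)*n` matrix. A small self-contained sketch of that idea for bigrams (n = 2), using `jax.experimental.sparse` directly rather than the processor's internal helpers:

```python
import jax.numpy as jnp
from jax.experimental import sparse

vocab_size = 5
input_ids = jnp.array([[1, 2, 2, 4]])  # toy (batch_size=1, seq_len=4)
batch_size, seq_len = input_ids.shape

# Dense indicator: entry [b, i, j] is 1 if token j ever followed token i in sequence b.
dense = jnp.zeros((batch_size, vocab_size, vocab_size))
for pos in range(seq_len - 1):
    dense = dense.at[jnp.arange(batch_size), input_ids[:, pos], input_ids[:, pos + 1]].set(1.0)

# BCOO keeps only the non-zero entries instead of the full (huge) matrix.
previous_bigrams = sparse.BCOO.fromdense(dense)
print(previous_bigrams.nse)  # 3 stored non-zeros: (1,2), (2,2), (2,4)
```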