Mirror of https://github.com/huggingface/transformers.git, synced 2025-08-01 02:31:11 +06:00

Set cache_dir for evaluate.load() in example scripts (#28422)
While using `run_clm.py`,[^1] I noticed that some files were being added
to my global cache, not the local cache. I set the `cache_dir` parameter
for the one call to `evaluate.load()`, which partially solved the
problem. I figured that while I was fixing the one script upstream, I
might as well fix the problem in all other example scripts that I could.
There are still some files being added to my global cache, but this
appears to be a bug in `evaluate` itself. This commit at least moves
some of the files into the local cache, which is better than before.
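The change boils down to passing the scripts' existing cache directory through to `evaluate.load()`. A minimal standalone sketch of the pattern (the `"./hf_cache"` path and the `accuracy` metric below are illustrative; the scripts themselves pass `model_args.cache_dir` or `args.cache_dir`):

```python
import evaluate

# Without cache_dir, the metric's loading script and data go to the global
# Hugging Face cache; with it, they stay in the directory the script was told to use.
metric = evaluate.load("accuracy", cache_dir="./hf_cache")  # path is illustrative

print(metric.compute(predictions=[0, 1, 1], references=[0, 1, 0]))  # {'accuracy': 0.666...}
```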
To create this PR, I made the following regex-based transformation:
`evaluate\.load\((.*?)\)` -> `evaluate\.load\($1,
cache_dir=model_args.cache_dir\)`. After using that, I manually fixed
all modified files with `ruff` serving as useful guidance. During the
process, I removed one existing usage of the `cache_dir` parameter in a
script that did not have a corresponding `--cache-dir` argument
declared.
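Roughly, the substitution can be reproduced in Python's `re` syntax as follows (a sketch only; the actual edit was an editor find-and-replace followed by manual cleanup):

```python
import re

PATTERN = r"evaluate\.load\((.*?)\)"
REPLACEMENT = r"evaluate.load(\1, cache_dir=model_args.cache_dir)"

line = 'metric = evaluate.load("wer")'
print(re.sub(PATTERN, REPLACEMENT, line))
# metric = evaluate.load("wer", cache_dir=model_args.cache_dir)
```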
[^1]: I specifically used `pytorch/language-modeling/run_clm.py` from v4.34.1 of the library. For the original code, see acc394c4f5/examples/pytorch/language-modeling/run_clm.py.
This commit is contained in:
parent
5fd5ef7624
commit
95091e1582
@@ -853,7 +853,7 @@ def main():
         yield batch
 
     # Metric
-    metric = evaluate.load("rouge")
+    metric = evaluate.load("rouge", cache_dir=model_args.cache_dir)
 
     def postprocess_text(preds, labels):
         preds = [pred.strip() for pred in preds]

@@ -807,7 +807,9 @@ def main():
         references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
         return EvalPrediction(predictions=formatted_predictions, label_ids=references)
 
-    metric = evaluate.load("squad_v2" if data_args.version_2_with_negative else "squad")
+    metric = evaluate.load(
+        "squad_v2" if data_args.version_2_with_negative else "squad", cache_dir=model_args.cache_dir
+    )
 
     def compute_metrics(p: EvalPrediction):
         return metric.compute(predictions=p.predictions, references=p.label_ids)

@@ -577,7 +577,7 @@ def main():
         return
 
     # 8. Load Metric
-    metric = evaluate.load("wer")
+    metric = evaluate.load("wer", cache_dir=model_args.cache_dir)
 
     def compute_metrics(preds, labels):
         # replace padded labels by the padding token

@@ -710,7 +710,7 @@ def main():
     )
 
     # Metric
-    metric = evaluate.load("rouge")
+    metric = evaluate.load("rouge", cache_dir=model_args.cache_dir)
 
     def postprocess_text(preds, labels):
         preds = [pred.strip() for pred in preds]

@@ -599,9 +599,9 @@ def main():
     p_eval_step = jax.pmap(eval_step, axis_name="batch")
 
     if data_args.task_name is not None:
-        metric = evaluate.load("glue", data_args.task_name)
+        metric = evaluate.load("glue", data_args.task_name, cache_dir=model_args.cache_dir)
     else:
-        metric = evaluate.load("accuracy")
+        metric = evaluate.load("accuracy", cache_dir=model_args.cache_dir)
 
     logger.info(f"===== Starting training ({num_epochs} epochs) =====")
     train_time = 0

@@ -676,7 +676,7 @@ def main():
 
     p_eval_step = jax.pmap(eval_step, axis_name="batch")
 
-    metric = evaluate.load("seqeval")
+    metric = evaluate.load("seqeval", cache_dir=model_args.cache_dir)
 
     def get_labels(y_pred, y_true):
         # Transform predictions and references tensos to numpy arrays

@@ -349,7 +349,7 @@ def main():
         id2label[str(i)] = label
 
     # Load the accuracy metric from the datasets package
-    metric = evaluate.load("accuracy")
+    metric = evaluate.load("accuracy", cache_dir=model_args.cache_dir)
 
     # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
     # `predictions` and `label_ids` fields) and has to return a dictionary string to float.

@@ -287,7 +287,7 @@ def main():
         id2label[str(i)] = label
 
     # Load the accuracy metric from the datasets package
-    metric = evaluate.load("accuracy")
+    metric = evaluate.load("accuracy", cache_dir=model_args.cache_dir)
 
     # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
     # predictions and label_ids field) and has to return a dictionary string to float.

@@ -282,7 +282,6 @@ def main():
         dataset = load_dataset(
             "imagefolder",
             data_files=data_files,
-            cache_dir=args.cache_dir,
             task="image-classification",
         )
         # See more about loading custom images at

@@ -583,7 +583,7 @@ def main():
             logits = logits[0]
         return logits.argmax(dim=-1)
 
-    metric = evaluate.load("accuracy")
+    metric = evaluate.load("accuracy", cache_dir=model_args.cache_dir)
 
     def compute_metrics(eval_preds):
         preds, labels = eval_preds

@@ -590,7 +590,7 @@ def main():
             logits = logits[0]
         return logits.argmax(dim=-1)
 
-    metric = evaluate.load("accuracy")
+    metric = evaluate.load("accuracy", cache_dir=model_args.cache_dir)
 
     def compute_metrics(eval_preds):
         preds, labels = eval_preds

@@ -627,7 +627,9 @@ def main():
         references = [{"id": str(ex["id"]), "answers": ex[answer_column_name]} for ex in examples]
         return EvalPrediction(predictions=formatted_predictions, label_ids=references)
 
-    metric = evaluate.load("squad_v2" if data_args.version_2_with_negative else "squad")
+    metric = evaluate.load(
+        "squad_v2" if data_args.version_2_with_negative else "squad", cache_dir=model_args.cache_dir
+    )
 
     def compute_metrics(p: EvalPrediction):
         return metric.compute(predictions=p.predictions, references=p.label_ids)

@@ -647,7 +647,9 @@ def main():
         references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
         return EvalPrediction(predictions=formatted_predictions, label_ids=references)
 
-    metric = evaluate.load("squad_v2" if data_args.version_2_with_negative else "squad")
+    metric = evaluate.load(
+        "squad_v2" if data_args.version_2_with_negative else "squad", cache_dir=model_args.cache_dir
+    )
 
     def compute_metrics(p: EvalPrediction):
         return metric.compute(predictions=p.predictions, references=p.label_ids)

@@ -631,7 +631,9 @@ def main():
         pad_to_multiple_of=8 if training_args.fp16 else None,
     )
 
-    metric = evaluate.load("squad_v2" if data_args.version_2_with_negative else "squad")
+    metric = evaluate.load(
+        "squad_v2" if data_args.version_2_with_negative else "squad", cache_dir=model_args.cache_dir
+    )
 
     def compute_metrics(p: EvalPrediction):
         return metric.compute(predictions=p.predictions, references=p.label_ids)

@@ -366,7 +366,7 @@ def main():
     label2id = {v: str(k) for k, v in id2label.items()}
 
     # Load the mean IoU metric from the datasets package
-    metric = evaluate.load("mean_iou")
+    metric = evaluate.load("mean_iou", cache_dir=model_args.cache_dir)
 
     # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
     # predictions and label_ids field) and has to return a dictionary string to float.

@@ -530,7 +530,7 @@ def main():
         args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
 
     # Instantiate metric
-    metric = evaluate.load("mean_iou")
+    metric = evaluate.load("mean_iou", cache_dir=args.cache_dir)
 
     # We need to initialize the trackers we use, and also store our configuration.
     # The trackers initializes automatically on the main process.

@@ -680,7 +680,7 @@ def main():
     # instantiate a data collator and the trainer
 
     # Define evaluation metrics during training, *i.e.* word error rate, character error rate
-    eval_metrics = {metric: evaluate.load(metric) for metric in data_args.eval_metrics}
+    eval_metrics = {metric: evaluate.load(metric, cache_dir=model_args.cache_dir) for metric in data_args.eval_metrics}
 
     # for large datasets it is advised to run the preprocessing on a
     # single machine first with ``args.preprocessing_only`` since there will mostly likely

@@ -702,7 +702,7 @@ def main():
     # instantiate a data collator and the trainer
 
     # Define evaluation metrics during training, *i.e.* word error rate, character error rate
-    eval_metrics = {metric: evaluate.load(metric) for metric in data_args.eval_metrics}
+    eval_metrics = {metric: evaluate.load(metric, cache_dir=model_args.cache_dir) for metric in data_args.eval_metrics}
 
     # for large datasets it is advised to run the preprocessing on a
     # single machine first with ``args.preprocessing_only`` since there will mostly likely

@@ -520,7 +520,7 @@ def main():
         return
 
     # 8. Load Metric
-    metric = evaluate.load("wer")
+    metric = evaluate.load("wer", cache_dir=model_args.cache_dir)
 
     def compute_metrics(pred):
         pred_ids = pred.predictions

@@ -645,7 +645,7 @@ def main():
     )
 
     # Metric
-    metric = evaluate.load("rouge")
+    metric = evaluate.load("rouge", cache_dir=model_args.cache_dir)
 
     def postprocess_text(preds, labels):
         preds = [pred.strip() for pred in preds]

@@ -633,23 +633,23 @@ def main():
 
     if data_args.metric_name is not None:
         metric = (
-            evaluate.load(data_args.metric_name, config_name="multilabel")
+            evaluate.load(data_args.metric_name, config_name="multilabel", cache_dir=model_args.cache_dir)
             if is_multi_label
-            else evaluate.load(data_args.metric_name)
+            else evaluate.load(data_args.metric_name, cache_dir=model_args.cache_dir)
         )
         logger.info(f"Using metric {data_args.metric_name} for evaluation.")
     else:
         if is_regression:
-            metric = evaluate.load("mse")
+            metric = evaluate.load("mse", cache_dir=model_args.cache_dir)
             logger.info("Using mean squared error (mse) as regression score, you can use --metric_name to overwrite.")
         else:
            if is_multi_label:
-                metric = evaluate.load("f1", config_name="multilabel")
+                metric = evaluate.load("f1", config_name="multilabel", cache_dir=model_args.cache_dir)
                logger.info(
                    "Using multilabel F1 for multi-label classification task, you can use --metric_name to overwrite."
                )
            else:
-                metric = evaluate.load("accuracy")
+                metric = evaluate.load("accuracy", cache_dir=model_args.cache_dir)
                logger.info("Using accuracy as classification score, you can use --metric_name to overwrite.")
 
     def compute_metrics(p: EvalPrediction):

@@ -514,11 +514,11 @@ def main():
 
     # Get the metric function
     if data_args.task_name is not None:
-        metric = evaluate.load("glue", data_args.task_name)
+        metric = evaluate.load("glue", data_args.task_name, cache_dir=model_args.cache_dir)
     elif is_regression:
-        metric = evaluate.load("mse")
+        metric = evaluate.load("mse", cache_dir=model_args.cache_dir)
     else:
-        metric = evaluate.load("accuracy")
+        metric = evaluate.load("accuracy", cache_dir=model_args.cache_dir)
 
     # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
     # predictions and label_ids field) and has to return a dictionary string to float.

@@ -385,7 +385,7 @@ def main():
     )
 
     # Get the metric function
-    metric = evaluate.load("xnli")
+    metric = evaluate.load("xnli", cache_dir=model_args.cache_dir)
 
     # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
     # predictions and label_ids field) and has to return a dictionary string to float.

@@ -539,7 +539,7 @@ def main():
     data_collator = DataCollatorForTokenClassification(tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
 
     # Metrics
-    metric = evaluate.load("seqeval")
+    metric = evaluate.load("seqeval", cache_dir=model_args.cache_dir)
 
     def compute_metrics(p):
         predictions, labels = p

@@ -564,7 +564,7 @@ def main():
     )
 
     # Metric
-    metric = evaluate.load("sacrebleu")
+    metric = evaluate.load("sacrebleu", cache_dir=model_args.cache_dir)
 
     def postprocess_text(preds, labels):
         preds = [pred.strip() for pred in preds]

@@ -440,7 +440,7 @@ def main():
     collate_fn = DefaultDataCollator(return_tensors="np")
 
     # Load the accuracy metric from the datasets package
-    metric = evaluate.load("accuracy")
+    metric = evaluate.load("accuracy", cache_dir=model_args.cache_dir)
 
     # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
     # predictions and label_ids field) and has to return a dictionary string to float.

@@ -631,7 +631,9 @@ def main():
         references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
         return EvalPrediction(predictions=formatted_predictions, label_ids=references)
 
-    metric = evaluate.load("squad_v2" if data_args.version_2_with_negative else "squad")
+    metric = evaluate.load(
+        "squad_v2" if data_args.version_2_with_negative else "squad", cache_dir=model_args.cache_dir
+    )
 
     def compute_metrics(p: EvalPrediction):
         return metric.compute(predictions=p.predictions, references=p.label_ids)

@@ -627,7 +627,7 @@ def main():
 
     # region Metric and KerasMetricCallback
     if training_args.do_eval:
-        metric = evaluate.load("rouge")
+        metric = evaluate.load("rouge", cache_dir=model_args.cache_dir)
 
         if data_args.val_max_target_length is None:
             data_args.val_max_target_length = data_args.max_target_length

@@ -379,7 +379,7 @@ def main():
     # endregion
 
     # region Metric function
-    metric = evaluate.load("glue", data_args.task_name)
+    metric = evaluate.load("glue", data_args.task_name, cache_dir=model_args.cache_dir)
 
     def compute_metrics(preds, label_ids):
         preds = preds["logits"]

@@ -511,7 +511,7 @@ def main():
     # endregion
 
     # Metrics
-    metric = evaluate.load("seqeval")
+    metric = evaluate.load("seqeval", cache_dir=model_args.cache_dir)
 
     def get_labels(y_pred, y_true):
         # Transform predictions and references tensos to numpy arrays

@@ -589,7 +589,7 @@ def main():
 
     # region Metric and postprocessing
     if training_args.do_eval:
-        metric = evaluate.load("sacrebleu")
+        metric = evaluate.load("sacrebleu", cache_dir=model_args.cache_dir)
 
         if data_args.val_max_target_length is None:
             data_args.val_max_target_length = data_args.max_target_length