Mirror of https://github.com/huggingface/transformers.git (synced 2025-08-02 03:01:07 +06:00)
Added cache_dir=model_args.cache_dir to all examples with a cache_dir arg (#11220)
parent 3312e96bfb
commit 9fa2995993
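
Every example script touched by this commit already exposes a --cache_dir option (via its ModelArguments dataclass) for model and tokenizer downloads; the diff below simply forwards the same value to every load_dataset call so dataset downloads share that cache. The following is a minimal, self-contained sketch of that flow, not a copy of any single script: the dataclass fields, defaults, and the wikitext dataset are illustrative assumptions, while HfArgumentParser and load_dataset(..., cache_dir=...) are the actual APIs involved.

# Minimal sketch (assumed names and defaults) of how --cache_dir reaches load_dataset.
from dataclasses import dataclass, field
from typing import Optional

from datasets import load_dataset
from transformers import HfArgumentParser


@dataclass
class ModelArguments:
    # Shared cache directory for pretrained models and, after this commit, datasets too.
    cache_dir: Optional[str] = field(default=None)


@dataclass
class DataTrainingArguments:
    # Illustrative defaults; the real scripts default these to None.
    dataset_name: Optional[str] = field(default="wikitext")
    dataset_config_name: Optional[str] = field(default="wikitext-2-raw-v1")


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments))
    model_args, data_args = parser.parse_args_into_dataclasses()

    # Forwarding cache_dir keeps the dataset in the same directory that the
    # model/tokenizer downloads already use (or the default HF cache when None).
    datasets = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        cache_dir=model_args.cache_dir,
    )
    print(datasets)


if __name__ == "__main__":
    main()
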
@@ -230,17 +230,19 @@ def main():
     # download the dataset.
     if data_args.dataset_name is not None:
         # Downloading and loading a dataset from the hub.
-        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
+        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
         if "validation" not in datasets.keys():
             datasets["validation"] = load_dataset(
                 data_args.dataset_name,
                 data_args.dataset_config_name,
                 split=f"train[:{data_args.validation_split_percentage}%]",
+                cache_dir=model_args.cache_dir,
             )
             datasets["train"] = load_dataset(
                 data_args.dataset_name,
                 data_args.dataset_config_name,
                 split=f"train[{data_args.validation_split_percentage}%:]",
+                cache_dir=model_args.cache_dir,
             )
     else:
         data_files = {}

@@ -255,7 +257,7 @@ def main():
         )
         if extension == "txt":
             extension = "text"
-        datasets = load_dataset(extension, data_files=data_files)
+        datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
     # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
     # https://huggingface.co/docs/datasets/loading_datasets.html.
 

@@ -239,17 +239,19 @@ def main():
     # download the dataset.
     if data_args.dataset_name is not None:
         # Downloading and loading a dataset from the hub.
-        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
+        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
         if "validation" not in datasets.keys():
             datasets["validation"] = load_dataset(
                 data_args.dataset_name,
                 data_args.dataset_config_name,
                 split=f"train[:{data_args.validation_split_percentage}%]",
+                cache_dir=model_args.cache_dir,
             )
             datasets["train"] = load_dataset(
                 data_args.dataset_name,
                 data_args.dataset_config_name,
                 split=f"train[{data_args.validation_split_percentage}%:]",
+                cache_dir=model_args.cache_dir,
             )
     else:
         data_files = {}

@@ -260,7 +262,7 @@ def main():
         extension = data_args.train_file.split(".")[-1]
         if extension == "txt":
             extension = "text"
-        datasets = load_dataset(extension, data_files=data_files)
+        datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
     # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
     # https://huggingface.co/docs/datasets/loading_datasets.html.
 

@@ -475,17 +475,19 @@ if __name__ == "__main__":
     # download the dataset.
     if data_args.dataset_name is not None:
         # Downloading and loading a dataset from the hub.
-        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
+        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
         if "validation" not in datasets.keys():
             datasets["validation"] = load_dataset(
                 data_args.dataset_name,
                 data_args.dataset_config_name,
                 split=f"train[:{data_args.validation_split_percentage}%]",
+                cache_dir=model_args.cache_dir,
             )
             datasets["train"] = load_dataset(
                 data_args.dataset_name,
                 data_args.dataset_config_name,
                 split=f"train[{data_args.validation_split_percentage}%:]",
+                cache_dir=model_args.cache_dir,
             )
     else:
         data_files = {}

@@ -496,7 +498,7 @@ if __name__ == "__main__":
         extension = data_args.train_file.split(".")[-1]
         if extension == "txt":
             extension = "text"
-        datasets = load_dataset(extension, data_files=data_files)
+        datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
     # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
     # https://huggingface.co/docs/datasets/loading_datasets.html.
 

@@ -236,17 +236,19 @@ def main():
     # download the dataset.
     if data_args.dataset_name is not None:
         # Downloading and loading a dataset from the hub.
-        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
+        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
         if "validation" not in datasets.keys():
             datasets["validation"] = load_dataset(
                 data_args.dataset_name,
                 data_args.dataset_config_name,
                 split=f"train[:{data_args.validation_split_percentage}%]",
+                cache_dir=model_args.cache_dir,
             )
             datasets["train"] = load_dataset(
                 data_args.dataset_name,
                 data_args.dataset_config_name,
                 split=f"train[{data_args.validation_split_percentage}%:]",
+                cache_dir=model_args.cache_dir,
             )
     else:
         data_files = {}

@@ -257,7 +259,7 @@ def main():
         extension = data_args.train_file.split(".")[-1]
         if extension == "txt":
             extension = "text"
-        datasets = load_dataset(extension, data_files=data_files)
+        datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
     # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
     # https://huggingface.co/docs/datasets/loading_datasets.html.
 

@@ -268,10 +268,10 @@ def main():
         if data_args.validation_file is not None:
             data_files["validation"] = data_args.validation_file
         extension = data_args.train_file.split(".")[-1]
-        datasets = load_dataset(extension, data_files=data_files)
+        datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
     else:
         # Downloading and loading the swag dataset from the hub.
-        datasets = load_dataset("swag", "regular")
+        datasets = load_dataset("swag", "regular", cache_dir=model_args.cache_dir)
     # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
     # https://huggingface.co/docs/datasets/loading_datasets.html.
 

@@ -256,7 +256,7 @@ def main():
     # download the dataset.
     if data_args.dataset_name is not None:
         # Downloading and loading a dataset from the hub.
-        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
+        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
     else:
         data_files = {}
         if data_args.train_file is not None:

@@ -269,7 +269,7 @@ def main():
         if data_args.test_file is not None:
             data_files["test"] = data_args.test_file
             extension = data_args.test_file.split(".")[-1]
-        datasets = load_dataset(extension, data_files=data_files, field="data")
+        datasets = load_dataset(extension, data_files=data_files, field="data", cache_dir=model_args.cache_dir)
     # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
     # https://huggingface.co/docs/datasets/loading_datasets.html.
 

@@ -255,7 +255,7 @@ def main():
     # download the dataset.
     if data_args.dataset_name is not None:
         # Downloading and loading a dataset from the hub.
-        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
+        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
     else:
         data_files = {}
         if data_args.train_file is not None:

@@ -267,7 +267,7 @@ def main():
         if data_args.test_file is not None:
             data_files["test"] = data_args.test_file
             extension = data_args.test_file.split(".")[-1]
-        datasets = load_dataset(extension, data_files=data_files, field="data")
+        datasets = load_dataset(extension, data_files=data_files, field="data", cache_dir=model_args.cache_dir)
     # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
     # https://huggingface.co/docs/datasets/loading_datasets.html.
 

@@ -310,7 +310,7 @@ def main():
     # download the dataset.
     if data_args.dataset_name is not None:
         # Downloading and loading a dataset from the hub.
-        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
+        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
     else:
         data_files = {}
         if data_args.train_file is not None:

@@ -322,7 +322,7 @@ def main():
         if data_args.test_file is not None:
             data_files["test"] = data_args.test_file
             extension = data_args.test_file.split(".")[-1]
-        datasets = load_dataset(extension, data_files=data_files)
+        datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
     # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
     # https://huggingface.co/docs/datasets/loading_datasets.html.
 

@@ -294,7 +294,7 @@ def main():
     # download the dataset.
     if data_args.dataset_name is not None:
         # Downloading and loading a dataset from the hub.
-        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
+        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
     else:
         data_files = {}
         if data_args.train_file is not None:

@@ -306,7 +306,7 @@ def main():
         if data_args.test_file is not None:
             data_files["test"] = data_args.test_file
             extension = data_args.test_file.split(".")[-1]
-        datasets = load_dataset(extension, data_files=data_files)
+        datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
     # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
     # https://huggingface.co/docs/datasets/loading_datasets.html.
 

@@ -239,7 +239,7 @@ def main():
     # download the dataset.
     if data_args.task_name is not None:
         # Downloading and loading a dataset from the hub.
-        datasets = load_dataset("glue", data_args.task_name)
+        datasets = load_dataset("glue", data_args.task_name, cache_dir=model_args.cache_dir)
     else:
         # Loading a dataset from your local files.
         # CSV/JSON training and evaluation files are needed.

@@ -263,10 +263,10 @@ def main():
 
         if data_args.train_file.endswith(".csv"):
             # Loading a dataset from local csv files
-            datasets = load_dataset("csv", data_files=data_files)
+            datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
         else:
             # Loading a dataset from local json files
-            datasets = load_dataset("json", data_files=data_files)
+            datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
     # See more about loading any type of standard or custom dataset at
     # https://huggingface.co/docs/datasets/loading_datasets.html.
 

@@ -209,17 +209,19 @@ def main():
     # Downloading and loading xnli dataset from the hub.
     if training_args.do_train:
         if model_args.train_language is None:
-            train_dataset = load_dataset("xnli", model_args.language, split="train")
+            train_dataset = load_dataset("xnli", model_args.language, split="train", cache_dir=model_args.cache_dir)
         else:
-            train_dataset = load_dataset("xnli", model_args.train_language, split="train")
+            train_dataset = load_dataset(
+                "xnli", model_args.train_language, split="train", cache_dir=model_args.cache_dir
+            )
         label_list = train_dataset.features["label"].names
 
     if training_args.do_eval:
-        eval_dataset = load_dataset("xnli", model_args.language, split="validation")
+        eval_dataset = load_dataset("xnli", model_args.language, split="validation", cache_dir=model_args.cache_dir)
         label_list = eval_dataset.features["label"].names
 
     if training_args.do_predict:
-        test_dataset = load_dataset("xnli", model_args.language, split="test")
+        test_dataset = load_dataset("xnli", model_args.language, split="test", cache_dir=model_args.cache_dir)
         label_list = test_dataset.features["label"].names
 
     # Labels

@@ -229,7 +229,7 @@ def main():
     # download the dataset.
     if data_args.dataset_name is not None:
         # Downloading and loading a dataset from the hub.
-        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
+        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
     else:
         data_files = {}
         if data_args.train_file is not None:

@@ -239,7 +239,7 @@ def main():
         if data_args.test_file is not None:
             data_files["test"] = data_args.test_file
         extension = data_args.train_file.split(".")[-1]
-        datasets = load_dataset(extension, data_files=data_files)
+        datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
     # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
     # https://huggingface.co/docs/datasets/loading_datasets.html.
 

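For what the new keyword changes at runtime (a note based on the datasets library's defaults, not on anything stated in the diff): when cache_dir is None, load_dataset keeps using its default cache, typically ~/.cache/huggingface/datasets, so existing invocations behave as before; only runs that pass --cache_dir now store datasets there as well. A hedged before/after sketch, with a placeholder dataset name and path:

from datasets import load_dataset

# Without cache_dir (unchanged behaviour): the dataset goes to the default
# datasets cache, usually ~/.cache/huggingface/datasets.
ds_default = load_dataset("wikitext", "wikitext-2-raw-v1")

# With this commit, running an example script with --cache_dir /data/hf_cache
# effectively does the following, co-locating the dataset with the cached models.
ds_shared = load_dataset("wikitext", "wikitext-2-raw-v1", cache_dir="/data/hf_cache")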