From 7ccacdf10fcba7edc8a7e19ce9c46d1a38b74a99 Mon Sep 17 00:00:00 2001
From: Patrick von Platen
Date: Tue, 5 Apr 2022 14:15:02 +0200
Subject: [PATCH] [Doctests] Correct filenaming (#16599)

* [Doctests] Correct filenaming

* improve quicktour

* make style
---
 docs/source/en/quicktour.mdx  | 14 +++++++-------
 docs/source/es/quicktour.mdx  | 13 ++++++-------
 utils/documentation_tests.txt | 18 +++---------------
 3 files changed, 16 insertions(+), 29 deletions(-)

diff --git a/docs/source/en/quicktour.mdx b/docs/source/en/quicktour.mdx
index 1fc4f8b865d..0d7edd63070 100644
--- a/docs/source/en/quicktour.mdx
+++ b/docs/source/en/quicktour.mdx
@@ -115,23 +115,23 @@ Create a [`pipeline`] with the task you want to solve for and the model you want
 >>> speech_recognizer = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h")
 ```

-Next, load a dataset (see the 🤗 Datasets [Quick Start](https://huggingface.co/docs/datasets/quickstart.html) for more details) you'd like to iterate over. For example, let's load the [SUPERB](https://huggingface.co/datasets/superb) dataset:
+Next, load a dataset (see the 🤗 Datasets [Quick Start](https://huggingface.co/docs/datasets/quickstart.html) for more details) you'd like to iterate over. For example, let's load the [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) dataset:

 ```py
 >>> import datasets

->>> dataset = datasets.load_dataset("superb", name="asr", split="test") # doctest: +IGNORE_RESULT
+>>> dataset = datasets.load_dataset("minds14", name="en-US", split="train") # doctest: +IGNORE_RESULT
 ```

 You can pass a whole dataset pipeline:

 ```py
->>> files = dataset["file"]
+>>> files = dataset["path"]
 >>> speech_recognizer(files[:4])
-[{'text': 'HE HOPED THERE WOULD BE STEW FOR DINNER TURNIPS AND CARROTS AND BRUISED POTATOES AND FAT MUTTON PIECES TO BE LADLED OUT IN THICK PEPPERED FLOWER FAT AND SAUCE'},
- {'text': 'STUFFERED INTO YOU HIS BELLY COUNSELLED HIM'},
- {'text': 'AFTER EARLY NIGHTFALL THE YELLOW LAMPS WOULD LIGHT UP HERE AND THERE THE SQUALID QUARTER OF THE BROTHELS'},
- {'text': 'HO BERTIE ANY GOOD IN YOUR MIND'}]
+[{'text': 'I WOULD LIKE TO SET UP A JOINT ACCOUNT WITH MY PARTNER HOW DO I PROCEED WITH DOING THAT'},
+ {'text': "FONDERING HOW I'D SET UP A JOIN TO HELL T WITH MY WIFE AND WHERE THE AP MIGHT BE"},
+ {'text': "I I'D LIKE TOY SET UP A JOINT ACCOUNT WITH MY PARTNER I'M NOT SEEING THE OPTION TO DO IT ON THE APSO I CALLED IN TO GET SOME HELP CAN I JUST DO IT OVER THE PHONE WITH YOU AND GIVE YOU THE INFORMATION OR SHOULD I DO IT IN THE AP AN I'M MISSING SOMETHING UQUETTE HAD PREFERRED TO JUST DO IT OVER THE PHONE OF POSSIBLE THINGS"},
+ {'text': 'HOW DO I FURN A JOINA COUT'}]
 ```

 For a larger dataset where the inputs are big (like in speech or vision), you will want to pass along a generator instead of a list that loads all the inputs in memory. See the [pipeline documentation](./main_classes/pipelines) for more information.
diff --git a/docs/source/es/quicktour.mdx b/docs/source/es/quicktour.mdx
index 8b400867099..67ed7e7bb5c 100644
--- a/docs/source/es/quicktour.mdx
+++ b/docs/source/es/quicktour.mdx
@@ -115,23 +115,22 @@ Crea un [`pipeline`] con la tarea que deseas resolver y el modelo que quieres us
 >>> speech_recognizer = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h", device=0)
 ```

-A continuación, carga el dataset (ve 🤗 Datasets [Quick Start](https://huggingface.co/docs/datasets/quickstart.html) para más detalles) sobre el que quisieras iterar. Por ejemplo, vamos a cargar el dataset [SUPERB](https://huggingface.co/datasets/superb):
+A continuación, carga el dataset (ve 🤗 Datasets [Quick Start](https://huggingface.co/docs/datasets/quickstart.html) para más detalles) sobre el que quisieras iterar. Por ejemplo, vamos a cargar el dataset [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14):

 ```py
 >>> import datasets

->>> dataset = datasets.load_dataset("superb", name="asr", split="test") # doctest: +IGNORE_RESULT
+>>> dataset = datasets.load_dataset("minds14", name="en-US", split="train") # doctest: +IGNORE_RESULT
 ```

 Puedes pasar un pipeline para un dataset:

 ```py
->>> files = dataset["file"]
+>>> files = dataset["path"]
 >>> speech_recognizer(files[:4])
-[{'text': 'HE HOPED THERE WOULD BE STEW FOR DINNER TURNIPS AND CARROTS AND BRUISED POTATOES AND FAT MUTTON PIECES TO BE LADLED OUT IN THICK PEPPERED FLOWER FAT AND SAUCE'},
- {'text': 'STUFFERED INTO YOU HIS BELLY COUNSELLED HIM'},
- {'text': 'AFTER EARLY NIGHTFALL THE YELLOW LAMPS WOULD LIGHT UP HERE AND THERE THE SQUALID QUARTER OF THE BROTHELS'},
- {'text': 'HO BERTIE ANY GOOD IN YOUR MIND'}]
+[{'text': 'I WOULD LIKE TO SET UP A JOINT ACCOUNT WITH MY PARTNER HOW DO I PROCEED WITH DOING THAT'},
+ {'text': "FONDERING HOW I'D SET UP A JOIN TO HELL T WITH MY WIFE AND WHERE THE AP MIGHT BE"},
+ {'text': "I I'D LIKE TOY SET UP A JOINT ACCOUNT WITH MY PARTNER I'M NOT SEEING THE OPTION TO DO IT ON THE APSO I CALLED IN TO GET SOME HELP CAN I JUST DO IT OVER THE PHONE WITH YOU AND GIVE YOU THE INFORMATION OR SHOULD I DO IT IN THE AP AN I'M MISSING SOMETHING UQUETTE HAD PREFERRED TO JUST DO IT OVER THE PHONE OF POSSIBLE THINGS"},
 ```

 Para un dataset más grande, donde los inputs son de mayor tamaño (como en habla/audio o visión), querrás pasar un generador en lugar de una lista que carga todos los inputs en memoria. Ve la [documentación del pipeline](./main_classes/pipelines) para más información.
diff --git a/utils/documentation_tests.txt b/utils/documentation_tests.txt
index 372e63ad232..f88974ed434 100644
--- a/utils/documentation_tests.txt
+++ b/utils/documentation_tests.txt
@@ -1,17 +1,10 @@
-docs/source/quicktour.mdx
-docs/source/quicktour.mdx
-docs/source/task_summary.mdx
-docs/source/task_summary.mdx
+docs/source/en/quicktour.mdx
+docs/source/en/task_summary.mdx
 src/transformers/generation_utils.py
-src/transformers/generation_utils.py
-src/transformers/models/bart/modeling_bart.py
 src/transformers/models/bart/modeling_bart.py
 src/transformers/models/beit/modeling_beit.py
 src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py
-src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py
 src/transformers/models/blenderbot/modeling_blenderbot.py
-src/transformers/models/blenderbot/modeling_blenderbot.py
-src/transformers/models/blenderbot_small/modeling_blenderbot_small.py
 src/transformers/models/blenderbot_small/modeling_blenderbot_small.py
 src/transformers/models/convnext/modeling_convnext.py
 src/transformers/models/data2vec/modeling_data2vec_audio.py
@@ -20,16 +13,11 @@ src/transformers/models/dpt/modeling_dpt.py
 src/transformers/models/glpn/modeling_glpn.py
 src/transformers/models/hubert/modeling_hubert.py
 src/transformers/models/marian/modeling_marian.py
-src/transformers/models/marian/modeling_marian.py
-src/transformers/models/mbart/modeling_mbart.py
 src/transformers/models/mbart/modeling_mbart.py
 src/transformers/models/pegasus/modeling_pegasus.py
-src/transformers/models/pegasus/modeling_pegasus.py
-src/transformers/models/plbart/modeling_plbart.py
 src/transformers/models/plbart/modeling_plbart.py
 src/transformers/models/poolformer/modeling_poolformer.py
 src/transformers/models/resnet/modeling_resnet.py
-src/transformers/models/resnet/modeling_resnet.py
 src/transformers/models/roberta/modeling_roberta.py
 src/transformers/models/roberta/modeling_tf_roberta.py
 src/transformers/models/segformer/modeling_segformer.py
@@ -50,4 +38,4 @@ src/transformers/models/vit_mae/modeling_vit_mae.py
 src/transformers/models/wav2vec2/modeling_wav2vec2.py
 src/transformers/models/wav2vec2/tokenization_wav2vec2.py
 src/transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py
-src/transformers/models/wavlm/modeling_wavlm.py
\ No newline at end of file
+src/transformers/models/wavlm/modeling_wavlm.py
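
For reference, the updated quicktour snippet boils down to roughly the minimal, standalone sketch below. It is an illustration only, not part of the patch: it assumes `transformers`, `datasets`, and `ffmpeg` (for audio decoding) are installed, it uses the full Hub identifier `PolyAI/minds14` (the patch itself writes `"minds14"`), and the `audio_paths` generator is a hypothetical helper showing the quicktour's advice to stream large inputs instead of materializing them as a list.

```py
import datasets
from transformers import pipeline

# Speech-recognition pipeline and dataset from the updated quicktour.
speech_recognizer = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h")
dataset = datasets.load_dataset("PolyAI/minds14", name="en-US", split="train")

# Pass a small list of audio file paths directly and print the transcriptions.
files = dataset["path"]
print(speech_recognizer(files[:4]))

# For larger datasets, pass a generator so inputs are decoded lazily
# instead of being held in memory all at once (illustrative helper).
def audio_paths():
    for path in dataset["path"]:
        yield path

for result in speech_recognizer(audio_paths()):
    print(result["text"])
```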