Mirror of https://github.com/huggingface/transformers.git (synced 2025-07-04 13:20:12 +06:00)
Add Slack notification support for doc tests (#16253)
* up * up * up * fix * yeh * ups * Empty test commit * correct quicktour * correct * correct * up * up * uP * uP * up * up * uP * up * up * up * up * up * up * up * up * up * up * Update src/transformers/models/van/modeling_van.py * finish * apply suggestions * remove folder * revert to daily testing
This commit is contained in:
parent 319cbbe191
commit c1af180dfe
.github/workflows/doctests.yml (41 lines changed)
@@ -26,32 +26,55 @@ jobs:
       options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
     steps:
       - uses: actions/checkout@v2
-        with:
-          repository: 'huggingface/transformers'
-          path: transformers
 
       - name: NVIDIA-SMI
         run: |
           nvidia-smi
 
       - name: GPU visibility
-        working-directory: transformers
         run: |
           utils/print_env_pt.py
           TF_CPP_MIN_LOG_LEVEL=3 python3 -c "import tensorflow as tf; print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))"
           TF_CPP_MIN_LOG_LEVEL=3 python3 -c "import tensorflow as tf; print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))"
 
       - name: Prepare files for doctests
-        working-directory: transformers
         run: |
           python3 utils/prepare_for_doc_test.py src docs
 
       - name: Run doctests
-        working-directory: transformers
         run: |
-          python3 -m pytest --doctest-modules $(cat utils/documentation_tests.txt) -sv --doctest-continue-on-failure --doctest-glob="*.mdx"
+          python3 -m pytest -v --make-reports doc_tests_gpu --doctest-modules $(cat utils/documentation_tests.txt) -sv --doctest-continue-on-failure --doctest-glob="*.mdx"
 
       - name: Clean files after doctests
-        working-directory: transformers
         run: |
           python3 utils/prepare_for_doc_test.py src docs --remove_new_line
 
+      - name: Failure short reports
+        if: ${{ failure() }}
+        continue-on-error: true
+        run: cat reports/doc_tests_gpu/failures_short.txt
+
+      - name: Test suite reports artifacts
+        if: ${{ always() }}
+        uses: actions/upload-artifact@v2
+        with:
+          name: doc_tests_gpu_test_reports
+          path: reports/doc_tests_gpu
+
+
+  send_results:
+    name: Send results to webhook
+    runs-on: ubuntu-latest
+    if: always()
+    needs: [run_doctests]
+    steps:
+      - uses: actions/checkout@v2
+      - uses: actions/download-artifact@v2
+      - name: Send message to Slack
+        env:
+          CI_SLACK_BOT_TOKEN: ${{ secrets.CI_SLACK_BOT_TOKEN }}
+          CI_SLACK_CHANNEL_ID: ${{ secrets.CI_SLACK_CHANNEL_ID_DAILY_DOCS }}
+          CI_SLACK_CHANNEL_ID_DAILY: ${{ secrets.CI_SLACK_CHANNEL_ID_DAILY_DOCS }}
+          CI_SLACK_CHANNEL_DUMMY_TESTS: ${{ secrets.CI_SLACK_CHANNEL_DUMMY_TESTS }}
+        run: |
+          pip install slack_sdk
+          python utils/notification_service_doc_tests.py
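The notification flow relies on pytest's --make-reports doc_tests_gpu option writing plain-text reports under reports/doc_tests_gpu/, which the "Failure short reports" step prints and the artifact step uploads. A minimal sketch of inspecting those reports locally (the stats and summary_short file names are assumptions taken from the keys the new script reads; only failures_short.txt is confirmed by the workflow above):

from pathlib import Path

# Hypothetical local check of the reports written by `pytest --make-reports doc_tests_gpu`.
report_dir = Path("reports/doc_tests_gpu")
for name in ("stats", "summary_short", "failures_short"):
    path = report_dir / f"{name}.txt"
    if path.exists():
        print(f"{name}: {len(path.read_text().splitlines())} lines")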
docs/source/quicktour.mdx
@@ -308,10 +308,7 @@ The model outputs the final activations in the `logits` attribute. Apply the sof
 >>> import tensorflow as tf
 
 >>> tf_predictions = tf.nn.softmax(tf_outputs.logits, axis=-1)
->>> print(tf.math.round(tf_predictions * 10**4) / 10**4)
-tf.Tensor(
-[[0.0021 0.0018 0.0116 0.2121 0.7725]
- [0.2084 0.1826 0.1969 0.1755 0.2365]], shape=(2, 5), dtype=float32)
+>>> tf_predictions  # doctest: +IGNORE_RESULT
 ```
 </tf>
 </frameworkcontent>
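The new `# doctest: +IGNORE_RESULT` directive is not a standard doctest option flag; the test harness has to register it so that the expression is still executed but its output is not compared. A minimal sketch of how such a flag can be wired up (this mirrors the usual doctest extension pattern, not necessarily the repository's exact conftest code):

import doctest

# Register a custom option flag and an OutputChecker that honors it: when a
# doctest example carries `# doctest: +IGNORE_RESULT`, whatever it prints is
# accepted as-is instead of being compared to the expected output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")


class CustomOutputChecker(doctest.OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return super().check_output(want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker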
utils/documentation_tests.txt
@@ -1,36 +1,36 @@
+docs/source/quicktour.mdx
+docs/source/task_summary.mdx
+src/transformers/generation_utils.py
+src/transformers/models/bart/modeling_bart.py
+src/transformers/models/beit/modeling_beit.py
+src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py
+src/transformers/models/blenderbot/modeling_blenderbot.py
+src/transformers/models/blenderbot_small/modeling_blenderbot_small.py
+src/transformers/models/convnext/modeling_convnext.py
+src/transformers/models/data2vec/modeling_data2vec_audio.py
+src/transformers/models/deit/modeling_deit.py
+src/transformers/models/hubert/modeling_hubert.py
+src/transformers/models/marian/modeling_marian.py
+src/transformers/models/mbart/modeling_mbart.py
+src/transformers/models/pegasus/modeling_pegasus.py
+src/transformers/models/plbart/modeling_plbart.py
+src/transformers/models/poolformer/modeling_poolformer.py
+src/transformers/models/resnet/modeling_resnet.py
+src/transformers/models/segformer/modeling_segformer.py
+src/transformers/models/sew/modeling_sew.py
+src/transformers/models/sew_d/modeling_sew_d.py
+src/transformers/models/speech_encoder_decoder/modeling_speech_encoder_decoder.py
+src/transformers/models/speech_to_text/modeling_speech_to_text.py
+src/transformers/models/speech_to_text_2/modeling_speech_to_text_2.py
+src/transformers/models/swin/modeling_swin.py
+src/transformers/models/unispeech/modeling_unispeech.py
+src/transformers/models/unispeech_sat/modeling_unispeech_sat.py
+src/transformers/models/van/modeling_van.py
+src/transformers/models/vilt/modeling_vilt.py
+src/transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py
+src/transformers/models/vit/modeling_vit.py
+src/transformers/models/vit_mae/modeling_vit_mae.py
 src/transformers/models/wav2vec2/modeling_wav2vec2.py
 src/transformers/models/wav2vec2/tokenization_wav2vec2.py
 src/transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py
-src/transformers/models/hubert/modeling_hubert.py
 src/transformers/models/wavlm/modeling_wavlm.py
-src/transformers/models/unispeech/modeling_unispeech.py
-src/transformers/models/unispeech_sat/modeling_unispeech_sat.py
-src/transformers/models/sew/modeling_sew.py
-src/transformers/models/sew_d/modeling_sew_d.py
-src/transformers/models/speech_to_text_2/modeling_speech_to_text_2.py
-src/transformers/models/speech_to_text/modeling_speech_to_text.py
-src/transformers/models/speech_encoder_decoder/modeling_speech_encoder_decoder.py
-src/transformers/models/data2vec/modeling_data2vec_audio.py
-src/transformers/models/vit/modeling_vit.py
-src/transformers/models/beit/modeling_beit.py
-src/transformers/models/deit/modeling_deit.py
-src/transformers/models/swin/modeling_swin.py
-src/transformers/models/convnext/modeling_convnext.py
-src/transformers/models/poolformer/modeling_poolformer.py
-src/transformers/models/vit_mae/modeling_vit_mae.py
-src/transformers/models/vilt/modeling_vilt.py
-src/transformers/models/van/modeling_van.py
-src/transformers/models/segformer/modeling_segformer.py
-src/transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py
-src/transformers/models/bart/modeling_bart.py
-src/transformers/models/mbart/modeling_mbart.py
-src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py
-src/transformers/models/marian/modeling_marian.py
-src/transformers/models/pegasus/modeling_pegasus.py
-src/transformers/models/blenderbot/modeling_blenderbot.py
-src/transformers/models/blenderbot_small/modeling_blenderbot_small.py
-src/transformers/models/plbart/modeling_plbart.py
-src/transformers/generation_utils.py
-docs/source/quicktour.mdx
-docs/source/task_summary.mdx
-src/transformers/models/resnet/modeling_resnet.py
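The only change to utils/documentation_tests.txt is that the list is now sorted alphabetically. A quick check to keep it that way (a hypothetical helper, not part of this commit):

# Verify that utils/documentation_tests.txt stays alphabetically sorted.
with open("utils/documentation_tests.txt") as f:
    lines = f.read().splitlines()
assert lines == sorted(lines), "utils/documentation_tests.txt is not sorted"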
utils/notification_service_doc_tests.py (new file, 379 lines)
@@ -0,0 +1,379 @@
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict

import requests
from slack_sdk import WebClient


client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])

def handle_test_results(test_results):
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent

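# Illustration (not part of the committed file): for a pytest stats line such
# as "== 2 failed, 201 passed in 124.53s ==", the function above returns
# (2, 201, "124.53s").
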
def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False

    return failures

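# Illustration (not part of the committed file): in a failures_short report,
# each failure opens with a header line like
#     "____ [doctest] transformers.models.bart.modeling_bart.BartModel.forward ____"
# and the function above maps that dotted test name to the first following
# line that does not start with a line number, i.e. the error message.
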
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title

        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results

    @property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0

        for time in time_spent:
            time_parts = time.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"

    @property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def no_failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in self.doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue

            if report != "":
                report += "\n\n"

            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"

        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"The following examples had failures:\n\n\n{report}\n",
            },
        }
    @property
    def payload(self) -> str:
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)
    @staticmethod
    def error_out():
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]

        print("Sending the following payload")
        print(json.dumps({"blocks": payload}))

        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )

    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )
    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]

    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)

def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}

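# Illustration (not part of the committed file): the jobs endpoint returns at
# most 100 jobs per page, so with total_count == 250 the loop above fetches
# two extra pages (page=2 and page=3) after the initial request.
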
def retrieve_artifact(name: str):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file)) as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact


def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts: Dict[str, Artifact] = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts

if __name__ == "__main__":

    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.mdx", "MDX Examples"),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "

        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):

                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")

                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure
                        break

    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
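For reference, the slack_sdk calling pattern the script builds on: a top-level chat_postMessage returns a response whose "ts" timestamp is used to thread the per-category replies. A self-contained sketch (the token variable and channel name are placeholders, not values used by the CI):

import os

from slack_sdk import WebClient

# Placeholders: export SLACK_BOT_TOKEN and pick a channel the bot can post to.
client = WebClient(token=os.environ["SLACK_BOT_TOKEN"])

blocks = [{"type": "header", "text": {"type": "plain_text", "text": "🤗 Results of the doc tests."}}]
parent = client.chat_postMessage(channel="#doc-tests", text="Doc test results", blocks=blocks)

# Replies attach to the parent message via its `ts` timestamp.
client.chat_postMessage(channel="#doc-tests", text="Per-category details", thread_ts=parent["ts"])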