[CIs] Better reports everywhere (#8275)

* make it possible to invoke conftest.py in both test suites without crashing when the same pytest option is added twice (see the sketch after this list)

* rename `--make_reports` to `--make-reports` (applied with `perl -pi -e 's|--make_reports|--make-reports|'`) to be consistent with other opts

* add `pytest --make-reports` to all CIs (and artifacts)

* fix
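The duplicate-option crash is avoided by registering the option through a shared helper guarded by a module-level registry. A minimal sketch of the pattern (the full version lands in `src/transformers/testing_utils.py`, see the diff below):

```python
# Module-level registry: pytest errors out if the same option is added twice,
# which happens when tests/conftest.py and examples/conftest.py are collected
# in one session. Registering through this guard makes the second call a no-op.
pytest_opt_registered = {}


def pytest_addoption_shared(parser):
    option = "--make-reports"
    if option not in pytest_opt_registered:
        parser.addoption(
            option,
            action="store",
            default=False,
            help="generate report files. The value of this option is used as a prefix to report names",
        )
        pytest_opt_registered[option] = 1
```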
Stas Bekman 2020-11-03 13:57:12 -08:00 committed by GitHub
parent 7f556d2e39
commit 1bb4bba53c
6 changed files with 144 additions and 59 deletions

.circleci/config.yml

@@ -82,10 +82,12 @@ jobs:
key: v0.4-{{ checksum "setup.py" }}
paths:
- '~/.cache/pip'
- run: RUN_PT_TF_CROSS_TESTS=1 python -m pytest -n 8 --dist=loadfile -rA -s ./tests/ -m is_pt_tf_cross_test --durations=0 | tee output.txt
- run: RUN_PT_TF_CROSS_TESTS=1 python -m pytest -n 8 --dist=loadfile -rA -s --make-reports=tests_torch_and_tf ./tests/ -m is_pt_tf_cross_test --durations=0 | tee tests_output.txt
- store_artifacts:
path: ~/transformers/output.txt
destination: test_output.txt
path: ~/transformers/tests_output.txt
- store_artifacts:
path: ~/transformers/reports
run_tests_torch:
working_directory: ~/transformers
docker:
@@ -106,7 +108,7 @@ jobs:
key: v0.4-torch-{{ checksum "setup.py" }}
paths:
- '~/.cache/pip'
- run: python -m pytest -n 8 --dist=loadfile -s --make_reports=tests ./tests/ | tee tests_output.txt
- run: python -m pytest -n 8 --dist=loadfile -s --make-reports=tests_torch ./tests/ | tee tests_output.txt
- store_artifacts:
path: ~/transformers/tests_output.txt
- store_artifacts:
@@ -132,10 +134,12 @@ jobs:
key: v0.4-tf-{{ checksum "setup.py" }}
paths:
- '~/.cache/pip'
- run: python -m pytest -n 8 --dist=loadfile -rA -s ./tests/ | tee output.txt
- run: python -m pytest -n 8 --dist=loadfile -rA -s --make-reports=tests_tf ./tests/ | tee tests_output.txt
- store_artifacts:
path: ~/transformers/output.txt
destination: test_output.txt
path: ~/transformers/tests_output.txt
- store_artifacts:
path: ~/transformers/reports
run_tests_flax:
working_directory: ~/transformers
docker:
@@ -156,10 +160,12 @@ jobs:
key: v0.4-flax-{{ checksum "setup.py" }}
paths:
- '~/.cache/pip'
- run: python -m pytest -n 8 --dist=loadfile -rA -s ./tests/ | tee output.txt
- run: python -m pytest -n 8 --dist=loadfile -rA -s --make-reports=tests_flax ./tests/ | tee tests_output.txt
- store_artifacts:
path: ~/transformers/output.txt
destination: test_output.txt
path: ~/transformers/tests_output.txt
- store_artifacts:
path: ~/transformers/reports
run_tests_pipelines_torch:
working_directory: ~/transformers
docker:
@@ -180,10 +186,12 @@ jobs:
key: v0.4-torch-{{ checksum "setup.py" }}
paths:
- '~/.cache/pip'
- run: RUN_PIPELINE_TESTS=1 python -m pytest -n 8 --dist=loadfile -rA -s ./tests/ -m is_pipeline_test | tee output.txt
- run: RUN_PIPELINE_TESTS=1 python -m pytest -n 8 --dist=loadfile -rA -s --make-reports=tests_pipelines_torch -m is_pipeline_test ./tests/ | tee tests_output.txt
- store_artifacts:
path: ~/transformers/output.txt
destination: test_output.txt
path: ~/transformers/tests_output.txt
- store_artifacts:
path: ~/transformers/reports
run_tests_pipelines_tf:
working_directory: ~/transformers
docker:
@@ -204,10 +212,12 @@ jobs:
key: v0.4-tf-{{ checksum "setup.py" }}
paths:
- '~/.cache/pip'
- run: RUN_PIPELINE_TESTS=1 python -m pytest -n 8 --dist=loadfile -rA -s ./tests/ -m is_pipeline_test | tee output.txt
- run: RUN_PIPELINE_TESTS=1 python -m pytest -n 8 --dist=loadfile -rA -s --make-reports=tests_pipelines_tf ./tests/ -m is_pipeline_test | tee tests_output.txt
- store_artifacts:
path: ~/transformers/output.txt
destination: test_output.txt
path: ~/transformers/tests_output.txt
- store_artifacts:
path: ~/transformers/reports
run_tests_custom_tokenizers:
working_directory: ~/transformers
docker:
@@ -227,10 +237,12 @@ jobs:
key: v0.4-custom_tokenizers-{{ checksum "setup.py" }}
paths:
- '~/.cache/pip'
- run: python -m pytest -s ./tests/test_tokenization_bert_japanese.py | tee output.txt
- run: python -m pytest -s --make-reports=tests_custom_tokenizers ./tests/test_tokenization_bert_japanese.py | tee tests_output.txt
- store_artifacts:
path: ~/transformers/output.txt
destination: test_output.txt
path: ~/transformers/tests_output.txt
- store_artifacts:
path: ~/transformers/reports
run_examples_torch:
working_directory: ~/transformers
docker:
@@ -252,11 +264,12 @@ jobs:
key: v0.4-torch_examples-{{ checksum "setup.py" }}
paths:
- '~/.cache/pip'
- run: python -m pytest -n 8 --dist=loadfile -s --make_reports=examples ./examples/ | tee examples_output.txt
- run: python -m pytest -n 8 --dist=loadfile -s --make-reports=examples_torch ./examples/ | tee examples_output.txt
- store_artifacts:
path: ~/transformers/examples_output.txt
- store_artifacts:
path: ~/transformers/reports
build_doc:
working_directory: ~/transformers
docker:
@@ -276,6 +289,7 @@ jobs:
- run: cd docs && make html SPHINXOPTS="-W"
- store_artifacts:
path: ./docs/_build
deploy_doc:
working_directory: ~/transformers
docker:
@@ -295,6 +309,7 @@ jobs:
paths:
- '~/.cache/pip'
- run: ./.circleci/deploy.sh
check_code_quality:
working_directory: ~/transformers
docker:
@@ -321,6 +336,7 @@ jobs:
- run: python utils/check_copies.py
- run: python utils/check_dummies.py
- run: python utils/check_repo.py
check_repository_consistency:
working_directory: ~/transformers
docker:
@@ -351,6 +367,7 @@ jobs:
- setup_remote_docker
- *build_push_docker
- *deploy_cluster
cleanup-gke-jobs:
docker:
- image: circleci/python:3.6
@@ -360,6 +377,7 @@ jobs:
cluster: $GKE_CLUSTER
perform-login: true
- *delete_gke_jobs
workflow_filters: &workflow_filters
filters:
branches:
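All the CircleCI jobs above follow the same pattern: pass a unique `--make-reports=<job_id>` to pytest, tee console output to `tests_output.txt`, and upload the `reports` directory as an artifact. A rough local equivalent of one job (a sketch: it assumes pytest-xdist is installed and that it runs from the repository root so the conftest-defined option exists; `tests_torch` is just the example prefix):

```python
import pytest

# Equivalent of: python -m pytest -n 8 --dist=loadfile -s --make-reports=tests_torch ./tests/
# -n/--dist come from pytest-xdist; --make-reports is the repo's custom option.
exit_code = pytest.main(["-n", "8", "--dist=loadfile", "-s", "--make-reports=tests_torch", "tests"])

# Per-section reports are then written under ./reports/tests_torch_*.txt
print("pytest exit code:", exit_code)
```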

.github/workflows/self-push.yml

@@ -61,7 +61,19 @@ jobs:
CUDA_VISIBLE_DEVICES: 0
run: |
source .env/bin/activate
python -m pytest -n 2 --dist=loadfile -s ./tests/
python -m pytest -n 2 --dist=loadfile -s --make-reports=tests_torch_gpu tests
- name: Failure short reports
if: ${{ always() }}
run: cat reports/tests_torch_gpu_failures_short.txt
- name: Test suite reports artifacts
if: ${{ always() }}
uses: actions/upload-artifact@v2
with:
name: run_all_tests_torch_gpu_test_reports
path: reports
run_tests_tf_gpu:
runs-on: [self-hosted, single-gpu]
@@ -110,7 +122,18 @@ jobs:
CUDA_VISIBLE_DEVICES: 0
run: |
source .env/bin/activate
python -m pytest -n 2 --dist=loadfile -s ./tests/
python -m pytest -n 2 --dist=loadfile -s --make-reports=tests_tf_gpu tests
- name: Failure short reports
if: ${{ always() }}
run: cat reports/tests_tf_gpu_failures_short.txt
- name: Test suite reports artifacts
if: ${{ always() }}
uses: actions/upload-artifact@v2
with:
name: run_all_tests_tf_gpu_test_reports
path: reports
run_tests_torch_multiple_gpu:
runs-on: [self-hosted, multi-gpu]
@@ -158,7 +181,18 @@ jobs:
OMP_NUM_THREADS: 1
run: |
source .env/bin/activate
python -m pytest -n 2 --dist=loadfile -s ./tests/
python -m pytest -n 2 --dist=loadfile -s --make-reports=tests_torch_multiple_gpu tests
- name: Failure short reports
if: ${{ always() }}
run: cat reports/tests_torch_multiple_gpu_failures_short.txt
- name: Test suite reports artifacts
if: ${{ always() }}
uses: actions/upload-artifact@v2
with:
name: run_all_tests_torch_multi_gpu_test_reports
path: reports
run_tests_tf_multiple_gpu:
runs-on: [self-hosted, multi-gpu]
@@ -206,4 +240,16 @@ jobs:
OMP_NUM_THREADS: 1
run: |
source .env/bin/activate
python -m pytest -n 2 --dist=loadfile -s ./tests/
python -m pytest -n 2 --dist=loadfile -s --make-reports=tests_tf_multiple_gpu tests
- name: Failure short reports
if: ${{ always() }}
run: cat reports/tests_tf_multiple_gpu_failures_short.txt
- name: Test suite reports artifacts
if: ${{ always() }}
uses: actions/upload-artifact@v2
with:
name: run_all_tests_tf_multi_gpu_test_reports
path: reports
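Each new `Failure short reports` step runs with `if: ${{ always() }}`, so the condensed failure list is printed even when the test step itself failed. Expressed in Python rather than workflow YAML (a sketch; the report file only exists when `--make-reports` was passed):

```python
from pathlib import Path

# What "run: cat reports/tests_torch_gpu_failures_short.txt" amounts to:
# dump the short failure summary produced by pytest_terminal_summary_main.
report = Path("reports/tests_torch_gpu_failures_short.txt")
if report.exists():
    print(report.read_text())
else:
    print("no report found - was --make-reports passed?")
```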

.github/workflows/self-scheduled.yml

@@ -60,11 +60,11 @@ jobs:
RUN_SLOW: yes
run: |
source .env/bin/activate
python -m pytest -n 1 --dist=loadfile -s --make_reports=tests_torch tests
python -m pytest -n 1 --dist=loadfile -s --make-reports=tests_torch_gpu tests
- name: Failure short reports
if: ${{ always() }}
run: cat reports/report_tests_torch_failures_short.txt
run: cat reports/tests_torch_gpu_failures_short.txt
- name: Run examples tests on GPU
if: ${{ always() }}
@@ -74,11 +74,11 @@ jobs:
run: |
source .env/bin/activate
pip install -r examples/requirements.txt
python -m pytest -n 1 --dist=loadfile -s --make_reports=examples_torch examples
python -m pytest -n 1 --dist=loadfile -s --make-reports=examples_torch_gpu examples
- name: Failure short reports
if: ${{ always() }}
run: cat reports/report_examples_torch_failures_short.txt
run: cat reports/examples_torch_gpu_failures_short.txt
- name: Run all pipeline tests on GPU
if: ${{ always() }}
@@ -89,11 +89,11 @@ jobs:
RUN_PIPELINE_TESTS: yes
run: |
source .env/bin/activate
python -m pytest -n 1 --dist=loadfile -s -m is_pipeline_test --make_reports=tests_torch_pipeline tests
python -m pytest -n 1 --dist=loadfile -s -m is_pipeline_test --make-reports=tests_torch_pipeline_gpu tests
- name: Failure short reports
if: ${{ always() }}
run: cat reports/report_tests_torch_pipeline_failures_short.txt
run: cat reports/tests_torch_pipeline_gpu_failures_short.txt
- name: Test suite reports artifacts
if: ${{ always() }}
@@ -154,13 +154,14 @@ jobs:
RUN_SLOW: yes
run: |
source .env/bin/activate
python -m pytest -n 1 --dist=loadfile -s --make_reports=tests_tf tests
python -m pytest -n 1 --dist=loadfile -s --make-reports=tests_tf_gpu tests
- name: Failure short reports
if: ${{ always() }}
run: cat reports/report_tests_tf_failures_short.txt
run: cat reports/tests_tf_gpu_failures_short.txt
- name: Run all pipeline tests on GPU
if: ${{ always() }}
env:
TF_FORCE_GPU_ALLOW_GROWTH: "true"
OMP_NUM_THREADS: 1
@@ -168,11 +169,11 @@ jobs:
RUN_PIPELINE_TESTS: yes
run: |
source .env/bin/activate
python -m pytest -n 1 --dist=loadfile -s tests -m is_pipeline_test --make_reports=tests_tf_pipelines tests
python -m pytest -n 1 --dist=loadfile -s -m is_pipeline_test --make-reports=tests_tf_pipelines_gpu tests
- name: Failure short reports
if: ${{ always() }}
run: cat reports/report_tests_tf_pipelines_failures_short.txt
run: cat reports/tests_tf_pipelines_gpu_failures_short.txt
- name: Test suite reports artifacts
if: ${{ always() }}
@@ -232,11 +233,11 @@ jobs:
RUN_SLOW: yes
run: |
source .env/bin/activate
python -m pytest -n 1 --dist=loadfile -s --make_reports=tests_torch tests
python -m pytest -n 1 --dist=loadfile -s --make-reports=tests_torch_multiple_gpu tests
- name: Failure short reports
if: ${{ always() }}
run: cat reports/report_tests_torch_failures_short.txt
run: cat reports/tests_torch_multiple_gpu_failures_short.txt
- name: Run all pipeline tests on GPU
if: ${{ always() }}
@@ -247,11 +248,11 @@ jobs:
RUN_PIPELINE_TESTS: yes
run: |
source .env/bin/activate
python -m pytest -n 1 --dist=loadfile -s -m is_pipeline_test --make_reports=tests_torch_pipeline tests
python -m pytest -n 1 --dist=loadfile -s -m is_pipeline_test --make-reports=tests_torch_pipeline_multiple_gpu tests
- name: Failure short reports
if: ${{ always() }}
run: cat reports/report_tests_torch_pipeline_failures_short.txt
run: cat reports/tests_torch_pipeline_multiple_gpu_failures_short.txt
- name: Test suite reports artifacts
if: ${{ always() }}
@@ -311,13 +312,14 @@ jobs:
RUN_SLOW: yes
run: |
source .env/bin/activate
python -m pytest -n 1 --dist=loadfile -s tests --make_reports=tests_tf tests
python -m pytest -n 1 --dist=loadfile -s --make-reports=tests_tf_multiple_gpu tests
- name: Failure short reports
if: ${{ always() }}
run: cat reports/report_tests_tf_failures_short.txt
run: cat reports/tests_tf_multiple_gpu_failures_short.txt
- name: Run all pipeline tests on GPU
if: ${{ always() }}
env:
TF_FORCE_GPU_ALLOW_GROWTH: "true"
OMP_NUM_THREADS: 1
@@ -325,11 +327,11 @@ jobs:
RUN_PIPELINE_TESTS: yes
run: |
source .env/bin/activate
python -m pytest -n 1 --dist=loadfile -s tests -m is_pipeline_test --make_reports=tests_tf_pipelines tests
python -m pytest -n 1 --dist=loadfile -s -m is_pipeline_test --make-reports=tests_tf_pipelines_multiple_gpu tests
- name: Failure short reports
if: ${{ always() }}
run: cat reports/report_tests_tf_pipelines_failures_short.txt
run: cat reports/tests_tf_pipelines_multiple_gpu_failures_short.txt
- name: Test suite reports artifacts
if: ${{ always() }}
@@ -337,4 +339,4 @@ jobs:
with:
name: run_all_tests_tf_multi_gpu_test_reports
path: reports

examples/conftest.py

@@ -17,17 +17,14 @@ warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_addoption(parser):
parser.addoption(
"--make_reports",
action="store",
default=False,
help="generate report files - the value will be used as a `report_`+val+`reportname.txt`",
)
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
from transformers.testing_utils import pytest_terminal_summary_main
make_reports = terminalreporter.config.getoption("--make_reports")
make_reports = terminalreporter.config.getoption("--make-reports")
if make_reports:
pytest_terminal_summary_main(terminalreporter, id=make_reports)
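Assembled from the added lines above, each `conftest.py` is now just a thin wrapper around the shared helpers:

```python
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
```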

src/transformers/testing_utils.py

@@ -695,6 +695,31 @@ def mockenv(**kwargs):
return unittest.mock.patch.dict(os.environ, kwargs)
# --- pytest conf functions --- #
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}
def pytest_addoption_shared(parser):
"""
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
option.
"""
option = "--make-reports"
if option not in pytest_opt_registered:
parser.addoption(
option,
action="store",
default=False,
help="generate report files. The value of this option is used as a prefix to report names",
)
pytest_opt_registered[option] = 1
def pytest_terminal_summary_main(tr, id):
"""
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
@@ -728,7 +753,7 @@ def pytest_terminal_summary_main(tr, id):
dir = "reports"
Path(dir).mkdir(parents=True, exist_ok=True)
report_files = {
k: f"{dir}/report_{id}_{k}.txt"
k: f"{dir}/{id}_{k}.txt"
for k in [
"durations",
"errors",
@@ -824,7 +849,7 @@ def pytest_terminal_summary_main(tr, id):
config.option.tbstyle = orig_tbstyle
# the following code deals with async io between processes
# --- distributed testing functions --- #
# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio # noqa
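One hunk above also drops the redundant `report_` filename prefix: reports now land at `reports/<id>_<section>.txt` instead of `reports/report_<id>_<section>.txt`, matching the `cat reports/..._failures_short.txt` steps in the workflows. A small sketch of the resulting layout (section list abbreviated; the real list in `pytest_terminal_summary_main` is longer):

```python
from pathlib import Path

report_id = "tests_torch_gpu"  # example value passed via --make-reports
report_dir = "reports"
Path(report_dir).mkdir(parents=True, exist_ok=True)

report_files = {
    section: f"{report_dir}/{report_id}_{section}.txt"
    for section in ["durations", "errors", "failures_short"]  # abbreviated
}
print(report_files["failures_short"])  # -> reports/tests_torch_gpu_failures_short.txt
```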

tests/conftest.py

@@ -24,17 +24,14 @@ def pytest_configure(config):
def pytest_addoption(parser):
parser.addoption(
"--make_reports",
action="store",
default=False,
help="generate report files - the value will be used as a `report_`+val+`reportname.txt`",
)
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
from transformers.testing_utils import pytest_terminal_summary_main
make_reports = terminalreporter.config.getoption("--make_reports")
make_reports = terminalreporter.config.getoption("--make-reports")
if make_reports:
pytest_terminal_summary_main(terminalreporter, id=make_reports)