Workflow fixes (#11270)

Lysandre Debut 2021-04-15 23:21:17 -04:00 committed by GitHub
parent dfc6dd8584
commit 5254220e7f
3 changed files with 46 additions and 2 deletions


@@ -38,6 +38,7 @@ jobs:
        apt -y update && apt install -y libsndfile1-dev
        pip install --upgrade pip
        pip install .[sklearn,testing,onnxruntime,sentencepiece,speech]
+        pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.8.0+cu111.html
    - name: Are GPUs recognized by our DL frameworks
      run: |
@@ -121,6 +122,7 @@ jobs:
        apt -y update && apt install -y libsndfile1-dev
        pip install --upgrade pip
        pip install .[sklearn,testing,onnxruntime,sentencepiece,speech]
+        pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.8.0+cu111.html
    - name: Are GPUs recognized by our DL frameworks
      run: |
@@ -220,6 +222,13 @@ jobs:
      if: ${{ always() }}
      run: cat reports/tests_torch_cuda_extensions_gpu_failures_short.txt
+    - name: Test suite reports artifacts
+      if: ${{ always() }}
+      uses: actions/upload-artifact@v2
+      with:
+        name: run_tests_torch_cuda_extensions_gpu_test_reports
+        path: reports

  run_tests_torch_cuda_extensions_multi_gpu:
    runs-on: [self-hosted, docker-gpu, multi-gpu]
    container:
@@ -253,6 +262,13 @@ jobs:
      if: ${{ always() }}
      run: cat reports/tests_torch_cuda_extensions_multi_gpu_failures_short.txt
+    - name: Test suite reports artifacts
+      if: ${{ always() }}
+      uses: actions/upload-artifact@v2
+      with:
+        name: run_tests_torch_cuda_extensions_multi_gpu_test_reports
+        path: reports

  send_results:
    name: Send results to webhook
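Note on the torch-scatter line added above: the wheel index is pinned to the torch 1.8.0 + CUDA 11.1 build assumed to be running in the CI containers. Not part of this commit, but as a rough sanity check one could verify that the interpreter's torch build actually matches that pinned index before installing; the snippet below is only an illustration of the pinning convention, not code from the repository.

# Illustrative sketch (not from this commit): check the local torch build against
# the torch-scatter wheel index pinned in the workflows above.
import torch

torch_version = torch.__version__.split("+")[0]   # e.g. "1.8.0" in the assumed CI image
cuda_version = torch.version.cuda                  # e.g. "11.1"; None on CPU-only builds

if cuda_version is None:
    raise RuntimeError("CPU-only torch build; the GPU wheel index does not apply")

expected_index = "https://pytorch-geometric.com/whl/torch-1.8.0+cu111.html"
local_index = (
    f"https://pytorch-geometric.com/whl/torch-{torch_version}"
    f"+cu{cuda_version.replace('.', '')}.html"
)

assert local_index == expected_index, (
    f"torch-scatter wheels are pinned for torch 1.8.0+cu111, "
    f"but this environment has torch {torch_version} with CUDA {cuda_version}"
)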


@@ -33,7 +33,8 @@ jobs:
      run: |
        apt -y update && apt install -y libsndfile1-dev
        pip install --upgrade pip
-        pip install .[sklearn,testing,onnxruntime,sentencepiece,speech,deepspeed]
+        pip install .[sklearn,testing,onnxruntime,sentencepiece,speech]
+        pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.8.0+cu111.html
    - name: Are GPUs recognized by our DL frameworks
      run: |
@@ -155,7 +156,8 @@ jobs:
      run: |
        apt -y update && apt install -y libsndfile1-dev
        pip install --upgrade pip
-        pip install .[sklearn,testing,onnxruntime,sentencepiece,speech,deepspeed,fairscale]
+        pip install .[sklearn,testing,onnxruntime,sentencepiece,speech]
+        pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.8.0+cu111.html
    - name: Are GPUs recognized by our DL frameworks
      run: |
@@ -279,6 +281,13 @@ jobs:
      if: ${{ always() }}
      run: cat reports/tests_torch_cuda_extensions_gpu_failures_short.txt
+    - name: Test suite reports artifacts
+      if: ${{ always() }}
+      uses: actions/upload-artifact@v2
+      with:
+        name: run_tests_torch_cuda_extensions_gpu_test_reports
+        path: reports

  run_all_tests_torch_cuda_extensions_multi_gpu:
    runs-on: [self-hosted, docker-gpu, multi-gpu]
    container:
@@ -312,6 +321,13 @@ jobs:
      if: ${{ always() }}
      run: cat reports/tests_torch_cuda_extensions_multi_gpu_failures_short.txt
+    - name: Test suite reports artifacts
+      if: ${{ always() }}
+      uses: actions/upload-artifact@v2
+      with:
+        name: run_tests_torch_cuda_extensions_multi_gpu_test_reports
+        path: reports

  send_results:
    name: Send results to webhook
    runs-on: ubuntu-latest


@@ -128,6 +128,12 @@ if __name__ == "__main__":
                "common": "run_all_tests_torch_multi_gpu_test_reports/tests_torch_multi_gpu_[].txt",
                "pipeline": "run_all_tests_torch_multi_gpu_test_reports/tests_torch_pipeline_multi_gpu_[].txt",
            },
+            "Torch Cuda Extensions Single GPU": {
+                "common": "run_tests_torch_cuda_extensions_gpu_test_reports/tests_torch_cuda_extensions_gpu_[].txt"
+            },
+            "Torch Cuda Extensions Multi GPU": {
+                "common": "run_tests_torch_cuda_extensions_multi_gpu_test_reports/tests_torch_cuda_extensions_multi_gpu_[].txt"
+            },
        }
    else:
        file_paths = {
@@ -135,6 +141,12 @@ if __name__ == "__main__":
            "Torch Single GPU": {"common": "run_all_tests_torch_gpu_test_reports/tests_torch_gpu_[].txt"},
            "TF Multi GPU": {"common": "run_all_tests_tf_multi_gpu_test_reports/tests_tf_multi_gpu_[].txt"},
            "Torch Multi GPU": {"common": "run_all_tests_torch_multi_gpu_test_reports/tests_torch_multi_gpu_[].txt"},
+            "Torch Cuda Extensions Single GPU": {
+                "common": "run_tests_torch_cuda_extensions_gpu_test_reports/tests_torch_cuda_extensions_gpu_[].txt"
+            },
+            "Torch Cuda Extensions Multi GPU": {
+                "common": "run_tests_torch_cuda_extensions_multi_gpu_test_reports/tests_torch_cuda_extensions_multi_gpu_[].txt"
+            },
        }

    client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
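For context, the "[]" placeholder in the new report paths is filled in elsewhere in the notification script before the files are read. The sketch below only illustrates how such a path template could be expanded and read; the "stats" and "failures_short" suffixes are hypothetical examples chosen for illustration, not the script's actual substitutions.

# Illustrative sketch (not from this commit): expand a "[]" report-path template
# and collect whichever report files exist on disk.
import os

def read_reports(path_template, suffixes=("stats", "failures_short")):
    reports = {}
    for suffix in suffixes:
        path = path_template.replace("[]", suffix)
        if os.path.isfile(path):
            with open(path) as f:
                reports[suffix] = f.read()
    return reports

# e.g. for the newly added "Torch Cuda Extensions Single GPU" entry:
reports = read_reports(
    "run_tests_torch_cuda_extensions_gpu_test_reports/tests_torch_cuda_extensions_gpu_[].txt"
)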