remove USE_CUDA (#7861)

Stas Bekman 2020-10-19 04:08:34 -07:00 committed by GitHub
parent ea1507fb45
commit 4eb61f8e88
6 changed files with 16 additions and 20 deletions

.github/workflows/self-push.yml

@@ -59,7 +59,6 @@ jobs:
         TF_FORCE_GPU_ALLOW_GROWTH: "true"
         # TF_GPU_MEMORY_LIMIT: 4096
         OMP_NUM_THREADS: 1
-        USE_CUDA: yes
       run: |
         source .env/bin/activate
         python -m pytest -n 2 --dist=loadfile -s ./tests/
@@ -110,7 +109,6 @@ jobs:
         TF_FORCE_GPU_ALLOW_GROWTH: "true"
         # TF_GPU_MEMORY_LIMIT: 4096
         OMP_NUM_THREADS: 1
-        USE_CUDA: yes
       run: |
         source .env/bin/activate
         python -m pytest -n 2 --dist=loadfile -s ./tests/

.github/workflows/self-scheduled.yml

@@ -57,7 +57,6 @@ jobs:
         TF_FORCE_GPU_ALLOW_GROWTH: "true"
         OMP_NUM_THREADS: 1
         RUN_SLOW: yes
-        USE_CUDA: yes
       run: |
         source .env/bin/activate
         python -m pytest -n 1 --dist=loadfile -s ./tests/
@@ -67,7 +66,6 @@ jobs:
         TF_FORCE_GPU_ALLOW_GROWTH: "true"
         OMP_NUM_THREADS: 1
         RUN_SLOW: yes
-        USE_CUDA: yes
       run: |
         source .env/bin/activate
         pip install -r examples/requirements.txt
@@ -120,7 +118,6 @@ jobs:
         TF_FORCE_GPU_ALLOW_GROWTH: "true"
         OMP_NUM_THREADS: 1
         RUN_SLOW: yes
-        USE_CUDA: yes
       run: |
         source .env/bin/activate
         python -m pytest -n 1 --dist=loadfile -s ./tests/
@@ -130,7 +127,6 @@ jobs:
         TF_FORCE_GPU_ALLOW_GROWTH: "true"
         OMP_NUM_THREADS: 1
         RUN_SLOW: yes
-        USE_CUDA: yes
       run: |
         source .env/bin/activate
         pip install -r examples/requirements.txt

docs/source/testing.rst

@@ -22,12 +22,12 @@ How transformers are tested
 * `self-hosted (push) <https://github.com/huggingface/transformers/blob/master/.github/workflows/self-push.yml>`__: runs fast tests on GPU only on commits on ``master``. It only runs if a commit on ``master`` has updated the code in one of the following folders: ``src``, ``tests``, ``.github`` (to prevent running on added model cards, notebooks, etc.)
-* `self-hosted runner <https://github.com/huggingface/transformers/blob/master/.github/workflows/self-scheduled.yml>`__: runs slow tests on ``tests`` and ``examples``:
+* `self-hosted runner <https://github.com/huggingface/transformers/blob/master/.github/workflows/self-scheduled.yml>`__: runs normal and slow tests on GPU in ``tests`` and ``examples``:

 .. code-block:: bash

-   RUN_SLOW=1 USE_CUDA=1 pytest tests/
-   RUN_SLOW=1 USE_CUDA=1 pytest examples/
+   RUN_SLOW=1 pytest tests/
+   RUN_SLOW=1 pytest examples/

 The results can be observed `here <https://github.com/huggingface/transformers/actions>`__.
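Note on this hunk: with ``USE_CUDA`` gone, a visible GPU is picked up automatically, and the old ``USE_CUDA=0`` behaviour is obtained by hiding the devices instead. A minimal sketch of the two modes, reusing the commands above:

    # GPU run: a visible CUDA device is used automatically
    RUN_SLOW=1 pytest tests/

    # CPU-only run: hide all CUDA devices from the process
    CUDA_VISIBLE_DEVICES="" RUN_SLOW=1 pytest tests/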
@@ -393,7 +393,7 @@ On a GPU-enabled setup, to test in CPU-only mode add ``CUDA_VISIBLE_DEVICES=""``

   CUDA_VISIBLE_DEVICES="" pytest tests/test_logging.py

-or if you have multiple gpus, you can tell which one to use in this test session, e.g. to use only the second gpu if you have gpus ``0`` and ``1``, you can run:
+or if you have multiple gpus, you can specify which one is to be used by ``pytest``. For example, to use only the second gpu if you have gpus ``0`` and ``1``, you can run:

 .. code-block:: bash
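The ``code-block`` above is truncated by the hunk boundary; the command it introduces is presumably of this shape (device ``1`` being the second GPU, since numbering starts at ``0``):

    CUDA_VISIBLE_DEVICES="1" pytest tests/test_logging.py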

scripts/fsmt/tests-to-run.sh

@@ -2,5 +2,5 @@
 # these scripts need to be run before any changes to FSMT-related code - it should cover all bases
-USE_CUDA=0 RUN_SLOW=1 pytest --disable-warnings tests/test_tokenization_fsmt.py tests/test_configuration_auto.py tests/test_modeling_fsmt.py examples/seq2seq/test_fsmt_bleu_score.py
-USE_CUDA=1 RUN_SLOW=1 pytest --disable-warnings tests/test_tokenization_fsmt.py tests/test_configuration_auto.py tests/test_modeling_fsmt.py examples/seq2seq/test_fsmt_bleu_score.py
+CUDA_VISIBLE_DEVICES="" RUN_SLOW=1 pytest --disable-warnings tests/test_tokenization_fsmt.py tests/test_configuration_auto.py tests/test_modeling_fsmt.py examples/seq2seq/test_fsmt_bleu_score.py
+RUN_SLOW=1 pytest --disable-warnings tests/test_tokenization_fsmt.py tests/test_configuration_auto.py tests/test_modeling_fsmt.py examples/seq2seq/test_fsmt_bleu_score.py
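This before/after pair is the whole migration in miniature. As a general mapping of the old toggles to the new convention (a sketch, not part of the commit):

    # old convention                          # new convention
    # USE_CUDA=1 RUN_SLOW=1 pytest ...    ->  RUN_SLOW=1 pytest ...                          (GPU used if visible)
    # USE_CUDA=0 RUN_SLOW=1 pytest ...    ->  CUDA_VISIBLE_DEVICES="" RUN_SLOW=1 pytest ...  (CPU forced)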

src/transformers/testing_utils.py

@@ -187,8 +187,10 @@ def require_torch_tpu(test_case):

 if _torch_available:
-    # Set the USE_CUDA environment variable to select a GPU.
-    torch_device = "cuda" if parse_flag_from_env("USE_CUDA") else "cpu"
+    # Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
+    import torch
+
+    torch_device = "cuda" if torch.cuda.is_available() else "cpu"
 else:
     torch_device = None
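The new detection is easy to check from the shell: hiding the devices before the interpreter starts makes ``torch.cuda.is_available()`` return ``False``, so ``torch_device`` falls back to ``"cpu"`` (sketch, assumes a CUDA build of torch on a GPU machine):

    # on a GPU machine: prints cuda
    python -c 'import torch; print("cuda" if torch.cuda.is_available() else "cpu")'

    # same machine, devices hidden: prints cpu
    CUDA_VISIBLE_DEVICES="" python -c 'import torch; print("cuda" if torch.cuda.is_available() else "cpu")'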
@@ -485,9 +487,9 @@ class TestCasePlus(unittest.TestCase):

 def mockenv(**kwargs):
     """this is a convenience wrapper, that allows this:

-    @mockenv(USE_CUDA=True, USE_TF=False)
+    @mockenv(RUN_SLOW=True, USE_TF=False)
     def test_something():
-        use_cuda = os.getenv("USE_CUDA", False)
+        run_slow = os.getenv("RUN_SLOW", False)
         use_tf = os.getenv("USE_TF", False)
     """
     return unittest.mock.patch.dict(os.environ, kwargs)

tests/test_skip_decorators.py

@@ -23,10 +23,10 @@
 # the following 4 should be run. But since we have different CI jobs running
 # different configs, all combinations should get covered
 #
-# USE_CUDA=1 RUN_SLOW=1 pytest -rA tests/test_skip_decorators.py
-# USE_CUDA=0 RUN_SLOW=1 pytest -rA tests/test_skip_decorators.py
-# USE_CUDA=0 RUN_SLOW=0 pytest -rA tests/test_skip_decorators.py
-# USE_CUDA=1 RUN_SLOW=0 pytest -rA tests/test_skip_decorators.py
+# RUN_SLOW=1 pytest -rA tests/test_skip_decorators.py
+# RUN_SLOW=1 CUDA_VISIBLE_DEVICES="" pytest -rA tests/test_skip_decorators.py
+# RUN_SLOW=0 pytest -rA tests/test_skip_decorators.py
+# RUN_SLOW=0 CUDA_VISIBLE_DEVICES="" pytest -rA tests/test_skip_decorators.py

 import os
 import unittest