From 6695450a23545bc9d5416f39ab39609c7811c653 Mon Sep 17 00:00:00 2001
From: Stas Bekman
Date: Fri, 7 Aug 2020 07:36:32 -0700
Subject: [PATCH] [examples] consistently use --gpus, instead of --n_gpu (#6315)

---
 examples/distillation/README.md         | 4 ++--
 examples/distillation/train.py          | 2 +-
 examples/text-classification/README.md  | 6 +-----
 examples/token-classification/README.md | 2 +-
 4 files changed, 5 insertions(+), 9 deletions(-)

diff --git a/examples/distillation/README.md b/examples/distillation/README.md
index 0df41ff63e..8eb4730259 100644
--- a/examples/distillation/README.md
+++ b/examples/distillation/README.md
@@ -55,7 +55,7 @@ Here are the results on the *test* sets for 6 of the languages available in XNLI
 
 ## Setup
 
-This part of the library has only be tested with Python3.6+. There are few specific dependencies to install before launching a distillation, you can install them with the command `pip install -r requirements.txt`.
+This part of the library has only be tested with Python3.6+. There are few specific dependencies to install before launching a distillation, you can install them with the command `pip install -r requirements.txt`.
 
 **Important note:** The training scripts have been updated to support PyTorch v1.2.0 (there are breakings changes compared to v1.1.0).
 
@@ -161,7 +161,7 @@ python -m torch.distributed.launch \
     --master_port $MASTER_PORT \
     train.py \
         --force \
-        --n_gpu $WORLD_SIZE \
+        --gpus $WORLD_SIZE \
         --student_type distilbert \
         --student_config training_configs/distilbert-base-uncased.json \
         --teacher_type bert \
diff --git a/examples/distillation/train.py b/examples/distillation/train.py
index 0d21ae04f8..ce5df33198 100644
--- a/examples/distillation/train.py
+++ b/examples/distillation/train.py
@@ -210,7 +210,7 @@ def main():
         help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
         "See details at https://nvidia.github.io/apex/amp.html",
     )
-    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
+    parser.add_argument("--gpus", type=int, default=1, help="Number of GPUs in the node.")
     parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
     parser.add_argument("--seed", type=int, default=56, help="Random seed")
 
diff --git a/examples/text-classification/README.md b/examples/text-classification/README.md
index 90b6fb8b37..22be663d30 100644
--- a/examples/text-classification/README.md
+++ b/examples/text-classification/README.md
@@ -249,7 +249,7 @@ The results are the following:
 
 Run `bash run_pl.sh` from the `glue` directory. This will also install `pytorch-lightning` and the requirements in `examples/requirements.txt`. It is a shell pipeline that will automatically download, pre-process the data and run the specified models. Logs are saved in `lightning_logs` directory.
 
-Pass `--n_gpu` flag to change the number of GPUs. Default uses 1. At the end, the expected results are:
+Pass `--gpus` flag to change the number of GPUs. Default uses 1. At the end, the expected results are:
 ```
 TEST RESULTS {'val_loss': tensor(0.0707), 'precision': 0.852427800698191, 'recall': 0.869537067011978, 'f1': 0.8608974358974358}
 ```
@@ -294,7 +294,3 @@ Training with the previously defined hyper-parameters yields the following resul
 ```bash
 acc = 0.7093812375249501
 ```
-
-
-
-
diff --git a/examples/token-classification/README.md b/examples/token-classification/README.md
index 811681f067..fb6291fc37 100644
--- a/examples/token-classification/README.md
+++ b/examples/token-classification/README.md
@@ -134,7 +134,7 @@ On the test dataset the following results could be achieved:
 
 Run `bash run_pl.sh` from the `ner` directory. This would also install `pytorch-lightning` and the `examples/requirements.txt`. It is a shell pipeline which would automatically download, pre-process the data and run the models in `germeval-model` directory. Logs are saved in `lightning_logs` directory.
 
-Pass `--n_gpu` flag to change the number of GPUs. Default uses 1. At the end, the expected results are: `TEST RESULTS {'val_loss': tensor(0.0707), 'precision': 0.852427800698191, 'recall': 0.869537067011978, 'f1': 0.8608974358974358}`
+Pass `--gpus` flag to change the number of GPUs. Default uses 1. At the end, the expected results are: `TEST RESULTS {'val_loss': tensor(0.0707), 'precision': 0.852427800698191, 'recall': 0.869537067011978, 'f1': 0.8608974358974358}`
 
 #### Run the Tensorflow 2 version
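
For readers tracking the rename, here is a minimal, self-contained sketch of the new flag as parsed by argparse. It is not the actual `train.py`; it only mirrors the `add_argument` call this patch introduces, and the `WORLD_SIZE` value of 4 is a hypothetical example.

```python
import argparse

# Sketch only: mirrors the add_argument call patched in examples/distillation/train.py,
# where the flag was renamed from --n_gpu to --gpus.
parser = argparse.ArgumentParser()
parser.add_argument("--gpus", type=int, default=1, help="Number of GPUs in the node.")
parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")

# The launch command in examples/distillation/README.md now passes `--gpus $WORLD_SIZE`;
# here we simulate that with an assumed WORLD_SIZE of 4.
args = parser.parse_args(["--gpus", "4"])
print(args.gpus)  # 4
```

A command line that still passes the old `--n_gpu` spelling would now be rejected by argparse as an unrecognized argument, which is why the README commands are updated in the same commit.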