DataParallel fixes (#5733)

* DataParallel fixes:

1. switched to a more precise check (see the sketch after the changed-files summary below)
-        if self.args.n_gpu > 1:
+        if isinstance(model, nn.DataParallel):

2. fix tests - apply the same fixup under DataParallel as the training module does

* another fix
Stas Bekman 2020-07-20 06:29:12 -07:00 committed by GitHub
parent 290b6e18ac
commit 35cb101eae
3 changed files with 7 additions and 2 deletions
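
A minimal sketch, not part of this commit, of why the isinstance check in item 1 is more precise than counting GPUs: torch.cuda.device_count() describes the machine, not the object being called, so the GPU count can be greater than 1 while the model at hand was never wrapped. The nn.Linear below is a throwaway stand-in used only to show the two checks disagreeing.

    import torch
    from torch import nn

    model = nn.Linear(4, 2)                      # plain module, never wrapped
    n_gpu = torch.cuda.device_count()            # can be > 1 even though model is not wrapped

    print(n_gpu > 1)                             # old check: depends on the machine, not on the model
    print(isinstance(model, nn.DataParallel))    # new check: False, matches how model will behave
    print(isinstance(nn.DataParallel(model), nn.DataParallel))  # True only once the wrapper is applied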


@@ -199,6 +199,9 @@ def train(args, train_dataset, model, tokenizer):
                         {"langs": (torch.ones(batch[0].shape, dtype=torch.int64) * args.lang_id).to(args.device)}
                     )
+            if isinstance(model, torch.nn.DataParallel):
+                inputs["return_tuple"] = True
             outputs = model(**inputs)
             # model outputs are always tuple in transformers (see doc)
             loss = outputs[0]


@@ -623,7 +623,7 @@ class Trainer:
         if self.args.past_index >= 0 and self._past is not None:
             inputs["mems"] = self._past
         # Our model outputs do not work with DataParallel, so forcing return tuple.
-        if self.args.n_gpu > 1:
+        if isinstance(model, nn.DataParallel):
             inputs["return_tuple"] = True
         outputs = model(**inputs)
@@ -826,7 +826,7 @@ class Trainer:
             if self.args.past_index >= 0:
                 inputs["mems"] = past
             # Our model outputs do not work with DataParallel, so forcing return tuple.
-            if self.args.n_gpu > 1:
+            if isinstance(model, nn.DataParallel):
                 inputs["return_tuple"] = True
             with torch.no_grad():
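
The comment repeated in both hunks above is the motivation for the whole change: outputs have to be plain tuples of tensors for nn.DataParallel's gather step to merge the per-device results back together. Below is a hedged, self-contained sketch of that pattern, assuming a made-up TinyModel with a return_tuple flag (it is not transformers code, just an illustration of the guard the diff adds).

    import torch
    from torch import nn

    class TinyModel(nn.Module):
        """Stand-in for a model that can return either a plain tuple or a dict-like object."""

        def __init__(self):
            super().__init__()
            self.proj = nn.Linear(8, 2)

        def forward(self, x, return_tuple=False):
            logits = self.proj(x)
            loss = logits.pow(2).mean().unsqueeze(0)  # shape (1,) so per-device losses concatenate cleanly
            if return_tuple:
                return (loss, logits)                 # plain tensors: gather merges them element-wise
            return {"loss": loss, "logits": logits}   # stand-in for the richer output the comment refers to

    model = TinyModel()
    if torch.cuda.device_count() > 1:
        model = nn.DataParallel(model.cuda())

    inputs = {"x": torch.randn(4, 8)}
    if isinstance(model, nn.DataParallel):            # the same guard this commit adds
        inputs["return_tuple"] = True
    outputs = model(**inputs)
    loss = outputs[0] if isinstance(outputs, tuple) else outputs["loss"]

Checking the wrapper type rather than self.args.n_gpu ties the fixup to the object actually being called, so it stays correct even when the GPU count and the wrapping decision diverge.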


@@ -803,6 +803,8 @@ class ModelTesterMixin:
             # Wrap model in nn.DataParallel
             model = torch.nn.DataParallel(model)
+            # Our model outputs do not work with DataParallel, so forcing return tuple.
+            inputs_dict["return_tuple"] = True
             with torch.no_grad():
                 _ = model(**self._prepare_for_class(inputs_dict, model_class))
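
For the test change, here is a hedged sketch of the same idea in standalone form (the class names, the toy model, and the skip condition are illustrative assumptions, not the ModelTesterMixin API): skip unless at least two GPUs are visible, wrap the model in nn.DataParallel, force return_tuple exactly as above, and run a forward pass under no_grad.

    import unittest

    import torch
    from torch import nn

    class ToyModel(nn.Module):
        """Illustrative stand-in for a model accepting return_tuple."""

        def __init__(self):
            super().__init__()
            self.proj = nn.Linear(8, 2)

        def forward(self, x, return_tuple=False):
            logits = self.proj(x)
            return (logits,) if return_tuple else {"logits": logits}

    class DataParallelForwardTest(unittest.TestCase):
        @unittest.skipUnless(torch.cuda.device_count() > 1, "needs at least two GPUs")
        def test_multigpu_forward(self):
            model = nn.DataParallel(ToyModel().cuda()).eval()
            inputs = {"x": torch.randn(4, 8).cuda(), "return_tuple": True}
            with torch.no_grad():
                _ = model(**inputs)  # gather merges the per-device logits back into one tuple

    if __name__ == "__main__":
        unittest.main()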