Fix load of model checkpoints in the Trainer (#18470)

Sylvain Gugger 2022-08-04 08:22:25 -04:00 committed by GitHub
parent 330247ede2
commit df28de0581
1 changed file with 2 additions and 2 deletions


@@ -1935,7 +1935,7 @@ class Trainer:
         else:
             # We load the model state dict on the CPU to avoid an OOM error.
             state_dict = torch.load(os.path.join(resume_from_checkpoint, WEIGHTS_NAME), map_location="cpu")
-            load_result = model.load_state_dict(state_dict)
+            load_result = model.load_state_dict(state_dict, strict=False)
             # release memory
             del state_dict
             self._issue_warnings_after_load(load_result)
@@ -1989,7 +1989,7 @@
             # We load the model state dict on the CPU to avoid an OOM error.
             state_dict = torch.load(best_model_path, map_location="cpu")
             # If the model is on the GPU, it still works!
-            load_result = model.load_state_dict(state_dict)
+            load_result = model.load_state_dict(state_dict, strict=False)
             if not is_sagemaker_mp_enabled():
                 self._issue_warnings_after_load(load_result)
         elif os.path.exists(os.path.join(self.state.best_model_checkpoint, WEIGHTS_INDEX_NAME)):