From 1c286be5081568a1b26c7c8b42932aa40f3a0c64 Mon Sep 17 00:00:00 2001
From: dumpmemory <64742282+dumpmemory@users.noreply.github.com>
Date: Sat, 16 Dec 2023 00:18:56 +0800
Subject: [PATCH] Fix bug for checkpoint saving on multi-node training setting
 (#28078)

* add multi-node training setting

* fix style
---
 src/transformers/trainer.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/src/transformers/trainer.py b/src/transformers/trainer.py
index 0b56488907..ffe5f5c0d1 100755
--- a/src/transformers/trainer.py
+++ b/src/transformers/trainer.py
@@ -2386,7 +2386,9 @@ class Trainer:
         self.args.distributed_state.wait_for_everyone()
         # Then go through the rewriting process starting on process 0
         if staging_output_dir != output_dir:
-            with self.args.main_process_first(desc="Renaming model checkpoint folder to true location"):
+            with self.args.main_process_first(
+                desc="Renaming model checkpoint folder to true location", local=self.args.save_on_each_node
+            ):
                 if os.path.exists(staging_output_dir):
                     os.rename(staging_output_dir, output_dir)
 
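
Why the local flag matters here: TrainingArguments.main_process_first is a
context manager that lets a designated "main" process run a block before its
peers enter it. The following is a minimal sketch of that gating, not the
actual transformers implementation; it assumes a torch.distributed process
group is already initialized and that RANK and LOCAL_RANK are set by the
launcher (e.g. torchrun).

    import os
    from contextlib import contextmanager

    import torch.distributed as dist

    # Simplified stand-in for TrainingArguments.main_process_first. The chosen
    # "main" process executes the body first; every other process waits at a
    # barrier until it finishes, then runs the body itself.
    @contextmanager
    def main_process_first(local: bool = True):
        # local=True: the first process on *each* node counts as main.
        # local=False: only the single global rank-0 process counts as main.
        rank = int(os.environ["LOCAL_RANK"]) if local else int(os.environ["RANK"])
        try:
            if rank != 0:
                dist.barrier()  # followers wait for the main process
            yield
        finally:
            if rank == 0:
                dist.barrier()  # main process done; release the followers

Before this patch the call used the default local=True, so on a multi-node run
with a shared filesystem the main process of every node tried to rename the
same staging folder, and those renames can race despite the os.path.exists
guard. Tying local to save_on_each_node keeps the rename per-node only when
each node stages its own checkpoint, and otherwise lets the single global
rank-0 process perform it first.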