diff --git a/examples/seq2seq/finetune_trainer.py b/examples/seq2seq/finetune_trainer.py
index 5159b82ef3..30cc30353a 100755
--- a/examples/seq2seq/finetune_trainer.py
+++ b/examples/seq2seq/finetune_trainer.py
@@ -98,6 +98,7 @@ class DataTrainingArguments:
         metadata={
             "help": "The maximum total sequence length for validation target text after tokenization. Sequences longer "
             "than this will be truncated, sequences shorter will be padded."
+            " This argument is also used to override the ``max_length`` param of ``model.generate``, which is used during ``evaluate`` and ``predict``"
         },
     )
     test_max_target_length: Optional[int] = field(
diff --git a/examples/seq2seq/utils.py b/examples/seq2seq/utils.py
index 8014463122..437cdf2e63 100644
--- a/examples/seq2seq/utils.py
+++ b/examples/seq2seq/utils.py
@@ -434,7 +434,8 @@ def use_task_specific_params(model, task):
     if task_specific_params is not None:
         pars = task_specific_params.get(task, {})
-        logger.info(f"using task specific params for {task}: {pars}")
+        logger.info(f"setting model.config to task specific params for {task}:\n {pars}")
+        logger.info("note: command line args may override some of these")
         model.config.update(pars)
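For context, here is a minimal, self-contained sketch of how the two touched code paths interact: task-specific params are copied into the model config (as in the updated use_task_specific_params above), and a --val_max_target_length value, when given, takes precedence over the config's max_length at generation time. The DummyConfig class and pick_generation_max_length helper are stand-ins for illustration only, not part of the transformers codebase.

    import logging

    logger = logging.getLogger(__name__)


    class DummyConfig:
        """Stand-in for a transformers PretrainedConfig (illustration only)."""

        def __init__(self):
            self.max_length = 20
            self.task_specific_params = {"summarization": {"max_length": 142, "num_beams": 4}}

        def update(self, pars):
            # PretrainedConfig.update copies the given dict onto the config attributes.
            for k, v in pars.items():
                setattr(self, k, v)


    def use_task_specific_params(config, task):
        """Mirror of the updated helper: copy task-specific params into the config and log it."""
        task_specific_params = config.task_specific_params
        if task_specific_params is not None:
            pars = task_specific_params.get(task, {})
            logger.info(f"setting model.config to task specific params for {task}:\n {pars}")
            logger.info("note: command line args may override some of these")
            config.update(pars)


    def pick_generation_max_length(config, val_max_target_length=None):
        """Hypothetical plumbing: --val_max_target_length overrides config.max_length for evaluate/predict."""
        return val_max_target_length if val_max_target_length is not None else config.max_length


    if __name__ == "__main__":
        logging.basicConfig(level=logging.INFO)
        cfg = DummyConfig()
        use_task_specific_params(cfg, "summarization")
        # The command-line value (56) wins over the task default (142), matching the new help text.
        print(pick_generation_max_length(cfg, val_max_target_length=56))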