From e18da4e3f292b05a81eef6925e19caea22d4e14b Mon Sep 17 00:00:00 2001
From: Clint Adams
Date: Tue, 28 May 2024 05:48:23 -0400
Subject: [PATCH] fix "piano" typo (#31027)

---
 src/transformers/models/pop2piano/modeling_pop2piano.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/transformers/models/pop2piano/modeling_pop2piano.py b/src/transformers/models/pop2piano/modeling_pop2piano.py
index 7b8795e453..c769cff3c4 100644
--- a/src/transformers/models/pop2piano/modeling_pop2piano.py
+++ b/src/transformers/models/pop2piano/modeling_pop2piano.py
@@ -71,7 +71,7 @@ POP2PIANO_INPUTS_DOCSTRING = r"""
             so you should be able to pad the inputs on both the right and the left. Indices can be obtained using
             [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for detail.
             [What are input IDs?](../glossary#input-ids) To know more on how to prepare `input_ids` for pretraining
-            take a look a [Pop2Pianp Training](./Pop2Piano#training).
+            take a look a [Pop2Piano Training](./Pop2Piano#training).
         attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
             Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
             - 1 for tokens that are **not masked**,