Remove unneeded prints in modeling_gpt_neox.py (#27080)

Authored by Younes Belkada on 2023-10-26 11:55:31 +02:00, committed by GitHub
parent efba1a1744
commit fe2877ce21
1 changed file with 0 additions and 4 deletions

modeling_gpt_neox.py

@@ -804,9 +804,6 @@ class GPTNeoXForCausalLM(GPTNeoXPreTrainedModel):
         self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
     ):
         input_shape = input_ids.shape
-        print(input_shape)
-        print(past_key_values[0][0].shape if past_key_values is not None else "no pkv")
-
         # cut decoder_input_ids if past is used
         if past_key_values is not None:
             past_length = past_key_values[0][0].shape[2]
@@ -837,7 +834,6 @@ class GPTNeoXForCausalLM(GPTNeoXPreTrainedModel):
             model_inputs = {"inputs_embeds": inputs_embeds}
         else:
             model_inputs = {"input_ids": input_ids}
-            print(position_ids.shape)
         model_inputs.update(
             {
                 "attention_mask": attention_mask,