Improve test protocol for inputs_embeds in TF
cc @LysandreJik
This commit is contained in:
parent
b632145273
commit
cf62bdc962
|
@ -426,10 +426,15 @@ class TFCommonTestCases:
|
|||
try:
|
||||
x = wte([input_ids], mode="embedding")
|
||||
except:
|
||||
if hasattr(self.model_tester, "embedding_size"):
|
||||
x = tf.ones(input_ids.shape + [model.config.embedding_size], dtype=tf.dtypes.float32)
|
||||
else:
|
||||
x = tf.ones(input_ids.shape + [self.model_tester.hidden_size], dtype=tf.dtypes.float32)
|
||||
x = wte([input_ids, None, None, None], mode="embedding")
|
||||
# ^^ In our TF models, the input_embeddings can take slightly different forms,
|
||||
# so we try a few of them.
|
||||
# We used to fall back to just synthetically creating a dummy tensor of ones:
|
||||
#
|
||||
# if hasattr(self.model_tester, "embedding_size"):
|
||||
# x = tf.ones(input_ids.shape + [self.model_tester.embedding_size], dtype=tf.dtypes.float32)
|
||||
# else:
|
||||
# x = tf.ones(input_ids.shape + [self.model_tester.hidden_size], dtype=tf.dtypes.float32)
|
||||
inputs_dict["inputs_embeds"] = x
|
||||
outputs = model(inputs_dict)
|
||||
|
||||
|
|
Loading…
Reference in New Issue