Update tiny model info. and pipeline testing (#25213)

* update tiny_model_summary.json

* update

* update

* update

---------

Co-authored-by: ydshieh <ydshieh@users.noreply.github.com>
Authored by Yih-Dar on 2023-07-31 19:35:33 +02:00, committed by GitHub
parent e0c50b274a
commit 1b4f6199c6
8 changed files with 358 additions and 10 deletions
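
All of the test-file diffs below apply one pattern: the model's test class gains `PipelineTesterMixin`, plus a `pipeline_model_mapping` that maps pipeline task names to the model classes those pipeline tests should exercise. A minimal sketch of the pattern for a hypothetical `FooModel` (the mixin, the mapping attribute, and the `is_pipeline_test_to_skip` hook are taken from the diffs below; the `Foo*` classes are illustrative only):

import unittest

from transformers import is_torch_available

from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin

if is_torch_available():
    from transformers import FooForSequenceClassification, FooModel  # hypothetical classes

class FooModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    # Task name -> model class; the mixin runs each task's pipeline tests
    # against a tiny checkpoint of the mapped class.
    pipeline_model_mapping = (
        {
            "feature-extraction": FooModel,
            "text-classification": FooForSequenceClassification,
            "zero-shot": FooForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    # Optional hook (the parameter is spelled `pipeline_test_casse_name` in the
    # test suite): return True to skip a pipeline test case, as T5/UMT5 do
    # below for QAPipelineTests with slow tokenizers.
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return False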

View File

@@ -278,9 +278,9 @@ class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMix
     pipeline_model_mapping = (
         {
             "feature-extraction": FalconModel,
+            "question-answering": FalconForQuestionAnswering,
             "text-classification": FalconForSequenceClassification,
             "text-generation": FalconForCausalLM,
-            "question-answering": FalconForQuestionAnswering,
             "token-classification": FalconForTokenClassification,
             "zero-shot": FalconForSequenceClassification,
         }

View File

@@ -362,7 +362,16 @@ class MptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin,
     test_torchscript = False
     test_head_masking = False
     pipeline_model_mapping = (
-        {"feature-extraction": MptModel, "text-generation": MptForCausalLM} if is_torch_available() else {}
+        {
+            "feature-extraction": MptModel,
+            "question-answering": MptForQuestionAnswering,
+            "text-classification": MptForSequenceClassification,
+            "text-generation": MptForCausalLM,
+            "token-classification": MptForTokenClassification,
+            "zero-shot": MptForSequenceClassification,
+        }
+        if is_torch_available()
+        else {}
     )

     def setUp(self):

View File

@@ -22,6 +22,7 @@ from transformers.testing_utils import require_torch, slow, torch_device

 from ...test_configuration_common import ConfigTester
 from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
+from ...test_pipeline_mixin import PipelineTesterMixin


 if is_torch_available():
@@ -280,7 +281,7 @@ class MraModelTester:


 @require_torch
-class MraModelTest(ModelTesterMixin, unittest.TestCase):
+class MraModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     all_model_classes = (
         (
             MraModel,
@@ -299,6 +300,18 @@ class MraModelTest(ModelTesterMixin, unittest.TestCase):
     has_attentions = False
     all_generative_model_classes = ()
+    pipeline_model_mapping = (
+        {
+            "feature-extraction": MraModel,
+            "fill-mask": MraForMaskedLM,
+            "question-answering": MraForQuestionAnswering,
+            "text-classification": MraForSequenceClassification,
+            "token-classification": MraForTokenClassification,
+            "zero-shot": MraForSequenceClassification,
+        }
+        if is_torch_available()
+        else {}
+    )

     def setUp(self):
         self.model_tester = MraModelTester(self)

View File

@@ -30,6 +30,7 @@ from transformers.testing_utils import (

 from ...test_configuration_common import ConfigTester
 from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
+from ...test_pipeline_mixin import PipelineTesterMixin


 if is_torch_available():
@@ -154,8 +155,13 @@ def prepare_img():


 @require_torch
-class PvtModelTest(ModelTesterMixin, unittest.TestCase):
+class PvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     all_model_classes = (PvtModel, PvtForImageClassification) if is_torch_available() else ()
+    pipeline_model_mapping = (
+        {"feature-extraction": PvtModel, "image-classification": PvtForImageClassification}
+        if is_torch_available()
+        else {}
+    )
     test_head_masking = False
     test_pruning = False

View File

@@ -560,11 +560,11 @@ class T5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin,
         {
             "conversational": T5ForConditionalGeneration,
             "feature-extraction": T5Model,
+            "question-answering": T5ForQuestionAnswering,
             "summarization": T5ForConditionalGeneration,
+            "text-classification": T5ForSequenceClassification,
             "text2text-generation": T5ForConditionalGeneration,
             "translation": T5ForConditionalGeneration,
-            "question-answering": T5ForQuestionAnswering,
-            "text-classification": T5ForSequenceClassification,
             "zero-shot": T5ForSequenceClassification,
         }
         if is_torch_available()
@@ -583,6 +583,16 @@ class T5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin,
         self.model_tester = T5ModelTester(self)
         self.config_tester = ConfigTester(self, config_class=T5Config, d_model=37)

+    # `QAPipelineTests` is not working well with slow tokenizers (for some models) and we don't want to touch the file
+    # `src/transformers/data/processors/squad.py` (where this test fails for this model)
+    def is_pipeline_test_to_skip(
+        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
+    ):
+        if pipeline_test_casse_name == "QAPipelineTests" and not tokenizer_name.endswith("Fast"):
+            return True
+
+        return False
+
     def _create_and_check_torch_fx_tracing(self, config, inputs_dict, output_loss=False):
         if not is_torch_fx_available() or not self.fx_compatible:
             return

View File

@@ -296,11 +296,11 @@ class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin
         {
             "conversational": UMT5ForConditionalGeneration,
             "feature-extraction": UMT5Model,
+            "question-answering": UMT5ForQuestionAnswering,
             "summarization": UMT5ForConditionalGeneration,
+            "text-classification": UMT5ForSequenceClassification,
             "text2text-generation": UMT5ForConditionalGeneration,
             "translation": UMT5ForConditionalGeneration,
-            "question-answering": UMT5ForQuestionAnswering,
-            "text-classification": UMT5ForSequenceClassification,
             "zero-shot": UMT5ForSequenceClassification,
         }
         if is_torch_available()
@@ -317,6 +317,16 @@ class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin
     def setUp(self):
         self.model_tester = UMT5ModelTester(self)

+    # `QAPipelineTests` is not working well with slow tokenizers (for some models) and we don't want to touch the file
+    # `src/transformers/data/processors/squad.py` (where this test fails for this model)
+    def is_pipeline_test_to_skip(
+        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
+    ):
+        if pipeline_test_casse_name == "QAPipelineTests" and not tokenizer_name.endswith("Fast"):
+            return True
+
+        return False
+
     def _create_and_check_torch_fx_tracing(self, config, inputs_dict, output_loss=False):
         if not is_torch_fx_available() or not self.fx_compatible:
             return

View File

@@ -29,6 +29,7 @@ from transformers.utils import cached_property, is_torch_available, is_vision_av

 from ...test_configuration_common import ConfigTester
 from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
+from ...test_pipeline_mixin import PipelineTesterMixin


 if is_torch_available():
@@ -153,13 +154,18 @@ class VivitModelTester:


 @require_torch
-class VivitModelTest(ModelTesterMixin, unittest.TestCase):
+class VivitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
     """
     Here we also overwrite some of the tests of test_modeling_common.py, as Vivit does not use input_ids, inputs_embeds,
     attention_mask and seq_length.
     """

     all_model_classes = (VivitModel, VivitForVideoClassification) if is_torch_available() else ()
+    pipeline_model_mapping = (
+        {"feature-extraction": VivitModel, "video-classification": VivitForVideoClassification}
+        if is_torch_available()
+        else {}
+    )
     test_pruning = False
     test_torchscript = False
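
The last file in the diff is tiny_model_summary.json. Each entry maps an architecture name to the tokenizer and processor classes its tiny checkpoint needs, the concrete model classes covered, and a `sha` identifying the tiny checkpoint. A minimal sketch of how a consumer might read it (the file path is assumed for illustration; the diff only shows the file's contents, not its location):

import json

# Assumed location; adjust to wherever the file lives in the checkout.
with open("tiny_model_summary.json") as f:
    summary = json.load(f)

entry = summary["MraForMaskedLM"]  # one of the entries added below
print(entry["tokenizer_classes"])  # ["RobertaTokenizer", "RobertaTokenizerFast"]
print(entry["processor_classes"])  # []
print(entry["model_classes"])      # ["MraForMaskedLM"]
print(entry["sha"])                # "c00ee46cfd2b8fed29cc37f0a4ead40ad51a439c"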

View File

@@ -1084,6 +1084,16 @@
         ],
         "sha": "1d6ae6c0b60868dffbef0dddeda381c51c6dcba5"
     },
+    "Data2VecAudioForAudioFrameClassification": {
+        "tokenizer_classes": [],
+        "processor_classes": [
+            "Wav2Vec2FeatureExtractor"
+        ],
+        "model_classes": [
+            "Data2VecAudioForAudioFrameClassification"
+        ],
+        "sha": "a64828b27e73fc8dd95aeb315108ca2f6a66b55f"
+    },
     "Data2VecAudioForCTC": {
         "tokenizer_classes": [],
         "processor_classes": [
@@ -1509,6 +1519,26 @@
         ],
         "sha": "d6c75bc51196f0a683afb12de6310fdda13efefd"
     },
+    "Dinov2ForImageClassification": {
+        "tokenizer_classes": [],
+        "processor_classes": [
+            "BitImageProcessor"
+        ],
+        "model_classes": [
+            "Dinov2ForImageClassification"
+        ],
+        "sha": "ae44840966456aae33641df2c8c8a4af5b457b24"
+    },
+    "Dinov2Model": {
+        "tokenizer_classes": [],
+        "processor_classes": [
+            "BitImageProcessor"
+        ],
+        "model_classes": [
+            "Dinov2Model"
+        ],
+        "sha": "6f560b1cc9806bcf84fe0b0c60b5faf9c29be959"
+    },
     "DistilBertForMaskedLM": {
         "tokenizer_classes": [
             "DistilBertTokenizer",
@@ -3931,6 +3961,122 @@
         ],
         "sha": "2f46357659db2d6d54d870e28073deeea1c8cb64"
     },
+    "MptForCausalLM": {
+        "tokenizer_classes": [
+            "GPTNeoXTokenizerFast"
+        ],
+        "processor_classes": [],
+        "model_classes": [
+            "MptForCausalLM"
+        ],
+        "sha": "500c869b956c65f6b1a7b4867727f124c6f5728a"
+    },
+    "MptForQuestionAnswering": {
+        "tokenizer_classes": [
+            "GPTNeoXTokenizerFast"
+        ],
+        "processor_classes": [],
+        "model_classes": [
+            "MptForQuestionAnswering"
+        ],
+        "sha": "6ee46572bf61eb5e7dbbdaf00b73c4d37efc42d9"
+    },
+    "MptForSequenceClassification": {
+        "tokenizer_classes": [
+            "GPTNeoXTokenizerFast"
+        ],
+        "processor_classes": [],
+        "model_classes": [
+            "MptForSequenceClassification"
+        ],
+        "sha": "f0b9153413b5dfceeb96b67d4b0f22c94bbaf64a"
+    },
+    "MptForTokenClassification": {
+        "tokenizer_classes": [
+            "GPTNeoXTokenizerFast"
+        ],
+        "processor_classes": [],
+        "model_classes": [
+            "MptForTokenClassification"
+        ],
+        "sha": "3f7c3ccd67cd0b2aae56d37613429a64ef813246"
+    },
+    "MptModel": {
+        "tokenizer_classes": [
+            "GPTNeoXTokenizerFast"
+        ],
+        "processor_classes": [],
+        "model_classes": [
+            "MptModel"
+        ],
+        "sha": "ea747f234556661b0c8b84a626f267066ce586bf"
+    },
+    "MraForMaskedLM": {
+        "tokenizer_classes": [
+            "RobertaTokenizer",
+            "RobertaTokenizerFast"
+        ],
+        "processor_classes": [],
+        "model_classes": [
+            "MraForMaskedLM"
+        ],
+        "sha": "c00ee46cfd2b8fed29cc37f0a4ead40ad51a439c"
+    },
+    "MraForMultipleChoice": {
+        "tokenizer_classes": [
+            "RobertaTokenizer",
+            "RobertaTokenizerFast"
+        ],
+        "processor_classes": [],
+        "model_classes": [
+            "MraForMultipleChoice"
+        ],
+        "sha": "f397469ba8109f64dab2d75335ea7bf0c2dbeb74"
+    },
+    "MraForQuestionAnswering": {
+        "tokenizer_classes": [
+            "RobertaTokenizer",
+            "RobertaTokenizerFast"
+        ],
+        "processor_classes": [],
+        "model_classes": [
+            "MraForQuestionAnswering"
+        ],
+        "sha": "c2ed75acd20e5440a76d6504d9a3ebc2513011f0"
+    },
+    "MraForSequenceClassification": {
+        "tokenizer_classes": [
+            "RobertaTokenizer",
+            "RobertaTokenizerFast"
+        ],
+        "processor_classes": [],
+        "model_classes": [
+            "MraForSequenceClassification"
+        ],
+        "sha": "f47672d3708508bda7774215bee44a92ec16ab2f"
+    },
+    "MraForTokenClassification": {
+        "tokenizer_classes": [
+            "RobertaTokenizer",
+            "RobertaTokenizerFast"
+        ],
+        "processor_classes": [],
+        "model_classes": [
+            "MraForTokenClassification"
+        ],
+        "sha": "f0961ab5818bca473607fb94b391c186dc1d3492"
+    },
+    "MraModel": {
+        "tokenizer_classes": [
+            "RobertaTokenizer",
+            "RobertaTokenizerFast"
+        ],
+        "processor_classes": [],
+        "model_classes": [
+            "MraModel"
+        ],
+        "sha": "315f34f30bcc4b0b66b11987726df2a80c50e271"
+    },
     "MvpForCausalLM": {
         "tokenizer_classes": [
             "MvpTokenizer",
@@ -4500,7 +4646,8 @@
             "T5TokenizerFast"
         ],
         "processor_classes": [
-            "Pix2StructImageProcessor"
+            "Pix2StructImageProcessor",
+            "Pix2StructProcessor"
         ],
         "model_classes": [],
         "sha": "42b3de00ad535076c4893e4ac5ae2d2748cc4ccb"
@@ -4555,6 +4702,26 @@
         ],
         "sha": "f1ddbbcc768c7ba54c4d75b319540c1635e65937"
     },
+    "PvtForImageClassification": {
+        "tokenizer_classes": [],
+        "processor_classes": [
+            "PvtImageProcessor"
+        ],
+        "model_classes": [
+            "PvtForImageClassification"
+        ],
+        "sha": "589b37bd6941aff6dd248259f9eee3c422a41fde"
+    },
+    "PvtModel": {
+        "tokenizer_classes": [],
+        "processor_classes": [
+            "PvtImageProcessor"
+        ],
+        "model_classes": [
+            "PvtModel"
+        ],
+        "sha": "c40765c382515ae627652d60e9077b6478448d48"
+    },
     "ReformerForMaskedLM": {
         "tokenizer_classes": [
             "ReformerTokenizer",
@@ -5498,6 +5665,18 @@
         ],
         "sha": "275bbf6d389bfd0540b9f824c609c6b22a577328"
     },
+    "T5EncoderModel": {
+        "tokenizer_classes": [
+            "T5Tokenizer",
+            "T5TokenizerFast"
+        ],
+        "processor_classes": [],
+        "model_classes": [
+            "T5EncoderModel",
+            "TFT5EncoderModel"
+        ],
+        "sha": "1c75090036a2b3740dfe2d570b889332ad8e59e8"
+    },
     "T5ForConditionalGeneration": {
         "tokenizer_classes": [
             "T5Tokenizer",
@@ -5510,6 +5689,28 @@
         ],
         "sha": "593fd6072a4e265f5cc73b1973cd8af76b261f29"
     },
+    "T5ForQuestionAnswering": {
+        "tokenizer_classes": [
+            "T5Tokenizer",
+            "T5TokenizerFast"
+        ],
+        "processor_classes": [],
+        "model_classes": [
+            "T5ForQuestionAnswering"
+        ],
+        "sha": "b9edf2de494244ff032f67d2d7bdf6c591000c94"
+    },
+    "T5ForSequenceClassification": {
+        "tokenizer_classes": [
+            "T5Tokenizer",
+            "T5TokenizerFast"
+        ],
+        "processor_classes": [],
+        "model_classes": [
+            "T5ForSequenceClassification"
+        ],
+        "sha": "105b5c4c8e1efe927444108f1388c4f102ebad15"
+    },
     "T5Model": {
         "tokenizer_classes": [
             "T5Tokenizer",
@@ -5659,6 +5860,50 @@
         ],
         "sha": "c3cbf7a6159c038f333ce7adda2480ea3396b2b3"
     },
+    "UMT5EncoderModel": {
+        "tokenizer_classes": [
+            "T5Tokenizer",
+            "T5TokenizerFast"
+        ],
+        "processor_classes": [],
+        "model_classes": [
+            "UMT5EncoderModel"
+        ],
+        "sha": "2894e49c9fbd17ea4b3dab56ec388be354c1a5f0"
+    },
+    "UMT5ForQuestionAnswering": {
+        "tokenizer_classes": [
+            "T5Tokenizer",
+            "T5TokenizerFast"
+        ],
+        "processor_classes": [],
+        "model_classes": [
+            "UMT5ForQuestionAnswering"
+        ],
+        "sha": "b381aa068a44200db539f2f48f4e34a5ed1cb093"
+    },
+    "UMT5ForSequenceClassification": {
+        "tokenizer_classes": [
+            "T5Tokenizer",
+            "T5TokenizerFast"
+        ],
+        "processor_classes": [],
+        "model_classes": [
+            "UMT5ForSequenceClassification"
+        ],
+        "sha": "aa9f77b7b3cff21425b7512e7c0f478af7b5db14"
+    },
+    "UMT5Model": {
+        "tokenizer_classes": [
+            "T5Tokenizer",
+            "T5TokenizerFast"
+        ],
+        "processor_classes": [],
+        "model_classes": [
+            "UMT5Model"
+        ],
+        "sha": "9180d850b24e5494442a4f7a8ca1a4c102f9babd"
+    },
     "UniSpeechForCTC": {
         "tokenizer_classes": [
             "Wav2Vec2CTCTokenizer"
@@ -5707,6 +5952,18 @@
         ],
         "sha": "18e170eb1091715b74ace28c8c380b6bf2b6202d"
     },
+    "UniSpeechSatForAudioFrameClassification": {
+        "tokenizer_classes": [
+            "Wav2Vec2CTCTokenizer"
+        ],
+        "processor_classes": [
+            "Wav2Vec2FeatureExtractor"
+        ],
+        "model_classes": [
+            "UniSpeechSatForAudioFrameClassification"
+        ],
+        "sha": "7eba5a1c6cd610928b27ecb217bb17c729a07a57"
+    },
     "UniSpeechSatForCTC": {
         "tokenizer_classes": [
             "Wav2Vec2CTCTokenizer"
@@ -5997,6 +6254,18 @@
         ],
         "sha": "85020189fb7bf1217eb9370b09bca8ec5bcfdafa"
     },
+    "Wav2Vec2ConformerForAudioFrameClassification": {
+        "tokenizer_classes": [
+            "Wav2Vec2CTCTokenizer"
+        ],
+        "processor_classes": [
+            "Wav2Vec2FeatureExtractor"
+        ],
+        "model_classes": [
+            "Wav2Vec2ConformerForAudioFrameClassification"
+        ],
+        "sha": "e316a18a1d165b4cb51a7f28f8e8dab676da4b56"
+    },
     "Wav2Vec2ConformerForCTC": {
         "tokenizer_classes": [
             "Wav2Vec2CTCTokenizer"
@@ -6057,6 +6326,18 @@
         ],
         "sha": "ef2fe3aa8c23e6f8696e6612061aaddecae49994"
     },
+    "Wav2Vec2ForAudioFrameClassification": {
+        "tokenizer_classes": [
+            "Wav2Vec2CTCTokenizer"
+        ],
+        "processor_classes": [
+            "Wav2Vec2FeatureExtractor"
+        ],
+        "model_classes": [
+            "Wav2Vec2ForAudioFrameClassification"
+        ],
+        "sha": "ab219f119e10f56e1059966c66d23f0df3c2c343"
+    },
     "Wav2Vec2ForCTC": {
         "tokenizer_classes": [
             "Wav2Vec2CTCTokenizer"
@@ -6101,6 +6382,7 @@
             "Wav2Vec2FeatureExtractor"
         ],
         "model_classes": [
+            "TFWav2Vec2ForSequenceClassification",
             "Wav2Vec2ForSequenceClassification"
         ],
         "sha": "2000b2022abcc37100241485f5872126b70164c9"
@@ -6130,6 +6412,18 @@
         ],
         "sha": "7a998ee3ee0619a52828a79c3eed6872fd053f37"
     },
+    "WavLMForAudioFrameClassification": {
+        "tokenizer_classes": [
+            "Wav2Vec2CTCTokenizer"
+        ],
+        "processor_classes": [
+            "Wav2Vec2FeatureExtractor"
+        ],
+        "model_classes": [
+            "WavLMForAudioFrameClassification"
+        ],
+        "sha": "b135610f8d5de0b1a5bf5ed7212966135c63d6ec"
+    },
     "WavLMForCTC": {
         "tokenizer_classes": [
             "Wav2Vec2CTCTokenizer"