From ef28df057281da2fcedaecbd265237d60a6ad69d Mon Sep 17 00:00:00 2001
From: Sylvain
Date: Wed, 22 Mar 2023 20:45:08 -0400
Subject: [PATCH] Fix quality due to ruff release

---
 examples/flax/language-modeling/run_bart_dlm_flax.py      | 8 +++-----
 examples/legacy/pytorch-lightning/run_glue.py             | 2 +-
 examples/legacy/pytorch-lightning/run_ner.py              | 4 ++--
 examples/legacy/seq2seq/run_eval_search.py                | 4 ++--
 examples/legacy/token-classification/run_ner.py           | 2 +-
 examples/legacy/token-classification/run_tf_ner.py        | 2 +-
 examples/pytorch/token-classification/run_ner.py          | 2 +-
 .../pytorch/token-classification/run_ner_no_trainer.py    | 2 +-
 examples/research_projects/layoutlmv3/run_funsd_cord.py   | 4 ++--
 .../rag-end2end-retriever/finetune_rag.py                 | 4 ++--
 examples/research_projects/rag/finetune_rag.py            | 2 +-
 .../research_projects/seq2seq-distillation/finetune.py    | 4 ++--
 .../zero-shot-distillation/distill_classifier.py          | 2 +-
 src/transformers/benchmark/benchmark_utils.py             | 2 +-
 src/transformers/modelcard.py                             | 6 +++---
 src/transformers/models/esm/tokenization_esm.py           | 2 +-
 ...t_maskformer_original_pytorch_checkpoint_to_pytorch.py | 2 +-
 .../models/oneformer/convert_to_hf_oneformer.py           | 2 +-
 ...speech_to_text_wav2vec2_seq2seq_original_to_pytorch.py | 2 +-
 src/transformers/onnx/convert.py                          | 6 ++----
 src/transformers/pipelines/document_question_answering.py | 2 +-
 src/transformers/utils/generic.py                         | 2 +-
 .../mask2former/test_image_processing_mask2former.py      | 4 +---
 .../models/maskformer/test_image_processing_maskformer.py | 4 +---
 tests/models/oneformer/test_image_processing_oneformer.py | 4 +---
 tests/models/oneformer/test_processor_oneformer.py        | 4 +---
 .../test_pipelines_automatic_speech_recognition.py        | 7 ++-----
 tests/pipelines/test_pipelines_image_segmentation.py      | 7 ++-----
 28 files changed, 40 insertions(+), 58 deletions(-)

diff --git a/examples/flax/language-modeling/run_bart_dlm_flax.py b/examples/flax/language-modeling/run_bart_dlm_flax.py
index 62e4e8a839..4cb862bb37 100644
--- a/examples/flax/language-modeling/run_bart_dlm_flax.py
+++ b/examples/flax/language-modeling/run_bart_dlm_flax.py
@@ -319,15 +319,13 @@ class FlaxDataCollatorForBartDenoisingLM:
         sentence_ends = np.argwhere(end_sentence_mask)
         sentence_ends[:, 1] += 1
         example_has_multiple_sentences, num_sentences = np.unique(sentence_ends[:, 0], return_counts=True)
-        num_sentences_map = {sent_idx: count for sent_idx, count in zip(example_has_multiple_sentences, num_sentences)}
+        num_sentences_map = dict(zip(example_has_multiple_sentences, num_sentences))
 
         num_to_permute = np.ceil(num_sentences * self.permute_sentence_ratio).astype(int)
-        num_to_permute_map = {
-            sent_idx: count for sent_idx, count in zip(example_has_multiple_sentences, num_to_permute)
-        }
+        num_to_permute_map = dict(zip(example_has_multiple_sentences, num_to_permute))
 
         sentence_ends = np.split(sentence_ends[:, 1], np.unique(sentence_ends[:, 0], return_index=True)[1][1:])
-        sentence_ends_map = {sent_idx: count for sent_idx, count in zip(example_has_multiple_sentences, sentence_ends)}
+        sentence_ends_map = dict(zip(example_has_multiple_sentences, sentence_ends))
 
         for i in range(input_ids.shape[0]):
             if i not in example_has_multiple_sentences:
diff --git a/examples/legacy/pytorch-lightning/run_glue.py b/examples/legacy/pytorch-lightning/run_glue.py
index f96c5bafcd..5f22e2fc7a 100644
--- a/examples/legacy/pytorch-lightning/run_glue.py
+++ b/examples/legacy/pytorch-lightning/run_glue.py
@@ -124,7 +124,7 @@ class GLUETransformer(BaseTransformer):
 
         results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}
 
-        ret = {k: v for k, v in results.items()}
+        ret = dict(results.items())
         ret["log"] = results
         return ret, preds_list, out_label_list
 
diff --git a/examples/legacy/pytorch-lightning/run_ner.py b/examples/legacy/pytorch-lightning/run_ner.py
index 473851edef..7f6b00854d 100644
--- a/examples/legacy/pytorch-lightning/run_ner.py
+++ b/examples/legacy/pytorch-lightning/run_ner.py
@@ -122,7 +122,7 @@ class NERTransformer(BaseTransformer):
         preds = np.argmax(preds, axis=2)
         out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
 
-        label_map = {i: label for i, label in enumerate(self.labels)}
+        label_map = dict(enumerate(self.labels))
 
         out_label_list = [[] for _ in range(out_label_ids.shape[0])]
         preds_list = [[] for _ in range(out_label_ids.shape[0])]
@@ -140,7 +140,7 @@ class NERTransformer(BaseTransformer):
             "f1": f1_score(out_label_list, preds_list),
         }
 
-        ret = {k: v for k, v in results.items()}
+        ret = dict(results.items())
         ret["log"] = results
         return ret, preds_list, out_label_list
 
diff --git a/examples/legacy/seq2seq/run_eval_search.py b/examples/legacy/seq2seq/run_eval_search.py
index 1ed08c2274..9b5debfb27 100755
--- a/examples/legacy/seq2seq/run_eval_search.py
+++ b/examples/legacy/seq2seq/run_eval_search.py
@@ -34,7 +34,7 @@ task_score_names = {
 
 def parse_search_arg(search):
     groups = search.split()
-    entries = {k: vs for k, vs in (g.split("=") for g in groups)}
+    entries = dict((g.split("=") for g in groups))
     entry_names = list(entries.keys())
     sets = [[f"--{k} {v}" for v in vs.split(":")] for k, vs in entries.items()]
     matrix = [list(x) for x in itertools.product(*sets)]
@@ -105,7 +105,7 @@ def run_search():
     col_widths = {col: len(str(col)) for col in col_names}
     results = []
     for r in matrix:
-        hparams = {k: v for k, v in (x.replace("--", "").split() for x in r)}
+        hparams = dict((x.replace("--", "").split() for x in r))
         args_exp = " ".join(r).split()
         args_exp.extend(["--bs", str(args.bs)])  # in case we need to reduce its size due to CUDA OOM
         sys.argv = args_normal + args_exp
diff --git a/examples/legacy/token-classification/run_ner.py b/examples/legacy/token-classification/run_ner.py
index 212ea986b4..c571d44a12 100644
--- a/examples/legacy/token-classification/run_ner.py
+++ b/examples/legacy/token-classification/run_ner.py
@@ -158,7 +158,7 @@ def main():
 
     # Prepare CONLL-2003 task
     labels = token_classification_task.get_labels(data_args.labels)
-    label_map: Dict[int, str] = {i: label for i, label in enumerate(labels)}
+    label_map: Dict[int, str] = dict(enumerate(labels))
     num_labels = len(labels)
 
     # Load pretrained model and tokenizer
diff --git a/examples/legacy/token-classification/run_tf_ner.py b/examples/legacy/token-classification/run_tf_ner.py
index df4770a70f..a9c41d5818 100755
--- a/examples/legacy/token-classification/run_tf_ner.py
+++ b/examples/legacy/token-classification/run_tf_ner.py
@@ -144,7 +144,7 @@ def main():
 
     # Prepare Token Classification task
     labels = token_classification_task.get_labels(data_args.labels)
-    label_map: Dict[int, str] = {i: label for i, label in enumerate(labels)}
+    label_map: Dict[int, str] = dict(enumerate(labels))
     num_labels = len(labels)
 
     # Load pretrained model and tokenizer
diff --git a/examples/pytorch/token-classification/run_ner.py b/examples/pytorch/token-classification/run_ner.py
index cf9b896622..af71ade116 100755
--- a/examples/pytorch/token-classification/run_ner.py
+++ b/examples/pytorch/token-classification/run_ner.py
@@ -407,7 +407,7 @@ def main():
 
     # Set the correspondences label/ID inside the model config
     model.config.label2id = {l: i for i, l in enumerate(label_list)}
-    model.config.id2label = {i: l for i, l in enumerate(label_list)}
+    model.config.id2label = dict(enumerate(label_list))
 
     # Map that sends B-Xxx label to its I-Xxx counterpart
     b_to_i_label = []
diff --git a/examples/pytorch/token-classification/run_ner_no_trainer.py b/examples/pytorch/token-classification/run_ner_no_trainer.py
index 1007ae2ca6..d76ee33ebc 100755
--- a/examples/pytorch/token-classification/run_ner_no_trainer.py
+++ b/examples/pytorch/token-classification/run_ner_no_trainer.py
@@ -442,7 +442,7 @@ def main():
 
     # Set the correspondences label/ID inside the model config
     model.config.label2id = {l: i for i, l in enumerate(label_list)}
-    model.config.id2label = {i: l for i, l in enumerate(label_list)}
+    model.config.id2label = dict(enumerate(label_list))
 
     # Map that sends B-Xxx label to its I-Xxx counterpart
     b_to_i_label = []
diff --git a/examples/research_projects/layoutlmv3/run_funsd_cord.py b/examples/research_projects/layoutlmv3/run_funsd_cord.py
index 04e4498a1a..e826fd9974 100644
--- a/examples/research_projects/layoutlmv3/run_funsd_cord.py
+++ b/examples/research_projects/layoutlmv3/run_funsd_cord.py
@@ -294,11 +294,11 @@ def main():
     if isinstance(features[label_column_name].feature, ClassLabel):
         label_list = features[label_column_name].feature.names
         # No need to convert the labels since they are already ints.
-        id2label = {k: v for k, v in enumerate(label_list)}
+        id2label = dict(enumerate(label_list))
         label2id = {v: k for k, v in enumerate(label_list)}
     else:
         label_list = get_label_list(datasets["train"][label_column_name])
-        id2label = {k: v for k, v in enumerate(label_list)}
+        id2label = dict(enumerate(label_list))
         label2id = {v: k for k, v in enumerate(label_list)}
     num_labels = len(label_list)
 
diff --git a/examples/research_projects/rag-end2end-retriever/finetune_rag.py b/examples/research_projects/rag-end2end-retriever/finetune_rag.py
index 194eeb3fa3..b0a6c18319 100644
--- a/examples/research_projects/rag-end2end-retriever/finetune_rag.py
+++ b/examples/research_projects/rag-end2end-retriever/finetune_rag.py
@@ -360,7 +360,7 @@ class GenerativeQAModule(BaseTransformer):
 
         loss_tensors = self._step(batch)
 
-        logs = {name: loss for name, loss in zip(self.loss_names, loss_tensors)}
+        logs = dict(zip(self.loss_names, loss_tensors))
         # tokens per batch
         tgt_pad_token_id = (
             self.tokenizer.generator.pad_token_id
@@ -434,7 +434,7 @@ class GenerativeQAModule(BaseTransformer):
         target: List[str] = self.ids_to_clean_text(batch["decoder_input_ids"])
         # print(preds,target)
         loss_tensors = self._step(batch)
-        base_metrics = {name: loss for name, loss in zip(self.loss_names, loss_tensors)}
+        base_metrics = dict(zip(self.loss_names, loss_tensors))
         gen_metrics: Dict = self.calc_generative_metrics(preds, target)
         summ_len = np.mean(lmap(len, generated_ids))
 
diff --git a/examples/research_projects/rag/finetune_rag.py b/examples/research_projects/rag/finetune_rag.py
index 2e058850ec..64116a1d53 100644
--- a/examples/research_projects/rag/finetune_rag.py
+++ b/examples/research_projects/rag/finetune_rag.py
@@ -321,7 +321,7 @@ class GenerativeQAModule(BaseTransformer):
         preds: List[str] = self.ids_to_clean_text(generated_ids)
         target: List[str] = self.ids_to_clean_text(batch["decoder_input_ids"])
         loss_tensors = self._step(batch)
-        base_metrics = {name: loss for name, loss in zip(self.loss_names, loss_tensors)}
+        base_metrics = dict(zip(self.loss_names, loss_tensors))
         gen_metrics: Dict = self.calc_generative_metrics(preds, target)
         summ_len = np.mean(lmap(len, generated_ids))
 
diff --git a/examples/research_projects/seq2seq-distillation/finetune.py b/examples/research_projects/seq2seq-distillation/finetune.py
index a13f9b533d..ff889af81e 100755
--- a/examples/research_projects/seq2seq-distillation/finetune.py
+++ b/examples/research_projects/seq2seq-distillation/finetune.py
@@ -170,7 +170,7 @@ class SummarizationModule(BaseTransformer):
 
     def training_step(self, batch, batch_idx) -> Dict:
         loss_tensors = self._step(batch)
-        logs = {name: loss for name, loss in zip(self.loss_names, loss_tensors)}
+        logs = dict(zip(self.loss_names, loss_tensors))
         # tokens per batch
         logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum()
         logs["bs"] = batch["input_ids"].shape[0]
@@ -225,7 +225,7 @@ class SummarizationModule(BaseTransformer):
         preds: List[str] = self.ids_to_clean_text(generated_ids)
         target: List[str] = self.ids_to_clean_text(batch["labels"])
         loss_tensors = self._step(batch)
-        base_metrics = {name: loss for name, loss in zip(self.loss_names, loss_tensors)}
+        base_metrics = dict(zip(self.loss_names, loss_tensors))
         rouge: Dict = self.calc_generative_metrics(preds, target)
         summ_len = np.mean(lmap(len, generated_ids))
         base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
diff --git a/examples/research_projects/zero-shot-distillation/distill_classifier.py b/examples/research_projects/zero-shot-distillation/distill_classifier.py
index 16d5221437..3325c3aae0 100644
--- a/examples/research_projects/zero-shot-distillation/distill_classifier.py
+++ b/examples/research_projects/zero-shot-distillation/distill_classifier.py
@@ -303,7 +303,7 @@ def main():
         student_args.student_name_or_path, num_labels=len(class_names)
     )
     tokenizer = AutoTokenizer.from_pretrained(student_args.student_name_or_path, use_fast=data_args.use_fast_tokenizer)
-    model.config.id2label = {i: label for i, label in enumerate(class_names)}
+    model.config.id2label = dict(enumerate(class_names))
    model.config.label2id = {label: i for i, label in enumerate(class_names)}
 
     # 4. train student on teacher predictions
diff --git a/src/transformers/benchmark/benchmark_utils.py b/src/transformers/benchmark/benchmark_utils.py
index bde10f6712..b7008a7ab7 100644
--- a/src/transformers/benchmark/benchmark_utils.py
+++ b/src/transformers/benchmark/benchmark_utils.py
@@ -610,7 +610,7 @@ class Benchmark(ABC):
                 model_name: AutoConfig.from_pretrained(model_name) for model_name in self.args.model_names
             }
         else:
-            self.config_dict = {model_name: config for model_name, config in zip(self.args.model_names, configs)}
+            self.config_dict = dict(zip(self.args.model_names, configs))
 
         warnings.warn(
             f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
diff --git a/src/transformers/modelcard.py b/src/transformers/modelcard.py
index e89216b0d8..5f4627c3d9 100644
--- a/src/transformers/modelcard.py
+++ b/src/transformers/modelcard.py
@@ -399,9 +399,9 @@ class TrainingSummary:
         dataset_metadata = _listify(self.dataset_metadata)
         if len(dataset_args) < len(dataset_tags):
             dataset_args = dataset_args + [None] * (len(dataset_tags) - len(dataset_args))
-        dataset_mapping = {tag: name for tag, name in zip(dataset_tags, dataset_names)}
-        dataset_arg_mapping = {tag: arg for tag, arg in zip(dataset_tags, dataset_args)}
-        dataset_metadata_mapping = {tag: metadata for tag, metadata in zip(dataset_tags, dataset_metadata)}
+        dataset_mapping = dict(zip(dataset_tags, dataset_names))
+        dataset_arg_mapping = dict(zip(dataset_tags, dataset_args))
+        dataset_metadata_mapping = dict(zip(dataset_tags, dataset_metadata))
 
         task_mapping = {
             task: TASK_TAG_TO_NAME_MAPPING[task] for task in _listify(self.tasks) if task in TASK_TAG_TO_NAME_MAPPING
diff --git a/src/transformers/models/esm/tokenization_esm.py b/src/transformers/models/esm/tokenization_esm.py
index 232ce61fb7..83a1b415b0 100644
--- a/src/transformers/models/esm/tokenization_esm.py
+++ b/src/transformers/models/esm/tokenization_esm.py
@@ -57,7 +57,7 @@ class EsmTokenizer(PreTrainedTokenizer):
     def __init__(self, vocab_file, **kwargs):
         super().__init__(**kwargs)
         self.all_tokens = load_vocab_file(vocab_file)
-        self._id_to_token = {ind: tok for ind, tok in enumerate(self.all_tokens)}
+        self._id_to_token = dict(enumerate(self.all_tokens))
         self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
         self.unk_token = "<unk>"
         self.cls_token = "<cls>"
diff --git a/src/transformers/models/maskformer/convert_maskformer_original_pytorch_checkpoint_to_pytorch.py b/src/transformers/models/maskformer/convert_maskformer_original_pytorch_checkpoint_to_pytorch.py
index 1942f03666..df7c7aa7e1 100644
--- a/src/transformers/models/maskformer/convert_maskformer_original_pytorch_checkpoint_to_pytorch.py
+++ b/src/transformers/models/maskformer/convert_maskformer_original_pytorch_checkpoint_to_pytorch.py
@@ -111,7 +111,7 @@ class OriginalMaskFormerConfigToOursConverter:
         swin = model.SWIN
 
         dataset_catalog = MetadataCatalog.get(original_config.DATASETS.TEST[0])
-        id2label = {idx: label for idx, label in enumerate(dataset_catalog.stuff_classes)}
+        id2label = dict(enumerate(dataset_catalog.stuff_classes))
         label2id = {label: idx for idx, label in id2label.items()}
 
         config: MaskFormerConfig = MaskFormerConfig(
diff --git a/src/transformers/models/oneformer/convert_to_hf_oneformer.py b/src/transformers/models/oneformer/convert_to_hf_oneformer.py
index 9dbd32f9d3..cb93857ad8 100644
--- a/src/transformers/models/oneformer/convert_to_hf_oneformer.py
+++ b/src/transformers/models/oneformer/convert_to_hf_oneformer.py
@@ -122,7 +122,7 @@ class OriginalOneFormerConfigToOursConverter:
         model = original_config.MODEL
 
         dataset_catalog = MetadataCatalog.get(original_config.DATASETS.TEST_PANOPTIC[0])
-        id2label = {idx: label for idx, label in enumerate(dataset_catalog.stuff_classes)}
+        id2label = dict(enumerate(dataset_catalog.stuff_classes))
         label2id = {label: idx for idx, label in id2label.items()}
 
         if is_swin:
diff --git a/src/transformers/models/speech_encoder_decoder/convert_speech_to_text_wav2vec2_seq2seq_original_to_pytorch.py b/src/transformers/models/speech_encoder_decoder/convert_speech_to_text_wav2vec2_seq2seq_original_to_pytorch.py
index 0a4bc48dea..5e726aa9fd 100644
--- a/src/transformers/models/speech_encoder_decoder/convert_speech_to_text_wav2vec2_seq2seq_original_to_pytorch.py
+++ b/src/transformers/models/speech_encoder_decoder/convert_speech_to_text_wav2vec2_seq2seq_original_to_pytorch.py
@@ -207,7 +207,7 @@ def create_vocab_dict(dict_path):
         "<unk>": 3,
     }
 
-    vocab_dict.update({k: v for k, v in zip(words, range(4, num_words + 4))})
+    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
 
     return vocab_dict
 
diff --git a/src/transformers/onnx/convert.py b/src/transformers/onnx/convert.py
index 288c2574e1..9e9cc93c06 100644
--- a/src/transformers/onnx/convert.py
+++ b/src/transformers/onnx/convert.py
@@ -179,9 +179,7 @@ def export_pytorch(
                     f=output.as_posix(),
                    input_names=list(config.inputs.keys()),
                     output_names=onnx_outputs,
-                    dynamic_axes={
-                        name: axes for name, axes in chain(config.inputs.items(), config.outputs.items())
-                    },
+                    dynamic_axes=dict(chain(config.inputs.items(), config.outputs.items())),
                     do_constant_folding=True,
                     use_external_data_format=config.use_external_data_format(model.num_parameters()),
                     enable_onnx_checker=True,
@@ -208,7 +206,7 @@ def export_pytorch(
                     f=output.as_posix(),
                     input_names=list(config.inputs.keys()),
                     output_names=onnx_outputs,
-                    dynamic_axes={name: axes for name, axes in chain(config.inputs.items(), config.outputs.items())},
+                    dynamic_axes=dict(chain(config.inputs.items(), config.outputs.items())),
                     do_constant_folding=True,
                     opset_version=opset,
                 )
diff --git a/src/transformers/pipelines/document_question_answering.py b/src/transformers/pipelines/document_question_answering.py
index f8c052385c..78f49a5e2d 100644
--- a/src/transformers/pipelines/document_question_answering.py
+++ b/src/transformers/pipelines/document_question_answering.py
@@ -418,7 +418,7 @@ class DocumentQuestionAnsweringPipeline(ChunkPipeline):
         else:
             model_outputs = self.model(**model_inputs)
 
-        model_outputs = {k: v for (k, v) in model_outputs.items()}
+        model_outputs = dict(model_outputs.items())
         model_outputs["p_mask"] = p_mask
         model_outputs["word_ids"] = word_ids
         model_outputs["words"] = words
diff --git a/src/transformers/utils/generic.py b/src/transformers/utils/generic.py
index 21e9cf514f..3c3ade198b 100644
--- a/src/transformers/utils/generic.py
+++ b/src/transformers/utils/generic.py
@@ -282,7 +282,7 @@ class ModelOutput(OrderedDict):
 
     def __getitem__(self, k):
         if isinstance(k, str):
-            inner_dict = {k: v for (k, v) in self.items()}
+            inner_dict = dict(self.items())
             return inner_dict[k]
         else:
             return self.to_tuple()[k]
diff --git a/tests/models/mask2former/test_image_processing_mask2former.py b/tests/models/mask2former/test_image_processing_mask2former.py
index e21c8e6770..9f76f678f3 100644
--- a/tests/models/mask2former/test_image_processing_mask2former.py
+++ b/tests/models/mask2former/test_image_processing_mask2former.py
@@ -298,9 +298,7 @@ class Mask2FormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.Te
             high = num_labels
             if is_instance_map:
                 labels_expanded = list(range(num_labels)) * 2
-                instance_id_to_semantic_id = {
-                    instance_id: label_id for instance_id, label_id in enumerate(labels_expanded)
-                }
+                instance_id_to_semantic_id = dict(enumerate(labels_expanded))
             annotations = [
                 np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs
             ]
diff --git a/tests/models/maskformer/test_image_processing_maskformer.py b/tests/models/maskformer/test_image_processing_maskformer.py
index 694029603b..128d232d4c 100644
--- a/tests/models/maskformer/test_image_processing_maskformer.py
+++ b/tests/models/maskformer/test_image_processing_maskformer.py
@@ -298,9 +298,7 @@ class MaskFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.Tes
             high = num_labels
             if is_instance_map:
                 labels_expanded = list(range(num_labels)) * 2
-                instance_id_to_semantic_id = {
-                    instance_id: label_id for instance_id, label_id in enumerate(labels_expanded)
-                }
+                instance_id_to_semantic_id = dict(enumerate(labels_expanded))
             annotations = [
                 np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs
             ]
diff --git a/tests/models/oneformer/test_image_processing_oneformer.py b/tests/models/oneformer/test_image_processing_oneformer.py
index 8dec5c4070..d6fc5e228b 100644
--- a/tests/models/oneformer/test_image_processing_oneformer.py
+++ b/tests/models/oneformer/test_image_processing_oneformer.py
@@ -329,9 +329,7 @@ class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.Test
            high = num_labels
             if is_instance_map:
                 labels_expanded = list(range(num_labels)) * 2
-                instance_id_to_semantic_id = {
-                    instance_id: label_id for instance_id, label_id in enumerate(labels_expanded)
-                }
+                instance_id_to_semantic_id = dict(enumerate(labels_expanded))
             annotations = [
                 np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs
             ]
diff --git a/tests/models/oneformer/test_processor_oneformer.py b/tests/models/oneformer/test_processor_oneformer.py
index 5ce677cba6..e97895d7ce 100644
--- a/tests/models/oneformer/test_processor_oneformer.py
+++ b/tests/models/oneformer/test_processor_oneformer.py
@@ -401,9 +401,7 @@ class OneFormerProcessingTest(unittest.TestCase):
             high = num_labels
             if is_instance_map:
                 labels_expanded = list(range(num_labels)) * 2
-                instance_id_to_semantic_id = {
-                    instance_id: label_id for instance_id, label_id in enumerate(labels_expanded)
-                }
+                instance_id_to_semantic_id = dict(enumerate(labels_expanded))
             annotations = [
                 np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs
             ]
diff --git a/tests/pipelines/test_pipelines_automatic_speech_recognition.py b/tests/pipelines/test_pipelines_automatic_speech_recognition.py
index d266438ac3..6e1bbe96f6 100644
--- a/tests/pipelines/test_pipelines_automatic_speech_recognition.py
+++ b/tests/pipelines/test_pipelines_automatic_speech_recognition.py
@@ -56,11 +56,8 @@ if is_torch_available():
 
 @is_pipeline_test
 class AutomaticSpeechRecognitionPipelineTests(unittest.TestCase):
-    model_mapping = {
-        k: v
-        for k, v in (list(MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING.items()) if MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING else [])
-        + (MODEL_FOR_CTC_MAPPING.items() if MODEL_FOR_CTC_MAPPING else [])
-    }
+    model_mapping = dict((list(MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING.items()) if MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING else [])
+    + (MODEL_FOR_CTC_MAPPING.items() if MODEL_FOR_CTC_MAPPING else []))
 
     def get_test_pipeline(self, model, tokenizer, processor):
         if tokenizer is None:
diff --git a/tests/pipelines/test_pipelines_image_segmentation.py b/tests/pipelines/test_pipelines_image_segmentation.py
index b06672047a..d736b7eb41 100644
--- a/tests/pipelines/test_pipelines_image_segmentation.py
+++ b/tests/pipelines/test_pipelines_image_segmentation.py
@@ -80,14 +80,11 @@ def mask_to_test_readable_only_shape(mask: Image) -> Dict:
 @require_timm
 @require_torch
 class ImageSegmentationPipelineTests(unittest.TestCase):
-    model_mapping = {
-        k: v
-        for k, v in (
+    model_mapping = dict((
             list(MODEL_FOR_IMAGE_SEGMENTATION_MAPPING.items()) if MODEL_FOR_IMAGE_SEGMENTATION_MAPPING else []
         )
         + (MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING.items() if MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING else [])
-        + (MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING.items() if MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING else [])
-    }
+        + (MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING.items() if MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING else []))
 
     def get_test_pipeline(self, model, tokenizer, processor):
         image_segmenter = ImageSegmentationPipeline(model=model, image_processor=processor)
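
Note: every hunk above applies the same mechanical rewrite that the newer ruff release started flagging (the unnecessary-comprehension family of checks, e.g. C416): a dict comprehension that only repacks an existing iterable of key/value pairs is replaced with a direct dict() call. A minimal sketch of the pattern, with illustrative variable names not taken from the patch:

    labels = ["O", "B-PER", "I-PER"]

    # Flagged: the comprehension merely repacks the (index, label) pairs
    # that enumerate() already yields.
    id2label = {i: label for i, label in enumerate(labels)}

    # Equivalent, and what this patch switches to: pass the iterable of
    # pairs straight to the dict constructor.
    id2label = dict(enumerate(labels))

    assert id2label == {0: "O", 1: "B-PER", 2: "I-PER"}

Both forms build the same mapping; the constructor call just skips the redundant unpack/repack step. The rewrite is only valid when the comprehension does not transform its keys or values, which is why comprehensions such as label2id = {label: i for i, label in enumerate(label_list)} are left unchanged throughout the patch.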