[tests] enable automatic speech recognition pipeline tests on XPU (#29308)
* use require_torch_gpu * enable on XPU
This commit is contained in:
parent
831bc25d8f
commit
aade711d1e
|
@@ -1221,7 +1221,7 @@ class AutomaticSpeechRecognitionPipelineTests(unittest.TestCase):
         processor = AutoProcessor.from_pretrained("openai/whisper-tiny.en")
         model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en")
-        model = model.to("cuda")
+        model = model.to(torch_device)

         pipe = pipeline(
             "automatic-speech-recognition",
@@ -1229,7 +1229,7 @@ class AutomaticSpeechRecognitionPipelineTests(unittest.TestCase):
             tokenizer=processor.tokenizer,
             feature_extractor=processor.feature_extractor,
             max_new_tokens=128,
-            device="cuda:0",
+            device=torch_device,
         )

         ds = load_dataset("distil-whisper/meanwhile", "default")["test"]
@@ -1246,7 +1246,7 @@ class AutomaticSpeechRecognitionPipelineTests(unittest.TestCase):
         pipe = pipeline(
             "automatic-speech-recognition",
             model="facebook/seamless-m4t-v2-large",
-            device="cuda:0",
+            device=torch_device,
        )

         dataset = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
|
Loading…
Reference in New Issue