Clearer error for SDPA when explicitly requested (#28006)
* clearer error for sdpa * better message
This commit is contained in:
parent
fe23256b73
commit
02f8738ef8
|
@ -1540,8 +1540,9 @@ class PreTrainedModel(nn.Module, ModuleUtilsMixin, GenerationMixin, PushToHubMix
|
|||
if hard_check_only:
|
||||
if not cls._supports_sdpa:
|
||||
raise ValueError(
|
||||
f"{cls.__name__} does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please open an issue on GitHub to "
|
||||
"request support for this architecture: https://github.com/huggingface/transformers/issues/new"
|
||||
f"{cls.__name__} does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet."
|
||||
" Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe"
|
||||
' this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation="eager"` meanwhile. Example: `model = AutoModel.from_pretrained("openai/whisper-tiny", attn_implementation="eager")`'
|
||||
)
|
||||
if not is_torch_sdpa_available():
|
||||
raise ImportError(
|
||||
|
|
Loading…
Reference in New Issue