hqq - fix weight check in check_quantized_param (#30748)

* hqq - fix weight check in check_quantized_param

* ruff format
This commit is contained in:
mobicham 2024-05-10 19:29:35 +02:00 committed by GitHub
parent 8ce4fefc52
commit e0c3cee170
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
1 changed file with 1 addition and 1 deletion

View File

@ -101,7 +101,7 @@ class HqqHfQuantizer(HfQuantizer):
) -> bool:
module, tensor_name = get_module_from_name(model, param_name)
return isinstance(module, torch.nn.Linear)
return isinstance(module, torch.nn.Linear) and (tensor_name == "weight")
def create_quantized_param(
self,