Mirror of https://github.com/huggingface/transformers.git
[Awq] Add peft support for AWQ (#28987)
* add peft support for AWQ

* Update src/transformers/quantizers/quantizer_awq.py

Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com>

* fix

---------

Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com>
commit 864c8e6ea3
parent ce4fff0be7
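The diff below replaces the hard-coded return False in AwqQuantizer.is_trainable with a version gate on the installed autoawq package. A minimal standalone sketch of that gate, assuming autoawq is installed in the environment:

import importlib.metadata

from packaging import version

# PEFT fine-tuning of AWQ-quantized models requires autoawq >= 0.2.0.
MIN_AWQ_VERSION_FOR_PEFT = "0.2.0"

# importlib.metadata.version raises PackageNotFoundError if autoawq is not installed.
installed = version.parse(importlib.metadata.version("autoawq"))
print(installed >= version.parse(MIN_AWQ_VERSION_FOR_PEFT))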
src/transformers/quantizers/quantizer_awq.py

@@ -11,8 +11,11 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import importlib.metadata
 from typing import TYPE_CHECKING
 
+from packaging import version
+
 from .base import HfQuantizer
 
 
@@ -105,6 +108,6 @@ class AwqQuantizer(HfQuantizer):
 
     @property
     def is_trainable(self):
-        # AWQ does not support neither QAT (Quantization Aware Training or PEFT yet.)
-        # TODO: if this is supported in the future, do a version check here.
-        return False
+        # AWQ supports PEFT fine-tuning from version 0.2.0
+        MIN_AWQ_VERSION_FOR_PEFT = "0.2.0"
+        return version.parse(importlib.metadata.version("autoawq")) >= version.parse(MIN_AWQ_VERSION_FOR_PEFT)
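With is_trainable now returning True for autoawq >= 0.2.0, an AWQ-quantized checkpoint loaded through transformers can be wrapped with a PEFT adapter. A minimal sketch of such a setup, not part of this commit; the checkpoint id and LoRA hyperparameters below are illustrative assumptions:

from peft import LoraConfig, get_peft_model
from transformers import AutoModelForCausalLM

# Illustrative AWQ checkpoint; any AWQ-quantized causal LM from the Hub follows the same path.
model = AutoModelForCausalLM.from_pretrained("TheBloke/Mistral-7B-v0.1-AWQ", device_map="auto")

# Assumed LoRA settings; target_modules names match Llama/Mistral-style attention projections.
lora_config = LoraConfig(r=8, lora_alpha=16, target_modules=["q_proj", "v_proj"], task_type="CAUSAL_LM")

model = get_peft_model(model, lora_config)
model.print_trainable_parameters()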