Logging message for `` is_bitsandbytes_available() `` (#38528)
Some checks are pending
Self-hosted runner (benchmark) / Benchmark (aws-g5-4xlarge-cache) (push) Waiting to run
Build documentation / build (push) Waiting to run
Slow tests on important models (on Push - A10) / Get all modified files (push) Waiting to run
Slow tests on important models (on Push - A10) / Slow & FA2 tests (push) Blocked by required conditions
Self-hosted runner (push-caller) / Check if setup was changed (push) Waiting to run
Self-hosted runner (push-caller) / build-docker-containers (push) Blocked by required conditions
Self-hosted runner (push-caller) / Trigger Push CI (push) Blocked by required conditions
Secret Leaks / trufflehog (push) Waiting to run
Update Transformers metadata / build_and_package (push) Waiting to run

* bnb import log

* bnb import log

* log message change

* moved error raising into quantizer_bnb_4_bit.py

* ruff

* arg added for bnb check

* required changes

---------

Co-authored-by: Marc Sun <57196510+SunMarc@users.noreply.github.com>
This commit is contained in:
वेदांत 2025-06-10 15:45:01 +05:30 committed by GitHub
parent 04cdf83244
commit 71f7385942
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
3 changed files with 36 additions and 10 deletions

View File

@@ -72,10 +72,23 @@ class Bnb4BitHfQuantizer(HfQuantizer):
raise ImportError(
f"Using `bitsandbytes` 4-bit quantization requires Accelerate: `pip install 'accelerate>={ACCELERATE_MIN_VERSION}'`"
)
if not is_bitsandbytes_available():
if not is_bitsandbytes_available(check_library_only=True):
raise ImportError(
"Using `bitsandbytes` 4-bit quantization requires the latest version of bitsandbytes: `pip install -U bitsandbytes`"
)
if not is_torch_available():
raise ImportError(
"The bitsandbytes library requires PyTorch but it was not found in your environment. "
"You can install it with `pip install torch`."
)
# `bitsandbytes` versions older than 0.43.1 eagerly require CUDA at import time,
# so those versions of the library are practically only available when CUDA is too.
if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.43.1"):
if not torch.cuda.is_available():
raise ImportError(
"The installed version of bitsandbytes (<0.43.1) requires CUDA, but CUDA is not available. "
"You may need to install PyTorch with CUDA support or upgrade bitsandbytes to >=0.43.1."
)
from ..integrations import validate_bnb_backend_availability
from ..utils import is_bitsandbytes_multi_backend_available
@@ -110,12 +123,6 @@ class Bnb4BitHfQuantizer(HfQuantizer):
"for more details. "
)
if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.39.0"):
raise ValueError(
"You have a version of `bitsandbytes` that is not compatible with 4bit inference and training"
" make sure you have the latest version of `bitsandbytes` installed"
)
def adjust_target_dtype(self, target_dtype: "torch.dtype") -> "torch.dtype":
if version.parse(importlib.metadata.version("accelerate")) > version.parse("0.19.0"):
from accelerate.utils import CustomDtype

View File

@@ -69,10 +69,23 @@ class Bnb8BitHfQuantizer(HfQuantizer):
raise ImportError(
f"Using `bitsandbytes` 8-bit quantization requires Accelerate: `pip install 'accelerate>={ACCELERATE_MIN_VERSION}'`"
)
if not is_bitsandbytes_available():
if not is_bitsandbytes_available(check_library_only=True):
raise ImportError(
"Using `bitsandbytes` 8-bit quantization requires the latest version of bitsandbytes: `pip install -U bitsandbytes`"
)
if not is_torch_available():
raise ImportError(
"The bitsandbytes library requires PyTorch but it was not found in your environment. "
"You can install it with `pip install torch`."
)
# `bitsandbytes` versions older than 0.43.1 eagerly require CUDA at import time,
# so those versions of the library are practically only available when CUDA is too.
if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.43.1"):
if not torch.cuda.is_available():
raise ImportError(
"The installed version of bitsandbytes (<0.43.1) requires CUDA, but CUDA is not available. "
"You may need to install PyTorch with CUDA support or upgrade bitsandbytes to >=0.43.1."
)
from ..integrations import validate_bnb_backend_availability
from ..utils import is_bitsandbytes_multi_backend_available

View File

@@ -995,8 +995,14 @@ def is_torch_xpu_available(check_device=False):
@lru_cache()
def is_bitsandbytes_available():
if not is_torch_available() or not _bitsandbytes_available:
def is_bitsandbytes_available(check_library_only=False) -> bool:
if not _bitsandbytes_available:
return False
if check_library_only:
return True
if not is_torch_available():
return False
import torch