FROM nvidia/cuda:11.8.0-cudnn8-devel-ubuntu20.04
LABEL maintainer="Hugging Face"

ARG DEBIAN_FRONTEND=noninteractive

# Use a login shell to read variables from `~/.profile` (to pass dynamically created variables between RUN commands)
SHELL ["sh", "-lc"]
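# Note: later RUN steps below export VERSION into `~/.profile` and rely on this login shell to read it back.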

# The following `ARG` are mainly used to specify the versions explicitly & directly in this Dockerfile, and are not meant
# to be used as arguments for docker build (so far).

ARG PYTORCH='2.2.1'
# Example: `cu102`, `cu113`, etc.
ARG CUDA='cu118'
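# `cu118` matches the CUDA 11.8 base image above and selects the PyTorch wheel index used further below.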

RUN apt update
RUN apt install -y git libsndfile1-dev tesseract-ocr espeak-ng python python3-pip ffmpeg
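# System packages are mostly for the test suite: libsndfile1-dev, espeak-ng and ffmpeg for audio models, tesseract-ocr for OCR-based models.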
RUN python3 -m pip install --no-cache-dir --upgrade pip

ARG REF=main
RUN git clone https://github.com/huggingface/transformers && cd transformers && git checkout $REF
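# REF defaults to `main` but can be overridden at build time, e.g. `docker build --build-arg REF=<branch-or-tag> ...`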

RUN [ ${#PYTORCH} -gt 0 ] && VERSION='torch=='$PYTORCH'.*' || VERSION='torch'; echo "export VERSION='$VERSION'" >> ~/.profile
RUN echo torch=$VERSION
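# The two steps above pin `torch==$PYTORCH.*` when PYTORCH is set (otherwise the latest `torch` is used) and export
# VERSION through `~/.profile`, so the login shell makes it visible to the following RUN commands.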
# `torchvision` and `torchaudio` should be installed along with `torch`, especially for nightly builds.
# Currently, let's just use their latest releases (when `torch` is installed with a release version).
RUN python3 -m pip install --no-cache-dir -U $VERSION torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/$CUDA

RUN python3 -m pip install --no-cache-dir -e ./transformers[dev-torch]
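# `-e` installs the clone in editable mode; the `dev-torch` extra pulls in the PyTorch-related development and test dependencies.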

RUN python3 -m pip install --no-cache-dir git+https://github.com/huggingface/accelerate@main#egg=accelerate
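# accelerate is installed from `main` (rather than a release), presumably to test against its latest changes; it provides
# the `device_map`-based loading used by the quantization backends below.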

# Needed by bitsandbytes (bnb) and AutoAWQ (awq)
RUN python3 -m pip install --no-cache-dir einops

# Add bitsandbytes for mixed int8 testing
RUN python3 -m pip install --no-cache-dir bitsandbytes
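# These tests load models roughly as follows (illustrative only, not executed during the build):
#   AutoModelForCausalLM.from_pretrained(model_id, quantization_config=BitsAndBytesConfig(load_in_8bit=True))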

# Add auto-gptq for gptq quantization testing
RUN python3 -m pip install --no-cache-dir auto-gptq --extra-index-url https://huggingface.github.io/autogptq-index/whl/cu118/

# Add optimum for gptq quantization testing
RUN python3 -m pip install --no-cache-dir git+https://github.com/huggingface/optimum@main#egg=optimum

# Add aqlm for quantization testing
RUN python3 -m pip install --no-cache-dir aqlm[gpu]==1.0.2

# Add hqq for quantization testing
RUN python3 -m pip install --no-cache-dir hqq
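# HQQ is driven through `HqqConfig`, roughly (illustrative only, not executed during the build):
#   AutoModelForCausalLM.from_pretrained(model_id, quantization_config=HqqConfig(nbits=4, group_size=64))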

# Add autoawq for quantization testing
# >=v0.2.3 needed for compatibility with torch 2.2.1
RUN python3 -m pip install --no-cache-dir https://github.com/casper-hansen/AutoAWQ/releases/download/v0.2.3/autoawq-0.2.3+cu118-cp38-cp38-linux_x86_64.whl
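# The pinned wheel is a cu118/cp38 build, matching the CUDA 11.8 base image and Ubuntu 20.04's default Python 3.8.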

# Add quanto for quantization testing
RUN python3 -m pip install --no-cache-dir quanto

# Add eetq for quantization testing
RUN python3 -m pip install git+https://github.com/NetEase-FuXi/EETQ.git

# When installing in editable mode, `transformers` is not recognized as a package.
# This line must be added in order for Python to be aware of transformers.
RUN cd transformers && python3 setup.py develop
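# Optional sanity check (illustrative, not part of the original build):
#   RUN python3 -c "import transformers; print(transformers.__version__)"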