python312Packages.bitsandbytes: 0.43.1 -> 0.43.3 #343246

Merged · 1 commit · Oct 30, 2024
103 changes: 58 additions & 45 deletions pkgs/development/python-modules/bitsandbytes/default.nix
@@ -1,39 +1,44 @@
{
lib,
torch,
symlinkJoin,
buildPythonPackage,
fetchFromGitHub,
python,
pythonOlder,
cmake,
setuptools,
wheel,
torch,
scipy,
symlinkJoin,
}:

let
pname = "bitsandbytes";
version = "0.43.1";
version = "0.44.1";

inherit (torch) cudaPackages cudaSupport;
inherit (cudaPackages) cudaVersion;

inherit (torch) cudaCapabilities cudaPackages cudaSupport;
inherit (cudaPackages) backendStdenv cudaVersion;
cudaVersionString = lib.replaceStrings [ "." ] [ "" ] (lib.versions.majorMinor cudaVersion);

# NOTE: torchvision doesn't use cudnn; torch does!
# For this reason it is not included.
cuda-common-redist = with cudaPackages; [
cuda_cccl # <thrust/*>
libcublas # cublas_v2.h
(lib.getDev cuda_cccl) # <thrust/*>
(lib.getDev libcublas) # cublas_v2.h
(lib.getLib libcublas)
libcurand
libcusolver # cusolverDn.h
libcusparse # cusparse.h
(lib.getDev libcusparse) # cusparse.h
(lib.getLib libcusparse) # cusparse.h
(lib.getDev cuda_cudart) # cuda_runtime.h cuda_runtime_api.h
];

cuda-native-redist = symlinkJoin {
name = "cuda-native-redist-${cudaVersion}";
paths =
with cudaPackages;
[
cuda_cudart # cuda_runtime.h cuda_runtime_api.h
(lib.getDev cuda_cudart) # cuda_runtime.h cuda_runtime_api.h
(lib.getLib cuda_cudart)
(lib.getStatic cuda_cudart)
cuda_nvcc
]
++ cuda-common-redist;
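As an aside on the cudaVersionString binding above: it strips the dot from the major.minor CUDA version, producing the suffix used in the built library name. Below is a rough Python equivalent of that mangling (illustration only, not part of the derivation; the cuda_version_string helper name is made up here), using 12.4 as the example since the comments further down mention libbitsandbytes_cuda124.so.

# Illustration of the Nix expression
#   lib.replaceStrings [ "." ] [ "" ] (lib.versions.majorMinor cudaVersion)
# which maps a full CUDA version to the digits-only major.minor suffix.
def cuda_version_string(cuda_version: str) -> str:
    major, minor = cuda_version.split(".")[:2]  # majorMinor: keep "12.4" of "12.4.131"
    return f"{major}{minor}"                    # replaceStrings: drop the dot -> "124"

assert cuda_version_string("12.4.131") == "124"
print(f"libbitsandbytes_cuda{cuda_version_string('12.4.131')}.so")  # libbitsandbytes_cuda124.so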
@@ -48,47 +53,55 @@ buildPythonPackage {
inherit pname version;
pyproject = true;

disabled = pythonOlder "3.7";

src = fetchFromGitHub {
owner = "TimDettmers";
repo = "bitsandbytes";
rev = "refs/tags/${version}";
hash = "sha256-GFbFKPdV96DXPA+PZO4h0zdBclN670fb0PGv4QPHWHU=";
hash = "sha256-yvxD5ymMK5p4Xg7Csx/90mPV3yxUC6QUuF/8BKO2p0k=";
};

postPatch =
''
substituteInPlace Makefile --replace "/usr/bin/g++" "g++" --replace "lib64" "lib"
substituteInPlace bitsandbytes/cuda_setup/main.py \
--replace "binary_path = package_dir / self.binary_name" \
"binary_path = Path('$out/${python.sitePackages}/${pname}')/self.binary_name"
''
+ lib.optionalString torch.cudaSupport ''
substituteInPlace bitsandbytes/cuda_setup/main.py \
--replace "/usr/local/cuda/lib64" "${cuda-native-redist}/lib"
'';

CUDA_HOME = "${cuda-native-redist}";

preBuild =
if torch.cudaSupport then
with torch.cudaPackages;
let
cudaVersion = lib.concatStrings (lib.splitVersion torch.cudaPackages.cudaMajorMinorVersion);
in
''make CUDA_VERSION=${cudaVersion} cuda${cudaMajorVersion}x''
else
''make CUDA_VERSION=CPU cpuonly'';
# By default, which library is loaded depends on the result of `torch.cuda.is_available()`.
# When `cudaSupport` is enabled, bypass this check and load the cuda library unconditionally.
# Indeed, in this case, only `libbitsandbytes_cuda124.so` is built. `libbitsandbytes_cpu.so` is not.
# Also, hardcode the path to the previously built library instead of relying on
# `get_cuda_bnb_library_path(cuda_specs)` which relies on `torch.cuda` too.
#
# WARNING: The cuda library is currently named `libbitsandbytes_cudaxxy` for cuda version `xx.y`.
# This upstream convention could change at some point and thus break the following patch.
postPatch = lib.optionalString cudaSupport ''
substituteInPlace bitsandbytes/cextension.py \
--replace-fail "if cuda_specs:" "if True:" \
--replace-fail \
"cuda_binary_path = get_cuda_bnb_library_path(cuda_specs)" \
"cuda_binary_path = PACKAGE_DIR / 'libbitsandbytes_cuda${cudaVersionString}.so'"
'';

nativeBuildInputs = [
cmake
cudaPackages.cuda_nvcc
];

build-system = [
setuptools
wheel
] ++ lib.optionals torch.cudaSupport [ cuda-native-redist ];
];

buildInputs = lib.optionals cudaSupport [ cuda-redist ];

cmakeFlags = [
(lib.cmakeFeature "COMPUTE_BACKEND" (if cudaSupport then "cuda" else "cpu"))
];
CUDA_HOME = "${cuda-native-redist}";
NVCC_PREPEND_FLAGS = lib.optionals cudaSupport [
"-I${cuda-native-redist}/include"
"-L${cuda-native-redist}/lib"
];

buildInputs = lib.optionals torch.cudaSupport [ cuda-redist ];
preBuild = ''
make -j $NIX_BUILD_CORES
cd .. # leave /build/source/build
'';

propagatedBuildInputs = [
dependencies = [
scipy
torch
];
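To make the intent of the cextension.py patch in this hunk concrete, here is a minimal sketch of what the substituted code roughly boils down to with CUDA 12.4 (so the suffix is 124). The PACKAGE_DIR, cuda_specs, and get_cuda_bnb_library_path names are taken from the diff itself; the surrounding function is a simplified, hypothetical stand-in, not upstream's actual code.

from pathlib import Path

PACKAGE_DIR = Path(__file__).parent  # stand-in for upstream's module-level constant

def pick_cuda_library(cuda_specs) -> Path:
    # Unpatched upstream (per the substitution targets above):
    #     if cuda_specs:
    #         cuda_binary_path = get_cuda_bnb_library_path(cuda_specs)
    # i.e. the CUDA library is only chosen when torch.cuda looks usable.
    #
    # After the patch applied by this derivation, the check is bypassed and the
    # path is hardcoded to the one library that was actually built:
    if True:  # was: if cuda_specs:
        cuda_binary_path = PACKAGE_DIR / "libbitsandbytes_cuda124.so"
        return cuda_binary_path
    raise RuntimeError("unreachable after the patch")  # no libbitsandbytes_cpu.so is built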
@@ -97,11 +110,11 @@ buildPythonPackage {

pythonImportsCheck = [ "bitsandbytes" ];

meta = with lib; {
meta = {
description = "8-bit CUDA functions for PyTorch";
homepage = "https://github.com/TimDettmers/bitsandbytes";
changelog = "https://github.com/TimDettmers/bitsandbytes/releases/tag/${version}";
license = licenses.mit;
maintainers = with maintainers; [ bcdarwin ];
license = lib.licenses.mit;
maintainers = with lib.maintainers; [ bcdarwin ];
};
}
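Finally, a quick post-build smoke test in the spirit of the pythonImportsCheck above (a sketch only; it assumes the built package is importable, e.g. inside a nix-shell, and hedges on whether bitsandbytes exposes __version__):

# Minimal import check mirroring pythonImportsCheck = [ "bitsandbytes" ].
import bitsandbytes

# __version__ is assumed to exist; this diff packages version 0.44.1.
print(getattr(bitsandbytes, "__version__", "unknown"))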