"""
extract factors the build is dependent on:
[X] compute capability
    [ ] TODO: Q - What if we have multiple GPUs of different makes?
- CUDA version
- Software:
    - CPU-only: only CPU quantization functions (no optimizer, no matrix multiplication)
    - CuBLAS-LT: full build (8-bit optimizer and 8-bit matrix multiplication)
    - no CuBLAS-LT: no 8-bit matrix multiplication (`nomatmul`)

package all binaries

evaluation:
    - if paths are faulty, return a meaningful error
    - else:
        - determine CUDA version
        - determine capabilities
        - based on that, set the default path
"""

import ctypes
import os
from pathlib import Path
from typing import Set, Union

from .utils import print_err, warn_of_missing_prerequisite, execute_and_return


def check_cuda_result(cuda, result_val):
    if result_val != 0:
        # look up the driver's human-readable error string for the failing call
        error_str = ctypes.c_char_p()
        cuda.cuGetErrorString(result_val, ctypes.byref(error_str))
        print(f"Could not initialize CUDA - failure! Error: {error_str.value}")
        raise Exception("CUDA exception!")
    return result_val
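
# For example, check_cuda_result(cuda, cuda.cuInit(0)) below returns 0 on
# success and raises an Exception (after printing the driver's error string)
# for any non-zero CUresult.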


# taken from https://gist.github.com/f0k/63a664160d016a491b2cbea15913d549
def get_compute_capability():
    libnames = ("libcuda.so", "libcuda.dylib", "cuda.dll")
    for libname in libnames:
        try:
            cuda = ctypes.CDLL(libname)
        except OSError:
            continue
        else:
            break
    else:
        raise OSError("could not load any of: " + " ".join(libnames))

    nGpus = ctypes.c_int()
    cc_major = ctypes.c_int()
    cc_minor = ctypes.c_int()
    device = ctypes.c_int()

    check_cuda_result(cuda, cuda.cuInit(0))
    check_cuda_result(cuda, cuda.cuDeviceGetCount(ctypes.byref(nGpus)))

    ccs = []
    for i in range(nGpus.value):
        check_cuda_result(cuda, cuda.cuDeviceGet(ctypes.byref(device), i))
        check_cuda_result(
            cuda,
            cuda.cuDeviceComputeCapability(
                ctypes.byref(cc_major), ctypes.byref(cc_minor), device
            ),
        )
        ccs.append(f"{cc_major.value}.{cc_minor.value}")

    # TODO: handle different compute capabilities; for now, take the max
    ccs.sort()
    # return ccs[-1]
    return ccs
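
# Illustrative only: on a hypothetical machine with one V100 and one RTX 3090,
# get_compute_capability() returns ["7.0", "8.6"] - one entry per detected GPU,
# sorted ascending.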


CUDA_RUNTIME_LIB: str = "libcudart.so"


def tokenize_paths(paths: str) -> Set[Path]:
    return {Path(ld_path) for ld_path in paths.split(":") if ld_path}
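
# For example (paths illustrative), tokenize_paths("/usr/local/cuda/lib64:/opt/conda/lib")
# yields {Path("/usr/local/cuda/lib64"), Path("/opt/conda/lib")}; empty entries
# from stray ":" separators are dropped by the `if ld_path` filter.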


def resolve_env_variable(env_var):
    paths: Set[Path] = tokenize_paths(env_var)

    non_existent_directories: Set[Path] = {
        path for path in paths if not path.exists()
    }

    if non_existent_directories:
        print_err(
            "WARNING: The following directories listed in your path were found to "
            f"be non-existent: {non_existent_directories}"
        )

    cuda_runtime_libs: Set[Path] = {
        path / CUDA_RUNTIME_LIB
        for path in paths
        if (path / CUDA_RUNTIME_LIB).is_file()
    } - non_existent_directories

    if len(cuda_runtime_libs) > 1:
        err_msg = f"Found duplicate {CUDA_RUNTIME_LIB} files: {cuda_runtime_libs}."
        raise FileNotFoundError(err_msg)
    elif len(cuda_runtime_libs) == 0:
        return None
    else:
        return next(iter(cuda_runtime_libs))  # for now just return the first
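
# For instance (value illustrative), resolve_env_variable("/usr/local/cuda/lib64:/tmp/missing")
# warns about the non-existent /tmp/missing directory and returns
# Path("/usr/local/cuda/lib64/libcudart.so") if that file exists, or None if no
# libcudart.so is found in any of the given paths.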


def get_cuda_runtime_lib_path() -> Union[Path, None]:
    """# TODO: add doc-string"""

    cuda_runtime_libs = []
    if 'CONDA_PREFIX' in os.environ:
        lib_conda_path = f'{os.environ["CONDA_PREFIX"]}/lib/'
        print(lib_conda_path)
        resolved = resolve_env_variable(lib_conda_path)
        if resolved is not None:
            cuda_runtime_libs.append(resolved)

    if len(cuda_runtime_libs) == 1:
        return cuda_runtime_libs[0]

    # fall back to scanning the values of all environment variables as path lists
    for var in os.environ:
        resolved = resolve_env_variable(os.environ[var])
        if resolved is not None:
            cuda_runtime_libs.append(resolved)

    if len(cuda_runtime_libs) < 1:
        err_msg = f"Did not find {CUDA_RUNTIME_LIB} files: {cuda_runtime_libs}."
        raise FileNotFoundError(err_msg)

    return cuda_runtime_libs.pop()
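
# On a typical conda-based CUDA install (path illustrative) this returns
# something like Path("/opt/conda/lib/libcudart.so"): the conda prefix is
# checked first, then the other environment variables are scanned.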


def evaluate_cuda_setup():
    cuda_path = get_cuda_runtime_lib_path()
    print(f'CUDA SETUP: CUDA path found: {cuda_path}')
    cc = get_compute_capability()
    binary_name = "libbitsandbytes_cpu.so"

    if not (has_gpu := bool(cc)):
        print(
            "WARNING: No GPU detected! Check your CUDA paths. Proceeding to load CPU-only library..."
        )
        return binary_name

    # use the highest compute capability found (the list is sorted ascending)
    has_cublaslt = cc[-1] in ["7.5", "8.0", "8.6"]

    # TODO:
    # (1) Model missing cases (no CUDA installed, but CUDA driver / nvidia-smi accessible)
    # (2) Multiple CUDA versions installed

    cuda_home = str(Path(cuda_path).parent.parent)

    # parse the runtime version out of the symlink target, e.g. "libcudart.so.11.2.152"
    ls_output, err = execute_and_return(f"ls -l {cuda_path}")
    major, minor, revision = (
        ls_output.split(' ')[-1].replace('libcudart.so.', '').split('.')
    )
    cuda_version_string = f"{major}{minor}"

    binary_name = f'libbitsandbytes_cuda{cuda_version_string}{("" if has_cublaslt else "_nocublaslt")}.so'

    return binary_name
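

# Minimal usage sketch (an illustrative addition, not the library's actual
# loading logic): executing this module as a script, e.g. via `python -m`,
# prints which binary the setup heuristics would select. Loading it with
# ctypes.CDLL from the package directory is only an assumption for demonstration.
if __name__ == "__main__":
    selected = evaluate_cuda_setup()
    print(f"CUDA SETUP: selected binary: {selected}")
    # lib = ctypes.CDLL(str(Path(__file__).parent / selected))  # hypothetical load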