2022-07-28 04:16:04 +00:00
|
|
|
"""
|
2022-08-01 16:32:47 +00:00
|
|
|
extract factors the build is dependent on:
|
|
|
|
[X] compute capability
|
|
|
|
[ ] TODO: Q - What if we have multiple GPUs of different makes?
|
2022-07-28 04:16:04 +00:00
|
|
|
- CUDA version
|
|
|
|
- Software:
|
|
|
|
- CPU-only: only CPU quantization functions (no optimizer, no matrix multiplication)
|
|
|
|
- CuBLAS-LT: full-build 8-bit optimizer
|
|
|
|
- no CuBLAS-LT: no 8-bit matrix multiplication (`nomatmul`)
|
|
|
|
|
|
|
|
package all binaries
|
|
|
|
|
|
|
|
evaluation:
|
|
|
|
- if paths faulty, return meaningful error
|
|
|
|
- else:
|
|
|
|
- determine CUDA version
|
|
|
|
- determine capabilities
|
|
|
|
- based on that set the default path
|
|
|
|
"""
|
|
|
|
|
2022-08-01 10:31:48 +00:00
|
|
|
import ctypes
import shlex
import subprocess
from os import environ as env
from pathlib import Path
from typing import Set, Tuple, Union

from .utils import print_err, warn_of_missing_prerequisite
|
|
|
|
|
2022-08-01 00:47:44 +00:00
|
|
|
|
2022-08-01 16:32:47 +00:00
|
|
|
def execute_and_return(command_string: str) -> Tuple[str, str]:
    """Run a command in a subprocess and return its decoded (stdout, stderr).

    Args:
        command_string: shell-style command line; tokenized with
            ``shlex.split`` so no shell is spawned.

    Returns:
        Tuple ``(std_out, std_err)``, each UTF-8 decoded and stripped.
    """

    def _decode(subprocess_err_out_tuple):
        # Popen.communicate() yields raw bytes; normalize to stripped str.
        return tuple(
            to_decode.decode("UTF-8").strip()
            for to_decode in subprocess_err_out_tuple
        )

    def execute_and_return_decoded_std_streams(command_string):
        return _decode(
            subprocess.Popen(
                shlex.split(command_string),
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            ).communicate()
        )

    # BUG FIX: the command string was previously not forwarded to the inner
    # helper, so every call raised a TypeError (missing positional argument).
    std_out, std_err = execute_and_return_decoded_std_streams(command_string)
    return std_out, std_err
|
|
|
|
|
|
|
|
|
2022-08-01 00:47:44 +00:00
|
|
|
def check_cuda_result(cuda, result_val):
    """Check a CUDA driver API status code, raising on any failure.

    Args:
        cuda: ctypes handle to the loaded CUDA driver library.
        result_val: integer status returned by a driver API call
            (0 == CUDA_SUCCESS).

    Returns:
        ``result_val`` unchanged when it signals success.

    Raises:
        Exception: when ``result_val`` is non-zero.
    """
    if result_val != 0:
        # BUG FIX: error_str was previously referenced without ever being
        # defined in this scope (see the old TODO) — define it locally and
        # let the driver fill in the human-readable message.
        error_str = ctypes.c_char_p()
        cuda.cuGetErrorString(result_val, ctypes.byref(error_str))
        print(f"Could not initialize CUDA - failure: {error_str.value}")
        raise Exception("CUDA exception!")
    return result_val
|
|
|
|
|
2022-08-01 10:31:48 +00:00
|
|
|
|
2022-08-01 00:47:44 +00:00
|
|
|
# taken from https://gist.github.com/f0k/63a664160d016a491b2cbea15913d549
def get_compute_capability():
    """Return the compute capability of every visible CUDA device.

    Loads the CUDA driver library via ctypes, enumerates the devices and
    queries each one's (major, minor) compute capability.

    Returns:
        Sorted list of ``"major.minor"`` strings, one per GPU (empty list
        when the driver reports zero devices), ascending so callers can
        take the maximum via ``ccs[-1]``.

    Raises:
        OSError: when no CUDA driver library can be loaded at all.
    """
    libnames = ("libcuda.so", "libcuda.dylib", "cuda.dll")
    for libname in libnames:
        try:
            cuda = ctypes.CDLL(libname)
        except OSError:
            continue
        else:
            break
    else:
        raise OSError("could not load any of: " + " ".join(libnames))

    nGpus = ctypes.c_int()
    cc_major = ctypes.c_int()
    cc_minor = ctypes.c_int()
    device = ctypes.c_int()
    # NOTE: the previously-declared `result`, `context` and `error_str`
    # locals were never used and have been removed (old TODOs).

    check_cuda_result(cuda, cuda.cuInit(0))
    check_cuda_result(cuda, cuda.cuDeviceGetCount(ctypes.byref(nGpus)))

    ccs = []
    for i in range(nGpus.value):
        check_cuda_result(cuda, cuda.cuDeviceGet(ctypes.byref(device), i))
        check_cuda_result(
            cuda,
            cuda.cuDeviceComputeCapability(
                ctypes.byref(cc_major), ctypes.byref(cc_minor), device
            ),
        )
        ccs.append(f"{cc_major.value}.{cc_minor.value}")

    # TODO: handle different compute capabilities; for now, take the max
    ccs.sort()
    # return ccs[-1]
    return ccs
|
|
|
|
|
2022-07-28 04:16:04 +00:00
|
|
|
|
|
|
|
# Filename of the CUDA runtime shared library searched for on LD_LIBRARY_PATH.
CUDA_RUNTIME_LIB: str = "libcudart.so"
|
|
|
|
|
2022-08-01 10:31:48 +00:00
|
|
|
|
2022-07-28 04:16:04 +00:00
|
|
|
def tokenize_paths(paths: str) -> Set[Path]:
    """Split a colon-separated path string into a set of Paths, dropping empty segments."""
    tokens: Set[Path] = set()
    for segment in paths.split(":"):
        if segment:
            tokens.add(Path(segment))
    return tokens
|
|
|
|
|
2022-07-28 04:16:04 +00:00
|
|
|
|
|
|
|
def get_cuda_runtime_lib_path(
    # TODO: replace this with logic for all paths in env vars
    LD_LIBRARY_PATH: Union[str, None] = env.get("LD_LIBRARY_PATH")
) -> Union[Path, None]:
    """Locate the single CUDA runtime library on ``LD_LIBRARY_PATH``.

    Args:
        LD_LIBRARY_PATH: colon-separated search path; defaults to the
            environment variable captured at import time.

    Returns:
        Path to the unique ``libcudart.so`` file, or ``None`` when
        ``LD_LIBRARY_PATH`` is unset/empty.

    Raises:
        FileNotFoundError: when zero or more than one candidate is found.
    """
    if not LD_LIBRARY_PATH:
        warn_of_missing_prerequisite(
            "LD_LIBRARY_PATH is completely missing from environment!"
        )
        return None

    ld_library_paths: Set[Path] = tokenize_paths(LD_LIBRARY_PATH)

    non_existent_directories: Set[Path] = {
        path for path in ld_library_paths if not path.exists()
    }

    if non_existent_directories:
        print_err(
            "WARNING: The following directories listed in your path were found to "
            f"be non-existent: {non_existent_directories}"
        )

    # BUG FIX: the previous `- non_existent_directories` subtracted a set of
    # *directory* paths from a set of *file* paths — always a no-op.
    # is_file() already excludes anything under a non-existent directory.
    cuda_runtime_libs: Set[Path] = {
        path / CUDA_RUNTIME_LIB
        for path in ld_library_paths
        if (path / CUDA_RUNTIME_LIB).is_file()
    }

    if len(cuda_runtime_libs) > 1:
        err_msg = (
            f"Found duplicate {CUDA_RUNTIME_LIB} files: {cuda_runtime_libs}.."
        )
        raise FileNotFoundError(err_msg)

    if len(cuda_runtime_libs) < 1:
        err_msg = (
            f"Did not find {CUDA_RUNTIME_LIB} files: {cuda_runtime_libs}.."
        )
        raise FileNotFoundError(err_msg)

    # Exactly one candidate remains at this point (a file path, not a dir).
    return next(iter(cuda_runtime_libs))
|
2022-07-28 04:16:04 +00:00
|
|
|
|
2022-08-01 10:31:48 +00:00
|
|
|
|
2022-07-28 04:16:04 +00:00
|
|
|
def evaluate_cuda_setup():
    """Decide which bitsandbytes binary to load on this machine.

    Finds the CUDA runtime on LD_LIBRARY_PATH, queries the GPUs' compute
    capabilities and the installed CUDA toolkit version, then composes the
    matching shared-library filename.

    Returns:
        Name of the shared library to load (CPU-only fallback when no GPU
        is detected).
    """
    cuda_path = get_cuda_runtime_lib_path()
    ccs = get_compute_capability()
    binary_name = "libbitsandbytes_cpu.so"

    if not ccs:
        print(
            "WARNING: No GPU detected! Check your CUDA paths. Proceeding to load CPU-only library..."
        )
        return binary_name

    # BUG FIX: get_compute_capability() returns a sorted *list* of
    # "major.minor" strings; the previous `cc in [...]` compared a list
    # against strings and was therefore always False. Use the highest
    # capability (list is sorted ascending) for the feature check.
    cc = ccs[-1]
    has_cublaslt = cc in ["7.5", "8.0", "8.6"]

    # TODO:
    # (1) Model missing cases (no CUDA installed by CUDA driver (nvidia-smi accessible)
    # (2) Multiple CUDA versions installed

    # nvcc lives in <cuda_home>/bin; cuda_path is <cuda_home>/<libdir>/libcudart.so,
    # so going up two levels yields the toolkit root.
    cuda_home = str(Path(cuda_path).parent.parent)
    ls_output, err = execute_and_return(f"{cuda_home}/bin/nvcc --version")
    # `nvcc --version` line 4 ends with e.g. "release 11.2, V11.2.67".
    cuda_version = (
        ls_output.split("\n")[3].split(",")[-1].strip().lower().replace("v", "")
    )
    major, minor, revision = cuda_version.split(".")
    cuda_version_string = f"{major}{minor}"

    binary_name = f'libbitsandbytes_cuda{cuda_version_string}_{("cublaslt" if has_cublaslt else "")}.so'

    return binary_name
|