Enhanced error handling in CUDA SETUP failures.
This commit is contained in:
parent de354f7ded
commit a6664de072

 CHANGELOG.md | 23 +++++++++++++++++++++++
@@ -67,3 +67,26 @@ Features:
 
 Deprecated:
 - Pre-compiled release for CUDA 9.2, 10.0, 10.2 no longer available
+
+### 0.31.0
+
+#### 8-bit Inference and Packaging Update
+
+Features:
+ - Added direct outlier extraction. This enables outlier extraction without fp16 weights and without performance degradation.
+ - Added an automatic CUDA SETUP procedure and packaged all binaries into a single bitsandbytes package.
+
+### 0.32.0
+
+#### 8-bit Inference Performance Enhancements
+
+We added performance enhancements for small models. This makes small models about 2x faster for LLM.int8() inference.
+
+Features:
+ - Int32 dequantization now supports fused biases.
+ - Linear8bitLt now uses a fused bias implementation.
+ - Changed `.data.storage().data_ptr()` to `.data.data_ptr()` to enhance inference performance.
+
+Bug fixes:
+ - Now throws an error if LLM.int8() is used on a GPU that is not supported.
+ - Enhances error messaging if CUDA SETUP fails.
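As background for the `.data.data_ptr()` change noted above: a minimal sketch, using a hypothetical `get_ptr` helper (illustrative, not necessarily the library's own), of how a tensor's device pointer gets handed to the C extension:

    import ctypes
    import torch

    def get_ptr(tensor: torch.Tensor) -> ctypes.c_void_p:
        # tensor.data.data_ptr() returns the element address directly;
        # the old tensor.data.storage().data_ptr() first materialized a
        # Python storage object, adding per-call overhead.
        return ctypes.c_void_p(tensor.data.data_ptr())

Avoiding the intermediate storage object is a cheap win for small models, where per-call Python overhead is a larger fraction of total inference time.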
@@ -17,12 +17,17 @@ class CUDALibrary_Singleton(object):
         binary_path = package_dir / binary_name
 
         if not binary_path.exists():
-            print(f"CUDA_SETUP: TODO: compile library for specific version: {binary_name}")
+            print(f"CUDA SETUP: TODO: compile library for specific version: {binary_name}")
             legacy_binary_name = "libbitsandbytes.so"
-            print(f"CUDA_SETUP: Defaulting to {legacy_binary_name}...")
-            self.lib = ct.cdll.LoadLibrary(package_dir / legacy_binary_name)
+            print(f"CUDA SETUP: Defaulting to {legacy_binary_name}...")
+            binary_path = package_dir / legacy_binary_name
+            if not binary_path.exists():
+                print('CUDA SETUP: CUDA detection failed. Either CUDA driver not installed, CUDA not installed, or you have multiple conflicting CUDA libraries!')
+                print('CUDA SETUP: If you compiled from source, try again with `make CUDA_VERSION=DETECTED_CUDA_VERSION` for example, `make CUDA_VERSION=113`.')
+                raise Exception('CUDA SETUP: Setup Failed!')
+            self.lib = ct.cdll.LoadLibrary(binary_path)
         else:
-            print(f"CUDA_SETUP: Loading binary {binary_path}...")
+            print(f"CUDA SETUP: Loading binary {binary_path}...")
             self.lib = ct.cdll.LoadLibrary(binary_path)
 
     @classmethod
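Read as control flow, the patched loader now has three outcomes instead of two: load the version-specific binary, fall back to the legacy unversioned binary, or raise with actionable messages. A standalone sketch of that flow (function name and structure are illustrative, not the library's API):

    from pathlib import Path
    import ctypes as ct

    def load_bnb_binary(package_dir: Path, binary_name: str):
        # Try the CUDA-version-specific binary first.
        binary_path = package_dir / binary_name
        if not binary_path.exists():
            # Fall back to the legacy unversioned build before giving up.
            binary_path = package_dir / "libbitsandbytes.so"
            if not binary_path.exists():
                raise Exception('CUDA SETUP: Setup Failed!')
        return ct.cdll.LoadLibrary(str(binary_path))

The key fix is that the fallback path previously called LoadLibrary unconditionally; now the legacy binary's existence is checked first, so users get a diagnosis instead of an opaque OSError.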
@@ -46,7 +46,7 @@ def get_cuda_version(cuda, cudart_path):
     minor = (version-(major*1000))//10
 
     if major < 11:
-        print('CUDA SETUP: CUDA versions lower than 11 are currently not supported!')
+        print('CUDA SETUP: CUDA versions lower than 11 are currently not supported for LLM.int8(). You will only be able to use 8-bit optimizers and quantization routines!')
 
     return f'{major}{minor}'
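For reference, the arithmetic in this hunk decodes the integer version that the CUDA runtime reports, assuming the usual major*1000 + minor*10 encoding:

    # CUDA 11.3 is reported as the integer 11030
    version = 11030
    major = version // 1000                  # 11
    minor = (version - major * 1000) // 10   # 3
    assert f'{major}{minor}' == '113'        # the string used in `make CUDA_VERSION=113`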
@@ -57,7 +57,7 @@ def get_cuda_lib_handle():
         cuda = ctypes.CDLL("libcuda.so")
     except OSError:
         # TODO: shouldn't we error or at least warn here?
-        print('ERROR: libcuda.so not found!')
+        raise Exception('CUDA SETUP: ERROR! libcuda.so not found! Do you have a CUDA driver installed? If you are on a cluster, make sure you are on a CUDA machine!')
         return None
     check_cuda_result(cuda, cuda.cuInit(0))
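A missing libcuda.so is now a hard failure instead of a printed message plus a None handle (note that the pre-existing `return None` below the new raise is now unreachable). A minimal sketch of the resulting behavior, with illustrative names:

    import ctypes

    def get_driver_handle():
        try:
            # libcuda.so is the CUDA driver API; it exists only on
            # machines with an NVIDIA driver installed.
            return ctypes.CDLL("libcuda.so")
        except OSError as e:
            raise Exception('CUDA SETUP: ERROR! libcuda.so not found! '
                            'Do you have a CUDA driver installed?') from e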
@@ -115,7 +115,8 @@ def get_compute_capability(cuda):
 def evaluate_cuda_setup():
     print('')
     print('='*35 + 'BUG REPORT' + '='*35)
-    print('Welcome to bitsandbytes. For bug reports, please use this form: https://docs.google.com/forms/d/e/1FAIpQLScPB8emS3Thkp66nvqwmjTEgxp8Y9ufuWTzFyr9kJ5AoI47dQ/viewform?usp=sf_link')
+    print('Welcome to bitsandbytes. For bug reports, please submit your error trace to: https://github.com/TimDettmers/bitsandbytes/issues')
+    print('For effortless bug reporting copy-paste your error into this form: https://docs.google.com/forms/d/e/1FAIpQLScPB8emS3Thkp66nvqwmjTEgxp8Y9ufuWTzFyr9kJ5AoI47dQ/viewform?usp=sf_link')
     print('='*80)
     binary_name = "libbitsandbytes_cpu.so"
     cudart_path = determine_cuda_runtime_lib_path()
@@ -125,7 +126,7 @@ def evaluate_cuda_setup():
         )
         return binary_name
 
-    print(f"CUDA SETUP: CUDA path found: {cudart_path}")
+    print(f"CUDA SETUP: CUDA runtime path found: {cudart_path}")
     cuda = get_cuda_lib_handle()
     cc = get_compute_capability(cuda)
     print(f"CUDA SETUP: Highest compute capability among GPUs detected: {cc}")
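get_compute_capability ultimately asks the driver for each device's capability. A hedged sketch of one way to do that through ctypes, via the (legacy but real) driver entry point cuDeviceComputeCapability; the library's own helper may use different calls and adds error checking:

    import ctypes

    def device_compute_capability(cuda, device_id: int = 0) -> str:
        # Query major/minor compute capability for one device; error
        # handling via check_cuda_result is omitted in this sketch.
        major, minor = ctypes.c_int(), ctypes.c_int()
        cuda.cuDeviceComputeCapability(ctypes.byref(major), ctypes.byref(minor), device_id)
        return f'{major.value}.{minor.value}'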
@@ -147,7 +148,7 @@ def evaluate_cuda_setup():
 
     # we use ls -l instead of nvcc to determine the cuda version
     # since most installations will have the libcudart.so installed, but not the compiler
-    print(f'CUDA_SETUP: Detected CUDA version {cuda_version_string}')
+    print(f'CUDA SETUP: Detected CUDA version {cuda_version_string}')
 
     def get_binary_name():
         "if not has_cublaslt (CC < 7.5), then we have to choose _nocublaslt.so"
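The docstring on get_binary_name states the selection rule; a minimal sketch of that rule, assuming a libbitsandbytes_cuda{version}[_nocublaslt].so naming scheme (an assumption here; the actual helper may differ):

    def pick_binary_name(cuda_version_string: str, cc: float) -> str:
        # GPUs below compute capability 7.5 lack cublasLt int8 support,
        # so they must use the _nocublaslt build (see the NO_CUBLASLT
        # guard in the CUDA hunk below).
        has_cublaslt = cc >= 7.5
        suffix = '' if has_cublaslt else '_nocublaslt'
        return f'libbitsandbytes_cuda{cuda_version_string}{suffix}.so'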
@@ -371,6 +371,9 @@ template void transform<int32_t, COL32, ROW, false, 32>(cublasLtHandle_t ltHandl
 template <int FORMATB, int DTYPE_OUT, int SCALE_ROWS> int igemmlt(cublasLtHandle_t ltHandle, int m, int n, int k, const int8_t *A, const int8_t *B, void *C, float *row_scale, int lda, int ldb, int ldc)
 {
 #ifdef NO_CUBLASLT
+    printf("ERROR: Your GPU does not support Int8 Matmul!");
+    assert(false);
+
     return 0;
 #else
     int has_error = 0;
 setup.py | 2 +-
@@ -18,7 +18,7 @@ def read(fname):
 
 setup(
     name=f"bitsandbytes",
-    version=f"0.31.8",
+    version=f"0.32.0",
     author="Tim Dettmers",
     author_email="dettmers@cs.washington.edu",
     description="8-bit optimizers and matrix multiplication routines.",