From 40361ecfbbe5c28b2c4e44f6542269f8c69cc6df Mon Sep 17 00:00:00 2001
From: arlo-phoenix
Date: Sat, 5 Aug 2023 02:12:48 +0200
Subject: [PATCH] Adapt python to work with HIP

---
 bitsandbytes/autograd/_functions.py | 2 +-
 bitsandbytes/cuda_setup/main.py     | 8 +++++++-
 2 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/bitsandbytes/autograd/_functions.py b/bitsandbytes/autograd/_functions.py
index f8403cf..2dac5d6 100644
--- a/bitsandbytes/autograd/_functions.py
+++ b/bitsandbytes/autograd/_functions.py
@@ -224,7 +224,7 @@ matmul_cublas = MatMul8bit.apply
 
 def supports_igemmlt(device: torch.device) -> bool:
     """check if this device supports the optimized int8 kernel"""
-    if torch.cuda.get_device_capability(device=device) < (7, 5):
+    if torch.cuda.get_device_capability(device=device) < (7, 5) or torch.version.hip:
         return False
     device_name = torch.cuda.get_device_name(device=device)
     nvidia16_models = ('GTX 1630', 'GTX 1650', 'GTX 1660') # https://en.wikipedia.org/wiki/GeForce_16_series
diff --git a/bitsandbytes/cuda_setup/main.py b/bitsandbytes/cuda_setup/main.py
index f3edf4c..ec99fb7 100644
--- a/bitsandbytes/cuda_setup/main.py
+++ b/bitsandbytes/cuda_setup/main.py
@@ -308,10 +308,14 @@ def determine_cuda_runtime_lib_path() -> Union[Path, None]:
 
 # https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART____VERSION.html#group__CUDART____VERSION
 def get_cuda_version():
-    major, minor = map(int, torch.version.cuda.split("."))
+    if torch.version.cuda:
+        major, minor = map(int, torch.version.cuda.split("."))
 
     if major < 11:
         CUDASetup.get_instance().add_log_entry('CUDA SETUP: CUDA version lower than 11 are currently not supported for LLM.int8(). You will be only to use 8-bit optimizers and quantization routines!!')
+    elif torch.version.hip:
+        major, minor = map(int, torch.version.hip.split("."))
+
 
     return f'{major}{minor}'
 
@@ -332,7 +336,9 @@ def evaluate_cuda_setup():
         cuda_setup.add_log_entry(('Welcome to bitsandbytes. For bug reports, please run\n\npython -m bitsandbytes\n\n'), ('and submit this information together with your error trace to: https://github.com/TimDettmers/bitsandbytes/issues'))
         cuda_setup.add_log_entry('='*80)
+    if not torch.cuda.is_available(): return 'libbitsandbytes_cpu.so', None, None, None
+    if torch.version.hip: return 'libbitsandbytes_hip_nohipblaslt.so', None, None, None
     cudart_path = determine_cuda_runtime_lib_path()
     ccs = get_compute_capabilities()
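
A minimal sketch of how the early returns added to evaluate_cuda_setup() select a binary, assuming a ROCm build of PyTorch where torch.version.hip is a version string (e.g. "5.6.x") and torch.version.cuda is None; the final branch name is a hypothetical stand-in for the CUDA path, not part of the patch:

    import torch

    # Mirror the branch order introduced by the patch:
    # CPU fallback first, then the HIP (ROCm, no hipBLASLt) binary.
    if not torch.cuda.is_available():
        binary_name = 'libbitsandbytes_cpu.so'               # no usable GPU at all
    elif torch.version.hip:                                   # set only on ROCm builds of PyTorch
        binary_name = 'libbitsandbytes_hip_nohipblaslt.so'
    else:
        binary_name = 'libbitsandbytes_cudaXXX.so'            # hypothetical stand-in for the CUDA library
    print(binary_name)

Note that on ROCm, torch.cuda.is_available() reports True, so the torch.version.hip check is what routes execution to the HIP library; supports_igemmlt() likewise returns False whenever torch.version.hip is set, disabling the int8 igemmlt path on HIP.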