Merge pull request #25 from TimDettmers/remove_unused_code

Remove unused code, switch to warnings
Author: Tim Dettmers
Date:   2022-09-05 16:29:36 -07:00 (committed by GitHub)
Commit: f0ae860c86
Signature: GPG Key ID 4AEE18F83AFDEB23 (no known key found for this signature in database)
7 changed files with 4 additions and 133 deletions

View File

@@ -3,8 +3,9 @@
 # cli()
 import os
 import sys
-import torch
+from warnings import warn

+import torch

 HEADER_WIDTH = 60
@@ -32,8 +33,6 @@ print()
 from . import COMPILED_WITH_CUDA, PACKAGE_GITHUB_URL
 from .cuda_setup.main import get_compute_capabilities, get_cuda_lib_handle
 from .cuda_setup.env_vars import to_be_ignored
-from .utils import print_stderr
-

 print_header("POTENTIALLY LIBRARY-PATH-LIKE ENV VARS")
 for k, v in os.environ.items():
@@ -84,7 +83,7 @@ try:
 except ImportError:
     print()
-    print_stderr(
+    warn(
         f"WARNING: {__package__} is currently running as CPU-only!\n"
         "Therefore, 8-bit optimizers and GPU quantization are unavailable.\n\n"
         f"If you think that this is so erroneously,\nplease report an issue!"

View File

@@ -1,6 +1,5 @@
 import operator
 import torch
-import bitsandbytes as bnb
 import bitsandbytes.functional as F

 from dataclasses import dataclass
@@ -378,9 +377,6 @@ class MatMul8bitLt(torch.autograd.Function):
         return grad_A, grad_B, None, grad_bias, None

-
-matmul = MatMul8bitLt.apply
-

 def matmul(
     A: tensor,
     B: tensor,
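
Note: the deleted `matmul = MatMul8bitLt.apply` alias was dead code, immediately shadowed by the `def matmul(...)` wrapper that follows it. Wrapping `Function.apply` in a plain function is the usual way to get defaults and keyword arguments, since `apply` accepts positionals only. A self-contained sketch of the pattern (toy `Square` class, not the real `MatMul8bitLt`):

import torch

class Square(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x, scale):
        ctx.save_for_backward(x)
        ctx.scale = scale
        return scale * x * x

    @staticmethod
    def backward(ctx, grad_out):
        (x,) = ctx.saved_tensors
        # one gradient per forward() input; None for the non-tensor arg
        return grad_out * 2 * ctx.scale * x, None

def square(x: torch.Tensor, scale: float = 1.0) -> torch.Tensor:
    # apply() takes positional arguments only; the wrapper adds the default
    return Square.apply(x, scale)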

View File

@@ -1,79 +0,0 @@
-import ctypes
-from dataclasses import dataclass, field
-
-
-@dataclass
-class CudaLibVals:
-    # code bits taken from
-    # https://gist.github.com/f0k/63a664160d016a491b2cbea15913d549
-
-    nGpus: ctypes.c_int = field(default=ctypes.c_int())
-    cc_major: ctypes.c_int = field(default=ctypes.c_int())
-    cc_minor: ctypes.c_int = field(default=ctypes.c_int())
-    device: ctypes.c_int = field(default=ctypes.c_int())
-    error_str: ctypes.c_char_p = field(default=ctypes.c_char_p())
-    cuda: ctypes.CDLL = field(init=False, repr=False)
-    ccs: List[str, ...] = field(init=False)
-
-    def _initialize_driver_API(self):
-        self.check_cuda_result(self.cuda.cuInit(0))
-
-    def _load_cuda_lib(self):
-        """
-        1. find libcuda.so library (GPU driver) (/usr/lib)
-           init_device -> init variables -> call function by reference
-        """
-        libnames = "libcuda.so"
-        for libname in libnames:
-            try:
-                self.cuda = ctypes.CDLL(libname)
-            except OSError:
-                continue
-            else:
-                break
-        else:
-            raise OSError("could not load any of: " + " ".join(libnames))
-
-    def call_cuda_func(self, function_obj, **kwargs):
-        CUDA_SUCCESS = 0  # constant taken from cuda.h
-        pass
-        # if (CUDA_SUCCESS := function_obj(
-
-    def _error_handle(cuda_lib_call_return_value):
-        """
-        2. call extern C function to determine CC
-        (see https://docs.nvidia.com/cuda/cuda-driver-api/group__CUDA__DEVICE__DEPRECATED.html)
-        """
-        CUDA_SUCCESS = 0  # constant taken from cuda.h
-
-        if cuda_lib_call_return_value != CUDA_SUCCESS:
-            self.cuda.cuGetErrorString(
-                cuda_lib_call_return_value,
-                ctypes.byref(self.error_str),
-            )
-            print("Count not initialize CUDA - failure!")
-            raise Exception("CUDA exception!")
-        return cuda_lib_call_return_value
-
-    def __post_init__(self):
-        self._load_cuda_lib()
-        self._initialize_driver_API()
-        self.check_cuda_result(
-            self.cuda, self.cuda.cuDeviceGetCount(ctypes.byref(self.nGpus))
-        )
-        tmp_ccs = []
-        for gpu_index in range(self.nGpus.value):
-            check_cuda_result(
-                self.cuda,
-                self.cuda.cuDeviceGet(ctypes.byref(self.device), gpu_index),
-            )
-            check_cuda_result(
-                self.cuda,
-                self.cuda.cuDeviceComputeCapability(
-                    ctypes.byref(self.cc_major),
-                    ctypes.byref(self.cc_minor),
-                    self.device,
-                ),
-            )
-            tmp_ccs.append(f"{self.cc_major.value}.{self.cc_minor.value}")
-        self.ccs = sorted(tmp_ccs, reverse=True)
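
Note: deleting this file loses nothing, because the class never worked as written: `List` is referenced without an import (and `List[str, ...]` is not a valid annotation anyway), `check_cuda_result` is called as a method but never defined, `_error_handle` is missing `self`, and `libnames = "libcuda.so"` makes the loop iterate over single characters, so `_load_cuda_lib` first attempts `ctypes.CDLL("l")`. The working equivalent survives as `get_compute_capabilities` in cuda_setup/main.py. For reference, a corrected, self-contained sketch of the same idea, assuming only the CUDA driver API calls used above:

import ctypes

def query_compute_capabilities() -> list:
    # Try each candidate driver library name in turn (a tuple, not a string).
    for libname in ("libcuda.so", "libcuda.so.1"):
        try:
            cuda = ctypes.CDLL(libname)
            break
        except OSError:
            continue
    else:
        raise OSError("could not load any of: libcuda.so libcuda.so.1")

    CUDA_SUCCESS = 0  # constant taken from cuda.h

    def check(result: int) -> None:
        if result != CUDA_SUCCESS:
            error_str = ctypes.c_char_p()
            cuda.cuGetErrorString(result, ctypes.byref(error_str))
            raise RuntimeError(f"CUDA error {result}: {error_str.value}")

    n_gpus, device = ctypes.c_int(), ctypes.c_int()
    cc_major, cc_minor = ctypes.c_int(), ctypes.c_int()
    check(cuda.cuInit(0))
    check(cuda.cuDeviceGetCount(ctypes.byref(n_gpus)))
    ccs = []
    for gpu_index in range(n_gpus.value):
        check(cuda.cuDeviceGet(ctypes.byref(device), gpu_index))
        check(cuda.cuDeviceComputeCapability(
            ctypes.byref(cc_major), ctypes.byref(cc_minor), device))
        ccs.append(f"{cc_major.value}.{cc_minor.value}")
    return sorted(ccs, reverse=True)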

View File

@@ -17,10 +17,7 @@ evaluation:
 """

 import ctypes
-import torch
-from pathlib import Path
-from ..utils import execute_and_return
 from .paths import determine_cuda_runtime_lib_path
@@ -81,7 +78,6 @@ def get_compute_capabilities(cuda):
     cc_major = ctypes.c_int()
     cc_minor = ctypes.c_int()
-    result = ctypes.c_int()
     device = ctypes.c_int()

     check_cuda_result(cuda, cuda.cuDeviceGetCount(ctypes.byref(nGpus)))
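
Note: `result` was assigned but never read, so removing it is behavior-preserving. For context, __main__.py imports `get_cuda_lib_handle` and `get_compute_capabilities` from this module and uses them together, roughly like this (a sketch inferred from the names in this diff; the exact return values are assumptions):

from bitsandbytes.cuda_setup.main import (
    get_compute_capabilities,
    get_cuda_lib_handle,
)

cuda = get_cuda_lib_handle()          # assumed: a ctypes handle to libcuda
ccs = get_compute_capabilities(cuda)  # assumed: e.g. ['8.0', '7.5'], one per GPU
print("compute capabilities:", ccs)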

View File

@@ -2,23 +2,11 @@ from pathlib import Path
 from typing import Set, Union
 from warnings import warn

-from ..utils import print_stderr
 from .env_vars import get_potentially_lib_path_containing_env_vars

 CUDA_RUNTIME_LIB: str = "libcudart.so"

-
-def purge_unwanted_semicolon(tentative_path: Path) -> Path:
-    """
-    Special function to handle the following exception:
-    __LMOD_REF_COUNT_PATH=/sw/cuda/11.6.2/bin:2;/mmfs1/home/dettmers/git/sched/bin:1;/mmfs1/home/dettmers/data/anaconda3/bin:1;/mmfs1/home/dettmers/data/anaconda3/condabin:1;/mmfs1/home/dettmers/.local/bin:1;/mmfs1/home/dettmers/bin:1;/usr/local/bin:1;/usr/bin:1;/usr/local/sbin:1;/usr/sbin:1;/mmfs1/home/dettmers/.fzf/bin:1;/mmfs1/home/dettmers/data/local/cuda-11.4/bin:1
-    """
-    # if ';' in str(tentative_path):
-    #     path_as_str, _ = str(tentative_path).split(';')
-    pass
-

 def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]:
     return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path}
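
Note: `extract_candidate_paths` splits a PATH-style string on ':' and drops empty entries; it does not handle the semicolon-suffixed LMOD format that the deleted `purge_unwanted_semicolon` stub (which only ever contained commented-out code and `pass`) was meant to address. Usage, with the function copied verbatim from the context above:

from pathlib import Path
from typing import Set

def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]:
    return {Path(ld_path) for ld_path in paths_list_candidate.split(":") if ld_path}

print(extract_candidate_paths("/usr/local/cuda/lib64::/opt/cuda/lib64"))
# -> {PosixPath('/usr/local/cuda/lib64'), PosixPath('/opt/cuda/lib64')}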
@@ -29,7 +17,7 @@ def remove_non_existent_dirs(candidate_paths: Set[Path]) -> Set[Path]:
     }

     if non_existent_directories:
-        print_stderr(
+        warn(
             "WARNING: The following directories listed in your path were found to "
             f"be non-existent: {non_existent_directories}"
         )
@@ -117,8 +105,6 @@ def determine_cuda_runtime_lib_path() -> Union[Path, None]:
         if env_var not in {"CONDA_PREFIX", "LD_LIBRARY_PATH"}
     }

     cuda_runtime_libs = set()
     for env_var, value in remaining_candidate_env_vars.items():
         cuda_runtime_libs.update(find_cuda_lib_in(value))
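
Note: this hunk shows the fallback scan in `determine_cuda_runtime_lib_path`: every remaining path-like env var (CONDA_PREFIX and LD_LIBRARY_PATH are filtered out above) is searched for libcudart.so. A condensed sketch of that loop; the body of `find_cuda_lib_in` is an assumption, only its name comes from this diff:

import os
from pathlib import Path

CUDA_RUNTIME_LIB = "libcudart.so"

def find_cuda_lib_in(paths_list_candidate: str) -> set:
    # assumed behavior: collect existing libcudart.so files in candidate dirs
    return {
        p / CUDA_RUNTIME_LIB
        for p in (Path(x) for x in paths_list_candidate.split(":") if x)
        if (p / CUDA_RUNTIME_LIB).is_file()
    }

cuda_runtime_libs = set()
for env_var, value in os.environ.items():
    if env_var in {"CONDA_PREFIX", "LD_LIBRARY_PATH"}:
        continue  # assumed: these are handled by higher-priority checks first
    cuda_runtime_libs.update(find_cuda_lib_in(value))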

View File

@@ -5,7 +5,6 @@
 import ctypes as ct
 import operator
 import random
-import math

 import torch
 from typing import Tuple
@@ -243,23 +242,6 @@ def get_transform_func(dtype, orderA, orderOut, transpose=False):
     return getattr(lib, name)

-
-class GlobalData(object):
-    _instance = None
-
-    def __init__(self):
-        raise RuntimeError("Call get_instance() instead")
-
-    def initialize(self):
-        self.data = {}
-
-    @classmethod
-    def get_instance(cls):
-        if cls._instance is None:
-            cls._instance = cls.__new__(cls)
-            cls._instance.initialize()
-        return cls._instance
-

 def get_transform_buffer(
     shape, dtype, device, to_order, from_order="row", transpose=False
 ):
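
Note: `GlobalData` was an unused singleton (no surviving call sites in this diff), so the deletion is safe. The pattern itself is the standard guarded-singleton idiom: `__init__` raises so direct construction is impossible, and `get_instance()` builds the single instance through `__new__`, bypassing the guard. Behavior of the removed class, as a runnable check:

class GlobalData:
    _instance = None

    def __init__(self):
        raise RuntimeError("Call get_instance() instead")  # blocks GlobalData()

    def initialize(self):
        self.data = {}

    @classmethod
    def get_instance(cls):
        if cls._instance is None:
            cls._instance = cls.__new__(cls)  # bypasses the guarded __init__
            cls._instance.initialize()
        return cls._instance

a, b = GlobalData.get_instance(), GlobalData.get_instance()
assert a is b                 # always the same shared object
a.data["key"] = 1
assert b.data == {"key": 1}   # state is shared through the single instance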

View File

@@ -1,6 +1,5 @@
 import shlex
 import subprocess
-import sys
 from typing import Tuple

@@ -22,11 +21,3 @@ def execute_and_return(command_string: str) -> Tuple[str, str]:
     std_out, std_err = execute_and_return_decoded_std_streams(command_string)
     return std_out, std_err

-
-def print_stderr(s: str) -> None:
-    print(s, file=sys.stderr)
-
-
-def warn_of_missing_prerequisite(s: str) -> None:
-    print_stderr("WARNING, missing pre-requisite: " + s)