diff --git a/bitsandbytes/__init__.py b/bitsandbytes/__init__.py
index 6d1177f..041df4b 100644
--- a/bitsandbytes/__init__.py
+++ b/bitsandbytes/__init__.py
@@ -3,6 +3,7 @@
 # This source code is licensed under the MIT license found in the
 # LICENSE file in the root directory of this source tree.
 
+from . import cuda_setup, utils
 from .autograd._functions import (
     MatmulLtState,
     bmm_cublas,
@@ -12,7 +13,6 @@ from .autograd._functions import (
 )
 from .cextension import COMPILED_WITH_CUDA
 from .nn import modules
-from . import cuda_setup, utils
 
 if COMPILED_WITH_CUDA:
     from .optim import adam
diff --git a/bitsandbytes/__main__.py b/bitsandbytes/__main__.py
index f1b7670..ac7948b 100644
--- a/bitsandbytes/__main__.py
+++ b/bitsandbytes/__main__.py
@@ -28,8 +28,8 @@ print()
 
 
 from . import COMPILED_WITH_CUDA, PACKAGE_GITHUB_URL
-from .cuda_setup.main import get_compute_capabilities, get_cuda_lib_handle
 from .cuda_setup.env_vars import to_be_ignored
+from .cuda_setup.main import get_compute_capabilities, get_cuda_lib_handle
 
 print_header("POTENTIALLY LIBRARY-PATH-LIKE ENV VARS")
 for k, v in os.environ.items():
diff --git a/bitsandbytes/autograd/_functions.py b/bitsandbytes/autograd/_functions.py
index cc57061..c3b0ac6 100644
--- a/bitsandbytes/autograd/_functions.py
+++ b/bitsandbytes/autograd/_functions.py
@@ -1,12 +1,13 @@
 import operator
 import warnings
-
-import torch
-import bitsandbytes.functional as F
-
 from dataclasses import dataclass
 from functools import reduce  # Required in Python 3
 
+import torch
+
+import bitsandbytes.functional as F
+
+
 # math.prod not compatible with python < 3.8
 def prod(iterable):
     return reduce(operator.mul, iterable, 1)
diff --git a/bitsandbytes/cextension.py b/bitsandbytes/cextension.py
index 7b10aaa..9066f91 100644
--- a/bitsandbytes/cextension.py
+++ b/bitsandbytes/cextension.py
@@ -1,9 +1,9 @@
 import ctypes as ct
-import torch
-
 from pathlib import Path
 from warnings import warn
 
+import torch
+
 
 class CUDASetup:
     _instance = None
diff --git a/bitsandbytes/cuda_setup/__init__.py b/bitsandbytes/cuda_setup/__init__.py
index d8ebba8..e781b9d 100644
--- a/bitsandbytes/cuda_setup/__init__.py
+++ b/bitsandbytes/cuda_setup/__init__.py
@@ -1,2 +1,6 @@
-from .paths import CUDA_RUNTIME_LIB, extract_candidate_paths, determine_cuda_runtime_lib_path
 from .main import evaluate_cuda_setup
+from .paths import (
+    CUDA_RUNTIME_LIB,
+    determine_cuda_runtime_lib_path,
+    extract_candidate_paths,
+)
diff --git a/bitsandbytes/cuda_setup/main.py b/bitsandbytes/cuda_setup/main.py
index 0bdce1a..8ea7d28 100644
--- a/bitsandbytes/cuda_setup/main.py
+++ b/bitsandbytes/cuda_setup/main.py
@@ -18,9 +18,10 @@ evaluation:
 
 import ctypes
 
-from .paths import determine_cuda_runtime_lib_path
 from bitsandbytes.cextension import CUDASetup
 
+from .paths import determine_cuda_runtime_lib_path
+
 
 def check_cuda_result(cuda, result_val):
     # 3. Check for CUDA errors
diff --git a/bitsandbytes/cuda_setup/paths.py b/bitsandbytes/cuda_setup/paths.py
index 3223359..f1adf4c 100644
--- a/bitsandbytes/cuda_setup/paths.py
+++ b/bitsandbytes/cuda_setup/paths.py
@@ -1,6 +1,7 @@
 import errno
 from pathlib import Path
 from typing import Set, Union
+
 from bitsandbytes.cextension import CUDASetup
 
 from .env_vars import get_potentially_lib_path_containing_env_vars
diff --git a/bitsandbytes/functional.py b/bitsandbytes/functional.py
index b3ca007..f6bd2be 100644
--- a/bitsandbytes/functional.py
+++ b/bitsandbytes/functional.py
@@ -5,13 +5,14 @@
 import ctypes as ct
 import operator
 import random
-import torch
-
+from functools import reduce  # Required in Python 3
 from typing import Tuple
+
+import torch
 from torch import Tensor
 
 from .cextension import COMPILED_WITH_CUDA, lib
-from functools import reduce  # Required in Python 3
+
 
 # math.prod not compatible with python < 3.8
 def prod(iterable):
diff --git a/bitsandbytes/optim/__init__.py b/bitsandbytes/optim/__init__.py
index d18f1d1..8c8a8f4 100644
--- a/bitsandbytes/optim/__init__.py
+++ b/bitsandbytes/optim/__init__.py
@@ -5,12 +5,11 @@
 
 from bitsandbytes.cextension import COMPILED_WITH_CUDA
 
+from .adagrad import Adagrad, Adagrad8bit, Adagrad32bit
 from .adam import Adam, Adam8bit, Adam32bit
 from .adamw import AdamW, AdamW8bit, AdamW32bit
-from .sgd import SGD, SGD8bit, SGD32bit
-from .lars import LARS, LARS8bit, LARS32bit, PytorchLARS
 from .lamb import LAMB, LAMB8bit, LAMB32bit
-from .rmsprop import RMSprop, RMSprop8bit, RMSprop32bit
-from .adagrad import Adagrad, Adagrad8bit, Adagrad32bit
-
+from .lars import LARS, LARS8bit, LARS32bit, PytorchLARS
 from .optimizer import GlobalOptimManager
+from .rmsprop import RMSprop, RMSprop8bit, RMSprop32bit
+from .sgd import SGD, SGD8bit, SGD32bit
diff --git a/tests/test_autograd.py b/tests/test_autograd.py
index f1cfa8d..c67126d 100644
--- a/tests/test_autograd.py
+++ b/tests/test_autograd.py
@@ -1,4 +1,4 @@
-from itertools import product, permutations
+from itertools import permutations, product
 
 import pytest
 import torch
diff --git a/tests/test_cuda_setup_evaluator.py b/tests/test_cuda_setup_evaluator.py
index bf9a003..7edc01f 100644
--- a/tests/test_cuda_setup_evaluator.py
+++ b/tests/test_cuda_setup_evaluator.py
@@ -1,13 +1,13 @@
 import os
-import pytest
-import bitsandbytes as bnb
-
 from typing import List, NamedTuple
 
+import pytest
+
+import bitsandbytes as bnb
 from bitsandbytes.cuda_setup import (
     CUDA_RUNTIME_LIB,
-    evaluate_cuda_setup,
     determine_cuda_runtime_lib_path,
+    evaluate_cuda_setup,
     extract_candidate_paths,
 )
 
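
Note: the prod helper that this diff keeps next to the imports in both
bitsandbytes/autograd/_functions.py and bitsandbytes/functional.py is a
compatibility shim, as its "# math.prod not compatible with python < 3.8"
comment says: math.prod only exists from Python 3.8 onward, so the modules
fall back to functools.reduce. A minimal, self-contained sketch of that
fallback pattern follows; the final check is an illustrative usage example,
not code from this diff, and assumes Python >= 3.8 so math.prod is available
for comparison.

    import math
    import operator
    from functools import reduce  # Required in Python 3


    # math.prod not compatible with python < 3.8
    def prod(iterable):
        # Multiply all elements together; 1 is the identity for an empty iterable,
        # matching math.prod's default start value.
        return reduce(operator.mul, iterable, 1)


    # Illustrative check against the stdlib version (Python >= 3.8 only):
    assert prod([2, 3, 4]) == math.prod([2, 3, 4]) == 24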