From aa49b0a6cd121ec539eee905349cb59d05557287 Mon Sep 17 00:00:00 2001 From: 0cc4m Date: Thu, 16 Feb 2023 22:18:52 +0100 Subject: [PATCH] Also disable igemmlt for AMD GPUs --- bitsandbytes/autograd/_functions.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/bitsandbytes/autograd/_functions.py b/bitsandbytes/autograd/_functions.py index 376fb8a..f2c62da 100644 --- a/bitsandbytes/autograd/_functions.py +++ b/bitsandbytes/autograd/_functions.py @@ -270,7 +270,8 @@ class MatMul8bitLt(torch.autograd.Function): @staticmethod def forward(ctx, A, B, out=None, bias=None, state=MatmulLtState): - using_igemmlt = torch.cuda.get_device_capability(device=A.device) >= (7, 5) and not state.force_no_igemmlt + # Also disable igemmlt for AMD; query the name of A's device, not the current default device + using_igemmlt = "AMD" not in torch.cuda.get_device_name(A.device) and torch.cuda.get_device_capability(device=A.device) >= (7, 5) and not state.force_no_igemmlt # default of pytorch behavior if inputs are empty ctx.is_empty = False if prod(A.shape) == 0: