forked from mrq/bitsandbytes-rocm
addmm_
parent f6670329fb
commit 18f142e268
@@ -318,7 +318,7 @@ class MatMul8bitLt(torch.autograd.Function):
 
         # 4. Mixed-precision decomposition matmul
         if coo_tensorA is not None and subA is not None:
-            output += torch.matmul(subA, state.subB)
+            output.addmm_(output, subA, state.subB)
 
         # 5. Save state
         ctx.state = state
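
For context, a minimal sketch (not part of the commit) of the accumulation this change targets, assuming output, subA, and state.subB are ordinary 2-D float tensors: the in-place Tensor.addmm_ call adds the matrix product directly into output, avoiding the temporary tensor that output += torch.matmul(...) materializes.

    import torch

    # Hypothetical shapes standing in for the real activations/weights.
    output = torch.randn(4, 8)
    subA = torch.randn(4, 3)
    subB = torch.randn(3, 8)

    # Original pattern: materializes subA @ subB, then adds it into output.
    expected = output + torch.matmul(subA, subB)

    # In-place pattern: output = 1*output + 1*(subA @ subB), no intermediate result tensor.
    # (torch.addmm(output, subA, subB) is the out-of-place functional form that takes
    # the accumulator as an explicit first argument.)
    output.addmm_(subA, subB)

    assert torch.allclose(output, expected)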