# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional, TypeVar, Union, overload

import torch
import torch.nn.functional as F
from torch import Tensor, device, dtype, nn

import bitsandbytes as bnb
from bitsandbytes.optim import GlobalOptimManager
from bitsandbytes.utils import OutlierTracer, find_outlier_dims

T = TypeVar("T", bound="torch.nn.Module")

class StableEmbedding(torch.nn.Embedding):
    def __init__(
        self,
        num_embeddings: int,
        embedding_dim: int,
        padding_idx: Optional[int] = None,
        max_norm: Optional[float] = None,
        norm_type: float = 2.0,
        scale_grad_by_freq: bool = False,
        sparse: bool = False,
        _weight: Optional[Tensor] = None,
        device=None,
        dtype=None,
    ) -> None:
        super().__init__(
            num_embeddings,
            embedding_dim,
            padding_idx,
            max_norm,
            norm_type,
            scale_grad_by_freq,
            sparse,
            _weight,
            device,
            dtype,
        )
        self.norm = torch.nn.LayerNorm(embedding_dim, device=device)
        GlobalOptimManager.get_instance().register_module_override(
            self, "weight", {"optim_bits": 32}
        )

    def reset_parameters(self) -> None:
        torch.nn.init.xavier_uniform_(self.weight)
        self._fill_padding_idx_with_zero()

    def _fill_padding_idx_with_zero(self) -> None:
        """Re-implementation of ``_fill_padding_idx_with_zero`` from ``torch.nn.Embedding``
        to keep the layer compatible with PyTorch < 1.9. If the upstream implementation
        changes in a future PyTorch release, this needs to change as well, which is
        cumbersome, but it ensures compatibility with older PyTorch releases.
        """
        if self.padding_idx is not None:
            with torch.no_grad():
                self.weight[self.padding_idx].fill_(0)

    def forward(self, input: Tensor) -> Tensor:
        emb = F.embedding(
            input,
            self.weight,
            self.padding_idx,
            self.max_norm,
            self.norm_type,
            self.scale_grad_by_freq,
            self.sparse,
        )

        # always apply layer norm in full precision
        emb = emb.to(torch.get_default_dtype())

        return self.norm(emb).to(self.weight.dtype)
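
# Usage sketch (illustrative only; the sizes below are made up for the example):
# StableEmbedding pairs a Xavier-initialized embedding with a LayerNorm applied in full
# precision and forces 32-bit optimizer state for the embedding weights via
# GlobalOptimManager, which stabilizes training when an 8-bit optimizer is used elsewhere.
#
#   emb = StableEmbedding(num_embeddings=1024, embedding_dim=64)
#   tokens = torch.randint(0, 1024, (8, 16))
#   out = emb(tokens)  # LayerNorm runs in torch.get_default_dtype(); output matches weight dtype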


class Embedding(torch.nn.Embedding):
    def __init__(
        self,
        num_embeddings: int,
        embedding_dim: int,
        padding_idx: Optional[int] = None,
        max_norm: Optional[float] = None,
        norm_type: float = 2.0,
        scale_grad_by_freq: bool = False,
        sparse: bool = False,
        _weight: Optional[Tensor] = None,
    ) -> None:
        super().__init__(
            num_embeddings,
            embedding_dim,
            padding_idx,
            max_norm,
            norm_type,
            scale_grad_by_freq,
            sparse,
            _weight,
        )
        GlobalOptimManager.get_instance().register_module_override(
            self, "weight", {"optim_bits": 32}
        )

    def reset_parameters(self) -> None:
        torch.nn.init.xavier_uniform_(self.weight)
        self._fill_padding_idx_with_zero()

    def _fill_padding_idx_with_zero(self) -> None:
        """Re-implementation of ``_fill_padding_idx_with_zero`` from ``torch.nn.Embedding``
        to keep the layer compatible with PyTorch < 1.9. If the upstream implementation
        changes in a future PyTorch release, this needs to change as well, which is
        cumbersome, but it ensures compatibility with older PyTorch releases.
        """
        if self.padding_idx is not None:
            with torch.no_grad():
                self.weight[self.padding_idx].fill_(0)

    def forward(self, input: Tensor) -> Tensor:
        emb = F.embedding(
            input,
            self.weight,
            self.padding_idx,
            self.max_norm,
            self.norm_type,
            self.scale_grad_by_freq,
            self.sparse,
        )

        return emb


class OutlierAwareLinear(nn.Linear):
    def __init__(self, input_features, output_features, bias=True):
        super().__init__(input_features, output_features, bias)
        self.outlier_dim = None
        self.is_quantized = False

    def forward_with_outliers(self, x, outlier_idx):
        raise NotImplementedError('Please override the `forward_with_outliers(self, x, outlier_idx)` function')

    def quantize_weight(self, w, outlier_idx):
        raise NotImplementedError('Please override the `quantize_weight(self, w, outlier_idx)` function')

    def forward(self, x):
        # determine the outlier feature dimensions once, on the first forward pass
        if self.outlier_dim is None:
            tracer = OutlierTracer.get_instance()
            if not tracer.is_initialized():
                print('Please use OutlierTracer.initialize(model) before using the OutlierAwareLinear layer')
            outlier_idx = tracer.get_outliers(self.weight)
            #print(outlier_idx, tracer.get_hvalue(self.weight))
            self.outlier_dim = outlier_idx

        # quantize the weight in place on the first call
        if not self.is_quantized:
            w = self.quantize_weight(self.weight, self.outlier_dim)
            self.weight.data.copy_(w)
            self.is_quantized = True

        return self.forward_with_outliers(x, self.outlier_dim)
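
# Usage sketch (illustrative; assumes a concrete subclass such as Fake4bitLinear below,
# and that the tracer is initialized through OutlierTracer.get_instance().initialize(),
# following the message printed above): OutlierAwareLinear is an abstract base, so
# subclasses supply quantize_weight() and forward_with_outliers(), while the outlier
# dimensions come from the tracer initialized on the full model.
#
#   model = nn.Sequential(Fake4bitLinear(64, 64), nn.ReLU(), Fake4bitLinear(64, 8))
#   OutlierTracer.get_instance().initialize(model)
#   out = model(torch.randn(4, 64))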


class Fake4bitLinear(OutlierAwareLinear):
    def __init__(
        self,
        input_features,
        output_features,
        bias=True,
        codebook=bnb.functional.create_fp8_map(True, 3, 0, total_bits=4),
    ):
        super().__init__(input_features, output_features, bias)
        self.codebook = codebook

    def quantize_weight(self, w, outlier_idx):
        # keep outlier columns in their original precision; fake-quantize the rest blockwise
        if outlier_idx.numel() > 0:
            subw = w[:, outlier_idx].clone()
            w[:, outlier_idx] = 0
        wdtype = w.dtype
        code = self.codebook.to(w.device)
        cw, state = bnb.functional.quantize_blockwise(w, code=code, blocksize=64)
        w = bnb.functional.dequantize_blockwise(cw, state, blocksize=64)
        w = w.to(wdtype)
        if outlier_idx.numel() > 0:
            w[:, outlier_idx] = subw
        self.is_quantized = True
        return w

    def forward_with_outliers(self, x, outlier_idx):
        # treat input feature dimensions with any magnitude > 4 as additional outliers
        dims = (torch.abs(x) > 4).sum(dim=list(range(len(x.shape) - 1)))
        outlier_idx2 = torch.where(dims > 0)[0]
        outlier_idx = torch.cat([outlier_idx, outlier_idx2]).unique()
        n = x.shape[-1]
        idx = torch.arange(n, device=x.device)
        idx[outlier_idx] = -1
        inverse_idx = torch.where(idx >= 0)[0]
        if outlier_idx.numel() > 0:
            subx = x[..., outlier_idx].clone()
            #print(1, subx, 1)
            #x[..., outlier_idx] = 0
        inverse_x = x[..., inverse_idx]
        xdtype = x.dtype
        #code = bnb.functional.create_fp8_map(True, 4-3, 2, 4).to(x.device)
        #code = bnb.functional.create_quantile_map(x, 4).to(x.device)
        code = bnb.functional.create_dynamic_map(True, total_bits=4.0).to(x.device)
        c, state = bnb.functional.quantize_blockwise(inverse_x, code=code, blocksize=64)
        inverse_x = bnb.functional.dequantize_blockwise(c, state, blocksize=64)
        #c, state = bnb.functional.quantize_blockwise(x, code=code, blocksize=64)
        #x = bnb.functional.dequantize_blockwise(c, state, blocksize=64)
        x = x.to(xdtype)
        x[..., inverse_idx] = inverse_x.to(x.dtype)
        #if outlier_idx.numel() > 0:
        #    x[..., outlier_idx] = subx

        return torch.nn.functional.linear(x, self.weight, self.bias)


class Int8Params(torch.nn.Parameter):
    def __new__(
        cls,
        data=None,
        requires_grad=True,
        has_fp16_weights=False,
        CB=None,
        SCB=None,
    ):
        cls.has_fp16_weights = has_fp16_weights
        cls.CB = None
        cls.SCB = None
        if data is None:
            data = torch.empty(0)
        return torch.Tensor._make_subclass(cls, data, requires_grad)

    def cuda(self, device):
        if self.has_fp16_weights:
            return super().cuda(device)
        else:
            # we store the 8-bit row-major weight
            # we convert this weight to the turing/ampere weight during the first inference pass
            B = self.data.contiguous().half().cuda(device)
            CB, CBt, SCB, SCBt, coo_tensorB = bnb.functional.double_quant(B)
            del CBt
            del SCBt
            self.data = CB
            setattr(self, "CB", CB)
            setattr(self, "SCB", SCB)

        return self

    @overload
    def to(
        self: T,
        device: Optional[Union[int, device]] = ...,
        dtype: Optional[Union[dtype, str]] = ...,
        non_blocking: bool = ...,
    ) -> T:
        ...

    @overload
    def to(self: T, dtype: Union[dtype, str], non_blocking: bool = ...) -> T:
        ...

    @overload
    def to(self: T, tensor: Tensor, non_blocking: bool = ...) -> T:
        ...

    def to(self, *args, **kwargs):
        device, dtype, non_blocking, convert_to_format = torch._C._nn._parse_to(
            *args, **kwargs
        )

        if (
            device is not None
            and device.type == "cuda"
            and self.data.device.type == "cpu"
        ):
            return self.cuda(device)
        else:
            new_param = Int8Params(
                super().to(
                    device=device, dtype=dtype, non_blocking=non_blocking
                ),
                requires_grad=self.requires_grad,
                has_fp16_weights=self.has_fp16_weights,
            )
            new_param.CB = self.CB
            new_param.SCB = self.SCB

            return new_param
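
# Lifecycle sketch (illustrative; requires a CUDA device): with has_fp16_weights=False,
# moving an Int8Params to the GPU replaces its data with the row-major int8 quantized
# weight CB and stores the corresponding row-wise scaling constants in SCB, both produced
# by double_quant().
#
#   p = Int8Params(torch.randn(128, 64), requires_grad=False, has_fp16_weights=False)
#   p = p.to("cuda")
#   p.dtype  # torch.int8 (the quantized weight CB)
#   p.SCB    # row-wise scaling constants used for dequantization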


class Linear8bitLt(nn.Linear):
    def __init__(
        self,
        input_features,
        output_features,
        bias=True,
        has_fp16_weights=True,
        memory_efficient_backward=False,
        threshold=0.0,
        index=None,
    ):
        super().__init__(
            input_features, output_features, bias
        )
        self.state = bnb.MatmulLtState()
        self.index = index

        self.state.threshold = threshold
        self.state.has_fp16_weights = has_fp16_weights
        self.state.memory_efficient_backward = memory_efficient_backward
        if threshold > 0.0 and not has_fp16_weights:
            self.state.use_pool = True

        self.weight = Int8Params(
            self.weight.data, has_fp16_weights=has_fp16_weights, requires_grad=has_fp16_weights
        )

    def init_8bit_state(self):
        self.state.CB = self.weight.CB
        self.state.SCB = self.weight.SCB
        self.weight.CB = None
        self.weight.SCB = None

    def forward(self, x):
        self.state.is_training = self.training

        if self.weight.CB is not None:
            self.init_8bit_state()

        # weights are cast automatically as Int8Params, but the bias has to be cast manually
        if self.bias is not None and self.bias.dtype != torch.float16:
            self.bias.data = self.bias.data.half()

        out = bnb.matmul(x, self.weight, bias=self.bias, state=self.state)

        if not self.state.has_fp16_weights:
            if not self.state.memory_efficient_backward and self.state.CB is not None:
                # we converted 8-bit row major to turing/ampere format in the first inference pass
                # we no longer need the row-major weight
                del self.state.CB
                self.weight.data = self.state.CxB
            elif self.state.memory_efficient_backward and self.state.CxB is not None:
                # For memory efficient backward, we convert 8-bit row major to turing/ampere format at each inference pass.
                # Thus, we delete CxB from the state.
                del self.state.CxB

        return out
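
# Usage sketch (illustrative; requires a CUDA device, and real deployments would load
# pretrained weights before moving the module to the GPU): with has_fp16_weights=False
# the weight is quantized to int8 on .cuda()/.to("cuda"), and threshold > 0 enables the
# mixed-precision decomposition in which outlier activation columns are handled in fp16.
#
#   linear = Linear8bitLt(512, 256, has_fp16_weights=False, threshold=6.0)
#   linear = linear.cuda()  # Int8Params.cuda() quantizes the weight into CB/SCB
#   x = torch.randn(4, 512, device="cuda", dtype=torch.float16)
#   y = linear(x)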


class LinearFP8(nn.Linear):
    def __init__(self, input_features, output_features, bias=True):
        super().__init__(input_features, output_features, bias)
        self.bw_code = None
        self.fw_code = None

    def forward(self, x: torch.Tensor):
        if self.fw_code is None:
            # lazily build the FP8 codebooks on the input device:
            # E5M2 for the backward pass, E4M3 for the forward pass
            self.bw_code = bnb.functional.create_fp8_map(True, 5, 2, 8).to(x.device)
            self.fw_code = bnb.functional.create_fp8_map(True, 4, 3, 8).to(x.device)

        out = bnb.matmul_fp8(x, self.weight.t(), fw_code=self.fw_code, code=self.bw_code)
        if self.bias is not None:
            out += self.bias

        return out
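
# Usage sketch (illustrative; assumes bnb.matmul_fp8 is available in this build and a
# CUDA device is present): the codebooks are created lazily on the first forward pass.
#
#   fp8_linear = LinearFP8(64, 32).cuda().half()
#   y = fp8_linear(torch.randn(4, 64, device="cuda", dtype=torch.float16))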