import dataclasses

import torch
from torch import Tensor
import torch.nn as nn
from torch.nn import functional as F


@dataclasses.dataclass
class CompressionConfig:
    """Group-wise quantization."""

    num_bits: int
    group_size: int
    group_dim: int
    symmetric: bool
    enabled: bool = True


default_compression_config = CompressionConfig(
    num_bits=8, group_size=256, group_dim=1, symmetric=True, enabled=True
)
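# The defaults simulate symmetric int8 quantization over groups of 256 values
# along dim 1; for fp16 weights this roughly halves weight memory (int8 data
# plus one floating-point scale per 256-element group).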


class CLinear(nn.Module):
    """Compressed Linear Layer."""

    def __init__(self, weight, bias, device):
        super().__init__()
        self.weight = compress(weight.data.to(device), default_compression_config)
        self.bias = bias

    def forward(self, input: Tensor) -> Tensor:
        # Dequantize on the fly, then run a regular floating-point matmul.
        weight = decompress(self.weight, default_compression_config)
        return F.linear(input, weight, self.bias)


def compress_module(module, target_device):
    # Recursively replace every nn.Linear in the module tree with CLinear.
    for attr_str in dir(module):
        target_attr = getattr(module, attr_str)
        if type(target_attr) == torch.nn.Linear:
            setattr(
                module,
                attr_str,
                CLinear(target_attr.weight, target_attr.bias, target_device),
            )
    for name, child in module.named_children():
        compress_module(child, target_device)
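

# A minimal usage sketch (an assumption, not part of the original listing):
# `model` stands for any PyTorch module, e.g. a transformer loaded in fp16.
#
#     model = AutoModelForCausalLM.from_pretrained(...).half().eval()
#     compress_module(model, "cuda:0")  # swaps nn.Linear -> CLinear in place
#     output = model(inputs)            # each forward decompresses per layer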


def compress(tensor, config):
    """Simulate group-wise quantization."""
    if not config.enabled:
        return tensor

    group_size, num_bits, group_dim, symmetric = (
        config.group_size,
        config.num_bits,
        config.group_dim,
        config.symmetric,
    )
    assert num_bits <= 8

    original_shape = tensor.shape
    num_groups = (original_shape[group_dim] + group_size - 1) // group_size
    new_shape = (
        original_shape[:group_dim]
        + (num_groups, group_size)
        + original_shape[group_dim + 1 :]
    )

    # Pad the group dimension with zeros up to a multiple of group_size.
    pad_len = (group_size - original_shape[group_dim] % group_size) % group_size
    if pad_len != 0:
        pad_shape = (
            original_shape[:group_dim] + (pad_len,) + original_shape[group_dim + 1 :]
        )
        tensor = torch.cat(
            [tensor, torch.zeros(pad_shape, dtype=tensor.dtype, device=tensor.device)],
            dim=group_dim,
        )
    data = tensor.view(new_shape)

    # Quantize each group independently.
    if symmetric:
        B = 2 ** (num_bits - 1) - 1
        scale = B / torch.max(data.abs(), dim=group_dim + 1, keepdim=True)[0]
        data = data * scale
        data = data.clamp_(-B, B).round_().to(torch.int8)
        return data, scale, original_shape
    else:
        B = 2**num_bits - 1
        mn = torch.min(data, dim=group_dim + 1, keepdim=True)[0]
        mx = torch.max(data, dim=group_dim + 1, keepdim=True)[0]
        scale = B / (mx - mn)
        data = data - mn
        data.mul_(scale)
        data = data.clamp_(0, B).round_().to(torch.uint8)
        return data, mn, scale, original_shape


def decompress(packed_data, config):
    """Simulate group-wise dequantization."""
    if not config.enabled:
        return packed_data

    group_size, num_bits, group_dim, symmetric = (
        config.group_size,
        config.num_bits,
        config.group_dim,
        config.symmetric,
    )

    # Dequantize back to floating point.
    if symmetric:
        data, scale, original_shape = packed_data
        data = data / scale
    else:
        data, mn, scale, original_shape = packed_data
        data = data / scale
        data.add_(mn)

    # Unpad: drop the zero padding added during compression.
    pad_len = (group_size - original_shape[group_dim] % group_size) % group_size
    if pad_len:
        padded_original_shape = (
            original_shape[:group_dim]
            + (original_shape[group_dim] + pad_len,)
            + original_shape[group_dim + 1 :]
        )
        data = data.reshape(padded_original_shape)
        indices = tuple(slice(0, x) for x in original_shape)
        return data[indices].contiguous()
    else:
        return data.view(original_shape)
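

# A quick round-trip sanity check (a sketch, not part of the original listing):
if __name__ == "__main__":
    w = torch.randn(4, 1000)  # dim 1 is padded from 1000 to 4 groups of 256
    packed = compress(w, default_compression_config)
    w_hat = decompress(packed, default_compression_config)
    # Max error should be about half a quantization step per group.
    print((w - w_hat).abs().max())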