Applied formatter changes
Ran the ruff formatter on the project (the `check_dirs` directories listed in the Makefile) to fix formatting issues.
ishandeva authored and dacorvo committed Oct 8, 2024
1 parent 83226a0 commit b0cce24
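The commit message points at the Makefile's `check_dirs` variable. As a rough sketch of the rule being invoked (the target name, directory list, and flags below are assumptions for illustration, not the repository's actual Makefile):

    # Hypothetical Makefile excerpt: `check_dirs` collects the folders
    # that ruff formats and lint-fixes in one pass.
    check_dirs := examples optimum test

    style:
    	ruff format $(check_dirs)
    	ruff check --fix $(check_dirs)

Running `make style` (or calling `ruff format` on the same folders directly) produces exactly the kind of mechanical diffs listed below.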
Showing 21 changed files with 5 additions and 27 deletions.
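Most of the hunks below apply one of two ruff-format rules: removing the blank line that sat directly under a `class` or `def` statement, and collapsing a subscript that had been split across several lines. A condensed before/after sketch of both patterns (the `ExampleBefore`/`ExampleAfter` names and values are illustrative, not taken from this diff):

    # Before formatting: a blank line directly under the class statement,
    # and a dictionary subscript spread over several lines.
    class ExampleBefore:

        qtype = "int8"
        atol = {
            "int8": 1e-2,
        }[
            qtype
        ]


    # After formatting: ruff drops the blank line and joins the subscript.
    class ExampleAfter:
        qtype = "int8"
        atol = {
            "int8": 1e-2,
        }[qtype]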
4 changes: 3 additions & 1 deletion examples/nlp/text-generation/quantize_causal_lm_model.py
@@ -120,7 +120,9 @@ def main():
     torch_dtype = (
         torch.float16
         if args.load_dtype == "float16"
-        else torch.bfloat16 if args.load_dtype == "bfloat16" else torch.float32
+        else torch.bfloat16
+        if args.load_dtype == "bfloat16"
+        else torch.float32
     )
     model = AutoModelForCausalLM.from_pretrained(args.model, torch_dtype=torch_dtype, low_cpu_mem_usage=True).to(
         device
1 change: 0 additions & 1 deletion examples/vision/object-detection/quantize_owl_model.py
@@ -12,7 +12,6 @@


 def detect(model, processor, image, texts):
-
     inputs = processor(text=texts, images=image, return_tensors="pt").to(model.device)

     # forward pass
1 change: 0 additions & 1 deletion optimum/quanto/library/extensions/extension.py
@@ -11,7 +11,6 @@


 class Extension(object):
-
     def __init__(
         self,
         name: str,
2 changes: 0 additions & 2 deletions optimum/quanto/models/diffusers_models.py
@@ -42,7 +42,6 @@


 class QuantizedDiffusersModel(ModelHubMixin):
-
     BASE_NAME = "quanto"
     base_class = None

@@ -188,5 +187,4 @@ def _save_pretrained(self, save_directory: Path) -> None:


 class QuantizedPixArtTransformer2DModel(QuantizedDiffusersModel):
-
     base_class = PixArtTransformer2DModel
2 changes: 0 additions & 2 deletions optimum/quanto/models/transformers_models.py
@@ -36,7 +36,6 @@


 class QuantizedTransformersModel(ModelHubMixin):
-
     BASE_NAME = "quanto"
     auto_class = None

@@ -178,5 +177,4 @@ def _save_pretrained(self, save_directory: Path) -> None:


 class QuantizedModelForCausalLM(QuantizedTransformersModel):
-
     auto_class = AutoModelForCausalLM
1 change: 0 additions & 1 deletion optimum/quanto/subpackage/commands/base.py
@@ -23,7 +23,6 @@

 @optimum_cli_subcommand()
 class QuantoCommand(BaseOptimumCLICommand):
-
     COMMAND = CommandInfo(name="quanto", help="Hugging Face models quantization tools")
     SUBCOMMANDS = (
         CommandInfo(
1 change: 0 additions & 1 deletion optimum/quanto/tensor/activations/qbytes.py
@@ -26,7 +26,6 @@


 class ActivationQBytesQuantizer(Function):
-
     @staticmethod
     def forward(ctx, base: torch.Tensor, qtype: qtype, scale: torch.Tensor) -> torch.Tensor:
         if qtype.bits != 8:
1 change: 0 additions & 1 deletion optimum/quanto/tensor/optimizers/absmax_optimizer.py
@@ -24,7 +24,6 @@


 class AbsmaxOptimizer(SymmetricOptimizer):
-
     def optimize(
         self, base: torch.Tensor, qtype: qtype, axis: Optional[int] = None
     ) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
1 change: 0 additions & 1 deletion optimum/quanto/tensor/optimizers/affine_optimizer.py
@@ -25,7 +25,6 @@


 class AffineOptimizer(Optimizer):
-
     def __call__(
         self,
         base: torch.Tensor,
1 change: 0 additions & 1 deletion optimum/quanto/tensor/optimizers/max_optimizer.py
@@ -24,7 +24,6 @@


 class MaxOptimizer(AffineOptimizer):
-
     def optimize(
         self, base: torch.Tensor, qtype: qtype, axis: int
     ) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
1 change: 0 additions & 1 deletion optimum/quanto/tensor/optimizers/optimizer.py
@@ -22,7 +22,6 @@


 class Optimizer(ABC):
-
     def __call__(
         self, base: torch.Tensor, bits: int, axis: int, group_size: Optional[int] = None
     ) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
1 change: 0 additions & 1 deletion optimum/quanto/tensor/optimizers/symmetric_optimizer.py
@@ -24,7 +24,6 @@


 class SymmetricOptimizer(Optimizer):
-
     def __call__(self, base: torch.Tensor, qtype: qtype, axis: Optional[int] = None) -> torch.Tensor:
         if axis not in [None, 0, -1]:
             raise ValueError("axis parameter must be None, 0 (first axis) or -1 (last axis)")
1 change: 0 additions & 1 deletion optimum/quanto/tensor/qbits.py
@@ -54,7 +54,6 @@ def backward(ctx, gO):


 class QBitsTensor(QTensor):
-
     def __init__(self, qtype, axis, group_size, size, stride, data, scale, shift, requires_grad=False):
         super().__init__(qtype, axis)
         self._data = data
1 change: 0 additions & 1 deletion optimum/quanto/tensor/qbytes.py
@@ -37,7 +37,6 @@ def backward(ctx, gO):


 class QBytesTensor(QTensor):
-
     def __init__(self, qtype, axis, size, stride, data, scale, requires_grad=False):
         super().__init__(qtype, axis)
         self._data = data
1 change: 0 additions & 1 deletion optimum/quanto/tensor/qtensor.py
@@ -30,7 +30,6 @@ def qfallback(callable, *args, **kwargs):


 class QTensor(torch.Tensor):
-
     def __init__(self, qtype, axis):
         self._qtype = qtype
         self._axis = axis
1 change: 0 additions & 1 deletion optimum/quanto/tensor/weights/marlin/fp8/qbits.py
@@ -61,7 +61,6 @@ def __new__(cls, qtype, axis, size, stride, data, scale, requires_grad=False):
         )

     def __init__(self, qtype, axis, size, stride, data, scale, requires_grad=False):
-
         assert axis == 0
         assert data.ndim == 2

1 change: 0 additions & 1 deletion optimum/quanto/tensor/weights/qbits.py
@@ -31,7 +31,6 @@


 class WeightsQBitsQuantizer(Function):
-
     @staticmethod
     def forward(
         ctx,
1 change: 0 additions & 1 deletion optimum/quanto/tensor/weights/qbytes.py
@@ -28,7 +28,6 @@


 class WeightQBytesQuantizer(Function):
-
     @staticmethod
     def forward(
         ctx, base: torch.Tensor, qtype: qtype, axis: int, scale: torch.Tensor, activation_qtype: qtype, optimized: bool
4 changes: 1 addition & 3 deletions test/library/test_quantize.py
@@ -96,9 +96,7 @@ def test_affine_quantize(input_shape, dtype, qtype, axis, group_size, shift_mode
             "zeropoint": 6e-2,
             "float": 5e-2,
         },
-    }[
-        qtype
-    ][shift_mode]
+    }[qtype][shift_mode]
     if group_size is not None:
         qa = ungroup(qa, axis=axis, orig_shape=a.shape)
     assert_similar(a, qa, atol=atol)
1 change: 0 additions & 1 deletion test/quantize/test_quantize_patterns.py
@@ -36,7 +36,6 @@ def forward(self, inputs):


 class ClassificationModel(torch.nn.Module):
-
     def __init__(self, input_size, output_size, hidden_size, classes):
         super().__init__()
         self.model = MLP(input_size, output_size, hidden_size)
4 changes: 1 addition & 3 deletions test/tensor/weights/test_weight_qbits_tensor_quantize.py
@@ -49,9 +49,7 @@ def test_weight_qbits_tensor_quantize(input_shape, dtype, qtype, axis, group_siz
             "zeropoint": 6e-2,
             "float": 5e-2,
         },
-    }[
-        qtype
-    ][shift_mode]
+    }[qtype][shift_mode]
     assert_similar(a, qa, atol=atol)
