Commit
chore: add mypy and fix related issues. include mypy in linting github action.
William Bakst committed Nov 9, 2023
1 parent 2e8b7eb commit 25c32d7
Showing 14 changed files with 128 additions and 90 deletions.
12 changes: 10 additions & 2 deletions .github/workflows/ruff.yml → .github/workflows/lint.yml
@@ -1,4 +1,4 @@
-name: Ruff
+name: Lint
 run-name: ${{ github.actor }} is linting the package

 on:
@@ -14,4 +14,12 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
-      - uses: chartboost/ruff-action@v1
+
+      - name: Run Ruff
+        uses: chartboost/ruff-action@v1
+
+      - name: Install mypy
+        run: pip install mypy
+
+      - name: Run mypy
+        run: python -m mypy .
59 changes: 58 additions & 1 deletion poetry.lock

Some generated files are not rendered by default.

3 changes: 2 additions & 1 deletion pyproject.toml
@@ -18,11 +18,12 @@ torch = ">=2.0.0, !=2.0.1, !=2.1.0"
 tqdm = "^4.65.0"

 [tool.poetry.group.dev.dependencies]
-pytest = "^7.4.0"
 mkdocs = "^1.4.3"
 mkdocs-material = "^9.1.18"
 mkdocstrings = "^0.22.0"
 mkdocstrings-python = "^1.1.2"
+mypy = "^1.6.1"
+pytest = "^7.4.0"
 ruff = "^0.1.5"

 [tool.ruff]
15 changes: 0 additions & 15 deletions pytorch_lattice/enums.py
@@ -40,21 +40,6 @@ class InputKeypointsType(_Enum):
     # LEARNED = "learned_interior"


-class FeatureType(_Enum):
-    """Type of feature.
-
-    - UNKNOWN: a feature with a type that our system does not currently support.
-    - NUMERICAL: a numerical feature that should be calibrated using an instance of
-        `NumericalCalibrator`.
-    - CATEGORICAL: a categorical feature that should be calibrated using an instance of
-        `CategoricalCalibrator`.
-    """
-
-    UNKNOWN = "unknown"
-    NUMERICAL = "numerical"
-    CATEGORICAL = "categorical"
-
-
 class NumericalCalibratorInit(_Enum):
     """Type of kernel initialization to use for NumericalCalibrator.
16 changes: 8 additions & 8 deletions pytorch_lattice/layers/categorical_calibrator.py
@@ -5,7 +5,7 @@
 mapping a given category to its learned output value.
 """
 from collections import defaultdict
-from typing import List, Optional, Tuple
+from typing import Optional

 import torch
 from graphlib import CycleError, TopologicalSorter
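Throughout this commit, `typing.List`, `typing.Tuple`, and `typing.Dict` give way to the built-in generics `list`, `tuple`, and `dict` (PEP 585, available on Python 3.9+); `Optional` and `Union` still come from `typing`. A minimal sketch of the modernized style — the helper name is hypothetical, for illustration only:

```python
from typing import Optional


def index_monotonicity_pairs(
    monotonicity_pairs: Optional[list[tuple[int, int]]] = None,
) -> dict[int, list[int]]:
    """Groups each dominated category index under its dominating index."""
    index: dict[int, list[int]] = {}
    for i, j in monotonicity_pairs or []:
        index.setdefault(i, []).append(j)
    return index
```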
@@ -45,7 +45,7 @@ def __init__(
         missing_input_value: Optional[float] = None,
         output_min: Optional[float] = None,
         output_max: Optional[float] = None,
-        monotonicity_pairs: Optional[List[Tuple[int, int]]] = None,
+        monotonicity_pairs: Optional[list[tuple[int, int]]] = None,
         kernel_init: CategoricalCalibratorInit = CategoricalCalibratorInit.UNIFORM,
     ) -> None:
         """Initializes an instance of `CategoricalCalibrator`.
@@ -104,14 +104,14 @@ def __init__(
             init_value = 0.0
             torch.nn.init.constant_(self.kernel, init_value)
         elif kernel_init == CategoricalCalibratorInit.UNIFORM:
-            if output_min is None and output_max is None:
-                low, high = -0.05, 0.05
-            elif output_min is None:
+            if output_min is not None and output_max is not None:
+                low, high = output_min, output_max
+            elif output_min is None and output_max is not None:
                 low, high = output_max - 0.05, output_max
-            elif output_max is None:
+            elif output_min is not None and output_max is None:
                 low, high = output_min, output_min + 0.05
             else:
-                low, high = output_min, output_max
+                low, high = -0.05, 0.05
             torch.nn.init.uniform_(self.kernel, low, high)
         else:
             raise ValueError(f"Unknown kernel init: {kernel_init}")
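Beyond reordering the branches, the rewrite checks `is not None` first, which lets mypy narrow `output_min` and `output_max` to `float` before they are used in arithmetic; the runtime behavior is unchanged. A standalone sketch of the same logic (the helper name is hypothetical, not part of the commit):

```python
from typing import Optional


def uniform_init_range(
    output_min: Optional[float], output_max: Optional[float]
) -> tuple[float, float]:
    """Mirrors the branch logic above; explicit narrowing satisfies mypy."""
    if output_min is not None and output_max is not None:
        return output_min, output_max
    elif output_min is None and output_max is not None:
        return output_max - 0.05, output_max
    elif output_min is not None and output_max is None:
        return output_min, output_min + 0.05
    else:
        return -0.05, 0.05
```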
@@ -135,7 +135,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
         return torch.mm(one_hot, self.kernel)

     @torch.no_grad()
-    def assert_constraints(self, eps=1e-6) -> List[str]:
+    def assert_constraints(self, eps=1e-6) -> list[str]:
         """Asserts that layer satisfies specified constraints.

         This checks that weights at the indexes of monotonicity pairs are in the correct
6 changes: 3 additions & 3 deletions pytorch_lattice/layers/linear.py
@@ -4,7 +4,7 @@
 single-dimensional input and transforms it using a linear transformation and optionally
 a bias term. This module supports monotonicity constraints.
 """
-from typing import List, Optional
+from typing import Optional

 import torch

@@ -44,7 +44,7 @@ class Linear(torch.nn.Module):
     def __init__(
         self,
         input_dim,
-        monotonicities: Optional[List[Monotonicity]] = None,
+        monotonicities: Optional[list[Monotonicity]] = None,
         use_bias: bool = True,
         weighted_average: bool = False,
     ) -> None:
@@ -98,7 +98,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
         return result

     @torch.no_grad()
-    def assert_constraints(self, eps=1e-6) -> List[str]:
+    def assert_constraints(self, eps=1e-6) -> list[str]:
         """Asserts that layer satisfies specified constraints.

         This checks that decreasing monotonicity corresponds to negative weights,
29 changes: 16 additions & 13 deletions pytorch_lattice/layers/numerical_calibrator.py
@@ -5,7 +5,7 @@
 satisfy desired bounds and monotonicity constraints.
 """
 from collections import defaultdict
-from typing import List, Optional, Tuple
+from typing import Optional

 import numpy as np
 import torch
@@ -82,11 +82,11 @@ def __init__(

         # Determine default output initialization values if bounds are not fully set.
         if output_min is not None and output_max is not None:
-            output_init_min, output_init_max = self.output_min, self.output_max
+            output_init_min, output_init_max = output_min, output_max
         elif output_min is not None:
-            output_init_min, output_init_max = self.output_min, self.output_min + 4.0
+            output_init_min, output_init_max = output_min, output_min + 4.0
         elif output_max is not None:
-            output_init_min, output_init_max = self.output_max - 4.0, self.output_max
+            output_init_min, output_init_max = output_max - 4.0, output_max
         else:
             output_init_min, output_init_max = -2.0, 2.0
         self._output_init_min, self._output_init_max = output_init_min, output_init_max
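The substantive change here swaps the `Optional` attributes `self.output_min`/`self.output_max` for the local parameters: mypy narrows the locals after the `is not None` checks, but not the attributes (assuming they are declared `Optional[float]` on the class), so the attribute arithmetic was a type error even though the runtime values were guaranteed. A hedged sketch of the distinction:

```python
from typing import Optional


class Sketch:
    output_min: Optional[float] = None

    def pick(self, output_min: Optional[float]) -> float:
        if output_min is not None:
            # self.output_min + 4.0 would fail type checking: the attribute
            # is still Optional[float] as far as mypy is concerned.
            return output_min + 4.0  # OK: the local is narrowed to float
        return -2.0
```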
@@ -111,11 +111,10 @@ def initialize_kernel() -> torch.Tensor:
                 raise ValueError(f"Unknown kernel init: {self.kernel_init}")

             if monotonicity == Monotonicity.DECREASING:
-                bias = self._output_init_max
+                bias = torch.tensor([[self._output_init_max]])
                 heights = -heights
             else:
-                bias = self._output_init_min
-            bias = torch.tensor([[bias]])
+                bias = torch.tensor([[self._output_init_min]])
             return torch.cat((bias, heights), 0).double()

         self.kernel = torch.nn.Parameter(initialize_kernel())
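The old code bound `bias` to a `float` and then re-bound it to a `Tensor`, which mypy rejects without an explicit annotation because a name keeps its first inferred type. Building the `(1, 1)` tensor inside each branch keeps a single type throughout. A small sketch with a hypothetical helper:

```python
import torch


def make_bias(init_min: float, init_max: float, decreasing: bool) -> torch.Tensor:
    """Returns the (1, 1) bias row that torch.cat prepends to the heights."""
    if decreasing:
        bias = torch.tensor([[init_max]])
    else:
        bias = torch.tensor([[init_min]])
    return bias  # consistently a Tensor, never a float
```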
@@ -151,7 +150,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
         return result

     @torch.no_grad()
-    def assert_constraints(self, eps=1e-6) -> List[str]:
+    def assert_constraints(self, eps=1e-6) -> list[str]:
         """Asserts that layer satisfies specified constraints.

         This checks that weights follow monotonicity constraints and that the output is
@@ -212,8 +211,12 @@ def constrain(self) -> None:
             return

         original_bias, original_heights = self.kernel.data[0:1], self.kernel.data[1:]
-        previous_bias_delta = defaultdict(lambda: torch.zeros_like(original_bias))
-        previous_heights_delta = defaultdict(lambda: torch.zeros_like(original_heights))
+        previous_bias_delta: dict[str, torch.Tensor] = defaultdict(
+            lambda: torch.zeros_like(original_bias)
+        )
+        previous_heights_delta: dict[str, torch.Tensor] = defaultdict(
+            lambda: torch.zeros_like(original_heights)
+        )

         def apply_bound_constraints(bias, heights):
             previous_bias = bias - previous_bias_delta["BOUNDS"]
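mypy cannot infer a key type for a `defaultdict` built only from a `default_factory`, so it reports "Need type annotation for ..."; the added `dict[str, torch.Tensor]` annotations supply it. A minimal reproduction of the pattern:

```python
from collections import defaultdict

import torch

original_bias = torch.zeros(1, 1)

# Without the annotation the factory fixes the value type but not the key
# type, and mypy asks for an explicit annotation.
previous_bias_delta: dict[str, torch.Tensor] = defaultdict(
    lambda: torch.zeros_like(original_bias)
)
previous_bias_delta["BOUNDS"] += 1.0  # first access materializes a zero tensor
```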
@@ -291,7 +294,7 @@ def keypoints_outputs(self) -> torch.Tensor:

     def _project_monotonic_bounds(
         self, bias: torch.Tensor, heights: torch.Tensor
-    ) -> Tuple[torch.Tensor, torch.Tensor]:
+    ) -> tuple[torch.Tensor, torch.Tensor]:
         """Projects bias and heights into bounds considering monotonicity.

         For computation simplification in the case of decreasing monotonicity, we mirror
@@ -333,7 +336,7 @@ def _project_monotonic_bounds(

     def _approximately_project_bounds_only(
         self, bias: torch.Tensor, heights: torch.Tensor
-    ) -> Tuple[torch.Tensor, torch.Tensor]:
+    ) -> tuple[torch.Tensor, torch.Tensor]:
         """Projects bias and heights without considering monotonicity.

         It is worth noting that this projection is an approximation and is not an exact
@@ -367,7 +370,7 @@ def _project_monotonicity(self, heights: torch.Tensor) -> torch.Tensor:

     def _squeeze_by_scaling(
         self, bias: torch.Tensor, heights: torch.Tensor
-    ) -> Tuple[torch.Tensor, torch.Tensor]:
+    ) -> tuple[torch.Tensor, torch.Tensor]:
         """Squeezes monotonic calibrators by scaling them into bound constraints.

         It is worth noting that this is not an exact projection with respect to the L2
8 changes: 4 additions & 4 deletions pytorch_lattice/models/calibrated_linear.py
@@ -1,5 +1,5 @@
"""Class for easily constructing a calibrated linear model."""
from typing import Dict, List, Optional, Union
from typing import Optional, Union

import torch

@@ -53,7 +53,7 @@ class CalibratedLinear(torch.nn.Module):

     def __init__(
         self,
-        features: List[Union[NumericalFeature, CategoricalFeature]],
+        features: list[Union[NumericalFeature, CategoricalFeature]],
         output_min: Optional[float] = None,
         output_max: Optional[float] = None,
         use_bias: bool = True,
@@ -92,7 +92,7 @@ def __init__(
             input_dim=len(features),
             monotonicities=self.monotonicities,
             use_bias=use_bias,
-            weighted_average=(
+            weighted_average=bool(
                 output_min is not None
                 or output_max is not None
                 or output_calibration_num_keypoints
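`or` returns one of its operands rather than a `bool`, so the old parenthesized expression could have a non-bool static type whenever the keypoint count was the deciding operand (assuming `output_calibration_num_keypoints` is `Optional[int]`), which mypy rejects for the `bool` parameter `weighted_average`. Wrapping the chain in `bool(...)` coerces the result without changing behavior. A sketch under those assumptions:

```python
from typing import Optional


def wants_weighted_average(
    output_min: Optional[float],
    output_max: Optional[float],
    output_calibration_num_keypoints: Optional[int],
) -> bool:
    # Without bool(), the expression's type would be the union of bool and
    # Optional[int], since `or` yields its last operand.
    return bool(
        output_min is not None
        or output_max is not None
        or output_calibration_num_keypoints
    )
```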
@@ -123,7 +123,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
         return result

     @torch.no_grad()
-    def assert_constraints(self) -> Dict[str, List[str]]:
+    def assert_constraints(self) -> dict[str, list[str]]:
         """Asserts all layers within model satisfy specified constraints.

         Asserts monotonicity pairs and output bounds for categorical calibrators,