Commit 8acb715

Drop python 3.9. Add creation functions.

mtsokol committed Apr 29, 2024
1 parent 874fd1b commit 8acb715
Showing 12 changed files with 170 additions and 638 deletions.
6 changes: 3 additions & 3 deletions .github/workflows/ci.yml
@@ -7,12 +7,12 @@ jobs:
     strategy:
       matrix:
         os: [ubuntu-latest]
-        python: ['3.9', '3.10', '3.11']
+        python: ['3.10', '3.11', '3.12']
         include:
           - os: macos-latest
-            python: '3.9'
+            python: '3.10'
           - os: windows-latest
-            python: '3.9'
+            python: '3.10'
       fail-fast: false
     runs-on: ${{ matrix.os }}
     steps:
8 changes: 6 additions & 2 deletions .gitignore
@@ -50,6 +50,7 @@ coverage.xml
 .hypothesis/
 .pytest_cache/
 cover/
+junit/
 
 # Translations
 *.mo
@@ -99,7 +100,7 @@ ipython_config.py
 # This is especially recommended for binary packages to ensure reproducibility, and is more
 # commonly ignored for libraries.
 # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
-#poetry.lock
+poetry.lock
 
 # pdm
 # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
@@ -157,4 +158,7 @@ cython_debug/
 # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
 # and can be added to the global gitignore or merged into this file. For a more nuclear
 # option (not recommended) you can uncomment the following to ignore the entire idea folder.
-#.idea/
+.idea/
+
+# mac os
+.DS_Store
600 changes: 0 additions & 600 deletions poetry.lock

This file was deleted.

4 changes: 2 additions & 2 deletions pyproject.toml
@@ -1,13 +1,13 @@
 [tool.poetry]
 name = "finch-tensor"
-version = "0.1.12"
+version = "0.1.14"
 description = ""
 authors = ["Willow Ahrens <[email protected]>"]
 readme = "README.md"
 packages = [{include = "finch", from = "src"}]
 
 [tool.poetry.dependencies]
-python = "^3.9"
+python = "^3.10"
 juliapkg = "^0.1.10"
 juliacall = "^0.9.15"
 numpy = "^1.19"
9 changes: 0 additions & 9 deletions requirements.txt

This file was deleted.

24 changes: 24 additions & 0 deletions src/finch/__init__.py
@@ -62,6 +62,18 @@
     round,
     floor,
     ceil,
+    full,
+    full_like,
+    ones,
+    ones_like,
+    zeros,
+    zeros_like,
+    bitwise_and,
+    bitwise_or,
+    bitwise_left_shift,
+    bitwise_right_shift,
+    bitwise_xor,
+    bitwise_invert,
 )
 from .compiled import (
     lazy,
@@ -168,4 +180,16 @@
     "round",
     "floor",
     "ceil",
+    "full",
+    "full_like",
+    "ones",
+    "ones_like",
+    "zeros",
+    "zeros_like",
+    "bitwise_and",
+    "bitwise_or",
+    "bitwise_left_shift",
+    "bitwise_right_shift",
+    "bitwise_xor",
+    "bitwise_invert",
 ]
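As a quick orientation for the expanded public surface, here is a hypothetical smoke-test session; it assumes finch and its Julia dependencies are installed, and the values are illustrative only:

import finch

a = finch.ones((2, 3))       # all-ones tensor
b = finch.zeros_like(a)      # zeros with a's shape
c = finch.full(4, 7)         # 1-D tensor of length 4 filled with 7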
5 changes: 5 additions & 0 deletions src/finch/dtypes.py
@@ -17,3 +17,8 @@
 complex64: jl.DataType = jl.ComplexF32
 complex128: jl.DataType = jl.ComplexF64
 bool: jl.DataType = jl.Bool
+
+number: jl.DataType = jl.Number
+complex: jl.DataType = jl.Complex
+integer: jl.DataType = jl.Integer
+abstract_float: jl.DataType = jl.AbstractFloat
2 changes: 1 addition & 1 deletion src/finch/julia.py
@@ -1,6 +1,6 @@
 import juliapkg
 
-_FINCH_VERSION = "0.6.22"
+_FINCH_VERSION = "0.6.23"
 _FINCH_HASH = "9177782c-1635-4eb9-9bfb-d9dfa25e6bce"
 
 deps = juliapkg.deps.load_cur_deps()
121 changes: 103 additions & 18 deletions src/finch/tensor.py
@@ -1,5 +1,5 @@
 import builtins
-from typing import Callable, Iterable, Optional, Union, Literal
+from typing import Any, Callable, Optional, Iterable, Literal
 
 import numpy as np
 from numpy.core.numeric import normalize_axis_index, normalize_axis_tuple
@@ -79,7 +79,7 @@ class Tensor(_Display, SparseArray):
 
     def __init__(
         self,
-        obj: Union[np.ndarray, spmatrix, Storage, JuliaObj],
+        obj: np.ndarray | spmatrix | Storage | JuliaObj,
         /,
         *,
         fill_value: np.number = 0.0,
@@ -143,22 +143,40 @@ def __abs__(self):
         return self._elemwise_op("abs")
 
     def __invert__(self):
-        return self._elemwise_op("~")
+        return self._elemwise_op(".~")
 
     def __and__(self, other):
-        return self._elemwise_op("&", other)
+        return self._elemwise_op(".&", other)
 
     def __or__(self, other):
-        return self._elemwise_op("|", other)
+        return self._elemwise_op(".|", other)
 
     def __xor__(self, other):
         return self._elemwise_op("xor", other)
 
     def __lshift__(self, other):
-        return self._elemwise_op("<<", other)
+        return self._elemwise_op(".<<", other)
 
     def __rshift__(self, other):
-        return self._elemwise_op(">>", other)
+        return self._elemwise_op(".>>", other)
+
+    def __lt__(self, other):
+        return self._elemwise_op(".<", other)
+
+    def __le__(self, other):
+        return self._elemwise_op(".<=", other)
+
+    def __gt__(self, other):
+        return self._elemwise_op(".>", other)
+
+    def __ge__(self, other):
+        return self._elemwise_op(".>=", other)
+
+    def __eq__(self, other):
+        return self._elemwise_op(".==", other)
+
+    def __ne__(self, other):
+        return self._elemwise_op(".!=", other)
 
     def _elemwise_op(self, op: str, other: Optional["Tensor"] = None) -> "Tensor":
         if other is None:
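The leading dot in these operator strings is Julia's broadcasting syntax: .&, .<, .== and friends apply the scalar operator elementwise across the tensor. A sketch of how the new comparison dunders surface in Python (values are illustrative; a working finch installation is assumed):

import numpy as np
import finch

x = finch.Tensor(np.array([1, 2, 3]))
y = finch.Tensor(np.array([3, 2, 1]))
mask = x < y    # dispatches to _elemwise_op(".<", y) -> elementwise comparison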
@@ -303,7 +321,7 @@ def to_device(self, device: Storage) -> "Tensor":
         return Tensor(self._from_other_tensor(self, storage=device))
 
     @classmethod
-    def _from_other_tensor(cls, tensor: "Tensor", storage: Optional[Storage]) -> JuliaObj:
+    def _from_other_tensor(cls, tensor: "Tensor", storage: Storage | None) -> JuliaObj:
         order = cls.preprocess_order(storage.order, tensor.ndim)
         return jl.swizzle(
             jl.Tensor(storage.levels_descr._obj, tensor._obj.body), *order
@@ -469,6 +487,16 @@ def to_scipy_sparse(self):
         else:
             raise ValueError("Tensor can't be converted to scipy.sparse object.")
 
+    def __array_namespace__(self, *, api_version: str | None = None) -> Any:
+        if api_version is None:
+            api_version = "2023.12"
+
+        if api_version not in {"2021.12", "2022.12", "2023.12"}:
+            raise ValueError(f'"{api_version}" Array API version not supported.')
+        import finch
+
+        return finch
+
 
 def random(shape, density=0.01, random_state=None):
     args = [*shape, density]
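__array_namespace__ is the Array API standard's discovery hook: array-consuming code asks the array itself for the module that implements the standard instead of importing it by name. A minimal consumer sketch:

def agnostic_sum(x):
    xp = x.__array_namespace__()   # for a finch Tensor, this returns the finch module
    return xp.sum(x)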
@@ -515,6 +543,39 @@ def asarray(obj, /, *, dtype=None, format=None):
     return tensor
 
 
+def full(
+    shape: int | tuple[int, ...],
+    fill_value: jl_dtypes.number,
+    *,
+    dtype: DType | None = None,
+) -> Tensor:
+    if isinstance(shape, int):
+        shape = (shape,)
+    if dtype is not None:
+        fill_value = dtype(fill_value)
+    return Tensor(jl.Tensor(jl.SparseCOO[len(shape)](jl.Element(fill_value)), *shape))
+
+
+def full_like(x: Tensor, /, fill_value: jl_dtypes.number, *, dtype: DType | None = None) -> Tensor:
+    return full(x.shape, fill_value, dtype=dtype)
+
+
+def ones(shape: int | tuple[int, ...], *, dtype: DType | None = None) -> Tensor:
+    return full(shape, jl_dtypes.int64(1), dtype=dtype)
+
+
+def ones_like(x: Tensor, /, *, dtype: DType | None = None) -> Tensor:
+    return ones(x.shape, dtype=dtype)
+
+
+def zeros(shape: int | tuple[int, ...], *, dtype: DType | None = None) -> Tensor:
+    return full(shape, jl_dtypes.int64(0), dtype=dtype)
+
+
+def zeros_like(x: Tensor, /, *, dtype: DType | None = None) -> Tensor:
+    return zeros(x.shape, dtype=dtype)
+
+
 def permute_dims(x: Tensor, axes: tuple[int, ...]):
     return x.permute_dims(axes)
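All the creation helpers above funnel through full, which wraps an Element level carrying the fill value in an n-dimensional SparseCOO level, so the result has no stored entries and every read yields the fill value; ones and zeros just fix that value to int64(1) and int64(0). When a dtype is given, it is applied by calling the Julia datatype on the fill value. A usage sketch (the from finch import dtypes path is an assumption):

import finch
from finch import dtypes  # assumed import path for the Julia dtype aliases

z = finch.zeros((2, 4))                               # fill value int64(0)
f = finch.full((3, 3), 2.0, dtype=dtypes.complex128)  # fill value cast to ComplexF64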

@@ -573,8 +634,8 @@ def sum(
     x: Tensor,
     /,
     *,
-    axis: Union[int, tuple[int, ...], None] = None,
-    dtype: Union[jl.DataType, None] = None,
+    axis: int | tuple[int, ...] | None = None,
+    dtype: DType | None = None,
     keepdims: bool = False,
 ) -> Tensor:
     return _reduce(x, jl.sum, axis, dtype)
@@ -584,8 +645,8 @@ def prod(
     x: Tensor,
     /,
     *,
-    axis: Union[int, tuple[int, ...], None] = None,
-    dtype: Union[jl.DataType, None] = None,
+    axis: int | tuple[int, ...] | None = None,
+    dtype: DType | None = None,
     keepdims: bool = False,
 ) -> Tensor:
     return _reduce(x, jl.prod, axis, dtype)
@@ -595,7 +656,7 @@ def max(
     x: Tensor,
     /,
     *,
-    axis: Union[int, tuple[int, ...], None] = None,
+    axis: int | tuple[int, ...] | None = None,
     keepdims: bool = False,
 ) -> Tensor:
     return _reduce(x, jl.maximum, axis)
@@ -605,7 +666,7 @@ def min(
     x: Tensor,
     /,
     *,
-    axis: Union[int, tuple[int, ...], None] = None,
+    axis: int | tuple[int, ...] | None = None,
     keepdims: bool = False,
 ) -> Tensor:
     return _reduce(x, jl.minimum, axis)
@@ -615,7 +676,7 @@ def any(
     x: Tensor,
     /,
     *,
-    axis: Union[int, tuple[int, ...], None] = None,
+    axis: int | tuple[int, ...] | None = None,
     keepdims: bool = False,
 ) -> Tensor:
     return _reduce(x, jl.any, axis)
@@ -625,19 +686,19 @@ def all(
     x: Tensor,
     /,
     *,
-    axis: Union[int, tuple[int, ...], None] = None,
+    axis: int | tuple[int, ...] | None = None,
     keepdims: bool = False,
 ) -> Tensor:
     return _reduce(x, jl.all, axis)
 
 
 def eye(
     n_rows: int,
-    n_cols: Optional[int] = None,
+    n_cols: int | None = None,
     /,
     *,
     k: int = 0,
-    dtype: Optional[DType] = None,
+    dtype: DType | None = None,
     format: Literal["coo", "dense"] = "coo",
 ) -> Tensor:
     n_cols = n_rows if n_cols is None else n_cols
@@ -809,6 +870,30 @@ def atan2(x: Tensor, other: Tensor, /) -> Tensor:
     return x._elemwise_op("atand", other)
 
 
+def bitwise_and(x1: Tensor, x2: Tensor, /) -> Tensor:
+    return x1 & x2
+
+
+def bitwise_or(x1: Tensor, x2: Tensor, /) -> Tensor:
+    return x1 | x2
+
+
+def bitwise_left_shift(x1: Tensor, x2: Tensor, /) -> Tensor:
+    return x1 << x2
+
+
+def bitwise_right_shift(x1: Tensor, x2: Tensor, /) -> Tensor:
+    return x1 >> x2
+
+
+def bitwise_xor(x1: Tensor, x2: Tensor, /) -> Tensor:
+    return x1 ^ x2
+
+
+def bitwise_invert(x: Tensor, /) -> Tensor:
+    return ~x
+
+
 def _is_scipy_sparse_obj(x):
     return hasattr(x, "__module__") and x.__module__.startswith("scipy.sparse")
2 changes: 1 addition & 1 deletion src/finch/typing.py
@@ -11,6 +11,6 @@
 
 JuliaObj = jc.AnyValue
 
-DType = jl.DataType
+DType = jc.AnyValue  # represents jl.DataType
 
 spmatrix = Any
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,7 @@ def test_elemwise_ops_1_arg(arr3d, func_name):


@pytest.mark.parametrize(
"meth_name", ["__pos__", "__neg__", "__abs__"],
"meth_name", ["__pos__", "__neg__", "__abs__", "__invert__"],
)
def test_elemwise_tensor_ops_1_arg(arr3d, meth_name):
A_finch = finch.Tensor(arr3d)
Expand All @@ -73,7 +73,8 @@ def test_elemwise_tensor_ops_1_arg(arr3d, meth_name):
@pytest.mark.parametrize(
"meth_name",
["__add__", "__mul__", "__sub__", "__truediv__", # "__floordiv__", "__mod__",
"__pow__", "__and__", "__or__", "__xor__", "__lshift__", "__rshift__"],
"__pow__", "__and__", "__or__", "__xor__", "__lshift__", "__rshift__",
"__lt__", "__le__", "__gt__", "__ge__", "__eq__", "__ne__"],
)
def test_elemwise_tensor_ops_2_args(arr3d, meth_name):
arr2d = np.array([[2, 3, 2, 3], [3, 2, 3, 2]])
Expand Down