
Commit

Merge pull request #79 from finch-tensor/kbd-add-galley
Add Galley option to compute()
kylebd99 authored Dec 5, 2024
2 parents ee52da2 + 4a973ab commit 79b6f0d
Show file tree
Hide file tree
Showing 6 changed files with 143 additions and 46 deletions.
5 changes: 3 additions & 2 deletions pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "finch-tensor"
version = "0.1.35"
version = "0.2.0"
description = ""
authors = ["Willow Ahrens <[email protected]>"]
readme = "README.md"
@@ -18,7 +18,8 @@ pre-commit = "^3.6.0"
pytest-cov = "^4.1.0"
sparse = "^0.15.1"
scipy = "^1.7"
numba = ">=0.55.0"
numba = ">=0.55.0,<0.61.0rc1"
llvmlite = "<0.44.0rc1"

[build-system]
requires = ["poetry-core"]
3 changes: 3 additions & 0 deletions src/finch/__init__.py
@@ -112,6 +112,9 @@
    lazy,
    compiled,
    compute,
+    set_optimizer,
+    DefaultScheduler,
+    GalleyScheduler,
)
from .dtypes import (
    int_,
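With these additions the scheduler selection API is reachable from the package root. A minimal sketch of the new imports, assuming the package is imported as finch the way tests/test_ops.py does:

from finch import DefaultScheduler, GalleyScheduler, set_optimizer

# Pick one of the two schedulers and install it process-wide.
set_optimizer(GalleyScheduler(verbose=False))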
61 changes: 42 additions & 19 deletions src/finch/compiled.py
@@ -4,31 +4,54 @@
from .tensor import Tensor


-def compiled(func):
-    @wraps(func)
-    def wrapper_func(*args, **kwargs):
-        new_args = []
-        for arg in args:
-            if isinstance(arg, Tensor) and not jl.isa(arg._obj, jl.Finch.LazyTensor):
-                new_args.append(Tensor(jl.Finch.LazyTensor(arg._obj)))
-            else:
-                new_args.append(arg)
-
-        result = func(*new_args, **kwargs)
-        result_tensor = Tensor(jl.Finch.compute(result._obj))
-
-        return result_tensor
-
-    return wrapper_func
-
+def compiled(opt=""):
+    def inner(func):
+        @wraps(func)
+        def wrapper_func(*args, **kwargs):
+            new_args = []
+            for arg in args:
+                if isinstance(arg, Tensor) and not jl.isa(arg._obj, jl.Finch.LazyTensor):
+                    new_args.append(Tensor(jl.Finch.LazyTensor(arg._obj)))
+                else:
+                    new_args.append(arg)
+            result = func(*new_args, **kwargs)
+            kwargs = {"ctx": get_scheduler(name=opt)} if opt != "" else {}
+            result_tensor = Tensor(jl.Finch.compute(result._obj, **kwargs))
+            return result_tensor
+        return wrapper_func
+
+    return inner
+
def lazy(tensor: Tensor):
    if tensor.is_computed():
        return Tensor(jl.Finch.LazyTensor(tensor._obj))
    return tensor

+class AbstractScheduler():
+    pass
+
+class GalleyScheduler(AbstractScheduler):
+    def __init__(self, verbose=False):
+        self.verbose=verbose
+
+    def get_julia_scheduler(self):
+        return jl.Finch.galley_scheduler(verbose=self.verbose)
+
+class DefaultScheduler(AbstractScheduler):
+    def __init__(self, verbose=False):
+        self.verbose=verbose
+
+    def get_julia_scheduler(self):
+        return jl.Finch.default_scheduler(verbose=self.verbose)
+
+def set_optimizer(opt):
+    jl.Finch.set_scheduler_b(opt.get_julia_scheduler())
+    return
+
-def compute(tensor: Tensor, *, verbose: bool = False):
+def compute(tensor: Tensor, *, verbose: bool = False, opt=None, tag=-1):
    if not tensor.is_computed():
-        return Tensor(jl.Finch.compute(tensor._obj, verbose=verbose))
+        if opt == None:
+            return Tensor(jl.Finch.compute(tensor._obj, verbose=verbose, tag=tag))
+        else:
+            return Tensor(jl.Finch.compute(tensor._obj, verbose=verbose, tag=tag, ctx=opt.get_julia_scheduler()))
    return tensor
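Taken together, the changes above give three ways to route a computation through the Galley optimizer. The following sketch is pieced together only from the code in this commit: the tensors and operations (Tensor, multiply, lazy, todense) come from the existing API, and the "galley" string passed to compiled() is an assumption, since get_scheduler and the names it accepts live in a file not shown in this diff.

import numpy as np
import finch

a = finch.Tensor(np.array([[1, 2], [3, 4]]))
b = finch.Tensor(np.array([[5, 6], [7, 8]]))

# 1. Install a scheduler globally (as the updated tests do) and compute as usual.
finch.set_optimizer(finch.GalleyScheduler())
result = finch.compute(finch.multiply(finch.lazy(a), finch.lazy(b)))

# 2. Pass a scheduler to a single compute() call via the new opt argument.
result = finch.compute(finch.multiply(finch.lazy(a), finch.lazy(b)),
                       opt=finch.GalleyScheduler())

# 3. Name an optimizer in the compiled() decorator; the string is forwarded to
#    get_scheduler(name=...), so "galley" is an assumed, unverified name.
@finch.compiled(opt="galley")
def fused(x, y):
    return finch.multiply(x, y)

print(fused(a, b).todense())

Option 1 mirrors how the updated test suite exercises both schedulers; options 2 and 3 scope the choice to a single call or function instead of the whole process.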
2 changes: 1 addition & 1 deletion src/finch/julia.py
@@ -11,7 +11,7 @@ def add_package(name: str, hash: str, version: str) -> None:


_FINCH_NAME = "Finch"
_FINCH_VERSION = "0.6.32"
_FINCH_VERSION = "1.0"
_FINCH_HASH = "9177782c-1635-4eb9-9bfb-d9dfa25e6bce"
_FINCH_REPO_PATH = os.environ.get("FINCH_REPO_PATH", default=None)
_FINCH_REPO_URL = os.environ.get("FINCH_URL_PATH", default=None)
63 changes: 50 additions & 13 deletions tests/test_ops.py
@@ -9,8 +9,12 @@

arr1d = np.array([1, 1, 2, 3])

+parametrize_optimizer = pytest.mark.parametrize("opt", [finch.DefaultScheduler(), finch.GalleyScheduler()])
+
-def test_eager(arr3d):
+@parametrize_optimizer
+def test_eager(arr3d, opt):
+    finch.set_optimizer(opt)
+
    A_finch = finch.Tensor(arr3d)
    B_finch = finch.Tensor(arr2d)

@@ -19,12 +23,15 @@ def test_eager(arr3d):
    assert_equal(result.todense(), np.multiply(arr3d, arr2d))


-def test_lazy_mode(arr3d):
+@parametrize_optimizer
+def test_lazy_mode(arr3d, opt):
+    finch.set_optimizer(opt)
+
    A_finch = finch.Tensor(arr3d)
    B_finch = finch.Tensor(arr2d)
    C_finch = finch.Tensor(arr1d)

-    @finch.compiled
+    @finch.compiled()
    def my_custom_fun(arr1, arr2, arr3):
        temp = finch.multiply(arr1, arr2)
        temp = finch.divide(temp, arr3)
@@ -66,7 +73,10 @@ def my_custom_fun(arr1, arr2, arr3):
        "trunc",
    ],
)
-def test_elemwise_ops_1_arg(arr3d, func_name):
+@parametrize_optimizer
+def test_elemwise_ops_1_arg(arr3d, func_name, opt):
+    finch.set_optimizer(opt)
+
    arr = arr3d + 1.6
    A_finch = finch.Tensor(arr)

@@ -80,7 +90,10 @@ def test_elemwise_ops_1_arg(arr3d, func_name):
    "func_name", ["real", "imag", "conj"]
)
@pytest.mark.parametrize("dtype", [np.complex128, np.complex64, np.float64, np.int64])
-def test_elemwise_complex_ops_1_arg(func_name, dtype):
+@parametrize_optimizer
+def test_elemwise_complex_ops_1_arg(func_name, dtype, opt):
+    finch.set_optimizer(opt)
+
    arr = np.asarray([[1+1j, 2+2j], [3+3j, 4-4j], [-5-5j, -6-6j]]).astype(dtype)
    arr_finch = finch.asarray(arr)

@@ -95,7 +108,10 @@ def test_elemwise_complex_ops_1_arg(func_name, dtype):
    "meth_name",
    ["__pos__", "__neg__", "__abs__", "__invert__"],
)
-def test_elemwise_tensor_ops_1_arg(arr3d, meth_name):
+@parametrize_optimizer
+def test_elemwise_tensor_ops_1_arg(arr3d, meth_name, opt):
+    finch.set_optimizer(opt)
+
    A_finch = finch.Tensor(arr3d)

    actual = getattr(A_finch, meth_name)()
@@ -108,7 +124,10 @@ def test_elemwise_tensor_ops_1_arg(arr3d, meth_name):
    "func_name",
    ["logaddexp", "logical_and", "logical_or", "logical_xor"],
)
-def test_elemwise_ops_2_args(arr3d, func_name):
+@parametrize_optimizer
+def test_elemwise_ops_2_args(arr3d, func_name, opt):
+    finch.set_optimizer(opt)
+
    arr2d = np.array([[0, 3, 2, 0], [0, 0, 3, 2]])
    if func_name.startswith("logical"):
        arr3d = arr3d.astype(bool)
@@ -145,7 +164,10 @@ def test_elemwise_ops_2_args(arr3d, func_name):
        "__ne__",
    ],
)
-def test_elemwise_tensor_ops_2_args(arr3d, meth_name):
+@parametrize_optimizer
+def test_elemwise_tensor_ops_2_args(arr3d, meth_name, opt):
+    finch.set_optimizer(opt)
+
    arr2d = np.array([[2, 3, 2, 3], [3, 2, 3, 2]])
    A_finch = finch.Tensor(arr3d)
    B_finch = finch.Tensor(arr2d)
@@ -158,7 +180,10 @@ def test_elemwise_tensor_ops_2_args(arr3d, meth_name):

@pytest.mark.parametrize("func_name", ["sum", "prod", "max", "min", "any", "all"])
@pytest.mark.parametrize("axis", [None, -1, 1, (0, 1), (0, 1, 2)])
-def test_reductions(arr3d, func_name, axis):
+@parametrize_optimizer
+def test_reductions(arr3d, func_name, axis, opt):
+    finch.set_optimizer(opt)
+
    A_finch = finch.Tensor(arr3d)

    actual = getattr(finch, func_name)(A_finch, axis=axis)
@@ -179,7 +204,10 @@ def test_reductions(arr3d, func_name, axis):
        (finch.float64, finch.complex128, np.complex128),
    ],
)
-def test_sum_prod_dtype_arg(arr3d, func_name, axis, in_dtype, dtype, expected_dtype):
+@parametrize_optimizer
+def test_sum_prod_dtype_arg(arr3d, func_name, axis, in_dtype, dtype, expected_dtype, opt):
+    finch.set_optimizer(opt)
+
    arr_finch = finch.asarray(np.abs(arr3d), dtype=in_dtype)

    actual = getattr(finch, func_name)(arr_finch, axis=axis, dtype=dtype).todense()
@@ -205,7 +233,10 @@ def test_sum_prod_dtype_arg(arr3d, func_name, axis, in_dtype, dtype, expected_dt
        ),
    ],
)
-def test_tensordot(arr3d, storage):
+@parametrize_optimizer
+def test_tensordot(arr3d, storage, opt):
+    finch.set_optimizer(opt)
+
    A_finch = finch.Tensor(arr1d)
    B_finch = finch.Tensor(arr2d)
    C_finch = finch.Tensor(arr3d)
@@ -233,7 +264,10 @@ def test_tensordot(arr3d, storage):
    assert_equal(actual.todense(), expected)


-def test_matmul(arr2d, arr3d):
+@parametrize_optimizer
+def test_matmul(arr2d, arr3d, opt):
+    finch.set_optimizer(opt)
+
    A_finch = finch.Tensor(arr2d)
    B_finch = finch.Tensor(arr2d.T)
    C_finch = finch.permute_dims(A_finch, (1, 0))
@@ -250,7 +284,10 @@ def test_matmul(arr2d, arr3d):
        A_finch @ D_finch


-def test_negative__mod__():
+@parametrize_optimizer
+def test_negative__mod__(opt):
+    finch.set_optimizer(opt)
+
    arr = np.array([-1, 0, 0, -2, -3, 0])
    arr_finch = finch.asarray(arr)

