Includes all deferred conversion costs in benchmarks #34

Merged
43 changes: 26 additions & 17 deletions benchmarks/nx-cugraph/pytest-based/bench_algos.py
@@ -130,12 +130,21 @@ def get_backend_wrapper(backend_name):
     "backend" kwarg on it.
     """
 
-    def wrap_callable_for_dispatch(func, exhaust_returned_iterator=False):
+    # FIXME: consider a pytest param to run force_unlazy_eval=False for
+    # benchmarks that set it to True in order to measure the time spent for any
+    # deferred computation/conversions.
+    def wrap_callable_for_dispatch(func, force_unlazy_eval=False):
+        # force_unlazy_eval=True forces iterators and other containers to
+        # generate a complete set of results in order to include any deferred
+        # compute or conversion in the benchmark
         def wrapper(*args, **kwargs):
             kwargs["backend"] = backend_name
             retval = func(*args, **kwargs)
-            if exhaust_returned_iterator:
-                retval = list(retval)
+            if force_unlazy_eval:
+                if isinstance(retval, Mapping):
+                    retval = dict(retval)
+                else:
+                    retval = list(retval)
             return retval
 
         return wrapper
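One way the FIXME above could be addressed, sketched here as an assumption rather than anything this PR implements, is a pytest fixture parameterized over both settings (the fixture name and ids are hypothetical):

```python
import pytest

# Hypothetical fixture (not part of this PR): parameterize forced evaluation so
# each benchmark runs both with and without deferred conversion costs included.
@pytest.fixture(params=[True, False], ids=["force_unlazy_eval", "lazy"])
def force_unlazy_eval(request):
    return request.param
```

Benchmarks that currently hard-code force_unlazy_eval=True would then pass the fixture value through backend_wrapper instead, and each benchmark would be reported once per setting.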
@@ -440,7 +449,7 @@ def bench_shortest_path(benchmark, graph_obj, backend_wrapper):
node = get_highest_degree_node(graph_obj)

result = benchmark.pedantic(
-target=backend_wrapper(nx.shortest_path),
+target=backend_wrapper(nx.shortest_path, force_unlazy_eval=True),
Contributor: I think it would be nice to be able to run this benchmark with both force_unlazy_eval=True and force_unlazy_eval=False.

Contributor Author: Yes, I agree that's useful. I'll add a FIXME/issue to parameterize that. FWIW, here are the results with/without when I ran locally:

[image: local benchmark results with and without force_unlazy_eval]

Fortunately, run times don't change noticeably until the hollywood (57M edges) dataset or larger.

Contributor Author: #44

args=(G,),
kwargs=dict(
source=node,
@@ -474,7 +483,7 @@ def bench_single_target_shortest_path_length(benchmark, graph_obj, backend_wrapper):
node = get_highest_degree_node(graph_obj)
result = benchmark.pedantic(
target=backend_wrapper(
-nx.single_target_shortest_path_length, exhaust_returned_iterator=True
+nx.single_target_shortest_path_length, force_unlazy_eval=True
),
args=(G,),
kwargs=dict(
@@ -484,9 +493,11 @@ def bench_single_target_shortest_path_length(benchmark, graph_obj, backend_wrapper):
iterations=iterations,
warmup_rounds=warmup_rounds,
)
-# exhaust_returned_iterator=True forces the result to a list, but is not
-# needed for this algo in NX 3.3+ which returns a dict instead of an
-# iterator. Forcing to a list does not change the benchmark timing.
+# force_unlazy_eval=True forces iterators and other containers to generate
+# a complete set of results (in order to include any deferred compute or
+# conversion in the benchmark), but is not needed for this algo in NX 3.3+
+# since it returns a dict instead of an iterator. Forcing eval does not
+# change the benchmark timing.
assert type(result) is list
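For reference, a minimal standalone sketch of the iterator-versus-dict behavior the comment describes, assuming only a plain NetworkX install (this is illustration, not part of the benchmark code):

```python
from collections.abc import Mapping

import networkx as nx

G = nx.path_graph(5)
retval = nx.single_target_shortest_path_length(G, 4)

# Per the comment above: NX < 3.3 returns a lazy iterator here, NX 3.3+ a dict.
# Forcing evaluation picks the matching container so any deferred work runs now.
result = dict(retval) if isinstance(retval, Mapping) else list(retval)
print(type(result).__name__)
```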


@@ -525,7 +536,7 @@ def bench_generic_bfs_edges(benchmark, graph_obj, backend_wrapper):
G = get_graph_obj_for_benchmark(graph_obj, backend_wrapper)
node = get_highest_degree_node(graph_obj)
result = benchmark.pedantic(
-target=backend_wrapper(nx.generic_bfs_edges, exhaust_returned_iterator=True),
+target=backend_wrapper(nx.generic_bfs_edges, force_unlazy_eval=True),
args=(G,),
kwargs=dict(
source=node,
@@ -541,7 +552,7 @@ def bench_bfs_edges(benchmark, graph_obj, backend_wrapper):
G = get_graph_obj_for_benchmark(graph_obj, backend_wrapper)
node = get_highest_degree_node(graph_obj)
result = benchmark.pedantic(
-target=backend_wrapper(nx.bfs_edges, exhaust_returned_iterator=True),
+target=backend_wrapper(nx.bfs_edges, force_unlazy_eval=True),
args=(G,),
kwargs=dict(
source=node,
@@ -557,7 +568,7 @@ def bench_bfs_layers(benchmark, graph_obj, backend_wrapper):
G = get_graph_obj_for_benchmark(graph_obj, backend_wrapper)
node = get_highest_degree_node(graph_obj)
result = benchmark.pedantic(
-target=backend_wrapper(nx.bfs_layers, exhaust_returned_iterator=True),
+target=backend_wrapper(nx.bfs_layers, force_unlazy_eval=True),
args=(G,),
kwargs=dict(
sources=node,
@@ -573,7 +584,7 @@ def bench_bfs_predecessors(benchmark, graph_obj, backend_wrapper):
G = get_graph_obj_for_benchmark(graph_obj, backend_wrapper)
node = get_highest_degree_node(graph_obj)
result = benchmark.pedantic(
-target=backend_wrapper(nx.bfs_predecessors, exhaust_returned_iterator=True),
+target=backend_wrapper(nx.bfs_predecessors, force_unlazy_eval=True),
args=(G,),
kwargs=dict(
source=node,
@@ -589,7 +600,7 @@ def bench_bfs_successors(benchmark, graph_obj, backend_wrapper):
G = get_graph_obj_for_benchmark(graph_obj, backend_wrapper)
node = get_highest_degree_node(graph_obj)
result = benchmark.pedantic(
-target=backend_wrapper(nx.bfs_successors, exhaust_returned_iterator=True),
+target=backend_wrapper(nx.bfs_successors, force_unlazy_eval=True),
args=(G,),
kwargs=dict(
source=node,
@@ -773,7 +784,7 @@ def bench_strongly_connected_components(benchmark, graph_obj, backend_wrapper):
G = get_graph_obj_for_benchmark(graph_obj, backend_wrapper)
result = benchmark.pedantic(
target=backend_wrapper(
-nx.strongly_connected_components, exhaust_returned_iterator=True
+nx.strongly_connected_components, force_unlazy_eval=True
),
args=(G,),
rounds=rounds,
@@ -816,9 +827,7 @@ def bench_triangles(benchmark, graph_obj, backend_wrapper):
def bench_weakly_connected_components(benchmark, graph_obj, backend_wrapper):
G = get_graph_obj_for_benchmark(graph_obj, backend_wrapper)
result = benchmark.pedantic(
-target=backend_wrapper(
-nx.weakly_connected_components, exhaust_returned_iterator=True
-),
+target=backend_wrapper(nx.weakly_connected_components, force_unlazy_eval=True),
args=(G,),
rounds=rounds,
iterations=iterations,