From 04f098499479dcbd7ea301efe047de79ed81d485 Mon Sep 17 00:00:00 2001
From: Alex Barghi <105237337+alexbarghi-nv@users.noreply.github.com>
Date: Fri, 3 Jan 2025 14:37:10 -0500
Subject: [PATCH] [BUG] Skip WholeGraph Tests if GPU PyTorch Unavailable (#4820)

Skips WholeGraph tests if GPU PyTorch is not available. Required to get
tests passing on ARM.

In the future, we should move all WholeGraph-dependent code, as well as
the bulk sampling API, into `cugraph-gnn` so these errors do not continue.

Authors:
  - Alex Barghi (https://github.com/alexbarghi-nv)
  - James Lamb (https://github.com/jameslamb)

Approvers:
  - James Lamb (https://github.com/jameslamb)
  - Don Acosta (https://github.com/acostadon)
  - Rick Ratzel (https://github.com/rlratzel)

URL: https://github.com/rapidsai/cugraph/pull/4820
---
 .../test_gnn_feat_storage_wholegraph.py      | 30 +++++++++----------
 1 file changed, 15 insertions(+), 15 deletions(-)

diff --git a/python/cugraph/cugraph/tests/data_store/test_gnn_feat_storage_wholegraph.py b/python/cugraph/cugraph/tests/data_store/test_gnn_feat_storage_wholegraph.py
index f760ef3e1b..964449276a 100644
--- a/python/cugraph/cugraph/tests/data_store/test_gnn_feat_storage_wholegraph.py
+++ b/python/cugraph/cugraph/tests/data_store/test_gnn_feat_storage_wholegraph.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2023-2024, NVIDIA CORPORATION.
+# Copyright (c) 2023-2025, NVIDIA CORPORATION.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
@@ -32,6 +32,20 @@ def get_cudart_version():
     return major * 1000 + minor * 10
 
 
+pytestmark = [
+    pytest.mark.skipif(
+        isinstance(torch, MissingModule) or not torch.cuda.is_available(),
+        reason="PyTorch with GPU support not available",
+    ),
+    pytest.mark.skipif(
+        isinstance(pylibwholegraph, MissingModule), reason="wholegraph not available"
+    ),
+    pytest.mark.skipif(
+        get_cudart_version() < 11080, reason="not compatible with CUDA < 11.8"
+    ),
+]
+
+
 def runtest(rank: int, world_size: int):
     torch.cuda.set_device(rank)
 
@@ -69,13 +83,6 @@ def runtest(rank: int, world_size: int):
 
 
 @pytest.mark.sg
-@pytest.mark.skipif(isinstance(torch, MissingModule), reason="torch not available")
-@pytest.mark.skipif(
-    isinstance(pylibwholegraph, MissingModule), reason="wholegraph not available"
-)
-@pytest.mark.skipif(
-    get_cudart_version() < 11080, reason="not compatible with CUDA < 11.8"
-)
 def test_feature_storage_wholegraph_backend():
     world_size = torch.cuda.device_count()
     print("gpu count:", world_size)
@@ -87,13 +94,6 @@ def test_feature_storage_wholegraph_backend():
 
 
 @pytest.mark.mg
-@pytest.mark.skipif(isinstance(torch, MissingModule), reason="torch not available")
-@pytest.mark.skipif(
-    isinstance(pylibwholegraph, MissingModule), reason="wholegraph not available"
-)
-@pytest.mark.skipif(
-    get_cudart_version() < 11080, reason="not compatible with CUDA < 11.8"
-)
 def test_feature_storage_wholegraph_backend_mg():
     world_size = torch.cuda.device_count()
     print("gpu count:", world_size)
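
For reference, the mechanism this patch relies on is pytest's module-level `pytestmark`: every marker in that list is applied to each test collected from the file, which is why the three per-test `skipif` stacks could be dropped. Below is a minimal standalone sketch of the same pattern; the file name and the `pytest.importorskip` call are illustrative assumptions and not part of the patch (the patched module instead imports `torch` through cugraph's `MissingModule` helper).

# demo_skip_gpu_torch.py -- hypothetical standalone sketch, not part of the patch
import pytest

# Skip the whole module at collection time if torch cannot be imported at all.
torch = pytest.importorskip("torch")

# Module-level pytestmark: every test in this file inherits these skip conditions.
pytestmark = [
    pytest.mark.skipif(
        not torch.cuda.is_available(),
        reason="PyTorch with GPU support not available",
    ),
]


def test_runs_only_with_gpu_torch():
    # Runs only when a CUDA-capable PyTorch build sees at least one device.
    assert torch.cuda.device_count() >= 1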