Remove unnecessary CUDA utilities #4855

Merged
6 changes: 1 addition & 5 deletions benchmarks/cugraph/pytest-based/bench_algos.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020-2024, NVIDIA CORPORATION.
+# Copyright (c) 2020-2025, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
@@ -42,7 +42,6 @@ def setFixtureParamNames(*args, **kwargs):
from cugraph.structure.number_map import NumberMap
from cugraph.generators import rmat
from cugraph.testing import utils, mg_utils
-from cugraph.utilities.utils import is_device_version_less_than

from cugraph_benchmarking.params import (
directed_datasets,
@@ -362,9 +361,6 @@ def bench_sorensen(gpubenchmark, unweighted_graph):
gpubenchmark(sorensen, G, vert_pairs)


-@pytest.mark.skipif(
-    is_device_version_less_than((7, 0)), reason="Not supported on Pascal"
-)
def bench_louvain(gpubenchmark, graph):
louvain = dask_cugraph.louvain if is_graph_distributed(graph) else cugraph.louvain
gpubenchmark(louvain, graph)
17 changes: 1 addition & 16 deletions ci/notebook_list.py
@@ -17,8 +17,6 @@
import glob
from pathlib import Path

-from cuda.bindings import runtime

# for adding another run type and skip file name add to this dictionary
runtype_dict = {
"all": "",
@@ -41,16 +39,7 @@ def _get_cuda_version_string():
minor //= 10
return f"{major}.{minor}"


-def _is_ampere_or_newer():
-    status, device_id = runtime.cudaGetDevice()
-    if status != runtime.cudaError_t.cudaSuccess:
-        raise RuntimeError("Could not get CUDA device.")
-    status, device_prop = runtime.cudaGetDeviceProperties(device_id)
-    if status != runtime.cudaError_t.cudaSuccess:
-        raise RuntimeError("Could not get CUDA device properties.")
-    return (device_prop.major, device_prop.minor) >= (8, 0)

cuda_version_string = _get_cuda_version_string()

parser = argparse.ArgumentParser(description="Condition for running the notebook tests")
parser.add_argument("runtype", type=str)
@@ -86,10 +75,6 @@ def _is_ampere_or_newer():
)
skip = True
break
-elif _is_ampere_or_newer() and re.search("# Does not run on Ampere", line):
-    print(f"SKIPPING {filename} (does not run on Ampere)", file=sys.stderr)
-    skip = True
-    break
elif re.search("# Does not run on CUDA ", line) and (
cuda_version_string in line
):
3 changes: 1 addition & 2 deletions python/cugraph/cugraph/traversal/ms_bfs.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2021-2023, NVIDIA CORPORATION.
+# Copyright (c) 2021-2025, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
@@ -56,7 +56,6 @@ def _get_feasibility(G, sources, components=None, depth_limit=None):

# Fixme not implemented in RMM yet
# using 96GB upper bound for now
@bdice (Contributor, Author) commented on Jan 8, 2025:

This should probably use rmm.mr.available_device_memory() but this function is currently unreachable due to the NotImplementedError thrown on line 149 below. It seems it would be better to rework this entirely once the feature is implemented.
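A minimal sketch of what that rework might look like, assuming `rmm.mr.available_device_memory()` is available and returns a `(free, total)` tuple in bytes; the helper name below is hypothetical and only illustrates replacing the hard-coded 96 GB bound:

```python
import rmm


def _get_memory_upper_bound():
    # Query the current device instead of assuming a 96 GB upper bound.
    # The second element of the tuple is the device's total memory in bytes.
    _free_bytes, total_bytes = rmm.mr.available_device_memory()
    return total_bytes


# e.g. mem = _get_memory_upper_bound() rather than mem = 9.6e10
```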

-# mem = get_device_memory_info()
mem = 9.6e10
n_sources = sources.size
V = G.number_of_vertices()
40 changes: 0 additions & 40 deletions python/cugraph/cugraph/utilities/utils.py
@@ -18,8 +18,6 @@
import cudf
from cudf.core.column import as_column

-from cuda.bindings import runtime

from warnings import warn

# optional dependencies
@@ -207,44 +205,6 @@ def get_traversed_path_list(df, id):
return answer


-def is_cuda_version_less_than(min_version):
-    """
-    Returns True if the version of CUDA being used is less than min_version
-    """
-    status, version = runtime.getLocalRuntimeVersion()
-    if status != runtime.cudaError_t.cudaSuccess:
-        raise RuntimeError("Could not get CUDA runtime version.")
-    major = version // 1000
-    minor = (version % 1000) // 10
-    return (major, minor) < min_version


-def is_device_version_less_than(min_version):
-    """
-    Returns True if the version of CUDA being used is less than min_version
-    """
-    status, device_id = runtime.cudaGetDevice()
-    if status != runtime.cudaError_t.cudaSuccess:
-        raise RuntimeError("Could not get CUDA device.")
-    status, device_prop = runtime.cudaGetDeviceProperties(device_id)
-    if status != runtime.cudaError_t.cudaSuccess:
-        raise RuntimeError("Could not get CUDA device properties.")
-    return (device_prop.major, device_prop.minor) < min_version


-def get_device_memory_info():
-    """
-    Returns the total amount of global memory on the device in bytes
-    """
-    status, device_id = runtime.cudaGetDevice()
-    if status != runtime.cudaError_t.cudaSuccess:
-        raise RuntimeError("Could not get CUDA device.")
-    status, device_prop = runtime.cudaGetDeviceProperties(device_id)
-    if status != runtime.cudaError_t.cudaSuccess:
-        raise RuntimeError("Could not get CUDA device properties.")
-    return device_prop.totalGlobalMem


# FIXME: if G is a Nx type, the weight attribute is assumed to be "weight", if
# set. An additional optional parameter for the weight attr name when accepting
# Nx graphs may be needed. From the Nx docs: