diff --git a/benchmarks/cugraph/pytest-based/bench_algos.py b/benchmarks/cugraph/pytest-based/bench_algos.py
index 04407d656d7..1c988ea636a 100644
--- a/benchmarks/cugraph/pytest-based/bench_algos.py
+++ b/benchmarks/cugraph/pytest-based/bench_algos.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020-2024, NVIDIA CORPORATION.
+# Copyright (c) 2020-2025, NVIDIA CORPORATION.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
@@ -42,7 +42,6 @@ def setFixtureParamNames(*args, **kwargs):
 from cugraph.structure.number_map import NumberMap
 from cugraph.generators import rmat
 from cugraph.testing import utils, mg_utils
-from cugraph.utilities.utils import is_device_version_less_than
 
 from cugraph_benchmarking.params import (
     directed_datasets,
@@ -362,9 +361,6 @@ def bench_sorensen(gpubenchmark, unweighted_graph):
     gpubenchmark(sorensen, G, vert_pairs)
 
 
-@pytest.mark.skipif(
-    is_device_version_less_than((7, 0)), reason="Not supported on Pascal"
-)
 def bench_louvain(gpubenchmark, graph):
     louvain = dask_cugraph.louvain if is_graph_distributed(graph) else cugraph.louvain
     gpubenchmark(louvain, graph)
diff --git a/ci/notebook_list.py b/ci/notebook_list.py
index 659ac4de755..db26f2efa2c 100644
--- a/ci/notebook_list.py
+++ b/ci/notebook_list.py
@@ -41,16 +41,7 @@ def _get_cuda_version_string():
     minor //= 10
     return f"{major}.{minor}"
 
-
-def _is_ampere_or_newer():
-    status, device_id = runtime.cudaGetDevice()
-    if status != runtime.cudaError_t.cudaSuccess:
-        raise RuntimeError("Could not get CUDA device.")
-    status, device_prop = runtime.cudaGetDeviceProperties(device_id)
-    if status != runtime.cudaError_t.cudaSuccess:
-        raise RuntimeError("Could not get CUDA device properties.")
-    return (device_prop.major, device_prop.minor) >= (8, 0)
-
+cuda_version_string = _get_cuda_version_string()
 
 parser = argparse.ArgumentParser(description="Condition for running the notebook tests")
 parser.add_argument("runtype", type=str)
@@ -86,10 +77,6 @@ def _is_ampere_or_newer():
             )
             skip = True
             break
-        elif _is_ampere_or_newer() and re.search("# Does not run on Ampere", line):
-            print(f"SKIPPING {filename} (does not run on Ampere)", file=sys.stderr)
-            skip = True
-            break
         elif re.search("# Does not run on CUDA ", line) and (
             cuda_version_string in line
         ):
diff --git a/python/cugraph/cugraph/traversal/ms_bfs.py b/python/cugraph/cugraph/traversal/ms_bfs.py
index df624e453ee..b80331d475a 100644
--- a/python/cugraph/cugraph/traversal/ms_bfs.py
+++ b/python/cugraph/cugraph/traversal/ms_bfs.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2021-2023, NVIDIA CORPORATION.
+# Copyright (c) 2021-2025, NVIDIA CORPORATION.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
@@ -56,7 +56,6 @@ def _get_feasibility(G, sources, components=None, depth_limit=None):
 
     # Fixme not implemented in RMM yet
     # using 96GB upper bound for now
-    # mem = get_device_memory_info()
     mem = 9.6e10
     n_sources = sources.size
     V = G.number_of_vertices()
diff --git a/python/cugraph/cugraph/utilities/utils.py b/python/cugraph/cugraph/utilities/utils.py
index 074503e2f60..493a9850a0f 100644
--- a/python/cugraph/cugraph/utilities/utils.py
+++ b/python/cugraph/cugraph/utilities/utils.py
@@ -18,8 +18,6 @@
 import cudf
 from cudf.core.column import as_column
 
-from cuda.bindings import runtime
-
 from warnings import warn
 
 # optional dependencies
@@ -207,44 +205,6 @@ def get_traversed_path_list(df, id):
     return answer
 
 
-def is_cuda_version_less_than(min_version):
-    """
-    Returns True if the version of CUDA being used is less than min_version
-    """
-    status, version = runtime.getLocalRuntimeVersion()
-    if status != runtime.cudaError_t.cudaSuccess:
-        raise RuntimeError("Could not get CUDA runtime version.")
-    major = version // 1000
-    minor = (version % 1000) // 10
-    return (major, minor) < min_version
-
-
-def is_device_version_less_than(min_version):
-    """
-    Returns True if the version of CUDA being used is less than min_version
-    """
-    status, device_id = runtime.cudaGetDevice()
-    if status != runtime.cudaError_t.cudaSuccess:
-        raise RuntimeError("Could not get CUDA device.")
-    status, device_prop = runtime.cudaGetDeviceProperties(device_id)
-    if status != runtime.cudaError_t.cudaSuccess:
-        raise RuntimeError("Could not get CUDA device properties.")
-    return (device_prop.major, device_prop.minor) < min_version
-
-
-def get_device_memory_info():
-    """
-    Returns the total amount of global memory on the device in bytes
-    """
-    status, device_id = runtime.cudaGetDevice()
-    if status != runtime.cudaError_t.cudaSuccess:
-        raise RuntimeError("Could not get CUDA device.")
-    status, device_prop = runtime.cudaGetDeviceProperties(device_id)
-    if status != runtime.cudaError_t.cudaSuccess:
-        raise RuntimeError("Could not get CUDA device properties.")
-    return device_prop.totalGlobalMem
-
-
 # FIXME: if G is a Nx type, the weight attribute is assumed to be "weight", if
 # set. An additional optional parameter for the weight attr name when accepting
 # Nx graphs may be needed. From the Nx docs:
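
Note for downstream users: is_cuda_version_less_than, is_device_version_less_than, and get_device_memory_info are removed from cugraph.utilities.utils with no in-tree replacement. If external code still needs these checks, a minimal standalone sketch built only from the cuda.bindings.runtime calls visible in the removed lines above could look like the following (hypothetical module, not part of cugraph):

# Hypothetical standalone replacement for the removed cugraph helpers.
from cuda.bindings import runtime


def _device_properties():
    # Query the current device and its properties, mirroring the removed code.
    status, device_id = runtime.cudaGetDevice()
    if status != runtime.cudaError_t.cudaSuccess:
        raise RuntimeError("Could not get CUDA device.")
    status, device_prop = runtime.cudaGetDeviceProperties(device_id)
    if status != runtime.cudaError_t.cudaSuccess:
        raise RuntimeError("Could not get CUDA device properties.")
    return device_prop


def is_device_version_less_than(min_version):
    # True if the device compute capability is below a (major, minor) tuple.
    prop = _device_properties()
    return (prop.major, prop.minor) < min_version


def get_device_memory_info():
    # Total global memory on the current device, in bytes.
    return _device_properties().totalGlobalMem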