diff --git a/.github/workflows/pr.yaml b/.github/workflows/pr.yaml
index aea81c152e0..c167da995c8 100644
--- a/.github/workflows/pr.yaml
+++ b/.github/workflows/pr.yaml
@@ -13,6 +13,7 @@ jobs:
   # Please keep pr-builder as the top job here
   pr-builder:
     needs:
+      - check-nightly-ci
       - changed-files
       - checks
       - conda-cpp-build
@@ -42,6 +43,18 @@ jobs:
       - name: Telemetry setup
         if: ${{ vars.TELEMETRY_ENABLED == 'true' }}
        uses: rapidsai/shared-actions/telemetry-dispatch-stash-base-env-vars@main
+  check-nightly-ci:
+    # Switch to ubuntu-latest once it defaults to a version of Ubuntu that
+    # provides at least Python 3.11 (see
+    # https://docs.python.org/3/library/datetime.html#datetime.date.fromisoformat)
+    runs-on: ubuntu-24.04
+    env:
+      RAPIDS_GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+    steps:
+      - name: Check if nightly CI is passing
+        uses: rapidsai/shared-actions/check_nightly_success/dispatch@main
+        with:
+          repo: cugraph
   changed-files:
     secrets: inherit
     needs: telemetry-setup
diff --git a/.gitignore b/.gitignore
index 9480c2618bf..7853526b22b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -32,6 +32,7 @@ test-results
 ## Python build directories & artifacts
 dask-worker-space/
 htmlcov
+*.conda
 dist/
 *.egg-info/
 python/build
@@ -40,9 +41,7 @@ wheels/
 wheelhouse/
 _skbuild/
 cufile.log
-
-## pylibcugraph build directories & artifacts
-python/pylibcugraph/pylibcugraph.egg-info
+*.whl
 
 ## Patching
 *.diff
@@ -89,10 +88,7 @@ docs/cugraph/lib*
 docs/cugraph/api/*
 
 # created by Dask tests
-python/dask-worker-space
-python/cugraph/dask-worker-space
-python/cugraph/cugraph/dask-worker-space
-python/cugraph/cugraph/tests/dask-worker-space
+dask-worker-space/
 
 # Sphinx docs & build artifacts
 docs/cugraph/source/api_docs/api/*
diff --git a/benchmarks/cugraph/pytest-based/bench_algos.py b/benchmarks/cugraph/pytest-based/bench_algos.py
index 04407d656d7..1c988ea636a 100644
--- a/benchmarks/cugraph/pytest-based/bench_algos.py
+++ b/benchmarks/cugraph/pytest-based/bench_algos.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020-2024, NVIDIA CORPORATION.
+# Copyright (c) 2020-2025, NVIDIA CORPORATION.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
@@ -42,7 +42,6 @@ def setFixtureParamNames(*args, **kwargs):
 from cugraph.structure.number_map import NumberMap
 from cugraph.generators import rmat
 from cugraph.testing import utils, mg_utils
-from cugraph.utilities.utils import is_device_version_less_than
 
 from cugraph_benchmarking.params import (
     directed_datasets,
@@ -362,9 +361,6 @@ def bench_sorensen(gpubenchmark, unweighted_graph):
     gpubenchmark(sorensen, G, vert_pairs)
 
 
-@pytest.mark.skipif(
-    is_device_version_less_than((7, 0)), reason="Not supported on Pascal"
-)
 def bench_louvain(gpubenchmark, graph):
     louvain = dask_cugraph.louvain if is_graph_distributed(graph) else cugraph.louvain
     gpubenchmark(louvain, graph)
diff --git a/ci/notebook_list.py b/ci/notebook_list.py
index f7a284beeeb..db26f2efa2c 100644
--- a/ci/notebook_list.py
+++ b/ci/notebook_list.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2021-2023, NVIDIA CORPORATION.
+# Copyright (c) 2021-2025, NVIDIA CORPORATION.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
@@ -17,7 +17,7 @@
 import glob
 from pathlib import Path
 
-from numba import cuda
+from cuda.bindings import runtime
 
 # for adding another run type and skip file name add to this dictionary
 runtype_dict = {
@@ -30,20 +30,18 @@ def skip_book_dir(runtype):
     # Add all run types here, currently only CI supported
+    return runtype in runtype_dict and Path(runtype_dict.get(runtype)).is_file()
-    if runtype in runtype_dict.keys():
-        if Path(runtype_dict.get(runtype)).is_file():
-            return True
-    return False
 
 
+def _get_cuda_version_string():
+    status, version = runtime.getLocalRuntimeVersion()
+    if status != runtime.cudaError_t.cudaSuccess:
+        raise RuntimeError("Could not get CUDA runtime version.")
+    major, minor = divmod(version, 1000)
+    minor //= 10
+    return f"{major}.{minor}"
 
-cuda_version_string = ".".join([str(n) for n in cuda.runtime.get_version()])
-#
-# Not strictly true... however what we mean is
-# Pascal or earlier
-#
-ampere = False
-device = cuda.get_current_device()
+cuda_version_string = _get_cuda_version_string()
 
 parser = argparse.ArgumentParser(description="Condition for running the notebook tests")
 parser.add_argument("runtype", type=str)
@@ -52,19 +50,10 @@ def skip_book_dir(runtype):
 
 runtype = args.runtype
 
-if runtype not in runtype_dict.keys():
+if runtype not in runtype_dict:
     print(f"Unknown Run Type = {runtype}", file=sys.stderr)
     exit()
 
-
-# check for the attribute using both pre and post numba 0.53 names
-cc = getattr(device, "COMPUTE_CAPABILITY", None) or getattr(
-    device, "compute_capability"
-)
-if cc[0] >= 8:
-    ampere = True
-
-skip = False
 for filename in glob.iglob("**/*.ipynb", recursive=True):
     skip = False
     if skip_book_dir(runtype):
@@ -88,10 +77,6 @@ def skip_book_dir(runtype):
                 )
                 skip = True
                 break
-            elif ampere and re.search("# Does not run on Ampere", line):
-                print(f"SKIPPING {filename} (does not run on Ampere)", file=sys.stderr)
-                skip = True
-                break
             elif re.search("# Does not run on CUDA ", line) and (
                 cuda_version_string in line
             ):
diff --git a/ci/test_wheel.sh b/ci/test_wheel.sh
index b5cd90996c7..c96e91b037c 100755
--- a/ci/test_wheel.sh
+++ b/ci/test_wheel.sh
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright (c) 2023-2024, NVIDIA CORPORATION.
+# Copyright (c) 2023-2025, NVIDIA CORPORATION.
 
 set -eoxu pipefail
 
@@ -7,15 +7,6 @@ package_name=$1
 
 python_package_name=$(echo ${package_name}|sed 's/-/_/g')
 
-mkdir -p ./dist
-RAPIDS_PY_CUDA_SUFFIX="$(rapids-wheel-ctk-name-gen ${RAPIDS_CUDA_VERSION})"
-
-RAPIDS_PY_WHEEL_NAME="${package_name}_${RAPIDS_PY_CUDA_SUFFIX}" rapids-download-wheels-from-s3 ./dist
-
-# use 'ls' to expand wildcard before adding `[extra]` requires for pip
-# pip creates wheels using python package names
-python -m pip install $(ls ./dist/${python_package_name}*.whl)[test]
-
 # Run smoke tests for aarch64 pull requests
 arch=$(uname -m)
 if [[ "${arch}" == "aarch64" && ${RAPIDS_BUILD_TYPE} == "pull-request" ]]; then
diff --git a/ci/test_wheel_cugraph.sh b/ci/test_wheel_cugraph.sh
index 295cec7cb10..4703ed61985 100755
--- a/ci/test_wheel_cugraph.sh
+++ b/ci/test_wheel_cugraph.sh
@@ -1,11 +1,17 @@
 #!/bin/bash
-# Copyright (c) 2023-2024, NVIDIA CORPORATION.
+# Copyright (c) 2023-2025, NVIDIA CORPORATION.
 
 set -eoxu pipefail
 
-# Download the pylibcugraph built in the previous step
+# Download the packages built in the previous step
+mkdir -p ./dist
 RAPIDS_PY_CUDA_SUFFIX="$(rapids-wheel-ctk-name-gen ${RAPIDS_CUDA_VERSION})"
-RAPIDS_PY_WHEEL_NAME="pylibcugraph_${RAPIDS_PY_CUDA_SUFFIX}" rapids-download-wheels-from-s3 ./local-pylibcugraph-dep
-python -m pip install --no-deps ./local-pylibcugraph-dep/pylibcugraph*.whl
+RAPIDS_PY_WHEEL_NAME="cugraph_${RAPIDS_PY_CUDA_SUFFIX}" rapids-download-wheels-from-s3 python ./dist
+RAPIDS_PY_WHEEL_NAME="pylibcugraph_${RAPIDS_PY_CUDA_SUFFIX}" rapids-download-wheels-from-s3 python ./local-pylibcugraph-dep
+
+# echo to expand wildcard before adding `[extra]` requires for pip
+python -m pip install \
+    "$(echo ./dist/cugraph*.whl)[test]" \
+    ./local-pylibcugraph-dep/pylibcugraph*.whl
 
 ./ci/test_wheel.sh cugraph
diff --git a/ci/test_wheel_pylibcugraph.sh b/ci/test_wheel_pylibcugraph.sh
index ddc9976308b..d0c97834a20 100755
--- a/ci/test_wheel_pylibcugraph.sh
+++ b/ci/test_wheel_pylibcugraph.sh
@@ -1,6 +1,15 @@
 #!/bin/bash
-# Copyright (c) 2023-2024, NVIDIA CORPORATION.
+# Copyright (c) 2023-2025, NVIDIA CORPORATION.
 
 set -eoxu pipefail
 
+# Download the packages built in the previous step
+mkdir -p ./dist
+RAPIDS_PY_CUDA_SUFFIX="$(rapids-wheel-ctk-name-gen ${RAPIDS_CUDA_VERSION})"
+RAPIDS_PY_WHEEL_NAME="pylibcugraph_${RAPIDS_PY_CUDA_SUFFIX}" rapids-download-wheels-from-s3 python ./dist
+
+# echo to expand wildcard before adding `[extra]` requires for pip
+python -m pip install \
+    "$(echo ./dist/pylibcugraph*.whl)[test]"
+
 ./ci/test_wheel.sh pylibcugraph
diff --git a/cpp/cmake/thirdparty/get_raft.cmake b/cpp/cmake/thirdparty/get_raft.cmake
index 8f56372c81a..28e9ec0cda7 100644
--- a/cpp/cmake/thirdparty/get_raft.cmake
+++ b/cpp/cmake/thirdparty/get_raft.cmake
@@ -1,5 +1,5 @@
 #=============================================================================
-# Copyright (c) 2022-2024, NVIDIA CORPORATION.
+# Copyright (c) 2022-2025, NVIDIA CORPORATION.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -39,7 +39,7 @@ function(find_and_configure_raft)
     endif()
 
     rapids_cpm_find(raft ${PKG_VERSION}
-            GLOBAL_TARGETS raft::raft
+            GLOBAL_TARGETS raft::raft raft::raft_logger raft::raft_logger_impl
             BUILD_EXPORT_SET cugraph-exports
             INSTALL_EXPORT_SET cugraph-exports
             COMPONENTS ${RAFT_COMPONENTS}
@@ -51,7 +51,7 @@ function(find_and_configure_raft)
            OPTIONS
                "RAFT_COMPILE_LIBRARY ${PKG_COMPILE_RAFT_LIB}"
                "BUILD_TESTS OFF"
-               "BUILD_BENCH OFF"
+               "BUILD_PRIMS_BENCH OFF"
                "BUILD_CAGRA_HNSWLIB OFF"
     )
 
diff --git a/cpp/libcugraph_etl/cmake/thirdparty/get_cudf.cmake b/cpp/libcugraph_etl/cmake/thirdparty/get_cudf.cmake
index 8d57bf570bb..aab159d4242 100644
--- a/cpp/libcugraph_etl/cmake/thirdparty/get_cudf.cmake
+++ b/cpp/libcugraph_etl/cmake/thirdparty/get_cudf.cmake
@@ -1,5 +1,5 @@
 #=============================================================================
-# Copyright (c) 2021, NVIDIA CORPORATION.
+# Copyright (c) 2021-2025, NVIDIA CORPORATION.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -27,7 +27,9 @@ function(find_and_configure_cudf)
                     GIT_REPOSITORY https://github.com/${PKG_FORK}/cudf.git
                     GIT_TAG ${PKG_PINNED_TAG}
                     SOURCE_SUBDIR cpp
-                    OPTIONS "BUILD_TESTS OFF"
+                    OPTIONS
+                      "BUILD_BENCHMARKS OFF"
+                      "BUILD_TESTS OFF"
     )
 
     message(VERBOSE "CUGRAPH_ETL: Using CUDF located in ${cudf_SOURCE_DIR}")
@@ -39,8 +41,8 @@ set(CUGRAPH_ETL_BRANCH_VERSION_cudf "${CUGRAPH_ETL_VERSION_MAJOR}.${CUGRAPH_ETL_
 
 
 # Change pinned tag and fork here to test a commit in CI
-# To use a different RAFT locally, set the CMake variable
-# RPM_cudf_SOURCE=/path/to/local/cudf
+# To use a different cuDF locally, set the CMake variable
+# CPM_cudf_SOURCE=/path/to/local/cudf
 find_and_configure_cudf(VERSION ${CUGRAPH_ETL_MIN_VERSION_cudf}
                         FORK rapidsai
                         PINNED_TAG branch-${CUGRAPH_ETL_BRANCH_VERSION_cudf}
diff --git a/cpp/libcugraph_etl/cmake/thirdparty/get_cugraph.cmake b/cpp/libcugraph_etl/cmake/thirdparty/get_cugraph.cmake
index c551646d919..c42bd8d2ae9 100644
--- a/cpp/libcugraph_etl/cmake/thirdparty/get_cugraph.cmake
+++ b/cpp/libcugraph_etl/cmake/thirdparty/get_cugraph.cmake
@@ -1,5 +1,5 @@
 #=============================================================================
-# Copyright (c) 2021, NVIDIA CORPORATION.
+# Copyright (c) 2021-2025, NVIDIA CORPORATION.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -39,8 +39,8 @@ set(CUGRAPH_ETL_BRANCH_VERSION_cugraph "${CUGRAPH_ETL_VERSION_MAJOR}.${CUGRAPH_E
 
 
 # Change pinned tag and fork here to test a commit in CI
-# To use a different RAFT locally, set the CMake variable
-# RPM_cugraph_SOURCE=/path/to/local/cugraph
+# To use a different cuGraph locally, set the CMake variable
+# CPM_cugraph_SOURCE=/path/to/local/cugraph
 find_and_configure_cugraph(VERSION ${CUGRAPH_ETL_MIN_VERSION_cugraph}
                            FORK rapidsai
                            PINNED_TAG branch-${CUGRAPH_ETL_BRANCH_VERSION_cugraph}
diff --git a/dependencies.yaml b/dependencies.yaml
index e1a8cc065c7..56c0f9deba0 100755
--- a/dependencies.yaml
+++ b/dependencies.yaml
@@ -26,6 +26,7 @@ files:
       - depends_on_ucx_py
       - docs
       - python_build_cythonize
+      - python_build_skbuild
       - python_build_rapids
       - python_build_wheel
       - python_run_cugraph
@@ -77,6 +78,7 @@ files:
     includes:
       - python_build_cythonize
       - python_build_rapids
+      - python_build_skbuild
   py_rapids_build_cugraph:
     output: pyproject
     pyproject_dir: python/cugraph
@@ -121,6 +123,7 @@ files:
     includes:
       - python_build_cythonize
       - python_build_rapids
+      - python_build_skbuild
   py_rapids_build_pylibcugraph:
     output: pyproject
     pyproject_dir: python/pylibcugraph
@@ -370,6 +373,14 @@ dependencies:
       - output_types: [conda, pyproject, requirements]
         packages:
           - rapids-build-backend>=0.3.1,<0.4.0.dev0
+  python_build_skbuild:
+    common:
+      - output_types: conda
+        packages:
+          - scikit-build-core>=0.10.0
+      - output_types: [requirements, pyproject]
+        packages:
+          - scikit-build-core[pyproject]>=0.10.0
   python_build_wheel:
     common:
       - output_types: [conda, pyproject, requirements]
@@ -381,12 +392,6 @@ dependencies:
       - output_types: [conda, pyproject, requirements]
         packages:
           - cython>=3.0.0
-      - output_types: conda
-        packages:
-          - scikit-build-core>=0.10.0
-      - output_types: [pyproject, requirements]
-        packages:
-          - scikit-build-core[pyproject]>=0.10.0
   python_run_cugraph:
     common:
       - output_types: [conda, pyproject]
diff --git a/python/cugraph/cugraph/dask/common/mg_utils.py b/python/cugraph/cugraph/dask/common/mg_utils.py
index b04f293dc0e..e4e3ac9a44e 100644
--- a/python/cugraph/cugraph/dask/common/mg_utils.py
+++ b/python/cugraph/cugraph/dask/common/mg_utils.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020-2023, NVIDIA CORPORATION.
+# Copyright (c) 2020-2025, NVIDIA CORPORATION.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
@@ -13,7 +13,7 @@
 import os
 import gc
 
-import numba.cuda
+from cuda.bindings import runtime
 
 
 # FIXME: this raft import breaks the library if ucx-py is
@@ -53,11 +53,10 @@ def prepare_worker_to_parts(data, client=None):
 
 
 def is_single_gpu():
-    ngpus = len(numba.cuda.gpus)
-    if ngpus > 1:
-        return False
-    else:
-        return True
+    status, count = runtime.cudaGetDeviceCount()
+    if status != runtime.cudaError_t.cudaSuccess:
+        raise RuntimeError("Could not get CUDA device count.")
+    return count == 1
 
 
 def get_visible_devices():
diff --git a/python/cugraph/cugraph/tests/docs/test_doctests.py b/python/cugraph/cugraph/tests/docs/test_doctests.py
index 2095fd41fe9..9d9f8436b99 100644
--- a/python/cugraph/cugraph/tests/docs/test_doctests.py
+++ b/python/cugraph/cugraph/tests/docs/test_doctests.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2022-2024, NVIDIA CORPORATION.
+# Copyright (c) 2022-2025, NVIDIA CORPORATION.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
@@ -25,14 +25,21 @@
 import cugraph
 import pylibcugraph
 import cudf
-from numba import cuda
+from cuda.bindings import runtime
 
 from cugraph.testing import utils
 
 modules_to_skip = ["dask", "proto", "raft"]
 datasets = utils.RAPIDS_DATASET_ROOT_DIR_PATH
 
-cuda_version_string = ".".join([str(n) for n in cuda.runtime.get_version()])
+
+def _get_cuda_version_string():
+    status, version = runtime.getLocalRuntimeVersion()
+    if status != runtime.cudaError_t.cudaSuccess:
+        raise RuntimeError("Could not get CUDA runtime version.")
+    major = version // 1000
+    minor = (version % 1000) // 10
+    return f"{major}.{minor}"
 
 
 def _is_public_name(name):
@@ -131,6 +138,7 @@ def skip_docstring(docstring_obj):
     NOTE: this function is currently not available on CUDA 11.4 systems.
     """
     docstring = docstring_obj.docstring
+    cuda_version_string = _get_cuda_version_string()
     for line in docstring.splitlines():
         if f"currently not available on CUDA {cuda_version_string} systems" in line:
             return f"docstring example not supported on CUDA {cuda_version_string}"
diff --git a/python/cugraph/cugraph/traversal/ms_bfs.py b/python/cugraph/cugraph/traversal/ms_bfs.py
index df624e453ee..b80331d475a 100644
--- a/python/cugraph/cugraph/traversal/ms_bfs.py
+++ b/python/cugraph/cugraph/traversal/ms_bfs.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2021-2023, NVIDIA CORPORATION.
+# Copyright (c) 2021-2025, NVIDIA CORPORATION.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
@@ -56,7 +56,6 @@ def _get_feasibility(G, sources, components=None, depth_limit=None):
 
     # Fixme not implemented in RMM yet
     # using 96GB upper bound for now
-    # mem = get_device_memory_info()
     mem = 9.6e10
     n_sources = sources.size
     V = G.number_of_vertices()
diff --git a/python/cugraph/cugraph/utilities/path_retrieval_wrapper.pyx b/python/cugraph/cugraph/utilities/path_retrieval_wrapper.pyx
index 98d11ad07df..8e71c7aae4e 100644
--- a/python/cugraph/cugraph/utilities/path_retrieval_wrapper.pyx
+++ b/python/cugraph/cugraph/utilities/path_retrieval_wrapper.pyx
@@ -1,4 +1,4 @@
-# Copyright (c) 2021, NVIDIA CORPORATION.
+# Copyright (c) 2021-2025, NVIDIA CORPORATION.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
@@ -19,7 +19,6 @@
 from cugraph.utilities.path_retrieval cimport get_traversed_cost as c_get_traversed_cost
 from cugraph.structure.graph_primtypes cimport *
 from libc.stdint cimport uintptr_t
 
-from numba import cuda
 import cudf
 import numpy as np
diff --git a/python/cugraph/cugraph/utilities/utils.py b/python/cugraph/cugraph/utilities/utils.py
index 0257da4ffc0..493a9850a0f 100644
--- a/python/cugraph/cugraph/utilities/utils.py
+++ b/python/cugraph/cugraph/utilities/utils.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020-2024, NVIDIA CORPORATION.
+# Copyright (c) 2020-2025, NVIDIA CORPORATION.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
@@ -15,14 +15,9 @@
 import os
 import shutil
 
-from numba import cuda
-
 import cudf
 from cudf.core.column import as_column
 
-from cuda.cudart import cudaDeviceAttr
-from rmm._cuda.gpu import getDeviceAttribute
-
 from warnings import warn
 
 # optional dependencies
@@ -210,47 +205,6 @@ def get_traversed_path_list(df, id):
     return answer
 
 
-def is_cuda_version_less_than(min_version=(10, 2)):
-    """
-    Returns True if the version of CUDA being used is less than min_version
-    """
-    this_cuda_ver = cuda.runtime.get_version()  # returns (<major>, <minor>)
-    if this_cuda_ver[0] > min_version[0]:
-        return False
-    if this_cuda_ver[0] < min_version[0]:
-        return True
-    if this_cuda_ver[1] < min_version[1]:
-        return True
-    return False
-
-
-def is_device_version_less_than(min_version=(7, 0)):
-    """
-    Returns True if the version of CUDA being used is less than min_version
-    """
-    major_version = getDeviceAttribute(
-        cudaDeviceAttr.cudaDevAttrComputeCapabilityMajor, 0
-    )
-    minor_version = getDeviceAttribute(
-        cudaDeviceAttr.cudaDevAttrComputeCapabilityMinor, 0
-    )
-    if major_version > min_version[0]:
-        return False
-    if major_version < min_version[0]:
-        return True
-    if minor_version < min_version[1]:
-        return True
-    return False
-
-
-def get_device_memory_info():
-    """
-    Returns the total amount of global memory on the device in bytes
-    """
-    meminfo = cuda.current_context().get_memory_info()
-    return meminfo[1]
-
-
 # FIXME: if G is a Nx type, the weight attribute is assumed to be "weight", if
 # set. An additional optional parameter for the weight attr name when accepting
 # Nx graphs may be needed. From the Nx docs: