Test cli codecov #21

Merged · 2 commits · Dec 23, 2024
3 changes: 2 additions & 1 deletion .github/workflows/test.yml
@@ -64,8 +64,9 @@ jobs:
        make test

    - name: Upload Coverage to Codecov
-     uses: codecov/codecov-action@v4
+     uses: codecov/codecov-action@v5
+     if: ${{ always() }}
      with:
        files: cov.xml
        token: ${{ secrets.CODECOV_TOKEN }}
        slug: dPys/nxbench
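For orientation, the upload step sits at the end of the test job roughly as follows (a minimal sketch: the job name, runner, and checkout step are assumed, since only the two changed steps appear in the diff). The `if: ${{ always() }}` guard makes the upload run even when `make test` fails, so coverage is still reported for red builds:

```yaml
jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      # ... Python setup and dependency installation collapsed in the diff ...
      - name: Run tests
        run: make test
      - name: Upload Coverage to Codecov
        uses: codecov/codecov-action@v5
        if: ${{ always() }}  # upload coverage even if the test step failed
        with:
          files: cov.xml
          token: ${{ secrets.CODECOV_TOKEN }}
          slug: dPys/nxbench
```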
5 changes: 5 additions & 0 deletions README.md
@@ -2,6 +2,7 @@
[![PyPI](https://badge.fury.io/py/nxbench.svg)](https://badge.fury.io/py/nxbench)
[![pre-commit](https://img.shields.io/badge/pre--commit-enabled-brightgreen?logo=pre-commit&logoColor=white)](https://pre-commit.com/)
[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black)
+[![codecov](https://codecov.io/gh/dPys/nxbench/graph/badge.svg?token=1M8NM7MQLI)](https://codecov.io/gh/dPys/nxbench)

# NxBench

@@ -32,6 +33,8 @@

#### Setting up PostgreSQL

+In a terminal window:

1. **Install PostgreSQL**:

   - On macOS (with Homebrew):
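(The collapsed lines that follow hold the platform-specific commands. A minimal macOS sketch, assumed rather than copied from the diff — the formula name may differ on newer Homebrew, e.g. `postgresql@16`:)

```bash
brew install postgresql
brew services start postgresql   # run the server as a background service
```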
@@ -79,6 +82,8 @@

### Installing `nxbench`

+In a new terminal window:

PyPI:

```bash
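# The collapsed line is presumably the standard PyPI install; the package name
# is taken from the PyPI badge above, not from the diff itself:
pip install nxbench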
```
4 changes: 4 additions & 0 deletions doc/installation.md
@@ -60,6 +60,8 @@ make install

### Setting up Prefect Orion

+In a terminal window:

1. **Export environment variables pointing to your PostgreSQL database**:

```bash
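# Sketch of the collapsed exports (assumed, not from the diff). Recent Prefect
# 2.x reads the database connection from PREFECT_API_DATABASE_CONNECTION_URL
# (older Orion builds used PREFECT_ORION_DATABASE_CONNECTION_URL); adjust the
# user, password, and database name to match your PostgreSQL setup:
export PREFECT_API_DATABASE_CONNECTION_URL="postgresql+asyncpg://prefect:prefect@localhost:5432/prefect"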
```

@@ -80,6 +82,8 @@ make install

## Running Benchmarks

+In a new terminal window:

```bash
nxbench --config 'nxbench/configs/example.yaml' benchmark run
```
2 changes: 0 additions & 2 deletions nxbench/benchmarks/benchmark.py
@@ -198,9 +198,7 @@ def validate_results(
@task(name="collect_metrics", cache_key_fn=None, persist_result=False)
def collect_metrics(
    execution_time: float,
-   ### ADDED:
    execution_time_with_preloading: float,
-   ### END ADDED
    peak_memory: int,
    graph: Any,
    algo_config: AlgorithmConfig,
2 changes: 2 additions & 0 deletions nxbench/benchmarks/tests/test_benchmark.py
@@ -596,6 +596,8 @@ def path_side_effect(arg):
    assert data[0]["some"] == "result"


+## TODO: Fix caplog setup here (passes locally for python3.10 but fails for
+## other python versions)
# @pytest.mark.asyncio
# @pytest.mark.usefixtures("patch_machine_info", "patch_python_version")
# @patch("nxbench.benchmarks.benchmark.setup_cache", return_value={"ds1": ("graph",
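A common way to make `caplog` behave consistently across Python versions (a sketch of one possible fix, not code from this PR) is to pin the capture level on the specific logger and ensure its records propagate to the handler `caplog` installs at the root:

```python
import logging


def test_logs_something(caplog):
    # pytest's caplog fixture: set the level explicitly on the "nxbench" logger
    caplog.set_level(logging.DEBUG, logger="nxbench")
    # caplog captures through a root-level handler, so propagation must be on
    logging.getLogger("nxbench").propagate = True
```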
145 changes: 145 additions & 0 deletions nxbench/tests/test_cli.py
@@ -0,0 +1,145 @@
import json
import logging
import tempfile
from pathlib import Path
from unittest.mock import AsyncMock, MagicMock, patch

import pytest
from click.testing import CliRunner

from nxbench.cli import cli


@pytest.fixture
def runner():
    return CliRunner()


@pytest.fixture(autouse=True)
def _restore_nxbench_logger():
    """
    Automatically runs for each test, capturing and restoring the nxbench logger state.
    This prevents the CLI tests (which can set verbosity=0 and disable logging) from
    causing side effects that break other tests (e.g. BenchmarkValidator).
    """
    logger = logging.getLogger("nxbench")
    prev_disabled = logger.disabled
    prev_level = logger.level
    yield
    logger.disabled = prev_disabled
    logger.setLevel(prev_level)


def test_cli_no_args(runner):
    result = runner.invoke(cli, [])
    # The CLI is expected to exit 0 and print usage when no subcommand is given
    assert result.exit_code == 0
    assert "Usage:" in result.output


@patch("nxbench.cli.BenchmarkDataManager.load_network_sync")
def test_data_download_ok(mock_load_sync, runner):
    mock_load_sync.return_value = ("fake_graph", {"meta": "test"})
    args = ["data", "download", "test_dataset", "--category", "my_category"]
    result = runner.invoke(cli, args)
    assert result.exit_code == 0, result.output
    mock_load_sync.assert_called_once()


@patch("nxbench.cli.NetworkRepository")
def test_data_list_datasets_ok(mock_repo_cls, runner):
    mock_repo_instance = AsyncMock()
    mock_repo_cls.return_value.__aenter__.return_value = mock_repo_instance
    FakeMeta = MagicMock()
    FakeMeta.__dict__ = {
        "name": "Net1",
        "category": "cat1",
        "nodes": 100,
        "directed": False,
    }
    mock_repo_instance.list_networks.return_value = [FakeMeta]

    args = [
        "data",
        "list-datasets",
        "--category",
        "cat1",
        "--min-nodes",
        "50",
        "--max-nodes",
        "1000",
    ]
    result = runner.invoke(cli, args)
    assert result.exit_code == 0, result.output
    assert "Net1" in result.output


@patch("nxbench.cli.main_benchmark", new_callable=AsyncMock)
def test_benchmark_run_ok(mock_main_benchmark, runner):
    mock_main_benchmark.return_value = None
    args = ["benchmark", "run"]
    result = runner.invoke(cli, args, catch_exceptions=True)
    assert result.exit_code == 0, result.output
    mock_main_benchmark.assert_awaited_once()


@patch("nxbench.cli.ResultsExporter")
def test_benchmark_export_ok(mock_exporter_cls, runner):
    mock_exporter = mock_exporter_cls.return_value
    mock_exporter.export_results.return_value = None

    with tempfile.TemporaryDirectory() as tmpdir:
        result_file = Path(tmpdir) / "results.json"
        data = [{"algorithm": "algo", "result": 42}]
        result_file.write_text(json.dumps(data))

        output_file = Path(tmpdir) / "exported.csv"
        args = [
            "benchmark",
            "export",
            str(result_file),
            "--output-format",
            "csv",
            "--output-file",
            str(output_file),
        ]
        result = runner.invoke(cli, args)
        assert result.exit_code == 0, result.output

        mock_exporter_cls.assert_called_once_with(results_file=result_file)
        mock_exporter.export_results.assert_called_once_with(
            output_path=output_file, form="csv"
        )


@patch("nxbench.cli.BenchmarkValidator")
def test_validate_check_ok(mock_validator_cls, runner):
    mock_validator = mock_validator_cls.return_value
    mock_validator.validate_result.return_value = True

    with tempfile.TemporaryDirectory() as tmpdir:
        result_file = Path(tmpdir) / "results.json"
        data = [
            {"algorithm": "algo1", "result": 1},
            {"algorithm": "algo2", "result": 2},
        ]
        result_file.write_text(json.dumps(data))

        args = ["validate", "check", str(result_file)]
        result = runner.invoke(cli, args)
        assert result.exit_code == 0, result.output

        calls = mock_validator.validate_result.call_args_list
        assert len(calls) == 2
        assert calls[0].args[0] == 1
        assert calls[0].args[1] == "algo1"
        assert calls[1].args[0] == 2
        assert calls[1].args[1] == "algo2"


@patch("nxbench.viz.app.run_server")
def test_viz_serve_ok(mock_run_server, runner):
    args = ["viz", "serve", "--port", "9999", "--debug"]
    result = runner.invoke(cli, args)
    assert result.exit_code == 0, result.output
    mock_run_server.assert_called_once_with(port=9999, debug=True)
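To exercise just this new module locally (a standard pytest invocation; assumes `pytest-cov` is installed, and writes the same `cov.xml` that the workflow above uploads — the repo's `make test` target presumably wraps something similar):

```bash
pytest nxbench/tests/test_cli.py --cov=nxbench --cov-report=xml:cov.xml
```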