fix: failing test_cli unit tests
dPys committed Dec 23, 2024
1 parent dd3f256 commit ad76fa3
Showing 4 changed files with 155 additions and 0 deletions.
README.md: 4 additions & 0 deletions
@@ -33,6 +33,8 @@

#### Setting up PostgreSQL

In a terminal window:

1. **Install PostgreSQL**:

- On macOS (with Homebrew):
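  For example, a typical Homebrew installation (an assumed sketch, not necessarily the repo's exact commands):

  ```bash
  brew install postgresql
  brew services start postgresql
  ```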
@@ -80,6 +82,8 @@

### Installing `nxbench`

In a new terminal window:

PyPI:

```bash
pip install nxbench
```
doc/installation.md: 4 additions & 0 deletions
@@ -60,6 +60,8 @@ make install

### Setting up Prefect Orion

In a terminal window:

1. **Export environment variables pointing to your PostgreSQL database**:

```bash
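# Assumed example only; the exact variable names and URL are repo-specific.
# Prefect 2.x reads its database location from a connection-URL setting:
export PREFECT_API_DATABASE_CONNECTION_URL="postgresql+asyncpg://prefect:prefect@localhost:5432/prefect"
```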
@@ -80,6 +82,8 @@ make install

## Running Benchmarks

In a new terminal window:

```bash
nxbench --config 'nxbench/configs/example.yaml' benchmark run
```
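Once a run has produced a results file, the same CLI can export and validate it; a sketch based on the flags exercised by the new `test_cli.py` (file names here are illustrative):

```bash
nxbench benchmark export results.json --output-format csv --output-file results.csv
nxbench validate check results.json
```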
nxbench/benchmarks/tests/test_benchmark.py: 2 additions & 0 deletions
@@ -596,6 +596,8 @@ def path_side_effect(arg):
assert data[0]["some"] == "result"


## TODO: Fix caplog setup here (passes locally for python3.10, but fails for
## other python versions)
# @pytest.mark.asyncio
# @pytest.mark.usefixtures("patch_machine_info", "patch_python_version")
# @patch("nxbench.benchmarks.benchmark.setup_cache", return_value={"ds1": ("graph",
nxbench/tests/test_cli.py: 145 additions & 0 deletions
@@ -0,0 +1,145 @@
import json
import logging
import tempfile
from pathlib import Path
from unittest.mock import AsyncMock, MagicMock, patch

import pytest
from click.testing import CliRunner

from nxbench.cli import cli


@pytest.fixture
def runner():
    return CliRunner()


@pytest.fixture(autouse=True)
def _restore_nxbench_logger():
    """
    Automatically runs for each test, capturing and restoring the nxbench logger state.
    This prevents the CLI tests (which can set verbosity=0 and disable logging) from
    causing side-effects that break other tests (e.g. BenchmarkValidator).
    """
    logger = logging.getLogger("nxbench")
    prev_disabled = logger.disabled
    prev_level = logger.level
    yield
    logger.disabled = prev_disabled
    logger.setLevel(prev_level)


def test_cli_no_args(runner):
    result = runner.invoke(cli, [])
    # with no subcommand, the CLI group should print its usage text and exit 0
    assert result.exit_code == 0
    assert "Usage:" in result.output


@patch("nxbench.cli.BenchmarkDataManager.load_network_sync")
def test_data_download_ok(mock_load_sync, runner):
mock_load_sync.return_value = ("fake_graph", {"meta": "test"})
args = ["data", "download", "test_dataset", "--category", "my_category"]
result = runner.invoke(cli, args)
assert result.exit_code == 0, result.output
mock_load_sync.assert_called_once()


@patch("nxbench.cli.NetworkRepository")
def test_data_list_datasets_ok(mock_repo_cls, runner):
mock_repo_instance = AsyncMock()
mock_repo_cls.return_value.__aenter__.return_value = mock_repo_instance
FakeMeta = MagicMock()
FakeMeta.__dict__ = {
"name": "Net1",
"category": "cat1",
"nodes": 100,
"directed": False,
}
mock_repo_instance.list_networks.return_value = [FakeMeta]

args = [
"data",
"list-datasets",
"--category",
"cat1",
"--min-nodes",
"50",
"--max-nodes",
"1000",
]
result = runner.invoke(cli, args)
assert result.exit_code == 0, result.output
assert "Net1" in result.output


@patch("nxbench.cli.main_benchmark", new_callable=AsyncMock)
def test_benchmark_run_ok(mock_main_benchmark, runner):
mock_main_benchmark.return_value = None
args = ["benchmark", "run"]
result = runner.invoke(cli, args, catch_exceptions=True)
assert result.exit_code == 0, result.output
mock_main_benchmark.assert_awaited_once()


@patch("nxbench.cli.ResultsExporter")
def test_benchmark_export_ok(mock_exporter_cls, runner):
mock_exporter = mock_exporter_cls.return_value
mock_exporter.export_results.return_value = None

with tempfile.TemporaryDirectory() as tmpdir:
result_file = Path(tmpdir) / "results.json"
data = [{"algorithm": "algo", "result": 42}]
result_file.write_text(json.dumps(data))

output_file = Path(tmpdir) / "exported.csv"
args = [
"benchmark",
"export",
str(result_file),
"--output-format",
"csv",
"--output-file",
str(output_file),
]
result = runner.invoke(cli, args)
assert result.exit_code == 0, result.output

mock_exporter_cls.assert_called_once_with(results_file=result_file)
mock_exporter.export_results.assert_called_once_with(
output_path=output_file, form="csv"
)


@patch("nxbench.cli.BenchmarkValidator")
def test_validate_check_ok(mock_validator_cls, runner):
mock_validator = mock_validator_cls.return_value
mock_validator.validate_result.return_value = True

with tempfile.TemporaryDirectory() as tmpdir:
result_file = Path(tmpdir) / "results.json"
data = [
{"algorithm": "algo1", "result": 1},
{"algorithm": "algo2", "result": 2},
]
result_file.write_text(json.dumps(data))

args = ["validate", "check", str(result_file)]
result = runner.invoke(cli, args)
assert result.exit_code == 0, result.output

calls = mock_validator.validate_result.call_args_list
assert len(calls) == 2
assert calls[0].args[0] == 1
assert calls[0].args[1] == "algo1"
assert calls[1].args[0] == 2
assert calls[1].args[1] == "algo2"


@patch("nxbench.viz.app.run_server")
def test_viz_serve_ok(mock_run_server, runner):
args = ["viz", "serve", "--port", "9999", "--debug"]
result = runner.invoke(cli, args)
assert result.exit_code == 0, result.output
mock_run_server.assert_called_once_with(port=9999, debug=True)
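
Assuming a standard pytest setup, the new suite can be exercised locally with:

```bash
pytest nxbench/tests/test_cli.py -v
```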
