From af1b32023d8dd6b0d2fa0d18263ef891152c10ee Mon Sep 17 00:00:00 2001
From: marvinfriede <51965259+marvinfriede@users.noreply.github.com>
Date: Mon, 16 Sep 2024 10:25:14 -0500
Subject: [PATCH 01/12] Make tad-libcint optional
---
src/dxtb/_src/calculators/config/integral.py | 31 +++++
src/dxtb/_src/exlibs/available.py | 30 +++++
src/dxtb/_src/integral/wrappers.py | 13 +-
test/test_basis/test_setup.py | 3 +
.../test_cache/test_integrals.py | 20 ++-
.../test_cache/test_properties.py | 117 ++++++++++++++++++
test/test_cli/test_driver.py | 14 ++-
test/test_external/test_field.py | 4 +
.../test_driver/test_manager.py | 87 +++++++------
test/test_integrals/test_libcint.py | 19 +--
test/test_integrals/test_wrappers.py | 3 +
test/test_libcint/test_gradcheck.py | 9 +-
test/test_libcint/test_overlap.py | 40 ++++--
test/test_libcint/test_overlap_grad.py | 9 +-
test/test_multipole/test_dipole_integral.py | 18 ++-
test/test_multipole/test_shape.py | 7 +-
test/test_multipole/test_symmetry.py | 7 +-
test/test_multipole/todo_test_dipole_grad.py | 8 +-
test/test_properties/test_dipole.py | 9 ++
test/test_properties/test_dipole_deriv.py | 8 ++
test/test_properties/test_hyperpol.py | 8 ++
test/test_properties/test_ir.py | 8 ++
test/test_properties/test_pol.py | 8 ++
test/test_properties/test_pol_deriv.py | 8 ++
test/test_properties/test_raman.py | 8 ++
test/test_singlepoint/test_grad_field.py | 5 +
test/test_singlepoint/test_grad_fieldgrad.py | 5 +
.../test_grad_pos_withfield.py | 5 +
test/test_utils/test_misc.py | 13 +-
29 files changed, 439 insertions(+), 85 deletions(-)
create mode 100644 src/dxtb/_src/exlibs/available.py
diff --git a/src/dxtb/_src/calculators/config/integral.py b/src/dxtb/_src/calculators/config/integral.py
index edb371c3..660342f0 100644
--- a/src/dxtb/_src/calculators/config/integral.py
+++ b/src/dxtb/_src/calculators/config/integral.py
@@ -80,6 +80,20 @@ def __init__(
if isinstance(driver, str):
if driver.casefold() in labels.INTDRIVER_LIBCINT_STRS:
+ # pylint: disable=import-outside-toplevel
+ from dxtb._src.exlibs.available import has_libcint
+
+ # The default input is an integer. So, if we receive a string
+ # here, we need to assume that the libcint driver was
+ # explicitly requested and we need to check if the libcint
+ # interface is available.
+ if has_libcint is False:
+ raise ValueError(
+                        "The integral driver seems to have been set "
+ f"explicitly to '{driver}'. However, the libcint "
+ "interface is not installed."
+ )
+
self.driver = labels.INTDRIVER_LIBCINT
elif driver.casefold() in labels.INTDRIVER_ANALYTICAL_STRS:
self.driver = labels.INTDRIVER_ANALYTICAL
@@ -87,6 +101,7 @@ def __init__(
self.driver = labels.INTDRIVER_AUTOGRAD
else:
raise ValueError(f"Unknown integral driver '{driver}'.")
+
elif isinstance(driver, int):
if driver not in (
labels.INTDRIVER_LIBCINT,
@@ -95,6 +110,22 @@ def __init__(
):
raise ValueError(f"Unknown integral driver '{driver}'.")
+ if driver == labels.INTDRIVER_LIBCINT:
+ # pylint: disable=import-outside-toplevel
+ from dxtb._src.exlibs.available import has_libcint
+
+ # If we receive the default integer here, we issue a warning
+ # and fall back to the PyTorch driver.
+ if has_libcint is False:
+ from dxtb import OutputHandler
+
+ OutputHandler.warn(
+ "The libcint interface is not installed. "
+ "Falling back to the analytical driver."
+ )
+
+ driver = labels.INTDRIVER_ANALYTICAL
+
self.driver = driver
else:
raise TypeError(
diff --git a/src/dxtb/_src/exlibs/available.py b/src/dxtb/_src/exlibs/available.py
new file mode 100644
index 00000000..6a781285
--- /dev/null
+++ b/src/dxtb/_src/exlibs/available.py
@@ -0,0 +1,30 @@
+"""
+Exlibs: Check Availability
+==========================
+
+Simple check for the availability of external libraries.
+"""
+
+try:
+ from tad_libcint import __version__ # type: ignore
+
+ has_libcint = True
+except ImportError:
+ has_libcint = False
+
+try:
+ from pyscf import __version__ # type: ignore
+
+ has_pyscf = True
+except ImportError:
+ has_pyscf = False
+
+try:
+ from scipy import __version__ # type: ignore
+
+ has_scipy = True
+except ImportError:
+ has_scipy = False
+
+
+__all__ = ["has_libcint", "has_pyscf", "has_scipy"]
diff --git a/src/dxtb/_src/integral/wrappers.py b/src/dxtb/_src/integral/wrappers.py
index b1eb9692..c347091b 100644
--- a/src/dxtb/_src/integral/wrappers.py
+++ b/src/dxtb/_src/integral/wrappers.py
@@ -246,8 +246,17 @@ def _integral(
# Driver #
##########
- # Determine which driver class to instantiate (defaults to libcint)
- driver_name = kwargs.pop("driver", labels.INTDRIVER_LIBCINT)
+ # Determine which driver class to instantiate
+ # (defaults to libcint if available)
+ driver_name = kwargs.pop("driver", None)
+ if driver_name is None:
+ # pylint: disable=import-outside-toplevel
+ from dxtb._src.exlibs.available import has_libcint
+
+ if has_libcint is True:
+ driver_name = labels.INTDRIVER_LIBCINT
+ else:
+ driver_name = labels.INTDRIVER_ANALYTICAL
# setup driver for integral calculation
drv_mgr = DriverManager(driver_name, **dd)
diff --git a/test/test_basis/test_setup.py b/test/test_basis/test_setup.py
index 2c574762..c87e8aa9 100644
--- a/test/test_basis/test_setup.py
+++ b/test/test_basis/test_setup.py
@@ -27,6 +27,7 @@
from dxtb import GFN1_XTB as par
from dxtb import IndexHelper
from dxtb._src.basis.bas import Basis
+from dxtb._src.exlibs.available import has_libcint
from dxtb._src.typing import DD
from ..conftest import DEVICE
@@ -35,6 +36,7 @@
sample_list = ["H2", "LiH", "Li2", "H2O", "S", "SiH4", "MB16_43_01"]
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.float, torch.double])
@pytest.mark.parametrize("name", sample_list)
def test_single(dtype: torch.dtype, name: str):
@@ -62,6 +64,7 @@ def test_single(dtype: torch.dtype, name: str):
assert [b.angmom for b in basis.bases] == [0, 1, 2]
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.float, torch.double])
@pytest.mark.parametrize("name1", sample_list)
@pytest.mark.parametrize("name2", sample_list)
diff --git a/test/test_calculator/test_cache/test_integrals.py b/test/test_calculator/test_cache/test_integrals.py
index 63d0aafb..ce987c84 100644
--- a/test/test_calculator/test_cache/test_integrals.py
+++ b/test/test_calculator/test_cache/test_integrals.py
@@ -23,6 +23,8 @@
import pytest
import torch
+from dxtb import labels
+from dxtb._src.exlibs.available import has_libcint
from dxtb._src.typing import DD, Tensor
from dxtb.calculators import GFN1Calculator
@@ -59,8 +61,7 @@ def test_overlap_deleted(dtype: torch.dtype) -> None:
assert calc.integrals.overlap._gradient is None
-@pytest.mark.parametrize("dtype", [torch.float, torch.double])
-def test_overlap_retained_for_grad(dtype: torch.dtype) -> None:
+def overlap_retained_for_grad(dtype: torch.dtype, intdriver: int) -> None:
dd: DD = {"device": DEVICE, "dtype": dtype}
numbers = torch.tensor([3, 1], device=DEVICE)
@@ -68,7 +69,9 @@ def test_overlap_retained_for_grad(dtype: torch.dtype) -> None:
[[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]], **dd, requires_grad=True
)
- calc = GFN1Calculator(numbers, opts={"verbosity": 0}, **dd)
+ calc = GFN1Calculator(
+ numbers, opts={"verbosity": 0, "int_driver": intdriver}, **dd
+ )
assert calc._ncalcs == 0
# overlap should not be cached
@@ -86,3 +89,14 @@ def test_overlap_retained_for_grad(dtype: torch.dtype) -> None:
assert calc.integrals.overlap is not None
assert calc.integrals.overlap._matrix is not None
assert calc.integrals.overlap._norm is not None
+
+
+@pytest.mark.parametrize("dtype", [torch.float, torch.double])
+def test_overlap_retained_for_grad_pytorch(dtype: torch.dtype) -> None:
+ overlap_retained_for_grad(dtype, labels.INTDRIVER_AUTOGRAD)
+
+
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
+@pytest.mark.parametrize("dtype", [torch.float, torch.double])
+def test_overlap_retained_for_grad_libcint(dtype: torch.dtype) -> None:
+ overlap_retained_for_grad(dtype, labels.INTDRIVER_LIBCINT)
diff --git a/test/test_calculator/test_cache/test_properties.py b/test/test_calculator/test_cache/test_properties.py
index c44ff0b4..0b838fe5 100644
--- a/test/test_calculator/test_cache/test_properties.py
+++ b/test/test_calculator/test_cache/test_properties.py
@@ -25,6 +25,7 @@
from dxtb import GFN1_XTB
from dxtb._src.calculators.properties.vibration import VibResult
+from dxtb._src.exlibs.available import has_libcint
from dxtb._src.typing import DD, Literal, Tensor
from dxtb.calculators import (
AnalyticalCalculator,
@@ -61,6 +62,11 @@ def test_energy(dtype: torch.dtype) -> None:
assert calc._ncalcs == 1
assert isinstance(energy, Tensor)
+ # get other properties
+ energy = calc.get_iterations(positions)
+ assert calc._ncalcs == 1
+ assert isinstance(energy, Tensor)
+
# check reset
calc.cache.reset_all()
assert len(calc.cache.list_cached_properties()) == 0
@@ -212,3 +218,114 @@ def test_vibration(dtype: torch.dtype, use_functorch: bool) -> None:
prop = calc.get_hessian(pos, use_functorch=use_functorch, matrix=False)
assert calc._ncalcs == 1
assert isinstance(prop, Tensor)
+
+
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
+@pytest.mark.parametrize("dtype", [torch.float, torch.double])
+def test_dipole(dtype: torch.dtype) -> None:
+ dd: DD = {"device": DEVICE, "dtype": dtype}
+
+ numbers = torch.tensor([3, 1], device=DEVICE)
+ positions = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]], **dd)
+ pos = positions.clone().requires_grad_(True)
+
+ options = dict(opts, **{"scf_mode": "full", "mixer": "anderson"})
+ calc = AutogradCalculator(numbers, GFN1_XTB, opts=options, **dd)
+ assert calc._ncalcs == 0
+
+ prop = calc.get_dipole(pos)
+ assert calc._ncalcs == 1
+ assert isinstance(prop, Tensor)
+
+ # cache is used for same calc
+ prop = calc.get_dipole_moment(pos)
+ assert calc._ncalcs == 1
+ assert isinstance(prop, Tensor)
+
+ # cache is used for energy
+ prop = calc.get_energy(pos)
+ assert calc._ncalcs == 1
+ assert isinstance(prop, Tensor)
+
+ # check reset
+ calc.cache.reset_all()
+ assert len(calc.cache.list_cached_properties()) == 0
+
+
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
+@pytest.mark.parametrize("dtype", [torch.float, torch.double])
+def test_ir(dtype: torch.dtype) -> None:
+ dd: DD = {"device": DEVICE, "dtype": dtype}
+
+ numbers = torch.tensor([3, 1], device=DEVICE)
+ positions = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]], **dd)
+ pos = positions.clone().requires_grad_(True)
+
+ options = dict(opts, **{"scf_mode": "full", "mixer": "anderson"})
+ calc = AutogradCalculator(numbers, GFN1_XTB, opts=options, **dd)
+ assert calc._ncalcs == 0
+
+ prop = calc.get_ir(pos)
+ assert calc._ncalcs == 1
+ assert isinstance(prop, Tensor)
+
+ # cache is used for same calc
+ prop = calc.get_ir(pos)
+ assert calc._ncalcs == 1
+ assert isinstance(prop, Tensor)
+
+ # cache is used for energy
+ prop = calc.get_energy(pos)
+ assert calc._ncalcs == 1
+ assert isinstance(prop, Tensor)
+
+ # cache is used for IR intensities
+ prop = calc.get_ir_intensities(pos)
+ assert calc._ncalcs == 1
+ assert isinstance(prop, Tensor)
+
+ # check reset
+ calc.cache.reset_all()
+ assert len(calc.cache.list_cached_properties()) == 0
+
+
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
+@pytest.mark.parametrize("dtype", [torch.float, torch.double])
+def test_raman(dtype: torch.dtype) -> None:
+ dd: DD = {"device": DEVICE, "dtype": dtype}
+
+ numbers = torch.tensor([3, 1], device=DEVICE)
+ positions = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]], **dd)
+ pos = positions.clone().requires_grad_(True)
+
+ options = dict(opts, **{"scf_mode": "full", "mixer": "anderson"})
+ calc = AutogradCalculator(numbers, GFN1_XTB, opts=options, **dd)
+ assert calc._ncalcs == 0
+
+ prop = calc.get_raman(pos)
+ assert calc._ncalcs == 1
+ assert isinstance(prop, Tensor)
+
+ # cache is used for same calc
+ prop = calc.get_raman(pos)
+ assert calc._ncalcs == 1
+ assert isinstance(prop, Tensor)
+
+ # cache is used for energy
+ prop = calc.get_energy(pos)
+ assert calc._ncalcs == 1
+ assert isinstance(prop, Tensor)
+
+ # cache is used for Raman depolarization ratio
+ prop = calc.get_raman_depol(pos)
+ assert calc._ncalcs == 1
+ assert isinstance(prop, Tensor)
+
+    # cache is used for Raman intensities
+ prop = calc.get_raman_intensities(pos)
+ assert calc._ncalcs == 1
+ assert isinstance(prop, Tensor)
+
+ # check reset
+ calc.cache.reset_all()
+ assert len(calc.cache.list_cached_properties()) == 0
diff --git a/test/test_cli/test_driver.py b/test/test_cli/test_driver.py
index 6f1dec82..513ca5c3 100644
--- a/test/test_cli/test_driver.py
+++ b/test/test_cli/test_driver.py
@@ -24,6 +24,7 @@
import torch
from dxtb._src.cli import Driver, parser
+from dxtb._src.exlibs.available import has_libcint
from dxtb._src.timing import timer
from dxtb._src.typing import DD
@@ -105,9 +106,16 @@ def test_fail() -> None:
setattr(args, "method", "xtb")
Driver(args).singlepoint()
- with pytest.raises(NotImplementedError):
- setattr(args, "method", "gfn2")
- Driver(args).singlepoint()
+ # Before reaching the NotImplementedError, the RuntimeError can be
+    # raised if libcint is not available.
+ if has_libcint is False:
+ with pytest.raises(RuntimeError):
+ setattr(args, "method", "gfn2")
+ Driver(args).singlepoint()
+ else:
+ with pytest.raises(NotImplementedError):
+ setattr(args, "method", "gfn2")
+ Driver(args).singlepoint()
with pytest.raises(ValueError):
setattr(args, "method", "gfn1")
diff --git a/test/test_external/test_field.py b/test/test_external/test_field.py
index 5f6beef4..7252beef 100644
--- a/test/test_external/test_field.py
+++ b/test/test_external/test_field.py
@@ -30,6 +30,7 @@
from dxtb import GFN1_XTB, Calculator
from dxtb._src.components.interactions import new_efield
from dxtb._src.constants import labels
+from dxtb._src.exlibs.available import has_libcint
from dxtb._src.typing import DD
from ..conftest import DEVICE
@@ -45,6 +46,7 @@
}
+@pytest.mark.skipif(not has_libcint, reason="Libcint not available.")
@pytest.mark.parametrize("dtype", [torch.float, torch.double])
@pytest.mark.parametrize("name", sample_list)
def test_single(dtype: torch.dtype, name: str) -> None:
@@ -70,6 +72,7 @@ def test_single(dtype: torch.dtype, name: str) -> None:
assert pytest.approx(ref.cpu(), abs=tol, rel=tol) == res.cpu()
+@pytest.mark.skipif(not has_libcint, reason="Libcint not available.")
@pytest.mark.parametrize("dtype", [torch.float, torch.double])
@pytest.mark.parametrize("name1", sample_list)
@pytest.mark.parametrize("name2", sample_list)
@@ -116,6 +119,7 @@ def test_batch(
assert pytest.approx(ref.cpu(), abs=tol, rel=tol) == res.cpu()
+@pytest.mark.skipif(not has_libcint, reason="Libcint not available.")
@pytest.mark.parametrize("dtype", [torch.float, torch.double])
@pytest.mark.parametrize("name1", ["LiH"])
@pytest.mark.parametrize("name2", ["LiH"])
diff --git a/test/test_integrals/test_driver/test_manager.py b/test/test_integrals/test_driver/test_manager.py
index a448b51b..65b963b0 100644
--- a/test/test_integrals/test_driver/test_manager.py
+++ b/test/test_integrals/test_driver/test_manager.py
@@ -26,6 +26,7 @@
from dxtb import GFN1_XTB as par
from dxtb import IndexHelper
from dxtb._src.constants.labels import INTDRIVER_ANALYTICAL, INTDRIVER_LIBCINT
+from dxtb._src.exlibs.available import has_libcint
from dxtb._src.integral.driver.libcint import IntDriverLibcint
from dxtb._src.integral.driver.manager import DriverManager
from dxtb._src.integral.driver.pytorch import IntDriverPytorch
@@ -45,9 +46,7 @@ def test_fail() -> None:
mgr.create_driver(numbers, par, IndexHelper.from_numbers(numbers, par))
-@pytest.mark.parametrize("dtype", [torch.float, torch.double])
-@pytest.mark.parametrize("force_cpu_for_libcint", [True, False])
-def test_single(dtype: torch.dtype, force_cpu_for_libcint: bool):
+def single(name: int, dtype: torch.dtype, force_cpu_for_libcint: bool) -> None:
dd: DD = {"dtype": dtype, "device": DEVICE}
numbers = torch.tensor([3, 1], device=DEVICE)
@@ -55,36 +54,43 @@ def test_single(dtype: torch.dtype, force_cpu_for_libcint: bool):
ihelp = IndexHelper.from_numbers(numbers, par)
- mgr_py = DriverManager(
- INTDRIVER_ANALYTICAL, force_cpu_for_libcint=force_cpu_for_libcint, **dd
- )
- mgr_py.create_driver(numbers, par, ihelp)
-
- mgr_lc = DriverManager(
- INTDRIVER_LIBCINT, force_cpu_for_libcint=force_cpu_for_libcint, **dd
- )
- mgr_lc.create_driver(numbers, par, ihelp)
+ mgr = DriverManager(name, force_cpu_for_libcint=force_cpu_for_libcint, **dd)
+ mgr.create_driver(numbers, par, ihelp)
if force_cpu_for_libcint is True:
positions = positions.cpu()
- mgr_py.setup_driver(positions)
- assert isinstance(mgr_py.driver, IntDriverPytorch)
- mgr_lc.setup_driver(positions)
- assert isinstance(mgr_lc.driver, IntDriverLibcint)
+ mgr.setup_driver(positions)
+ if name == INTDRIVER_ANALYTICAL:
+ assert isinstance(mgr.driver, IntDriverPytorch)
+ elif name == INTDRIVER_LIBCINT:
+ assert isinstance(mgr.driver, IntDriverLibcint)
- assert mgr_py.driver.is_latest(positions) is True
- assert mgr_lc.driver.is_latest(positions) is True
+ assert mgr.driver.is_latest(positions) is True
# upon changing the positions, the driver should become outdated
positions[0, 0] += 1e-4
- assert mgr_py.driver.is_latest(positions) is False
- assert mgr_lc.driver.is_latest(positions) is False
+ assert mgr.driver.is_latest(positions) is False
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.float, torch.double])
@pytest.mark.parametrize("force_cpu_for_libcint", [True, False])
-def test_batch(dtype: torch.dtype, force_cpu_for_libcint: bool) -> None:
+def test_libcint_single(
+ dtype: torch.dtype, force_cpu_for_libcint: bool
+) -> None:
+ single(INTDRIVER_LIBCINT, dtype, force_cpu_for_libcint)
+
+
+@pytest.mark.parametrize("dtype", [torch.float, torch.double])
+@pytest.mark.parametrize("force_cpu_for_libcint", [True, False])
+def test_pytorch_single(
+ dtype: torch.dtype, force_cpu_for_libcint: bool
+) -> None:
+ single(INTDRIVER_ANALYTICAL, dtype, force_cpu_for_libcint)
+
+
+def batch(name: int, dtype: torch.dtype, force_cpu_for_libcint: bool) -> None:
dd: DD = {"dtype": dtype, "device": DEVICE}
numbers = torch.tensor([[3, 1], [1, 0]], device=DEVICE)
@@ -92,28 +98,33 @@ def test_batch(dtype: torch.dtype, force_cpu_for_libcint: bool) -> None:
ihelp = IndexHelper.from_numbers(numbers, par)
- mgr_py = DriverManager(
- INTDRIVER_ANALYTICAL, force_cpu_for_libcint=force_cpu_for_libcint, **dd
- )
- mgr_py.create_driver(numbers, par, ihelp)
-
- mgr_lc = DriverManager(
- INTDRIVER_LIBCINT, force_cpu_for_libcint=force_cpu_for_libcint, **dd
- )
- mgr_lc.create_driver(numbers, par, ihelp)
+ mgr = DriverManager(name, force_cpu_for_libcint=force_cpu_for_libcint, **dd)
+ mgr.create_driver(numbers, par, ihelp)
if force_cpu_for_libcint is True:
positions = positions.cpu()
- mgr_py.setup_driver(positions)
- assert isinstance(mgr_py.driver, IntDriverPytorch)
- mgr_lc.setup_driver(positions)
- assert isinstance(mgr_lc.driver, IntDriverLibcint)
+ mgr.setup_driver(positions)
+ if name == INTDRIVER_ANALYTICAL:
+ assert isinstance(mgr.driver, IntDriverPytorch)
+ elif name == INTDRIVER_LIBCINT:
+ assert isinstance(mgr.driver, IntDriverLibcint)
- assert mgr_py.driver.is_latest(positions) is True
- assert mgr_lc.driver.is_latest(positions) is True
+ assert mgr.driver.is_latest(positions) is True
# upon changing the positions, the driver should become outdated
positions[0, 0] += 1e-4
- assert mgr_py.driver.is_latest(positions) is False
- assert mgr_lc.driver.is_latest(positions) is False
+ assert mgr.driver.is_latest(positions) is False
+
+
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
+@pytest.mark.parametrize("dtype", [torch.float, torch.double])
+@pytest.mark.parametrize("force_cpu_for_libcint", [True, False])
+def test_libcint_batch(dtype: torch.dtype, force_cpu_for_libcint: bool) -> None:
+ batch(INTDRIVER_LIBCINT, dtype, force_cpu_for_libcint)
+
+
+@pytest.mark.parametrize("dtype", [torch.float, torch.double])
+@pytest.mark.parametrize("force_cpu_for_libcint", [True, False])
+def test_pytorch_batch(dtype: torch.dtype, force_cpu_for_libcint: bool) -> None:
+ batch(INTDRIVER_ANALYTICAL, dtype, force_cpu_for_libcint)
diff --git a/test/test_integrals/test_libcint.py b/test/test_integrals/test_libcint.py
index b824bd34..fe0afc42 100644
--- a/test/test_integrals/test_libcint.py
+++ b/test/test_integrals/test_libcint.py
@@ -28,8 +28,8 @@
from dxtb import IndexHelper
from dxtb import integrals as ints
from dxtb import labels
-from dxtb._src.exlibs.libcint import LibcintWrapper
-from dxtb._src.integral.driver import libcint
+from dxtb._src.exlibs.available import has_libcint
+from dxtb._src.integral.driver.libcint import IntDriverLibcint
from dxtb._src.integral.driver.manager import DriverManager
from dxtb._src.integral.factory import (
new_dipint_libcint,
@@ -38,6 +38,9 @@
)
from dxtb._src.typing import DD, Tensor
+if has_libcint is True:
+ from dxtb._src.exlibs import libcint
+
from ..conftest import DEVICE
from .samples import samples
@@ -53,13 +56,13 @@ def run(numbers: Tensor, positions: Tensor, cpu: bool, dd: DD) -> None:
i.build_overlap(positions, force_cpu_for_libcint=cpu)
if numbers.ndim == 1:
- assert isinstance(mgr.driver, libcint.IntDriverLibcint)
- assert isinstance(mgr.driver.drv, LibcintWrapper)
+ assert isinstance(mgr.driver, IntDriverLibcint)
+ assert isinstance(mgr.driver.drv, libcint.LibcintWrapper)
else:
- assert isinstance(mgr.driver, libcint.IntDriverLibcint)
+ assert isinstance(mgr.driver, IntDriverLibcint)
assert isinstance(mgr.driver.drv, list)
- assert isinstance(mgr.driver.drv[0], LibcintWrapper)
- assert isinstance(mgr.driver.drv[1], LibcintWrapper)
+ assert isinstance(mgr.driver.drv[0], libcint.LibcintWrapper)
+ assert isinstance(mgr.driver.drv[1], libcint.LibcintWrapper)
################################################
@@ -89,6 +92,7 @@ def run(numbers: Tensor, positions: Tensor, cpu: bool, dd: DD) -> None:
assert q.matrix is not None
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("name", ["H2"])
@pytest.mark.parametrize("dtype", [torch.float, torch.double])
@pytest.mark.parametrize("force_cpu_for_libcint", [True, False])
@@ -102,6 +106,7 @@ def test_single(dtype: torch.dtype, name: str, force_cpu_for_libcint: bool):
run(numbers, positions, force_cpu_for_libcint, dd)
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("name1", ["H2"])
@pytest.mark.parametrize("name2", ["LiH"])
@pytest.mark.parametrize("dtype", [torch.float, torch.double])
diff --git a/test/test_integrals/test_wrappers.py b/test/test_integrals/test_wrappers.py
index 4bb734db..488e799b 100644
--- a/test/test_integrals/test_wrappers.py
+++ b/test/test_integrals/test_wrappers.py
@@ -24,6 +24,7 @@
import torch
from dxtb import GFN1_XTB, GFN2_XTB, Param
+from dxtb._src.exlibs.available import has_libcint
from dxtb.integrals import wrappers
numbers = torch.tensor([14, 1, 1, 1, 1])
@@ -85,11 +86,13 @@ def test_overlap() -> None:
assert s.shape == (17, 17)
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
def test_dipole() -> None:
s = wrappers.dipint(numbers, positions, GFN1_XTB)
assert s.shape == (3, 17, 17)
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
def test_quad() -> None:
s = wrappers.quadint(numbers, positions, GFN1_XTB)
assert s.shape == (9, 17, 17)
diff --git a/test/test_libcint/test_gradcheck.py b/test/test_libcint/test_gradcheck.py
index 2588533f..1d250a6d 100644
--- a/test/test_libcint/test_gradcheck.py
+++ b/test/test_libcint/test_gradcheck.py
@@ -28,11 +28,14 @@
from dxtb import GFN1_XTB as par
from dxtb import IndexHelper
from dxtb._src.basis.bas import Basis
-from dxtb._src.exlibs import libcint
+from dxtb._src.exlibs.available import has_libcint
from dxtb._src.integral.driver.libcint import IntDriverLibcint, OverlapLibcint
from dxtb._src.typing import DD, Callable, Tensor
from dxtb._src.utils import is_basis_list
+if has_libcint is True:
+ from dxtb._src.exlibs import libcint
+
from ..conftest import DEVICE
from .samples import samples
@@ -68,6 +71,7 @@ def func(pos: Tensor) -> Tensor:
return func, pos
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.grad
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", sample_list)
@@ -82,6 +86,7 @@ def test_grad(dtype: torch.dtype, name: str, intstr: str, deriv: str) -> None:
assert dgradcheck(func, diffvars, atol=tol, rtol=tol, nondet_tol=1e-7)
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.grad
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", sample_list)
@@ -133,6 +138,7 @@ def func(p: Tensor) -> Tensor:
return func, pos
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.grad
@pytest.mark.filterwarnings("ignore") # torch.meshgrid from batch.deflate
@pytest.mark.parametrize("dtype", [torch.double])
@@ -147,6 +153,7 @@ def skip_test_grad_batch(dtype: torch.dtype, name1: str, name2: str) -> None:
assert dgradcheck(func, diffvars, atol=tol, nondet_tol=1e-7)
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.grad
@pytest.mark.filterwarnings("ignore") # torch.meshgrid from batch.deflate
@pytest.mark.parametrize("dtype", [torch.double])
diff --git a/test/test_libcint/test_overlap.py b/test/test_libcint/test_overlap.py
index 630a7fa3..58886aab 100644
--- a/test/test_libcint/test_overlap.py
+++ b/test/test_libcint/test_overlap.py
@@ -34,16 +34,12 @@
from dxtb import GFN1_XTB as par
from dxtb import IndexHelper
from dxtb._src.basis.bas import Basis
-from dxtb._src.exlibs import libcint
+from dxtb._src.exlibs.available import has_libcint, has_pyscf
from dxtb._src.typing import DD, Tensor
from dxtb._src.utils import is_basis_list
-try:
- from dxtb._src.exlibs.pyscf.mol import M
-
- pyscf = True
-except ImportError:
- pyscf = False
+if has_libcint is True:
+ from dxtb._src.exlibs import libcint
from ..conftest import DEVICE
from .samples import samples
@@ -81,7 +77,10 @@ def extract_blocks(x: Tensor, block_sizes: list[int] | Tensor) -> list[Tensor]:
return blocks
-@pytest.mark.skipif(pyscf is False, reason="PySCF not installed")
+@pytest.mark.skipif(
+ has_pyscf is False or has_libcint is False,
+ reason="PySCF or libcint interface not installed",
+)
@pytest.mark.parametrize("dtype", [torch.float, torch.double])
@pytest.mark.parametrize("name", slist)
def test_single(dtype: torch.dtype, name: str) -> None:
@@ -89,7 +88,10 @@ def test_single(dtype: torch.dtype, name: str) -> None:
@pytest.mark.large
-@pytest.mark.skipif(pyscf is False, reason="PySCF not installed")
+@pytest.mark.skipif(
+ has_pyscf is False or has_libcint is False,
+ reason="PySCF or libcint interface not installed",
+)
@pytest.mark.parametrize("dtype", [torch.float, torch.double])
@pytest.mark.parametrize("name", slist_large)
def test_large(dtype: torch.dtype, name: str) -> None:
@@ -131,7 +133,10 @@ def run_single(dtype: torch.dtype, name: str) -> None:
assert pytest.approx(pyscf_overlap.cpu(), abs=tol) == dxtb_overlap.cpu()
-@pytest.mark.skipif(pyscf is False, reason="PySCF not installed")
+@pytest.mark.skipif(
+ has_pyscf is False or has_libcint is False,
+ reason="PySCF or libcint interface not installed",
+)
@pytest.mark.parametrize("dtype", [torch.float, torch.double])
@pytest.mark.parametrize("name1", ["LiH"])
@pytest.mark.parametrize("name2", slist)
@@ -144,7 +149,10 @@ def test_batch(dtype: torch.dtype, name1: str, name2: str) -> None:
@pytest.mark.large
-@pytest.mark.skipif(pyscf is False, reason="PySCF not installed")
+@pytest.mark.skipif(
+ has_pyscf is False or has_libcint is False,
+ reason="PySCF or libcint interface not installed",
+)
@pytest.mark.parametrize("dtype", [torch.float, torch.double])
@pytest.mark.parametrize("name1", ["LiH"])
@pytest.mark.parametrize("name2", slist_large)
@@ -218,7 +226,10 @@ def run_batch(dtype: torch.dtype, name1: str, name2: str) -> None:
assert s_packed.shape == torch.Size((2, max_size, max_size))
-@pytest.mark.skipif(pyscf is False, reason="PySCF not installed")
+@pytest.mark.skipif(
+ has_pyscf is False or has_libcint is False,
+ reason="PySCF or libcint interface not installed",
+)
@pytest.mark.parametrize("dtype", [torch.float, torch.double])
@pytest.mark.parametrize("name", slist)
def test_grad(dtype: torch.dtype, name: str) -> None:
@@ -226,7 +237,10 @@ def test_grad(dtype: torch.dtype, name: str) -> None:
@pytest.mark.large
-@pytest.mark.skipif(pyscf is False, reason="PySCF not installed")
+@pytest.mark.skipif(
+ has_pyscf is False or has_libcint is False,
+ reason="PySCF or libcint interface not installed",
+)
@pytest.mark.parametrize("dtype", [torch.float, torch.double])
@pytest.mark.parametrize("name", slist_large)
def test_large_grad(dtype: torch.dtype, name: str) -> None:
diff --git a/test/test_libcint/test_overlap_grad.py b/test/test_libcint/test_overlap_grad.py
index 04abc0d2..1e4e1511 100644
--- a/test/test_libcint/test_overlap_grad.py
+++ b/test/test_libcint/test_overlap_grad.py
@@ -30,10 +30,13 @@
from dxtb import GFN1_XTB as par
from dxtb import IndexHelper
from dxtb._src.basis.bas import Basis
-from dxtb._src.exlibs import libcint
+from dxtb._src.exlibs.available import has_libcint
from dxtb._src.typing import DD, Tensor
from dxtb._src.utils import is_basis_list
+if has_libcint is True:
+ from dxtb._src.exlibs import libcint
+
from ..conftest import DEVICE
from ..utils import load_from_npz
from .samples import samples
@@ -75,6 +78,7 @@ def explicit(name: str, dd: DD, tol: float) -> None:
assert pytest.approx(ref.cpu(), abs=tol) == final_grad.cpu()
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.float, torch.double])
@pytest.mark.parametrize("name", sample_list)
def test_explicit(dtype: torch.dtype, name: str) -> None:
@@ -83,6 +87,7 @@ def test_explicit(dtype: torch.dtype, name: str) -> None:
explicit(name, dd, tol)
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.float, torch.double])
@pytest.mark.parametrize("name", ["MB16_43_01"])
def test_explicit_medium(dtype: torch.dtype, name: str) -> None:
@@ -113,6 +118,7 @@ def autograd(name: str, dd: DD, tol: float) -> None:
assert pytest.approx(ref.cpu(), abs=tol) == g.cpu()
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.float, torch.double])
@pytest.mark.parametrize("name", sample_list)
def test_autograd(dtype: torch.dtype, name: str) -> None:
@@ -121,6 +127,7 @@ def test_autograd(dtype: torch.dtype, name: str) -> None:
autograd(name, dd, tol)
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.float, torch.double])
@pytest.mark.parametrize("name", ["MB16_43_01"])
def test_autograd_medium(dtype: torch.dtype, name: str) -> None:
diff --git a/test/test_multipole/test_dipole_integral.py b/test/test_multipole/test_dipole_integral.py
index 5fac2167..bb42781a 100644
--- a/test/test_multipole/test_dipole_integral.py
+++ b/test/test_multipole/test_dipole_integral.py
@@ -33,14 +33,14 @@
from dxtb import GFN1_XTB as par
from dxtb import IndexHelper
from dxtb._src.basis.bas import Basis
-from dxtb._src.exlibs import libcint
+from dxtb._src.exlibs.available import has_libcint, has_pyscf
from dxtb._src.typing import DD, Tensor
from dxtb._src.utils import is_basis_list
-try:
+if has_pyscf is True:
from dxtb._src.exlibs.pyscf.mol import M
-except ImportError:
- M = False
+if has_libcint is True:
+ from dxtb._src.exlibs import libcint
from ..conftest import DEVICE
from .samples import samples
@@ -53,7 +53,10 @@ def snorm(overlap: Tensor) -> Tensor:
return torch.pow(overlap.diagonal(dim1=-1, dim2=-2), -0.5)
-@pytest.mark.skipif(M is False, reason="PySCF not installed")
+@pytest.mark.skipif(
+ has_pyscf is False or has_libcint is False,
+ reason="PySCF or libcint interface not installed",
+)
@pytest.mark.parametrize("dtype", [torch.float, torch.double])
@pytest.mark.parametrize("name", slist)
def test_single(dtype: torch.dtype, name: str) -> None:
@@ -61,7 +64,10 @@ def test_single(dtype: torch.dtype, name: str) -> None:
@pytest.mark.large
-@pytest.mark.skipif(M is False, reason="PySCF not installed")
+@pytest.mark.skipif(
+ has_pyscf is False or has_libcint is False,
+ reason="PySCF or libcint interface not installed",
+)
@pytest.mark.parametrize("dtype", [torch.float, torch.double])
@pytest.mark.parametrize("name", slist_large)
def test_large(dtype: torch.dtype, name: str) -> None:
diff --git a/test/test_multipole/test_shape.py b/test/test_multipole/test_shape.py
index 936cf1ee..38403666 100644
--- a/test/test_multipole/test_shape.py
+++ b/test/test_multipole/test_shape.py
@@ -27,10 +27,13 @@
from dxtb import GFN1_XTB as par
from dxtb import IndexHelper
from dxtb._src.basis.bas import Basis
-from dxtb._src.exlibs import libcint
+from dxtb._src.exlibs.available import has_libcint
from dxtb._src.typing import DD
from dxtb._src.utils import is_basis_list
+if has_libcint is True:
+ from dxtb._src.exlibs import libcint
+
from ..conftest import DEVICE
from .samples import samples
@@ -38,6 +41,7 @@
mp_ints = ["j", "jj"] # dipole, quadrupole
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.float, torch.double])
@pytest.mark.parametrize("name", sample_list)
@pytest.mark.parametrize("intstr", mp_ints)
@@ -62,6 +66,7 @@ def test_single(dtype: torch.dtype, intstr: str, name: str) -> None:
assert i.shape == torch.Size((mpdim, ihelp.nao, ihelp.nao))
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.float, torch.double])
@pytest.mark.parametrize("name1", ["LiH"])
@pytest.mark.parametrize("name2", sample_list)
diff --git a/test/test_multipole/test_symmetry.py b/test/test_multipole/test_symmetry.py
index 072af244..7a1e56c0 100644
--- a/test/test_multipole/test_symmetry.py
+++ b/test/test_multipole/test_symmetry.py
@@ -29,10 +29,13 @@
from dxtb import GFN1_XTB as par
from dxtb import IndexHelper
from dxtb._src.basis.bas import Basis
-from dxtb._src.exlibs import libcint
+from dxtb._src.exlibs.available import has_libcint
from dxtb._src.typing import DD, Tensor
from dxtb._src.utils import is_basis_list
+if has_libcint is True:
+ from dxtb._src.exlibs import libcint
+
from ..conftest import DEVICE
from .samples import samples
@@ -69,6 +72,7 @@ def check_multipole_symmetry(multipole_tensor: Tensor) -> bool:
return is_symmetric
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.float, torch.double])
@pytest.mark.parametrize("name", sample_list)
@pytest.mark.parametrize("intstr", mp_ints)
@@ -97,6 +101,7 @@ def test_single(dtype: torch.dtype, intstr: str, name: str) -> None:
assert check_multipole_symmetry(i) is True
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.float, torch.double])
@pytest.mark.parametrize("name1", ["LiH"])
@pytest.mark.parametrize("name2", sample_list)
diff --git a/test/test_multipole/todo_test_dipole_grad.py b/test/test_multipole/todo_test_dipole_grad.py
index 49d81546..1ca2a860 100644
--- a/test/test_multipole/todo_test_dipole_grad.py
+++ b/test/test_multipole/todo_test_dipole_grad.py
@@ -22,16 +22,17 @@
import pytest
import torch
-from tad_mctc.autograd import dgradcheck, dgradgradcheck
-from tad_mctc.batch import pack
from dxtb import GFN1_XTB as par
from dxtb import IndexHelper
from dxtb._src.basis.bas import Basis
-from dxtb._src.exlibs import libcint
+from dxtb._src.exlibs.available import has_libcint
from dxtb._src.typing import DD, Tensor
from dxtb._src.utils import is_basis_list
+if has_libcint is True:
+ from dxtb._src.exlibs import libcint
+
from ..conftest import DEVICE
from .samples import samples
@@ -82,6 +83,7 @@ def compute_integral(pos: torch.Tensor) -> torch.Tensor:
return gradient
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.grad
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", sample_list)
diff --git a/test/test_properties/test_dipole.py b/test/test_properties/test_dipole.py
index c1c79945..dbf76832 100644
--- a/test/test_properties/test_dipole.py
+++ b/test/test_properties/test_dipole.py
@@ -29,6 +29,7 @@
from dxtb import GFN1_XTB as par
from dxtb import Calculator
from dxtb._src.components.interactions import new_efield
+from dxtb._src.exlibs.available import has_libcint
from dxtb._src.typing import DD, Tensor
from dxtb.labels import INTLEVEL_DIPOLE
@@ -142,6 +143,7 @@ def execute(
assert pytest.approx(dip1, abs=atol, rel=rtol) == dip2
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", slist)
def test_single(dtype: torch.dtype, name: str) -> None:
@@ -152,6 +154,7 @@ def test_single(dtype: torch.dtype, name: str) -> None:
@pytest.mark.large
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", slist_more)
def test_single_more(dtype: torch.dtype, name: str) -> None:
@@ -162,6 +165,7 @@ def test_single_more(dtype: torch.dtype, name: str) -> None:
@pytest.mark.large
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", slist_large)
def test_single_large(dtype: torch.dtype, name: str) -> None:
@@ -171,6 +175,7 @@ def test_single_large(dtype: torch.dtype, name: str) -> None:
single(name, "dipole", field_vector, dd=dd, atol=1e-3)
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", slist)
def test_single_field(dtype: torch.dtype, name: str) -> None:
@@ -181,6 +186,7 @@ def test_single_field(dtype: torch.dtype, name: str) -> None:
@pytest.mark.large
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", slist_large)
def test_single_field_large(dtype: torch.dtype, name: str) -> None:
@@ -190,6 +196,7 @@ def test_single_field_large(dtype: torch.dtype, name: str) -> None:
single(name, "dipole2", field_vector, dd=dd, atol=1e-3, rtol=1e-3)
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name1", ["LiH"])
@pytest.mark.parametrize("name2", slist)
@@ -203,6 +210,7 @@ def test_batch(dtype: torch.dtype, name1: str, name2: str) -> None:
###############################################################################
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.filterwarnings("ignore")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name1", ["LiH"])
@@ -251,6 +259,7 @@ def test_batch_settings(
assert pytest.approx(ref.cpu(), abs=1e-4) == dipole
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.filterwarnings("ignore")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name1", ["LiH"])
diff --git a/test/test_properties/test_dipole_deriv.py b/test/test_properties/test_dipole_deriv.py
index 731b8a21..6d516e1f 100644
--- a/test/test_properties/test_dipole_deriv.py
+++ b/test/test_properties/test_dipole_deriv.py
@@ -29,6 +29,7 @@
from dxtb import GFN1_XTB as par
from dxtb import Calculator
from dxtb._src.components.interactions import new_efield
+from dxtb._src.exlibs.available import has_libcint
from dxtb._src.typing import DD, Tensor
from dxtb.labels import INTLEVEL_DIPOLE
@@ -168,6 +169,7 @@ def execute(
assert pytest.approx(dipder1, abs=atol) == dipder4
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", slist)
def test_single(dtype: torch.dtype, name: str) -> None:
@@ -178,6 +180,7 @@ def test_single(dtype: torch.dtype, name: str) -> None:
@pytest.mark.large
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", slist_more)
def test_single_more(dtype: torch.dtype, name: str) -> None:
@@ -188,6 +191,7 @@ def test_single_more(dtype: torch.dtype, name: str) -> None:
@pytest.mark.large
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", slist_large)
def test_single_large(dtype: torch.dtype, name: str) -> None:
@@ -197,6 +201,7 @@ def test_single_large(dtype: torch.dtype, name: str) -> None:
single(name, field_vector, dd=dd)
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", slist)
def test_single_field(dtype: torch.dtype, name: str) -> None:
@@ -207,6 +212,7 @@ def test_single_field(dtype: torch.dtype, name: str) -> None:
# TODO: Batched derivatives are not supported yet
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name1", ["LiH"])
@pytest.mark.parametrize("name2", slist)
@@ -219,6 +225,7 @@ def skip_test_batch(dtype: torch.dtype, name1: str, name2) -> None:
# TODO: Batched derivatives are not supported yet
@pytest.mark.large
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name1", ["LiH"])
@pytest.mark.parametrize("name2", slist_large)
@@ -230,6 +237,7 @@ def skip_test_batch_large(dtype: torch.dtype, name1: str, name2) -> None:
# TODO: Batched derivatives are not supported yet
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name1", ["LiH"])
@pytest.mark.parametrize("name2", slist)
diff --git a/test/test_properties/test_hyperpol.py b/test/test_properties/test_hyperpol.py
index 2e2b1119..35c76901 100644
--- a/test/test_properties/test_hyperpol.py
+++ b/test/test_properties/test_hyperpol.py
@@ -29,6 +29,7 @@
from dxtb import GFN1_XTB as par
from dxtb import Calculator
from dxtb._src.components.interactions import new_efield
+from dxtb._src.exlibs.available import has_libcint
from dxtb._src.typing import DD, Tensor
from dxtb.labels import INTLEVEL_DIPOLE
@@ -171,6 +172,7 @@ def execute(
assert pytest.approx(pol, abs=1e-4) == pol4
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", slist)
def test_single(dtype: torch.dtype, name: str) -> None:
@@ -181,6 +183,7 @@ def test_single(dtype: torch.dtype, name: str) -> None:
@pytest.mark.large
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", slist_more)
def test_single_more(dtype: torch.dtype, name: str) -> None:
@@ -191,6 +194,7 @@ def test_single_more(dtype: torch.dtype, name: str) -> None:
@pytest.mark.large
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", slist_large)
def test_single_large(dtype: torch.dtype, name: str) -> None:
@@ -200,6 +204,7 @@ def test_single_large(dtype: torch.dtype, name: str) -> None:
single(name, field_vector, dd=dd)
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", slist)
def test_single_field(dtype: torch.dtype, name: str) -> None:
@@ -210,6 +215,7 @@ def test_single_field(dtype: torch.dtype, name: str) -> None:
# TODO: Batched derivatives are not supported yet
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name1", ["LiH"])
@pytest.mark.parametrize("name2", slist)
@@ -222,6 +228,7 @@ def skip_test_batch(dtype: torch.dtype, name1: str, name2) -> None:
# TODO: Batched derivatives are not supported yet
@pytest.mark.large
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name1", ["LiH"])
@pytest.mark.parametrize("name2", slist_large)
@@ -233,6 +240,7 @@ def skip_test_batch_large(dtype: torch.dtype, name1: str, name2) -> None:
# TODO: Batched derivatives are not supported yet
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name1", ["LiH"])
@pytest.mark.parametrize("name2", slist)
diff --git a/test/test_properties/test_ir.py b/test/test_properties/test_ir.py
index 45504ab2..528f7f44 100644
--- a/test/test_properties/test_ir.py
+++ b/test/test_properties/test_ir.py
@@ -29,6 +29,7 @@
from dxtb import GFN1_XTB as par
from dxtb import Calculator
from dxtb._src.components.interactions import new_efield
+from dxtb._src.exlibs.available import has_libcint
from dxtb._src.typing import DD, Tensor
from dxtb.labels import INTLEVEL_DIPOLE
@@ -149,6 +150,7 @@ def execute(
assert pytest.approx(ints1, abs=atol2, rel=rtol2) == ints2
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", slist)
def test_single(dtype: torch.dtype, name: str) -> None:
@@ -159,6 +161,7 @@ def test_single(dtype: torch.dtype, name: str) -> None:
@pytest.mark.large
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", slist_more)
def test_single_more(dtype: torch.dtype, name: str) -> None:
@@ -169,6 +172,7 @@ def test_single_more(dtype: torch.dtype, name: str) -> None:
@pytest.mark.large
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", slist_large)
def test_single_large(dtype: torch.dtype, name: str) -> None:
@@ -179,6 +183,7 @@ def test_single_large(dtype: torch.dtype, name: str) -> None:
# FIXME: Large deviation for all
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", slist)
def skip_test_single_field(dtype: torch.dtype, name: str) -> None:
@@ -189,6 +194,7 @@ def skip_test_single_field(dtype: torch.dtype, name: str) -> None:
# TODO: Batched derivatives are not supported yet
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name1", ["LiH"])
@pytest.mark.parametrize("name2", slist)
@@ -201,6 +207,7 @@ def skip_test_batch(dtype: torch.dtype, name1: str, name2) -> None:
# TODO: Batched derivatives are not supported yet
@pytest.mark.large
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name1", ["LiH"])
@pytest.mark.parametrize("name2", slist_large)
@@ -212,6 +219,7 @@ def skip_test_batch_large(dtype: torch.dtype, name1: str, name2) -> None:
# TODO: Batched derivatives are not supported yet
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name1", ["LiH"])
@pytest.mark.parametrize("name2", slist)
diff --git a/test/test_properties/test_pol.py b/test/test_properties/test_pol.py
index 32531991..3567c6f9 100644
--- a/test/test_properties/test_pol.py
+++ b/test/test_properties/test_pol.py
@@ -29,6 +29,7 @@
from dxtb import GFN1_XTB as par
from dxtb import Calculator
from dxtb._src.components.interactions import new_efield
+from dxtb._src.exlibs.available import has_libcint
from dxtb._src.typing import DD, Tensor
from dxtb.labels import INTLEVEL_DIPOLE
@@ -168,6 +169,7 @@ def execute(
assert pytest.approx(pol, abs=1e-2) == pol4
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", slist)
def test_single(dtype: torch.dtype, name: str) -> None:
@@ -178,6 +180,7 @@ def test_single(dtype: torch.dtype, name: str) -> None:
@pytest.mark.large
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", slist_more)
def test_single_more(dtype: torch.dtype, name: str) -> None:
@@ -188,6 +191,7 @@ def test_single_more(dtype: torch.dtype, name: str) -> None:
@pytest.mark.large
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", slist_large)
def test_single_large(dtype: torch.dtype, name: str) -> None:
@@ -197,6 +201,7 @@ def test_single_large(dtype: torch.dtype, name: str) -> None:
single(name, field_vector, dd=dd)
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", slist)
def test_single_field(dtype: torch.dtype, name: str) -> None:
@@ -207,6 +212,7 @@ def test_single_field(dtype: torch.dtype, name: str) -> None:
# TODO: Batched derivatives are not supported yet
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name1", ["LiH"])
@pytest.mark.parametrize("name2", slist)
@@ -219,6 +225,7 @@ def skip_test_batch(dtype: torch.dtype, name1: str, name2) -> None:
# TODO: Batched derivatives are not supported yet
@pytest.mark.large
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name1", ["LiH"])
@pytest.mark.parametrize("name2", slist_large)
@@ -230,6 +237,7 @@ def skip_test_batch_large(dtype: torch.dtype, name1: str, name2) -> None:
# TODO: Batched derivatives are not supported yet
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name1", ["LiH"])
@pytest.mark.parametrize("name2", slist)
diff --git a/test/test_properties/test_pol_deriv.py b/test/test_properties/test_pol_deriv.py
index 3a250f48..684e7926 100644
--- a/test/test_properties/test_pol_deriv.py
+++ b/test/test_properties/test_pol_deriv.py
@@ -29,6 +29,7 @@
from dxtb import GFN1_XTB as par
from dxtb import Calculator
from dxtb._src.components.interactions import new_efield
+from dxtb._src.exlibs.available import has_libcint
from dxtb._src.typing import DD, Tensor
from dxtb.labels import INTLEVEL_DIPOLE
@@ -162,6 +163,7 @@ def execute(
# assert pytest.approx(pol, abs=1e-8) == pol3
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", slist)
def test_single(dtype: torch.dtype, name: str) -> None:
@@ -172,6 +174,7 @@ def test_single(dtype: torch.dtype, name: str) -> None:
@pytest.mark.large
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", slist_more)
def test_single_more(dtype: torch.dtype, name: str) -> None:
@@ -182,6 +185,7 @@ def test_single_more(dtype: torch.dtype, name: str) -> None:
@pytest.mark.large
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", slist_large)
def test_single_large(dtype: torch.dtype, name: str) -> None:
@@ -191,6 +195,7 @@ def test_single_large(dtype: torch.dtype, name: str) -> None:
single(name, field_vector, dd=dd)
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", slist)
def test_single_field(dtype: torch.dtype, name: str) -> None:
@@ -201,6 +206,7 @@ def test_single_field(dtype: torch.dtype, name: str) -> None:
# TODO: Batched derivatives are not supported yet
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name1", ["LiH"])
@pytest.mark.parametrize("name2", slist)
@@ -213,6 +219,7 @@ def skip_test_batch(dtype: torch.dtype, name1: str, name2) -> None:
# TODO: Batched derivatives are not supported yet
@pytest.mark.large
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name1", ["LiH"])
@pytest.mark.parametrize("name2", slist_large)
@@ -224,6 +231,7 @@ def skip_test_batch_large(dtype: torch.dtype, name1: str, name2) -> None:
# TODO: Batched derivatives are not supported yet
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name1", ["LiH"])
@pytest.mark.parametrize("name2", slist)
diff --git a/test/test_properties/test_raman.py b/test/test_properties/test_raman.py
index 951c3aec..e61fa32a 100644
--- a/test/test_properties/test_raman.py
+++ b/test/test_properties/test_raman.py
@@ -29,6 +29,7 @@
from dxtb import GFN1_XTB as par
from dxtb import Calculator
from dxtb._src.components.interactions import new_efield
+from dxtb._src.exlibs.available import has_libcint
from dxtb._src.typing import DD, Tensor
from dxtb.labels import INTLEVEL_DIPOLE
@@ -161,6 +162,7 @@ def execute(
assert pytest.approx(depol1, abs=atol2, rel=rtol2) == depol2
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", slist)
def test_single(dtype: torch.dtype, name: str) -> None:
@@ -171,6 +173,7 @@ def test_single(dtype: torch.dtype, name: str) -> None:
@pytest.mark.large
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", slist_more)
def test_single_more(dtype: torch.dtype, name: str) -> None:
@@ -181,6 +184,7 @@ def test_single_more(dtype: torch.dtype, name: str) -> None:
@pytest.mark.large
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", slist_large)
def test_single_large(dtype: torch.dtype, name: str) -> None:
@@ -191,6 +195,7 @@ def test_single_large(dtype: torch.dtype, name: str) -> None:
# FIXME: Large deviation for all
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", slist)
def skip_test_single_field(dtype: torch.dtype, name: str) -> None:
@@ -201,6 +206,7 @@ def skip_test_single_field(dtype: torch.dtype, name: str) -> None:
# TODO: Batched derivatives are not supported yet
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name1", ["LiH"])
@pytest.mark.parametrize("name2", slist)
@@ -213,6 +219,7 @@ def skip_test_batch(dtype: torch.dtype, name1: str, name2) -> None:
# TODO: Batched derivatives are not supported yet
@pytest.mark.large
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name1", ["LiH"])
@pytest.mark.parametrize("name2", slist_large)
@@ -224,6 +231,7 @@ def skip_test_batch_large(dtype: torch.dtype, name1: str, name2) -> None:
# TODO: Batched derivatives are not supported yet
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name1", ["LiH"])
@pytest.mark.parametrize("name2", slist)
diff --git a/test/test_singlepoint/test_grad_field.py b/test/test_singlepoint/test_grad_field.py
index 9b88d8f0..f87cd61f 100644
--- a/test/test_singlepoint/test_grad_field.py
+++ b/test/test_singlepoint/test_grad_field.py
@@ -30,6 +30,7 @@
from dxtb import Calculator
from dxtb._src.components.interactions import new_efield
from dxtb._src.constants import labels
+from dxtb._src.exlibs.available import has_libcint
from dxtb._src.typing import DD, Callable, Tensor
from ..conftest import DEVICE
@@ -87,6 +88,7 @@ def func(field_vector: Tensor) -> Tensor:
@pytest.mark.grad
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", sample_list)
@pytest.mark.parametrize("xfield", xfields)
@@ -103,6 +105,7 @@ def test_gradcheck(
@pytest.mark.grad
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", sample_list)
@pytest.mark.parametrize("xfield", xfields)
@@ -165,6 +168,7 @@ def func(field_vector: Tensor) -> Tensor:
@pytest.mark.grad
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name1", ["H2O"])
@pytest.mark.parametrize("name2", sample_list)
@@ -182,6 +186,7 @@ def test_gradcheck_batch(
@pytest.mark.grad
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name1", ["H2O"])
@pytest.mark.parametrize("name2", sample_list)
diff --git a/test/test_singlepoint/test_grad_fieldgrad.py b/test/test_singlepoint/test_grad_fieldgrad.py
index ce47cc50..226c9408 100644
--- a/test/test_singlepoint/test_grad_fieldgrad.py
+++ b/test/test_singlepoint/test_grad_fieldgrad.py
@@ -29,6 +29,7 @@
from dxtb import Calculator
from dxtb._src.components.interactions import new_efield, new_efield_grad
from dxtb._src.constants import labels
+from dxtb._src.exlibs.available import has_libcint
from dxtb._src.typing import DD, Callable, Tensor
from ..conftest import DEVICE
@@ -80,6 +81,7 @@ def func(fieldgrad: Tensor) -> Tensor:
@pytest.mark.grad
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", sample_list)
def test_gradcheck(dtype: torch.dtype, name: str) -> None:
@@ -92,6 +94,7 @@ def test_gradcheck(dtype: torch.dtype, name: str) -> None:
@pytest.mark.grad
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", sample_list)
def test_gradgradcheck(dtype: torch.dtype, name: str) -> None:
@@ -144,6 +147,7 @@ def func(fieldgrad: Tensor) -> Tensor:
@pytest.mark.grad
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name1", ["SiH4"])
@pytest.mark.parametrize("name2", sample_list)
@@ -157,6 +161,7 @@ def test_gradcheck_batch(dtype: torch.dtype, name1: str, name2: str) -> None:
@pytest.mark.grad
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name1", ["SiH4"])
@pytest.mark.parametrize("name2", sample_list)
diff --git a/test/test_singlepoint/test_grad_pos_withfield.py b/test/test_singlepoint/test_grad_pos_withfield.py
index 5a281324..d664d1d9 100644
--- a/test/test_singlepoint/test_grad_pos_withfield.py
+++ b/test/test_singlepoint/test_grad_pos_withfield.py
@@ -30,6 +30,7 @@
from dxtb import Calculator
from dxtb._src.components.interactions import new_efield
from dxtb._src.constants import labels
+from dxtb._src.exlibs.available import has_libcint
from dxtb._src.typing import DD, Callable, Tensor
from ..conftest import DEVICE
@@ -79,6 +80,7 @@ def func(p: Tensor) -> Tensor:
@pytest.mark.grad
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", sample_list)
def test_gradcheck(dtype: torch.dtype, name: str) -> None:
@@ -91,6 +93,7 @@ def test_gradcheck(dtype: torch.dtype, name: str) -> None:
@pytest.mark.grad
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name", sample_list)
def test_gradgradcheck(dtype: torch.dtype, name: str) -> None:
@@ -141,6 +144,7 @@ def func(p: Tensor) -> Tensor:
@pytest.mark.grad
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name1", ["SiH4"])
@pytest.mark.parametrize("name2", sample_list)
@@ -154,6 +158,7 @@ def test_gradcheck_batch(dtype: torch.dtype, name1: str, name2: str) -> None:
@pytest.mark.grad
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.double])
@pytest.mark.parametrize("name1", ["SiH4"])
@pytest.mark.parametrize("name2", sample_list)
diff --git a/test/test_utils/test_misc.py b/test/test_utils/test_misc.py
index 881ebdbd..0b9b05e9 100644
--- a/test/test_utils/test_misc.py
+++ b/test/test_utils/test_misc.py
@@ -23,6 +23,7 @@
import pytest
import torch
+from dxtb._src.exlibs.available import has_libcint
from dxtb._src.typing.exceptions import SCFConvergenceError
from dxtb._src.utils import (
is_basis_list,
@@ -41,7 +42,7 @@ def test_lists() -> None:
assert is_str_list(None) == False # type: ignore
-def test_is_int_list():
+def test_is_int_list() -> None:
assert is_int_list([1, 2, 3]) == True
assert is_int_list([1, "a", 3]) == False
assert is_int_list([]) == True
@@ -50,13 +51,13 @@ def test_is_int_list():
assert is_int_list(None) == False # type: ignore
-def test_is_basis_list(monkeypatch):
- # Mocking the import inside the function
- from dxtb._src.exlibs.libcint import AtomCGTOBasis, CGTOBasis
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
+def test_is_basis_list() -> None:
+ from dxtb._src.exlibs import libcint # type: ignore
- basis = AtomCGTOBasis(
+ basis = libcint.AtomCGTOBasis(
1,
- [CGTOBasis(1, torch.tensor([1.0]), torch.tensor([1.0]))],
+ [libcint.CGTOBasis(1, torch.tensor([1.0]), torch.tensor([1.0]))],
torch.tensor([0.0, 0.0, 0.0]),
)
From bfac71b37b480326f6eca1addf63c5b72b93aed7 Mon Sep 17 00:00:00 2001
From: marvinfriede <51965259+marvinfriede@users.noreply.github.com>
Date: Mon, 16 Sep 2024 12:21:57 -0500
Subject: [PATCH 02/12] Fix tests
---
src/dxtb/_src/calculators/types/abc.py | 138 ++++++++++++------
src/dxtb/_src/calculators/types/autograd.py | 47 ++++--
src/dxtb/_src/calculators/types/base.py | 26 +++-
src/dxtb/_src/cli/driver.py | 1 -
.../components/interactions/field/efield.py | 2 +-
.../exlibs/xitorch/_impls/linalg/solve.py | 2 +-
.../exlibs/xitorch/_impls/linalg/symeig.py | 16 +-
.../driver/pytorch/impls/md/explicit.py | 4 -
.../_src/integral/driver/pytorch/overlap.py | 1 -
.../test_cache/test_properties.py | 79 ++++------
test/test_libcint/test_overlap.py | 2 +
test/test_utils/test_eigh.py | 3 +-
12 files changed, 193 insertions(+), 128 deletions(-)
diff --git a/src/dxtb/_src/calculators/types/abc.py b/src/dxtb/_src/calculators/types/abc.py
index 268e3d57..9989c99d 100644
--- a/src/dxtb/_src/calculators/types/abc.py
+++ b/src/dxtb/_src/calculators/types/abc.py
@@ -25,6 +25,11 @@
from abc import ABC, abstractmethod
+from dxtb._src.calculators.properties.vibration import (
+ IRResult,
+ RamanResult,
+ VibResult,
+)
from dxtb._src.constants import defaults
from dxtb._src.typing import Any, Literal, Tensor
@@ -56,7 +61,7 @@ def get_property(
chrg: Tensor | float | int = defaults.CHRG,
spin: Tensor | float | int | None = defaults.SPIN,
**kwargs: Any,
- ) -> Tensor:
+ ) -> Tensor | VibResult | IRResult | RamanResult:
"""
Get the named property.
@@ -86,9 +91,11 @@ def get_energy(
spin: Tensor | float | int | None = defaults.SPIN,
**kwargs: Any,
) -> Tensor:
- return self.get_property(
+ prop = self.get_property(
"energy", positions, chrg=chrg, spin=spin, **kwargs
)
+ assert isinstance(prop, Tensor)
+ return prop
def get_potential_energy(
self,
@@ -97,9 +104,7 @@ def get_potential_energy(
spin: Tensor | float | int | None = defaults.SPIN,
**kwargs: Any,
) -> Tensor:
- return self.get_property(
- "energy", positions, chrg=chrg, spin=spin, **kwargs
- )
+ return self.get_energy(positions, chrg=chrg, spin=spin, **kwargs)
# nuclear derivatives
@@ -152,7 +157,7 @@ def get_forces(
Tensor
Atomic forces of shape ``(..., nat, 3)``.
"""
- return self.get_property(
+ prop = self.get_property(
"forces",
positions,
chrg=chrg,
@@ -160,6 +165,8 @@ def get_forces(
grad_mode=grad_mode,
**kwargs,
)
+ assert isinstance(prop, Tensor)
+ return prop
def get_hessian(
self,
@@ -168,9 +175,11 @@ def get_hessian(
spin: Tensor | float | int | None = defaults.SPIN,
**kwargs: Any,
) -> Tensor:
- return self.get_property(
+ prop = self.get_property(
"hessian", positions, chrg=chrg, spin=spin, **kwargs
)
+ assert isinstance(prop, Tensor)
+ return prop
def get_vibration(
self,
@@ -178,10 +187,12 @@ def get_vibration(
chrg: Tensor | float | int = defaults.CHRG,
spin: Tensor | float | int | None = defaults.SPIN,
**kwargs: Any,
- ) -> Tensor:
- return self.get_property(
+ ) -> VibResult:
+ prop = self.get_property(
"vibration", positions, chrg=chrg, spin=spin, **kwargs
)
+ assert isinstance(prop, VibResult)
+ return prop
def get_normal_modes(
self,
@@ -190,9 +201,11 @@ def get_normal_modes(
spin: Tensor | float | int | None = defaults.SPIN,
**kwargs: Any,
) -> Tensor:
- return self.get_property(
+ prop = self.get_property(
"normal_modes", positions, chrg=chrg, spin=spin, **kwargs
)
+ assert isinstance(prop, Tensor)
+ return prop
def get_frequencies(
self,
@@ -201,9 +214,11 @@ def get_frequencies(
spin: Tensor | float | int | None = defaults.SPIN,
**kwargs: Any,
) -> Tensor:
- return self.get_property(
+ prop = self.get_property(
"frequencies", positions, chrg=chrg, spin=spin, **kwargs
)
+ assert isinstance(prop, Tensor)
+ return prop
# field derivatives
@@ -214,9 +229,11 @@ def get_dipole(
spin: Tensor | float | int | None = defaults.SPIN,
**kwargs: Any,
) -> Tensor:
- return self.get_property(
+ prop = self.get_property(
"dipole", positions, chrg=chrg, spin=spin, **kwargs
)
+ assert isinstance(prop, Tensor)
+ return prop
def get_dipole_moment(
self,
@@ -225,9 +242,7 @@ def get_dipole_moment(
spin: Tensor | float | int | None = defaults.SPIN,
**kwargs: Any,
) -> Tensor:
- return self.get_property(
- "dipole", positions, chrg=chrg, spin=spin, **kwargs
- )
+ return self.get_dipole(positions, chrg=chrg, spin=spin, **kwargs)
def get_dipole_deriv(
self,
@@ -236,9 +251,11 @@ def get_dipole_deriv(
spin: Tensor | float | int | None = defaults.SPIN,
**kwargs: Any,
) -> Tensor:
- return self.get_property(
+ prop = self.get_property(
"dipole_derivatives", positions, chrg=chrg, spin=spin, **kwargs
)
+ assert isinstance(prop, Tensor)
+ return prop
def get_dipole_derivatives(
self,
@@ -247,9 +264,11 @@ def get_dipole_derivatives(
spin: Tensor | float | int | None = defaults.SPIN,
**kwargs: Any,
) -> Tensor:
- return self.get_property(
+ prop = self.get_property(
"dipole_derivatives", positions, chrg=chrg, spin=spin, **kwargs
)
+ assert isinstance(prop, Tensor)
+ return prop
def get_polarizability(
self,
@@ -258,9 +277,11 @@ def get_polarizability(
spin: Tensor | float | int | None = defaults.SPIN,
**kwargs: Any,
) -> Tensor:
- return self.get_property(
+ prop = self.get_property(
"polarizability", positions, chrg=chrg, spin=spin, **kwargs
)
+ assert isinstance(prop, Tensor)
+ return prop
def get_pol_deriv(
self,
@@ -269,13 +290,15 @@ def get_pol_deriv(
spin: Tensor | float | int | None = defaults.SPIN,
**kwargs: Any,
) -> Tensor:
- return self.get_property(
+ prop = self.get_property(
"polarizability_derivatives",
positions,
chrg=chrg,
spin=spin,
**kwargs,
)
+ assert isinstance(prop, Tensor)
+ return prop
def get_polarizability_derivatives(
self,
@@ -284,13 +307,7 @@ def get_polarizability_derivatives(
spin: Tensor | float | int | None = defaults.SPIN,
**kwargs: Any,
) -> Tensor:
- return self.get_property(
- "polarizability_derivatives",
- positions,
- chrg=chrg,
- spin=spin,
- **kwargs,
- )
+ return self.get_pol_deriv(positions, chrg=chrg, spin=spin, **kwargs)
def get_hyperpolarizability(
self,
@@ -299,9 +316,11 @@ def get_hyperpolarizability(
spin: Tensor | float | int | None = defaults.SPIN,
**kwargs: Any,
) -> Tensor:
- return self.get_property(
+ prop = self.get_property(
"hyperpolarizability", positions, chrg=chrg, spin=spin, **kwargs
)
+ assert isinstance(prop, Tensor)
+ return prop
# spectra
@@ -311,10 +330,12 @@ def get_ir(
chrg: Tensor | float | int = defaults.CHRG,
spin: Tensor | float | int | None = defaults.SPIN,
**kwargs: Any,
- ) -> Tensor:
- return self.get_property(
+ ) -> IRResult:
+ prop = self.get_property(
"ir", positions, chrg=chrg, spin=spin, **kwargs
)
+ assert isinstance(prop, IRResult)
+ return prop
def get_ir_intensities(
self,
@@ -323,9 +344,12 @@ def get_ir_intensities(
spin: Tensor | float | int | None = defaults.SPIN,
**kwargs: Any,
) -> Tensor:
- return self.get_property(
- "ir_intensity", positions, chrg=chrg, spin=spin, **kwargs
+ prop = self.get_property(
+ "ir", positions, chrg=chrg, spin=spin, **kwargs
)
+ assert isinstance(prop, IRResult)
+
+ return prop.ints
def get_raman(
self,
@@ -333,10 +357,12 @@ def get_raman(
chrg: Tensor | float | int = defaults.CHRG,
spin: Tensor | float | int | None = defaults.SPIN,
**kwargs: Any,
- ) -> Tensor:
- return self.get_property(
+ ) -> RamanResult:
+ prop = self.get_property(
"raman", positions, chrg=chrg, spin=spin, **kwargs
)
+ assert isinstance(prop, RamanResult)
+ return prop
def get_raman_intensities(
self,
@@ -345,9 +371,12 @@ def get_raman_intensities(
spin: Tensor | float | int | None = defaults.SPIN,
**kwargs: Any,
) -> Tensor:
- return self.get_property(
+ prop = self.get_property(
"raman_intensity", positions, chrg=chrg, spin=spin, **kwargs
)
+ assert isinstance(prop, RamanResult)
+
+ return prop.ints
def get_raman_depol(
self,
@@ -356,9 +385,12 @@ def get_raman_depol(
spin: Tensor | float | int | None = defaults.SPIN,
**kwargs: Any,
) -> Tensor:
- return self.get_property(
+ prop = self.get_property(
"raman_depol", positions, chrg=chrg, spin=spin, **kwargs
)
+ assert isinstance(prop, RamanResult)
+
+ return prop.depol
# SCF properties
@@ -369,9 +401,11 @@ def get_bond_orders(
spin: Tensor | float | int | None = defaults.SPIN,
**kwargs: Any,
) -> Tensor:
- return self.get_property(
+ prop = self.get_property(
"bond_orders", positions, chrg=chrg, spin=spin, **kwargs
)
+ assert isinstance(prop, Tensor)
+ return prop
def get_coefficients(
self,
@@ -380,9 +414,11 @@ def get_coefficients(
spin: Tensor | float | int | None = defaults.SPIN,
**kwargs: Any,
) -> Tensor:
- return self.get_property(
+ prop = self.get_property(
"coefficients", positions, chrg=chrg, spin=spin, **kwargs
)
+ assert isinstance(prop, Tensor)
+ return prop
def get_density(
self,
@@ -391,9 +427,11 @@ def get_density(
spin: Tensor | float | int | None = defaults.SPIN,
**kwargs: Any,
) -> Tensor:
- return self.get_property(
+ prop = self.get_property(
"density", positions, chrg=chrg, spin=spin, **kwargs
)
+ assert isinstance(prop, Tensor)
+ return prop
def get_charges(
self,
@@ -402,9 +440,11 @@ def get_charges(
spin: Tensor | float | int | None = defaults.SPIN,
**kwargs: Any,
) -> Tensor:
- return self.get_property(
+ prop = self.get_property(
"charges", positions, chrg=chrg, spin=spin, **kwargs
)
+ assert isinstance(prop, Tensor)
+ return prop
def get_mulliken_charges(
self,
@@ -413,9 +453,7 @@ def get_mulliken_charges(
spin: Tensor | float | int | None = defaults.SPIN,
**kwargs: Any,
) -> Tensor:
- return self.get_property(
- "charges", positions, chrg=chrg, spin=spin, **kwargs
- )
+ return self.get_charges(positions, chrg=chrg, spin=spin, **kwargs)
def get_iterations(
self,
@@ -424,9 +462,11 @@ def get_iterations(
spin: Tensor | float | int | None = defaults.SPIN,
**kwargs: Any,
) -> Tensor:
- return self.get_property(
+ prop = self.get_property(
"iterations", positions, chrg=chrg, spin=spin, **kwargs
)
+ assert isinstance(prop, Tensor)
+ return prop
def get_mo_energies(
self,
@@ -435,9 +475,11 @@ def get_mo_energies(
spin: Tensor | float | int | None = defaults.SPIN,
**kwargs: Any,
) -> Tensor:
- return self.get_property(
+ prop = self.get_property(
"mo_energies", positions, chrg=chrg, spin=spin, **kwargs
)
+ assert isinstance(prop, Tensor)
+ return prop
def get_occupation(
self,
@@ -446,9 +488,11 @@ def get_occupation(
spin: Tensor | float | int | None = defaults.SPIN,
**kwargs: Any,
) -> Tensor:
- return self.get_property(
+ prop = self.get_property(
"occupation", positions, chrg=chrg, spin=spin, **kwargs
)
+ assert isinstance(prop, Tensor)
+ return prop
def get_potential(
self,
@@ -457,6 +501,8 @@ def get_potential(
spin: Tensor | float | int | None = defaults.SPIN,
**kwargs: Any,
) -> Tensor:
- return self.get_property(
+ prop = self.get_property(
"potential", positions, chrg=chrg, spin=spin, **kwargs
)
+ assert isinstance(prop, Tensor)
+ return prop
diff --git a/src/dxtb/_src/calculators/types/autograd.py b/src/dxtb/_src/calculators/types/autograd.py
index e812618d..1ad9a1cc 100644
--- a/src/dxtb/_src/calculators/types/autograd.py
+++ b/src/dxtb/_src/calculators/types/autograd.py
@@ -69,7 +69,10 @@ class AutogradCalculator(EnergyCalculator):
"pol_deriv",
"hyperpolarizability",
"ir",
+ "ir_intensity",
"raman",
+ "raman_intensity",
+ "raman_depol",
]
@cdec.requires_positions_grad
@@ -385,7 +388,6 @@ def vibration(
hess,
project_translational=project_translational,
project_rotational=project_rotational,
- **kwargs,
)
return a
@@ -471,8 +473,9 @@ def dipole_deriv(
positions: Tensor,
chrg: Tensor | float | int = defaults.CHRG,
spin: Tensor | float | int | None = defaults.SPIN,
- use_analytical: bool = True,
+ use_analytical_dipmom: bool = True,
use_functorch: bool = False,
+ **kwargs: Any,
) -> Tensor:
r"""
Calculate cartesian dipole derivative :math:`\mu'`.
@@ -500,7 +503,7 @@ def dipole_deriv(
Total charge. Defaults to 0.
spin : Tensor | float | int, optional
Number of unpaired electrons. Defaults to ``None``.
- use_analytical: bool, optional
+ use_analytical_dipmom: bool, optional
Whether to use the analytically calculated dipole moment for AD or
the automatically differentiated dipole moment.
use_functorch: bool, optional
@@ -511,7 +514,7 @@ def dipole_deriv(
Tensor
Cartesian dipole derivative of shape ``(..., 3, nat, 3)``.
"""
- dip_fcn = self._get_dipole_fcn(use_analytical)
+ dip_fcn = self._get_dipole_fcn(use_analytical_dipmom)
if use_functorch is True:
# pylint: disable=import-outside-toplevel
@@ -584,6 +587,10 @@ def polarizability(
Number of unpaired electrons. Defaults to ``None``.
use_functorch: bool, optional
Whether to use functorch or the standard (slower) autograd.
+ use_analytical: bool, optional
+ Whether to use the analytically calculated dipole moment for AD or
+ the automatically differentiated dipole moment. Defaults to
+ ``False``.
derived_quantity: Literal['energy', 'dipole'], optional
Which derivative to calculate for the polarizability, i.e.,
derivative of dipole moment or energy w.r.t field.
@@ -656,6 +663,7 @@ def pol_deriv(
spin: Tensor | float | int | None = defaults.SPIN,
use_functorch: bool = False,
derived_quantity: Literal["energy", "dipole"] = "dipole",
+ **kwargs: Any,
) -> Tensor:
r"""
Calculate the cartesian polarizability derivative :math:`\chi`.
@@ -696,12 +704,18 @@ def pol_deriv(
Tensor
Polarizability derivative shape ``(..., 3, 3, nat, 3)``.
"""
+ use_analytical = kwargs.pop("use_analytical", False)
+
if use_functorch is False:
# pylint: disable=import-outside-toplevel
from tad_mctc.autograd import jac
a = self.polarizability(
- positions, chrg, spin, use_functorch=use_functorch
+ positions,
+ chrg,
+ spin,
+ use_functorch=use_functorch,
+ use_analytical=use_analytical,
)
# d(3, 3) / d(nat, 3) -> (3, 3, nat*3) -> (3, 3, nat, 3)
@@ -712,7 +726,12 @@ def pol_deriv(
from tad_mctc.autograd import jacrev
chi = jacrev(self.polarizability, argnums=0)(
- positions, chrg, spin, use_functorch, derived_quantity
+ positions,
+ chrg,
+ spin,
+ use_functorch,
+ use_analytical,
+ derived_quantity,
)
assert isinstance(chi, Tensor)
@@ -849,6 +868,7 @@ def ir(
chrg: Tensor | float | int = defaults.CHRG,
spin: Tensor | float | int | None = defaults.SPIN,
use_functorch: bool = False,
+ **kwargs: Any,
) -> IRResult:
"""
Calculate the frequencies and intensities of IR spectra.
@@ -876,7 +896,7 @@ def ir(
logger.debug("IR spectrum: Start.")
# run vibrational analysis first
- vib_res = self.vibration(positions, chrg, spin)
+ vib_res = self.vibration(positions, chrg, spin, **kwargs)
# TODO: Figure out how to run func transforms 2x properly
# (improve: Hessian does not need dipole integral but dipder does)
@@ -886,7 +906,7 @@ def ir(
# calculate nuclear dipole derivative dmu/dR: (..., 3, nat, 3)
dmu_dr = self.dipole_deriv(
- positions, chrg, spin, use_functorch=use_functorch
+ positions, chrg, spin, use_functorch=use_functorch, **kwargs
)
intensities = ir_ints(dmu_dr, vib_res.modes)
@@ -902,6 +922,7 @@ def raman(
chrg: Tensor | float | int = defaults.CHRG,
spin: Tensor | float | int | None = defaults.SPIN,
use_functorch: bool = False,
+ **kwargs: Any,
) -> RamanResult:
"""
Calculate the frequencies, static intensities and depolarization ratio
@@ -937,7 +958,9 @@ def raman(
# TODO: Figure out how to run func transforms 2x properly
# (improve: Hessian does not need dipole integral but dipder does)
- self.reset()
+ self.classicals.reset_all()
+ self.interactions.reset_all()
+ self.integrals.reset_all()
# d(..., 3, 3) / d(..., nat, 3) -> (..., 3, 3, nat, 3)
da_dr = self.pol_deriv(
@@ -1014,7 +1037,9 @@ def calculate(
props = list(EnergyCalculator.implemented_properties)
props.remove("bond_orders")
+
if set(props) & set(properties):
+ # energy is required by all derivative properties below
self.energy(positions, chrg, spin, **kwargs)
if "forces" in properties:
@@ -1041,8 +1066,8 @@ def calculate(
if "hyperpolarizability" in properties:
self.hyperpolarizability(positions, chrg, spin, **kwargs)
- if {"ir", "ir_intensities"} in set(properties):
+ if {"ir"} & set(properties):
self.ir(positions, chrg, spin, **kwargs)
- if {"raman", "raman_intensities", "raman_depol"} & set(properties):
+ if {"raman"} & set(properties):
self.raman(positions, chrg, spin, **kwargs)
diff --git a/src/dxtb/_src/calculators/types/base.py b/src/dxtb/_src/calculators/types/base.py
index c17a32f4..1f1f6a49 100644
--- a/src/dxtb/_src/calculators/types/base.py
+++ b/src/dxtb/_src/calculators/types/base.py
@@ -372,7 +372,29 @@ def get_cache_key(self, key: str) -> str | None:
def __str__(self) -> str:
"""Return a string representation of the Cache object."""
- return f"{self.__class__.__name__}({', '.join([f'{key}={getattr(self, key)!r}' for key in self.__slots__])})"
+ counter = 0
+ l = []
+ for key in self.__slots__:
+ # skip "_cache_keys"
+ if key.startswith("_"):
+ continue
+
+ attr = getattr(self, key)
+
+ # count populated values
+ if attr is not None:
+ counter += 1
+
+ # reduce printout for tensors
+ if isinstance(attr, Tensor):
+ attr = attr.shape if len(attr.shape) > 0 else attr
+
+ l.append(f"{key}={attr!r}")
+
+ return (
+ f"{self.__class__.__name__}(populated={counter}/{len(l)}, "
+ f"{', '.join(l)})"
+ )
def __repr__(self) -> str:
"""Return a representation of the Cache object."""
@@ -715,7 +737,7 @@ def get_property(
# For some reason the calculator was not able to do what we want...
if name not in self.cache:
raise PropertyNotImplementedError(
- f"Property '{name}' not present after calculation. "
+ f"Property '{name}' not present after calculation.\n"
"This seems like an internal error. (Maybe the method you "
"are calling has no cache decorator?)"
)
diff --git a/src/dxtb/_src/cli/driver.py b/src/dxtb/_src/cli/driver.py
index e377c9e8..6ce29767 100644
--- a/src/dxtb/_src/cli/driver.py
+++ b/src/dxtb/_src/cli/driver.py
@@ -20,7 +20,6 @@
from __future__ import annotations
-import logging
from argparse import Namespace
from pathlib import Path
diff --git a/src/dxtb/_src/components/interactions/field/efield.py b/src/dxtb/_src/components/interactions/field/efield.py
index 3f4ff3f1..66dbe2ed 100644
--- a/src/dxtb/_src/components/interactions/field/efield.py
+++ b/src/dxtb/_src/components/interactions/field/efield.py
@@ -27,7 +27,7 @@
from tad_mctc.math import einsum
from dxtb import IndexHelper
-from dxtb._src.typing import Any, Slicers, Tensor, override
+from dxtb._src.typing import Slicers, Tensor, override
from dxtb._src.typing.exceptions import DeviceError, DtypeError
from ..base import Interaction, InteractionCache
diff --git a/src/dxtb/_src/exlibs/xitorch/_impls/linalg/solve.py b/src/dxtb/_src/exlibs/xitorch/_impls/linalg/solve.py
index b0455212..b93edb4a 100644
--- a/src/dxtb/_src/exlibs/xitorch/_impls/linalg/solve.py
+++ b/src/dxtb/_src/exlibs/xitorch/_impls/linalg/solve.py
@@ -222,7 +222,7 @@ def cg(
# move to the next index
pk = pk_1
- zk = zk_1
+ # zk = zk_1
xk = xk_1
rk = rk_1
rkzk = rkzk_1
diff --git a/src/dxtb/_src/exlibs/xitorch/_impls/linalg/symeig.py b/src/dxtb/_src/exlibs/xitorch/_impls/linalg/symeig.py
index 375f02d3..4ee6cfb2 100644
--- a/src/dxtb/_src/exlibs/xitorch/_impls/linalg/symeig.py
+++ b/src/dxtb/_src/exlibs/xitorch/_impls/linalg/symeig.py
@@ -102,7 +102,7 @@ def backward(ctx, grad_eival, grad_eivec):
# if in debug mode, check the degeneracy requirements
if in_debug_mode:
- degenerate = torch.any(idx)
+ # degenerate = torch.any(idx)
xtg = eivect @ grad_eivec
diff_xtg = (xtg - xtg.transpose(-2, -1).conj())[idx]
reqsat = torch.allclose(diff_xtg, torch.zeros_like(diff_xtg))
@@ -173,7 +173,7 @@ def davidson(
max_niter: int = 1000,
nguess: int | None = None,
v_init: str = "randn",
- max_addition: int | None = None,
+ # max_addition: int | None = None,
min_eps: float = 1e-6,
verbose: bool = False,
**unused,
@@ -207,8 +207,8 @@ def davidson(
if nguess is None:
nguess = neig
- if max_addition is None:
- max_addition = neig
+ # if max_addition is None:
+ # max_addition = neig
# get the shape of the transformation
na = A.shape[-1]
@@ -219,11 +219,11 @@ def davidson(
dtype = A.dtype
device = A.device
- prev_eigvals = None
prev_eigvalT = None
- stop_reason = "max_niter"
- shift_is_eigvalT = False
- idx = torch.arange(neig).unsqueeze(-1) # (neig, 1)
+ # prev_eigvals = None
+ # stop_reason = "max_niter"
+ # shift_is_eigvalT = False
+ # idx = torch.arange(neig).unsqueeze(-1) # (neig, 1)
# set up the initial guess
V = _set_initial_v(
diff --git a/src/dxtb/_src/integral/driver/pytorch/impls/md/explicit.py b/src/dxtb/_src/integral/driver/pytorch/impls/md/explicit.py
index 8f7a6f9c..b32af8f4 100644
--- a/src/dxtb/_src/integral/driver/pytorch/impls/md/explicit.py
+++ b/src/dxtb/_src/integral/driver/pytorch/impls/md/explicit.py
@@ -1443,8 +1443,6 @@ def de_f(
e302 = xij * e201 + rpi * e202
e021 = xij * e010 + rpj * e011
- e022 = xij * e011
- e032 = xij * e021 + rpj * e022
e311 = xij * e300 + rpj * e301 + 2 * e302
e410 = rpi * e310 + e311
@@ -1511,8 +1509,6 @@ def de_f(
e302 = xij * e201 + rpi * e202
e021 = xij * e010 + rpj * e011
- e022 = xij * e011
- # e032 = xij * e021 + rpj * e022
e311 = xij * e300 + rpj * e301 + 2 * e302
e410 = rpi * e310 + e311
diff --git a/src/dxtb/_src/integral/driver/pytorch/overlap.py b/src/dxtb/_src/integral/driver/pytorch/overlap.py
index 560800de..1464ac22 100644
--- a/src/dxtb/_src/integral/driver/pytorch/overlap.py
+++ b/src/dxtb/_src/integral/driver/pytorch/overlap.py
@@ -23,7 +23,6 @@
from __future__ import annotations
-import torch
from tad_mctc.convert import symmetrize
from dxtb._src.typing import Tensor
diff --git a/test/test_calculator/test_cache/test_properties.py b/test/test_calculator/test_cache/test_properties.py
index 0b838fe5..8524919a 100644
--- a/test/test_calculator/test_cache/test_properties.py
+++ b/test/test_calculator/test_cache/test_properties.py
@@ -24,7 +24,11 @@
import torch
from dxtb import GFN1_XTB
-from dxtb._src.calculators.properties.vibration import VibResult
+from dxtb._src.calculators.properties.vibration import (
+ IRResult,
+ RamanResult,
+ VibResult,
+)
from dxtb._src.exlibs.available import has_libcint
from dxtb._src.typing import DD, Literal, Tensor
from dxtb.calculators import (
@@ -32,6 +36,7 @@
AutogradCalculator,
GFN1Calculator,
)
+from dxtb.components.field import new_efield
from ...conftest import DEVICE
@@ -230,7 +235,13 @@ def test_dipole(dtype: torch.dtype) -> None:
pos = positions.clone().requires_grad_(True)
options = dict(opts, **{"scf_mode": "full", "mixer": "anderson"})
- calc = AutogradCalculator(numbers, GFN1_XTB, opts=options, **dd)
+
+ field = torch.tensor([0, 0, 0], **dd, requires_grad=True)
+ efield = new_efield(field, **dd)
+
+ calc = AutogradCalculator(
+ numbers, GFN1_XTB, interaction=efield, opts=options, **dd
+ )
assert calc._ncalcs == 0
prop = calc.get_dipole(pos)
@@ -262,67 +273,33 @@ def test_ir(dtype: torch.dtype) -> None:
pos = positions.clone().requires_grad_(True)
options = dict(opts, **{"scf_mode": "full", "mixer": "anderson"})
- calc = AutogradCalculator(numbers, GFN1_XTB, opts=options, **dd)
- assert calc._ncalcs == 0
-
- prop = calc.get_ir(pos)
- assert calc._ncalcs == 1
- assert isinstance(prop, Tensor)
-
- # cache is used for same calc
- prop = calc.get_ir(pos)
- assert calc._ncalcs == 1
- assert isinstance(prop, Tensor)
-
- # cache is used for energy
- prop = calc.get_energy(pos)
- assert calc._ncalcs == 1
- assert isinstance(prop, Tensor)
- # cache is used for IR intensities
- prop = calc.get_ir_intensities(pos)
- assert calc._ncalcs == 1
- assert isinstance(prop, Tensor)
+ field = torch.tensor([0, 0, 0], **dd, requires_grad=True)
+ efield = new_efield(field, **dd)
- # check reset
- calc.cache.reset_all()
- assert len(calc.cache.list_cached_properties()) == 0
-
-
-@pytest.mark.skipif(not has_libcint, reason="libcint not available")
-@pytest.mark.parametrize("dtype", [torch.float, torch.double])
-def test_raman(dtype: torch.dtype) -> None:
- dd: DD = {"device": DEVICE, "dtype": dtype}
-
- numbers = torch.tensor([3, 1], device=DEVICE)
- positions = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]], **dd)
- pos = positions.clone().requires_grad_(True)
-
- options = dict(opts, **{"scf_mode": "full", "mixer": "anderson"})
- calc = AutogradCalculator(numbers, GFN1_XTB, opts=options, **dd)
+ calc = AutogradCalculator(
+ numbers, GFN1_XTB, opts=options, interaction=efield, **dd
+ )
assert calc._ncalcs == 0
- prop = calc.get_raman(pos)
- assert calc._ncalcs == 1
- assert isinstance(prop, Tensor)
+ kwargs = {"use_analytical_dipmom": False, "use_functorch": True}
- # cache is used for same calc
- prop = calc.get_raman(pos)
+ prop = calc.get_ir(pos, **kwargs)
assert calc._ncalcs == 1
- assert isinstance(prop, Tensor)
+ assert isinstance(prop, IRResult)
- # cache is used for energy
- prop = calc.get_energy(pos)
+ # cache is used for same calc
+ prop = calc.get_ir(pos, **kwargs)
assert calc._ncalcs == 1
- assert isinstance(prop, Tensor)
+ assert isinstance(prop, IRResult)
- # cache is used for Raman depolarization ratio
- prop = calc.get_raman_depol(pos)
+ # cache is used for IR intensities
+ prop = calc.get_ir_intensities(pos, **kwargs)
assert calc._ncalcs == 1
assert isinstance(prop, Tensor)
- # cache is used for IR intensities
- prop = calc.get_raman_intensities(pos)
+ # cache is used for energy (kwargs mess up the cache key!)
+ prop = calc.get_energy(pos)
assert calc._ncalcs == 1
assert isinstance(prop, Tensor)
diff --git a/test/test_libcint/test_overlap.py b/test/test_libcint/test_overlap.py
index 58886aab..84cac4f2 100644
--- a/test/test_libcint/test_overlap.py
+++ b/test/test_libcint/test_overlap.py
@@ -40,6 +40,8 @@
if has_libcint is True:
from dxtb._src.exlibs import libcint
+if has_pyscf is True:
+ from dxtb._src.exlibs.pyscf.mol import M
from ..conftest import DEVICE
from .samples import samples
diff --git a/test/test_utils/test_eigh.py b/test/test_utils/test_eigh.py
index 71975a97..e82a2c7e 100644
--- a/test/test_utils/test_eigh.py
+++ b/test/test_utils/test_eigh.py
@@ -23,12 +23,11 @@
import pytest
import torch
from tad_mctc.convert import symmetrize
-from tad_mctc.storch.linalg import eighb
from torch.autograd.gradcheck import gradcheck
from dxtb._src.exlibs.xitorch import LinearOperator
from dxtb._src.exlibs.xitorch.linalg import symeig
-from dxtb._src.typing import Literal, Tensor
+from dxtb._src.typing import Tensor
from ..conftest import DEVICE
From c4a0e405ec996a18875f2e280ef647c3c6274b1f Mon Sep 17 00:00:00 2001
From: marvinfriede <51965259+marvinfriede@users.noreply.github.com>
Date: Mon, 16 Sep 2024 12:32:12 -0500
Subject: [PATCH 03/12] Test macOS workflow
---
.github/workflows/macos-x86.yaml | 104 ++++++++++++++++++
.github/workflows/ubuntu-pytorch-1.yaml | 2 +-
.github/workflows/ubuntu.yaml | 2 +-
setup.cfg | 3 +-
.../test_cache/test_properties.py | 6 +-
tox.ini | 16 ++-
6 files changed, 119 insertions(+), 14 deletions(-)
create mode 100644 .github/workflows/macos-x86.yaml
diff --git a/.github/workflows/macos-x86.yaml b/.github/workflows/macos-x86.yaml
new file mode 100644
index 00000000..638d0cf3
--- /dev/null
+++ b/.github/workflows/macos-x86.yaml
@@ -0,0 +1,104 @@
+# This file is part of dxtb.
+#
+# SPDX-Identifier: Apache-2.0
+# Copyright (C) 2024 Grimme Group
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+name: Tests (macOS x86)
+
+on:
+ push:
+ branches:
+ - main
+ - master
+ paths-ignore:
+ - "doc*/**"
+ - "./*.ya?ml"
+ - "**/*.md"
+ - "**/*.rst"
+
+ pull_request:
+ paths-ignore:
+ - "doc*/**"
+ - "./*.ya?ml"
+ - "**/*.md"
+ - "**/*.rst"
+
+ workflow_dispatch:
+
+jobs:
+ main:
+ strategy:
+ fail-fast: false
+ matrix:
+ os: [macos-12, macos-13]
+ python-version: ["3.8", "3.9", "3.10", "3.11"]
+ # only test oldest and newest version of torch
+ torch-version: ["1.11.0", "2.2.2"]
+ exclude:
+ # Check latest versions here: https://download.pytorch.org/whl/torch/
+ #
+ # PyTorch issues:
+ # 3.11: https://github.com/pytorch/pytorch/issues/86566
+ # 3.12: https://github.com/pytorch/pytorch/issues/110436
+ # 3.13: https://github.com/pytorch/pytorch/issues/130249
+ #
+ # Wheels for macOS x86_64 are deprecated since 2.3.0
+ #
+ # PyTorch<2.2.0 does only support Python<3.12 (all platforms)
+ - python-version: "3.12"
+ torch-version: "1.11.0"
+ - python-version: "3.12"
+ torch-version: "1.12.1"
+ - python-version: "3.12"
+ torch-version: "1.13.1"
+ - python-version: "3.12"
+ torch-version: "2.0.1"
+ - python-version: "3.12"
+ torch-version: "2.1.2"
+ # PyTorch<2.0.0 does only support Python<3.11 (macOS and Windows)
+ - python-version: "3.11"
+ torch-version: "1.11.0"
+ - python-version: "3.11"
+ torch-version: "1.12.1"
+ - python-version: "3.11"
+ torch-version: "1.13.1"
+
+ runs-on: ${{ matrix.os }}
+
+ defaults:
+ run:
+ shell: bash {0}
+
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v3
+
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v5
+ with:
+ python-version: ${{ matrix.python-version }}
+
+ - name: Install dependencies
+ run: |
+ python3 -m pip install --upgrade pip
+ python3 -m pip install tox
+
+ - name: Determine TOXENV
+ run: echo "TOXENV=py$(echo ${{ matrix.python-version }} | tr -d '.')-torch$(echo ${{ matrix.torch-version }} | tr -d '.')" >> $GITHUB_ENV
+
+ - name: Print TOXENV
+ run: echo "TOXENV is set to '${{ env.TOXENV }}'."
+
+ - name: Unittests with tox
+ run: tox -e ${{ env.TOXENV }}-nolibcint
diff --git a/.github/workflows/ubuntu-pytorch-1.yaml b/.github/workflows/ubuntu-pytorch-1.yaml
index e35c52e2..bdddd281 100644
--- a/.github/workflows/ubuntu-pytorch-1.yaml
+++ b/.github/workflows/ubuntu-pytorch-1.yaml
@@ -85,7 +85,7 @@ jobs:
run: echo "TOXENV is set to '${{ env.TOXENV }}'."
- name: Unittests with tox
- run: tox -e ${{ env.TOXENV }}
+ run: tox -e ${{ env.TOXENV }}-libcint
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v3
diff --git a/.github/workflows/ubuntu.yaml b/.github/workflows/ubuntu.yaml
index 26287947..13b405f2 100644
--- a/.github/workflows/ubuntu.yaml
+++ b/.github/workflows/ubuntu.yaml
@@ -86,7 +86,7 @@ jobs:
run: echo "TOXENV is set to '${{ env.TOXENV }}'."
- name: Unittests with tox
- run: tox -e ${{ env.TOXENV }}
+ run: tox -e ${{ env.TOXENV }}-libcint
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v3
diff --git a/setup.cfg b/setup.cfg
index d291616f..c4b70658 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -43,7 +43,6 @@ install_requires =
scipy
tad-dftd3>=0.3.0
tad-dftd4>=0.2.0
- tad-libcint>=0.1.0
tad-mctc>=0.2.0
tad-multicharge
tomli
@@ -75,6 +74,8 @@ dev =
pytest-xdist
pyyaml
tox
+libcint =
+ tad-libcint>=0.1.0
tox =
covdefaults
pyscf
diff --git a/test/test_calculator/test_cache/test_properties.py b/test/test_calculator/test_cache/test_properties.py
index 8524919a..60cf2509 100644
--- a/test/test_calculator/test_cache/test_properties.py
+++ b/test/test_calculator/test_cache/test_properties.py
@@ -24,11 +24,7 @@
import torch
from dxtb import GFN1_XTB
-from dxtb._src.calculators.properties.vibration import (
- IRResult,
- RamanResult,
- VibResult,
-)
+from dxtb._src.calculators.properties.vibration import IRResult, VibResult
from dxtb._src.exlibs.available import has_libcint
from dxtb._src.typing import DD, Literal, Tensor
from dxtb.calculators import (
diff --git a/tox.ini b/tox.ini
index d7d82f03..3f4eb551 100644
--- a/tox.ini
+++ b/tox.ini
@@ -18,11 +18,11 @@
min_version = 4.0
isolated_build = True
envlist =
- py38-torch{1110,1121,1131,201,212,222,231,240},
- py39-torch{1110,1121,1131,201,212,222,231,240},
- py310-torch{1110,1121,1131,201,212,222,231,240},
- py311-torch{1131,201,212,222,231,240}
- py312-torch{222,231,240}
+ py38-torch{1110,1121,1131,201,212,222,231,240}-{nolibcint,libcint},
+ py39-torch{1110,1121,1131,201,212,222,231,240}-{nolibcint,libcint},
+ py310-torch{1110,1121,1131,201,212,222,231,240}-{nolibcint,libcint},
+ py311-torch{1131,201,212,222,231,240}-{nolibcint,libcint},
+ py312-torch{222,231,240}-{nolibcint,libcint}
[testenv]
setenv =
@@ -44,7 +44,11 @@ deps =
torch230: torch==2.3.0
torch231: torch==2.3.1
torch240: torch==2.4.0
- .[tox]
+
+extras =
+ tox
+ libcint: libcint
+
commands =
pytest -vv {posargs: \
-m "not large" \
From 1175927a6373422a98e4b1cb97ae5477f786f58b Mon Sep 17 00:00:00 2001
From: marvinfriede <51965259+marvinfriede@users.noreply.github.com>
Date: Mon, 16 Sep 2024 13:13:27 -0500
Subject: [PATCH 04/12] Reduce test fail percentage
---
pyproject.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pyproject.toml b/pyproject.toml
index 521cce91..e2aefde1 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -50,7 +50,7 @@ omit = [
]
[tool.coverage.report]
-fail_under = 75
+fail_under = 70
[tool.black]
From 7b60f796c5077ed3e83b2aca08269c0e9c9744d3 Mon Sep 17 00:00:00 2001
From: marvinfriede <51965259+marvinfriede@users.noreply.github.com>
Date: Mon, 16 Sep 2024 13:14:02 -0500
Subject: [PATCH 05/12] Windows, macOS arm, ubuntu no libcint
---
.github/workflows/macos-arm.yaml | 96 ++++++++++++++++++++++
.github/workflows/macos-x86.yaml | 2 +-
.github/workflows/ubuntu-nolibcint.yaml | 99 +++++++++++++++++++++++
.github/workflows/windows.yaml | 102 ++++++++++++++++++++++++
4 files changed, 298 insertions(+), 1 deletion(-)
create mode 100644 .github/workflows/macos-arm.yaml
create mode 100644 .github/workflows/ubuntu-nolibcint.yaml
create mode 100644 .github/workflows/windows.yaml
diff --git a/.github/workflows/macos-arm.yaml b/.github/workflows/macos-arm.yaml
new file mode 100644
index 00000000..27c922e2
--- /dev/null
+++ b/.github/workflows/macos-arm.yaml
@@ -0,0 +1,96 @@
+# This file is part of dxtb.
+#
+# SPDX-Identifier: Apache-2.0
+# Copyright (C) 2024 Grimme Group
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+name: Tests (macOS arm)
+
+on:
+ push:
+ branches:
+ - main
+ - master
+ paths-ignore:
+ - "doc*/**"
+ - "./*.ya?ml"
+ - "**/*.md"
+ - "**/*.rst"
+
+ pull_request:
+ paths-ignore:
+ - "doc*/**"
+ - "./*.ya?ml"
+ - "**/*.md"
+ - "**/*.rst"
+
+ workflow_dispatch:
+
+jobs:
+ main:
+ strategy:
+ fail-fast: false
+ matrix:
+ os: [macos-14]
+ # Python 3.8/3.9 is not on macos-latest (macos-14-arm64)
+ # https://github.com/actions/setup-python/issues/696
+ python-version: ["3.10", "3.11", "3.12"]
+ # only test oldest and newest version of torch
+ torch-version: ["1.11.0", "2.4.0"]
+ exclude:
+ # Check latest versions here: https://download.pytorch.org/whl/torch/
+ #
+ # PyTorch now fully supports Python<=3.11
+ # see: https://github.com/pytorch/pytorch/issues/86566
+ #
+ # PyTorch now supports Python 3.12 (macOS only from 2.2)
+ # see: https://github.com/pytorch/pytorch/issues/110436
+ - python-version: "3.12"
+ torch-version: "1.11.0"
+ # PyTorch<1.13.0 only supports Python<=3.10
+ # On macOS and Windows, 1.13.x is also not supported for Python>=3.10
+ - python-version: "3.11"
+ torch-version: "1.11.0"
+ - python-version: "3.11"
+ torch-version: "1.12.1"
+ - python-version: "3.11"
+ torch-version: "1.13.1"
+
+ runs-on: ${{ matrix.os }}
+
+ defaults:
+ run:
+ shell: bash {0}
+
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v3
+
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v5
+ with:
+ python-version: ${{ matrix.python-version }}
+
+ - name: Install dependencies
+ run: |
+ python3 -m pip install --upgrade pip
+ python3 -m pip install tox
+
+ - name: Determine TOXENV
+ run: echo "TOXENV=py$(echo ${{ matrix.python-version }} | tr -d '.')-torch$(echo ${{ matrix.torch-version }} | tr -d '.')" >> $GITHUB_ENV
+
+ - name: Print TOXENV
+ run: echo "TOXENV is set to '${{ env.TOXENV }}'."
+
+ - name: Unittests with tox
+ run: tox -e ${{ env.TOXENV }}-nolibcint
diff --git a/.github/workflows/macos-x86.yaml b/.github/workflows/macos-x86.yaml
index 638d0cf3..844b67bc 100644
--- a/.github/workflows/macos-x86.yaml
+++ b/.github/workflows/macos-x86.yaml
@@ -1,4 +1,4 @@
-# This file is part of tad-mctc.
+# This file is part of dxtb.
#
# SPDX-Identifier: Apache-2.0
# Copyright (C) 2024 Grimme Group
diff --git a/.github/workflows/ubuntu-nolibcint.yaml b/.github/workflows/ubuntu-nolibcint.yaml
new file mode 100644
index 00000000..5da9a082
--- /dev/null
+++ b/.github/workflows/ubuntu-nolibcint.yaml
@@ -0,0 +1,99 @@
+# This file is part of dxtb.
+#
+# SPDX-Identifier: Apache-2.0
+# Copyright (C) 2024 Grimme Group
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+name: Tests (Ubuntu, no libcint)
+
+on:
+ push:
+ branches:
+ - main
+ - master
+ paths-ignore:
+ - "doc*/**"
+ - "./*.ya?ml"
+ - "**/*.md"
+ - "**/*.rst"
+
+ pull_request:
+ paths-ignore:
+ - "doc*/**"
+ - "./*.ya?ml"
+ - "**/*.md"
+ - "**/*.rst"
+
+ workflow_dispatch:
+
+jobs:
+ main:
+ strategy:
+ fail-fast: false
+ matrix:
+ os: [ubuntu-latest]
+ python-version: ["3.8", "3.9", "3.10", "3.11"]
+ torch-version: ["2.3.1"]
+ exclude:
+ # Check latest versions here: https://download.pytorch.org/whl/torch/
+ #
+ # PyTorch issues:
+ # 3.11: https://github.com/pytorch/pytorch/issues/86566
+ # 3.12: https://github.com/pytorch/pytorch/issues/110436
+ # 3.13: https://github.com/pytorch/pytorch/issues/130249
+ #
+ # PyTorch<2.2.0 only supports Python<3.12 (all platforms)
+ - python-version: "3.12"
+ torch-version: "2.0.1"
+ - python-version: "3.12"
+ torch-version: "2.1.2"
+
+ runs-on: ${{ matrix.os }}
+
+ defaults:
+ run:
+ shell: bash {0}
+
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v3
+
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v5
+ with:
+ python-version: ${{ matrix.python-version }}
+
+ - name: Install dependencies
+ run: |
+ python3 -m pip install --upgrade pip
+ python3 -m pip install tox
+
+ - name: Determine TOXENV
+ run: echo "TOXENV=py$(echo ${{ matrix.python-version }} | tr -d '.')-torch$(echo ${{ matrix.torch-version }} | tr -d '.')" >> $GITHUB_ENV
+
+ - name: Print TOXENV
+ run: echo "TOXENV is set to '${{ env.TOXENV }}'."
+
+ - name: Unittests with tox
+ run: tox -e ${{ env.TOXENV }}-nolibcint
+
+ - name: Upload coverage to Codecov
+ uses: codecov/codecov-action@v3
+ if: >
+ matrix.python-version == '3.11' &&
+ matrix.torch-version == '2.2.2' &&
+ matrix.os == 'ubuntu-latest'
+ with:
+ files: ./coverage.xml # optional
+ token: ${{ secrets.CODECOV_TOKEN }} # required
+ verbose: true # optional (default = false)
diff --git a/.github/workflows/windows.yaml b/.github/workflows/windows.yaml
new file mode 100644
index 00000000..3a37e4b0
--- /dev/null
+++ b/.github/workflows/windows.yaml
@@ -0,0 +1,102 @@
+# This file is part of dxtb.
+#
+# SPDX-Identifier: Apache-2.0
+# Copyright (C) 2024 Grimme Group
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+name: Tests (Windows)
+
+on:
+ push:
+ branches:
+ - main
+ - master
+ paths-ignore:
+ - "doc*/**"
+ - "./*.ya?ml"
+ - "**/*.md"
+ - "**/*.rst"
+
+ pull_request:
+ paths-ignore:
+ - "doc*/**"
+ - "./*.ya?ml"
+ - "**/*.md"
+ - "**/*.rst"
+
+ workflow_dispatch:
+
+jobs:
+ main:
+ strategy:
+ fail-fast: false
+ matrix:
+ os: [windows-latest]
+ python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"]
+ # only test oldest and newest version of torch
+ torch-version: ["1.11.0", "2.4.0"]
+ exclude:
+ # Check latest versions here: https://download.pytorch.org/whl/torch/
+ #
+ # PyTorch issues:
+ # 3.11: https://github.com/pytorch/pytorch/issues/86566
+ # 3.12: https://github.com/pytorch/pytorch/issues/110436
+ # 3.13: https://github.com/pytorch/pytorch/issues/130249
+ #
+ # PyTorch<2.2.0 only supports Python<3.12 (all platforms)
+ - python-version: "3.12"
+ torch-version: "1.11.0"
+ - python-version: "3.12"
+ torch-version: "1.12.1"
+ - python-version: "3.12"
+ torch-version: "1.13.1"
+ - python-version: "3.12"
+ torch-version: "2.0.1"
+ - python-version: "3.12"
+ torch-version: "2.1.2"
+ # PyTorch<2.0.0 only supports Python<3.11 (macOS and Windows)
+ - python-version: "3.11"
+ torch-version: "1.11.0"
+ - python-version: "3.11"
+ torch-version: "1.12.1"
+ - python-version: "3.11"
+ torch-version: "1.13.1"
+
+ runs-on: ${{ matrix.os }}
+
+ defaults:
+ run:
+ shell: bash {0}
+
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v3
+
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v5
+ with:
+ python-version: ${{ matrix.python-version }}
+
+ - name: Install dependencies
+ run: |
+ python3 -m pip install --upgrade pip
+ python3 -m pip install tox
+
+ - name: Determine TOXENV
+ run: echo "TOXENV=py$(echo ${{ matrix.python-version }} | tr -d '.')-torch$(echo ${{ matrix.torch-version }} | tr -d '.')" >> $GITHUB_ENV
+
+ - name: Print TOXENV
+ run: echo "TOXENV is set to '${{ env.TOXENV }}'."
+
+ - name: Unittests with tox
+ run: tox -e ${{ env.TOXENV }}
From e03188ef29d948a6c24a1225dfd35889bc899d99 Mon Sep 17 00:00:00 2001
From: marvinfriede <51965259+marvinfriede@users.noreply.github.com>
Date: Mon, 16 Sep 2024 13:21:00 -0500
Subject: [PATCH 06/12] Fix workflow
---
.github/workflows/macos-arm.yaml | 8 ++++----
.github/workflows/macos-x86.yaml | 4 ++--
.github/workflows/ubuntu-nolibcint.yaml | 6 +++---
.github/workflows/ubuntu.yaml | 2 +-
.github/workflows/windows.yaml | 4 ++--
tox.ini | 11 ++++++-----
6 files changed, 18 insertions(+), 17 deletions(-)
diff --git a/.github/workflows/macos-arm.yaml b/.github/workflows/macos-arm.yaml
index 27c922e2..b043f64b 100644
--- a/.github/workflows/macos-arm.yaml
+++ b/.github/workflows/macos-arm.yaml
@@ -44,9 +44,9 @@ jobs:
os: [macos-14]
# Python 3.8/3.9 is not on macos-latest (macos-14-arm64)
# https://github.com/actions/setup-python/issues/696
- python-version: ["3.10", "3.11", "3.12"]
+ python-version: ["3.10", "3.11"]
# only test oldest and newest version of torch
- torch-version: ["1.11.0", "2.4.0"]
+ torch-version: ["1.11.0", "2.4.1"]
exclude:
# Check latest versions here: https://download.pytorch.org/whl/torch/
#
@@ -87,10 +87,10 @@ jobs:
python3 -m pip install tox
- name: Determine TOXENV
- run: echo "TOXENV=py$(echo ${{ matrix.python-version }} | tr -d '.')-torch$(echo ${{ matrix.torch-version }} | tr -d '.')" >> $GITHUB_ENV
+ run: echo "TOXENV=py$(echo ${{ matrix.python-version }} | tr -d '.')-torch$(echo ${{ matrix.torch-version }} | tr -d '.')-nolibcint" >> $GITHUB_ENV
- name: Print TOXENV
run: echo "TOXENV is set to '${{ env.TOXENV }}'."
- name: Unittests with tox
- run: tox -e ${{ env.TOXENV }}-nolibcint
+ run: tox -e ${{ env.TOXENV }}
diff --git a/.github/workflows/macos-x86.yaml b/.github/workflows/macos-x86.yaml
index 844b67bc..87009ae1 100644
--- a/.github/workflows/macos-x86.yaml
+++ b/.github/workflows/macos-x86.yaml
@@ -95,10 +95,10 @@ jobs:
python3 -m pip install tox
- name: Determine TOXENV
- run: echo "TOXENV=py$(echo ${{ matrix.python-version }} | tr -d '.')-torch$(echo ${{ matrix.torch-version }} | tr -d '.')" >> $GITHUB_ENV
+ run: echo "TOXENV=py$(echo ${{ matrix.python-version }} | tr -d '.')-torch$(echo ${{ matrix.torch-version }} | tr -d '.')-nolibcint" >> $GITHUB_ENV
- name: Print TOXENV
run: echo "TOXENV is set to '${{ env.TOXENV }}'."
- name: Unittests with tox
- run: tox -e ${{ env.TOXENV }}-nolibcint
+ run: tox -e ${{ env.TOXENV }}
diff --git a/.github/workflows/ubuntu-nolibcint.yaml b/.github/workflows/ubuntu-nolibcint.yaml
index 5da9a082..656bfc45 100644
--- a/.github/workflows/ubuntu-nolibcint.yaml
+++ b/.github/workflows/ubuntu-nolibcint.yaml
@@ -43,7 +43,7 @@ jobs:
matrix:
os: [ubuntu-latest]
python-version: ["3.8", "3.9", "3.10", "3.11"]
- torch-version: ["2.3.1"]
+ torch-version: ["2.4.1"]
exclude:
# Check latest versions here: https://download.pytorch.org/whl/torch/
#
@@ -79,13 +79,13 @@ jobs:
python3 -m pip install tox
- name: Determine TOXENV
- run: echo "TOXENV=py$(echo ${{ matrix.python-version }} | tr -d '.')-torch$(echo ${{ matrix.torch-version }} | tr -d '.')" >> $GITHUB_ENV
+ run: echo "TOXENV=py$(echo ${{ matrix.python-version }} | tr -d '.')-torch$(echo ${{ matrix.torch-version }} | tr -d '.')-nolibcint" >> $GITHUB_ENV
- name: Print TOXENV
run: echo "TOXENV is set to '${{ env.TOXENV }}'."
- name: Unittests with tox
- run: tox -e ${{ env.TOXENV }}-nolibcint
+ run: tox -e ${{ env.TOXENV }}
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v3
diff --git a/.github/workflows/ubuntu.yaml b/.github/workflows/ubuntu.yaml
index 13b405f2..ac363481 100644
--- a/.github/workflows/ubuntu.yaml
+++ b/.github/workflows/ubuntu.yaml
@@ -44,7 +44,7 @@ jobs:
os: [ubuntu-latest]
# python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"]
python-version: ["3.8", "3.9", "3.10", "3.11"]
- torch-version: ["2.0.1", "2.1.2", "2.2.2", "2.3.1"]
+ torch-version: ["2.0.1", "2.1.2", "2.2.2", "2.3.1", "2.4.1"]
exclude:
# Check latest versions here: https://download.pytorch.org/whl/torch/
#
diff --git a/.github/workflows/windows.yaml b/.github/workflows/windows.yaml
index 3a37e4b0..231acfdb 100644
--- a/.github/workflows/windows.yaml
+++ b/.github/workflows/windows.yaml
@@ -44,7 +44,7 @@ jobs:
os: [windows-latest]
python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"]
# only test oldest and newest version of torch
- torch-version: ["1.11.0", "2.4.0"]
+ torch-version: ["1.11.0", "2.4.1"]
exclude:
# Check latest versions here: https://download.pytorch.org/whl/torch/
#
@@ -93,7 +93,7 @@ jobs:
python3 -m pip install tox
- name: Determine TOXENV
- run: echo "TOXENV=py$(echo ${{ matrix.python-version }} | tr -d '.')-torch$(echo ${{ matrix.torch-version }} | tr -d '.')" >> $GITHUB_ENV
+ run: echo "TOXENV=py$(echo ${{ matrix.python-version }} | tr -d '.')-torch$(echo ${{ matrix.torch-version }} | tr -d '.')-nolibcint" >> $GITHUB_ENV
- name: Print TOXENV
run: echo "TOXENV is set to '${{ env.TOXENV }}'."
diff --git a/tox.ini b/tox.ini
index 3f4eb551..6f2fe1fe 100644
--- a/tox.ini
+++ b/tox.ini
@@ -18,11 +18,11 @@
min_version = 4.0
isolated_build = True
envlist =
- py38-torch{1110,1121,1131,201,212,222,231,240}-{nolibcint,libcint},
- py39-torch{1110,1121,1131,201,212,222,231,240}-{nolibcint,libcint},
- py310-torch{1110,1121,1131,201,212,222,231,240}-{nolibcint,libcint},
- py311-torch{1131,201,212,222,231,240}-{nolibcint,libcint},
- py312-torch{222,231,240}-{nolibcint,libcint}
+ py38-torch{1110,1121,1131,201,212,222,231,240,241}-{nolibcint,libcint},
+ py39-torch{1110,1121,1131,201,212,222,231,240,241}-{nolibcint,libcint},
+ py310-torch{1110,1121,1131,201,212,222,231,240,241}-{nolibcint,libcint},
+ py311-torch{1131,201,212,222,231,240,241}-{nolibcint,libcint},
+ py312-torch{222,231,240,241}-{nolibcint,libcint}
[testenv]
setenv =
@@ -44,6 +44,7 @@ deps =
torch230: torch==2.3.0
torch231: torch==2.3.1
torch240: torch==2.4.0
+ torch241: torch==2.4.1
extras =
tox
From 913e62066ee75bd01938e8e8871962fc6cde848e Mon Sep 17 00:00:00 2001
From: marvinfriede <51965259+marvinfriede@users.noreply.github.com>
Date: Mon, 16 Sep 2024 14:10:48 -0500
Subject: [PATCH 07/12] Make pyscf optional
---
.github/workflows/macos-arm.yaml | 2 +-
.github/workflows/macos-x86.yaml | 2 +-
.github/workflows/ubuntu-nolibcint.yaml | 2 +-
.github/workflows/ubuntu-pytorch-1.yaml | 13 +--
.github/workflows/ubuntu.yaml | 2 +-
setup.cfg | 3 +-
src/dxtb/_src/calculators/config/integral.py | 20 ++---
test/test_config/test_exlibs_available.py | 42 ++++++++++
test/test_config/test_integral.py | 85 ++++++++++++++++++++
tox.ini | 1 +
10 files changed, 146 insertions(+), 26 deletions(-)
create mode 100644 test/test_config/test_exlibs_available.py
create mode 100644 test/test_config/test_integral.py
diff --git a/.github/workflows/macos-arm.yaml b/.github/workflows/macos-arm.yaml
index b043f64b..2fd001fd 100644
--- a/.github/workflows/macos-arm.yaml
+++ b/.github/workflows/macos-arm.yaml
@@ -93,4 +93,4 @@ jobs:
run: echo "TOXENV is set to '${{ env.TOXENV }}'."
- name: Unittests with tox
- run: tox -e ${{ env.TOXENV }}
+ run: EXTRAS=pyscf tox -e ${{ env.TOXENV }}
diff --git a/.github/workflows/macos-x86.yaml b/.github/workflows/macos-x86.yaml
index 87009ae1..fd9af03c 100644
--- a/.github/workflows/macos-x86.yaml
+++ b/.github/workflows/macos-x86.yaml
@@ -101,4 +101,4 @@ jobs:
run: echo "TOXENV is set to '${{ env.TOXENV }}'."
- name: Unittests with tox
- run: tox -e ${{ env.TOXENV }}
+ run: EXTRAS=pyscf tox -e ${{ env.TOXENV }}
diff --git a/.github/workflows/ubuntu-nolibcint.yaml b/.github/workflows/ubuntu-nolibcint.yaml
index 656bfc45..a4aa8631 100644
--- a/.github/workflows/ubuntu-nolibcint.yaml
+++ b/.github/workflows/ubuntu-nolibcint.yaml
@@ -85,7 +85,7 @@ jobs:
run: echo "TOXENV is set to '${{ env.TOXENV }}'."
- name: Unittests with tox
- run: tox -e ${{ env.TOXENV }}
+ run: EXTRAS=pyscf tox -e ${{ env.TOXENV }}
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v3
diff --git a/.github/workflows/ubuntu-pytorch-1.yaml b/.github/workflows/ubuntu-pytorch-1.yaml
index bdddd281..40c04178 100644
--- a/.github/workflows/ubuntu-pytorch-1.yaml
+++ b/.github/workflows/ubuntu-pytorch-1.yaml
@@ -85,15 +85,4 @@ jobs:
run: echo "TOXENV is set to '${{ env.TOXENV }}'."
- name: Unittests with tox
- run: tox -e ${{ env.TOXENV }}-libcint
-
- - name: Upload coverage to Codecov
- uses: codecov/codecov-action@v3
- if: >
- matrix.python-version == '3.11' &&
- matrix.torch-version == '2.2.2' &&
- matrix.os == 'ubuntu-latest'
- with:
- files: ./coverage.xml # optional
- token: ${{ secrets.CODECOV_TOKEN }} # required
- verbose: true # optional (default = false)
+ run: EXTRAS=pyscf tox -e ${{ env.TOXENV }}-libcint
diff --git a/.github/workflows/ubuntu.yaml b/.github/workflows/ubuntu.yaml
index ac363481..fb62714c 100644
--- a/.github/workflows/ubuntu.yaml
+++ b/.github/workflows/ubuntu.yaml
@@ -86,7 +86,7 @@ jobs:
run: echo "TOXENV is set to '${{ env.TOXENV }}'."
- name: Unittests with tox
- run: tox -e ${{ env.TOXENV }}-libcint
+ run: EXTRAS=pyscf tox -e ${{ env.TOXENV }}-libcint
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v3
diff --git a/setup.cfg b/setup.cfg
index c4b70658..f1cddd74 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -76,9 +76,10 @@ dev =
tox
libcint =
tad-libcint>=0.1.0
+pyscf =
+ pyscf
tox =
covdefaults
- pyscf
pytest
pytest-cov
pytest-random-order
diff --git a/src/dxtb/_src/calculators/config/integral.py b/src/dxtb/_src/calculators/config/integral.py
index 660342f0..4ae09545 100644
--- a/src/dxtb/_src/calculators/config/integral.py
+++ b/src/dxtb/_src/calculators/config/integral.py
@@ -34,6 +34,15 @@ class ConfigIntegrals:
converted to integers in the constructor.
"""
+ cutoff: float
+ """
+ Real-space cutoff (in Bohr) for integral evaluation for PyTorch.
+ The ``libint`` driver ignores this option.
+ """
+
+ driver: int
+ """Type of integral driver."""
+
level: int
"""
Indicator for integrals to compute.
@@ -45,15 +54,6 @@ class ConfigIntegrals:
- 4: +quadrupole
"""
- cutoff: float
- """
- Real-space cutoff (in Bohr) for integral evaluation for PyTorch.
- The ``libint`` driver ignores this option.
- """
-
- driver: int
- """Type of integral driver."""
-
uplo: Literal["n", "l", "u"]
"""Integral mode for PyTorch integral calculation."""
@@ -99,6 +99,8 @@ def __init__(
self.driver = labels.INTDRIVER_ANALYTICAL
elif driver.casefold() in labels.INTDRIVER_AUTOGRAD_STRS:
self.driver = labels.INTDRIVER_AUTOGRAD
+ elif driver.casefold() in labels.INTDRIVER_LEGACY_STRS:
+ self.driver = labels.INTDRIVER_LEGACY
else:
raise ValueError(f"Unknown integral driver '{driver}'.")
diff --git a/test/test_config/test_exlibs_available.py b/test/test_config/test_exlibs_available.py
new file mode 100644
index 00000000..750db02c
--- /dev/null
+++ b/test/test_config/test_exlibs_available.py
@@ -0,0 +1,42 @@
+# This file is part of dxtb.
+#
+# SPDX-Identifier: Apache-2.0
+# Copyright (C) 2024 Grimme Group
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Test availability of external libraries.
+"""
+
+from dxtb._src.exlibs.available import has_libcint, has_pyscf, has_scipy
+
+
+def test_libcint() -> None:
+ if has_libcint is True:
+ assert has_libcint is True
+ else:
+ assert has_libcint is False
+
+
+def test_pyscf() -> None:
+ if has_pyscf is True:
+ assert has_pyscf is True
+ else:
+ assert has_pyscf is False
+
+
+def test_scipy() -> None:
+ if has_scipy is True:
+ assert has_scipy is True
+ else:
+ assert has_scipy is False
diff --git a/test/test_config/test_integral.py b/test/test_config/test_integral.py
new file mode 100644
index 00000000..e0edc4a5
--- /dev/null
+++ b/test/test_config/test_integral.py
@@ -0,0 +1,85 @@
+# This file is part of dxtb.
+#
+# SPDX-Identifier: Apache-2.0
+# Copyright (C) 2024 Grimme Group
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Test integral configuration.
+"""
+
+from __future__ import annotations
+
+import pytest
+
+from dxtb._src.constants import defaults, labels
+from dxtb._src.exlibs.available import has_libcint
+from dxtb.config import ConfigIntegrals as Cfg
+
+
+def test_default() -> None:
+ cfg = Cfg()
+ assert cfg.cutoff == defaults.INTCUTOFF
+ assert cfg.level == defaults.INTLEVEL
+ assert cfg.uplo == defaults.INTUPLO
+
+
+def test_default_driver() -> None:
+ cfg = Cfg()
+
+ if has_libcint is True:
+ assert cfg.driver == defaults.INTDRIVER
+ else:
+ assert cfg.driver == labels.INTDRIVER_ANALYTICAL
+
+
+def test_driver_pytorch() -> None:
+ cfg = Cfg(driver=labels.INTDRIVER_ANALYTICAL_STRS[0])
+ assert cfg.driver == labels.INTDRIVER_ANALYTICAL
+
+ cfg = Cfg(driver=labels.INTDRIVER_AUTOGRAD_STRS[0])
+ assert cfg.driver == labels.INTDRIVER_AUTOGRAD
+
+ cfg = Cfg(driver=labels.INTDRIVER_LEGACY_STRS[0])
+ assert cfg.driver == labels.INTDRIVER_LEGACY
+
+
+def test_driver_libcint() -> None:
+
+ if has_libcint is False:
+ with pytest.raises(ValueError):
+ Cfg(driver=labels.INTDRIVER_LIBCINT_STRS[0])
+ else:
+ cfg = Cfg(driver=labels.INTDRIVER_LIBCINT_STRS[0])
+ assert cfg.driver == labels.INTDRIVER_LIBCINT
+
+
+def test_fail_driver() -> None:
+ with pytest.raises(ValueError):
+ Cfg(driver=-999)
+
+ with pytest.raises(ValueError):
+ Cfg(driver="-999")
+
+ with pytest.raises(TypeError):
+ Cfg(driver=1.0) # type: ignore
+
+
+def test_fail_level() -> None:
+ with pytest.raises(TypeError):
+ Cfg(level="overlap") # type: ignore
+
+
+def test_fail_uplo() -> None:
+ with pytest.raises(ValueError):
+ Cfg(uplo="symmetric") # type: ignore
diff --git a/tox.ini b/tox.ini
index 6f2fe1fe..a924c68b 100644
--- a/tox.ini
+++ b/tox.ini
@@ -49,6 +49,7 @@ deps =
extras =
tox
libcint: libcint
+ {env:EXTRAS}
commands =
pytest -vv {posargs: \
From 3c0f6822058bde04308c97219ab41be8e581343f Mon Sep 17 00:00:00 2001
From: marvinfriede <51965259+marvinfriede@users.noreply.github.com>
Date: Mon, 16 Sep 2024 14:44:33 -0500
Subject: [PATCH 08/12] Remove Python 3.12
---
.github/workflows/windows.yaml | 2 +-
README.md | 44 ++++++++++++++++++++--
docs/source/01_quickstart/installation.rst | 26 +++++++++++++
3 files changed, 68 insertions(+), 4 deletions(-)
diff --git a/.github/workflows/windows.yaml b/.github/workflows/windows.yaml
index 231acfdb..462aaee0 100644
--- a/.github/workflows/windows.yaml
+++ b/.github/workflows/windows.yaml
@@ -42,7 +42,7 @@ jobs:
fail-fast: false
matrix:
os: [windows-latest]
- python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"]
+ python-version: ["3.8", "3.9", "3.10", "3.11"]
# only test oldest and newest version of torch
torch-version: ["1.11.0", "2.4.1"]
exclude:
diff --git a/README.md b/README.md
index 56b33699..34d0b01e 100644
--- a/README.md
+++ b/README.md
@@ -16,12 +16,21 @@
-
+
+
+
+
@@ -61,9 +70,11 @@ With *dxtb*, we provide a re-implementation of the xTB methods in PyTorch, which
*dxtb* can easily be installed with ``pip``.
```sh
-pip install dxtb
+pip install dxtb[libcint]
```
+Installing the libcint interface is highly recommended, as it is significantly faster than the pure PyTorch implementation and provides access to higher-order multipole integrals and their derivatives.
+
### conda
@@ -73,6 +84,8 @@ pip install dxtb
conda install dxtb
```
+Don't forget to install the libcint interface (not on conda) via ``pip install tad-libcint``.
+
### Other
For more options, see the [installation guide](https://dxtb.readthedocs.io/en/latest/01_quickstart/installation.html) in the documentation.
@@ -113,6 +126,31 @@ assert torch.equal(forces, -g)
For more examples and details, check out [the documentation](https://dxtb.readthedocs.io).
+## Compatibility
+
+| PyTorch \ Python | 3.8 | 3.9 | 3.10 | 3.11 | 3.12 |
+|------------------|--------------------|--------------------|--------------------|--------------------|--------------------|
+| 1.11.0 | :white_check_mark: | :white_check_mark: | :x: | :x: | :x: |
+| 1.12.1 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :x: | :x: |
+| 1.13.1 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :x: |
+| 2.0.1 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :x: |
+| 2.1.2 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :x: |
+| 2.2.2 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :x: |
+| 2.3.1 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :x: |
+| 2.4.1 | not tested | not tested | not tested | not tested | :x: |
+
+Note that only the latest bug fix version is listed, but all preceding bug fix minor versions are supported.
+For example, although only version 2.2.2 is listed, version 2.2.0 and 2.2.1 are also supported.
+
+On macOS and Windows, PyTorch<2.0.0 only supports Python<3.11.
+
+**Restriction for macOS and Windows:**
+The libcint interface is **not** available for macOS and Windows.
+Correspondingly, the integral evaluation can be considerably slower.
+Moreover, higher-order multipole integrals (dipole, quadrupole, ...) are not implemented.
+
+While macOS support may be considered in the future, native Windows support is not possible, because the underlying [libcint](https://github.com/sunqm/libcint) library does not work under Windows.
+
## Citation
diff --git a/docs/source/01_quickstart/installation.rst b/docs/source/01_quickstart/installation.rst
index 7371b690..6eecadb1 100644
--- a/docs/source/01_quickstart/installation.rst
+++ b/docs/source/01_quickstart/installation.rst
@@ -16,6 +16,10 @@ pip
pip install dxtb
+Installing the libcint interface is highly recommended, as it is significantly
+faster than the pure PyTorch implementation and provides access to higher-order
+multipole integrals.
+
conda
-----
@@ -31,6 +35,9 @@ conda
mamba install dxtb
+Don't forget to install the libcint interface (not on conda) via ``pip install tad-libcint``.
+
+
From source
-----------
@@ -58,6 +65,25 @@ Install this project with ``pip`` in the environment
pip install .
+Without pip
+-----------
+
+If you want to install the package without pip, start by cloning the repository.
+
+.. code-block:: shell
+
+ DEST=/opt/software
+ git clone https://github.com/grimme-lab/dxtb $DEST/dxtb
+
+Next, add ``$DEST/dxtb/src`` to your `$PYTHONPATH` environment variable.
+For the command line interface, add ``$DEST/dxtb/bin`` to your `$PATH` environment variable.
+
+.. code-block:: shell
+
+ export PYTHONPATH=$PYTHONPATH:$DEST/dxtb/src
+ export PATH=$PATH:$DEST/dxtb/bin
+
+
Dependencies
------------
From cb8fa6e8574c872b0f632e397651c9773a605836 Mon Sep 17 00:00:00 2001
From: marvinfriede <51965259+marvinfriede@users.noreply.github.com>
Date: Mon, 16 Sep 2024 22:34:02 -0500
Subject: [PATCH 09/12] Additional tests
---
src/dxtb/_src/calculators/types/abc.py | 31 +--
src/dxtb/_src/calculators/types/autograd.py | 1 -
src/dxtb/_src/calculators/types/base.py | 4 +-
.../test_cache/test_properties.py | 222 ++++++++++++++++++
test/test_config/test_main.py | 107 +++++++++
5 files changed, 347 insertions(+), 18 deletions(-)
create mode 100644 test/test_config/test_main.py
diff --git a/src/dxtb/_src/calculators/types/abc.py b/src/dxtb/_src/calculators/types/abc.py
index 9989c99d..a3dd0abd 100644
--- a/src/dxtb/_src/calculators/types/abc.py
+++ b/src/dxtb/_src/calculators/types/abc.py
@@ -252,7 +252,7 @@ def get_dipole_deriv(
**kwargs: Any,
) -> Tensor:
prop = self.get_property(
- "dipole_derivatives", positions, chrg=chrg, spin=spin, **kwargs
+ "dipole_deriv", positions, chrg=chrg, spin=spin, **kwargs
)
assert isinstance(prop, Tensor)
return prop
@@ -264,11 +264,7 @@ def get_dipole_derivatives(
spin: Tensor | float | int | None = defaults.SPIN,
**kwargs: Any,
) -> Tensor:
- prop = self.get_property(
- "dipole_derivatives", positions, chrg=chrg, spin=spin, **kwargs
- )
- assert isinstance(prop, Tensor)
- return prop
+ return self.get_dipole_deriv(positions, chrg=chrg, spin=spin, **kwargs)
def get_polarizability(
self,
@@ -291,11 +287,7 @@ def get_pol_deriv(
**kwargs: Any,
) -> Tensor:
prop = self.get_property(
- "polarizability_derivatives",
- positions,
- chrg=chrg,
- spin=spin,
- **kwargs,
+ "pol_deriv", positions, chrg=chrg, spin=spin, **kwargs
)
assert isinstance(prop, Tensor)
return prop
@@ -440,11 +432,15 @@ def get_charges(
spin: Tensor | float | int | None = defaults.SPIN,
**kwargs: Any,
) -> Tensor:
+ # pylint: disable=import-outside-toplevel
+ from dxtb._src.scf.base import Charges
+
prop = self.get_property(
"charges", positions, chrg=chrg, spin=spin, **kwargs
)
- assert isinstance(prop, Tensor)
- return prop
+ assert isinstance(prop, Charges)
+
+ return prop.mono
def get_mulliken_charges(
self,
@@ -501,8 +497,13 @@ def get_potential(
spin: Tensor | float | int | None = defaults.SPIN,
**kwargs: Any,
) -> Tensor:
+ # pylint: disable=import-outside-toplevel
+ from dxtb._src.scf.base import Potential
+
prop = self.get_property(
"potential", positions, chrg=chrg, spin=spin, **kwargs
)
- assert isinstance(prop, Tensor)
- return prop
+ assert isinstance(prop, Potential)
+ assert isinstance(prop.mono, Tensor)
+
+ return prop.mono
diff --git a/src/dxtb/_src/calculators/types/autograd.py b/src/dxtb/_src/calculators/types/autograd.py
index 1ad9a1cc..540ac800 100644
--- a/src/dxtb/_src/calculators/types/autograd.py
+++ b/src/dxtb/_src/calculators/types/autograd.py
@@ -1039,7 +1039,6 @@ def calculate(
props.remove("bond_orders")
if set(props) & set(properties):
- print("Calculating energy")
self.energy(positions, chrg, spin, **kwargs)
if "forces" in properties:
diff --git a/src/dxtb/_src/calculators/types/base.py b/src/dxtb/_src/calculators/types/base.py
index 1f1f6a49..a3b92aed 100644
--- a/src/dxtb/_src/calculators/types/base.py
+++ b/src/dxtb/_src/calculators/types/base.py
@@ -370,7 +370,7 @@ def get_cache_key(self, key: str) -> str | None:
# printing
- def __str__(self) -> str:
+ def __str__(self) -> str: # pragma: no cover
"""Return a string representation of the Cache object."""
counter = 0
l = []
@@ -396,7 +396,7 @@ def __str__(self) -> str:
f"{', '.join(l)})"
)
- def __repr__(self) -> str:
+ def __repr__(self) -> str: # pragma: no cover
"""Return a representation of the Cache object."""
return str(self)
diff --git a/test/test_calculator/test_cache/test_properties.py b/test/test_calculator/test_cache/test_properties.py
index 60cf2509..553b0d8c 100644
--- a/test/test_calculator/test_cache/test_properties.py
+++ b/test/test_calculator/test_cache/test_properties.py
@@ -73,6 +73,68 @@ def test_energy(dtype: torch.dtype) -> None:
assert len(calc.cache.list_cached_properties()) == 0
+@pytest.mark.parametrize("dtype", [torch.float, torch.double])
+def test_scf_props(dtype: torch.dtype) -> None:
+ dd: DD = {"device": DEVICE, "dtype": dtype}
+
+ numbers = torch.tensor([3, 1], device=DEVICE)
+ positions = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]], **dd)
+
+ options = dict(
+ opts,
+ **{
+ "cache_charges": True,
+ "cache_coefficients": True,
+ "cache_density": True,
+ "cache_iterations": True,
+ "cache_mo_energies": True,
+ "cache_occupation": True,
+ "cache_potential": True,
+ },
+ )
+
+ calc = GFN1Calculator(numbers, opts=options, **dd)
+ assert calc._ncalcs == 0
+
+ energy = calc.get_energy(positions)
+ assert calc._ncalcs == 1
+ assert isinstance(energy, Tensor)
+
+ # get other properties
+
+ prop = calc.get_charges(positions)
+ assert calc._ncalcs == 1
+ assert isinstance(prop, Tensor)
+
+ prop = calc.get_mulliken_charges(positions)
+ assert calc._ncalcs == 1
+ assert isinstance(prop, Tensor)
+
+ prop = calc.get_coefficients(positions)
+ assert calc._ncalcs == 1
+ assert isinstance(prop, Tensor)
+
+ prop = calc.get_density(positions)
+ assert calc._ncalcs == 1
+ assert isinstance(prop, Tensor)
+
+ prop = calc.get_iterations(positions)
+ assert calc._ncalcs == 1
+ assert isinstance(prop, Tensor)
+
+ prop = calc.get_occupation(positions)
+ assert calc._ncalcs == 1
+ assert isinstance(prop, Tensor)
+
+ prop = calc.get_potential(positions)
+ assert calc._ncalcs == 1
+ assert isinstance(prop, Tensor)
+
+ # check reset
+ calc.cache.reset_all()
+ assert len(calc.cache.list_cached_properties()) == 0
+
+
@pytest.mark.parametrize("dtype", [torch.float, torch.double])
@pytest.mark.parametrize("grad_mode", ["functorch", "row"])
def test_forces(
@@ -259,6 +321,166 @@ def test_dipole(dtype: torch.dtype) -> None:
assert len(calc.cache.list_cached_properties()) == 0
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
+@pytest.mark.parametrize("dtype", [torch.float, torch.double])
+def test_dipole_deriv(dtype: torch.dtype) -> None:
+ dd: DD = {"device": DEVICE, "dtype": dtype}
+
+ numbers = torch.tensor([3, 1], device=DEVICE)
+ positions = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]], **dd)
+ pos = positions.clone().requires_grad_(True)
+
+ options = dict(opts, **{"scf_mode": "full", "mixer": "anderson"})
+
+ field = torch.tensor([0, 0, 0], **dd, requires_grad=True)
+ efield = new_efield(field, **dd)
+
+ calc = AutogradCalculator(
+ numbers, GFN1_XTB, opts=options, interaction=efield, **dd
+ )
+ assert calc._ncalcs == 0
+
+ kwargs = {"use_analytical_dipmom": False, "use_functorch": True}
+
+ prop = calc.get_dipole_deriv(pos, **kwargs)
+ assert calc._ncalcs == 1
+ assert isinstance(prop, Tensor)
+
+ # cache is used for same calc
+ prop = calc.get_dipole_derivatives(pos, **kwargs)
+ assert calc._ncalcs == 1
+ assert isinstance(prop, Tensor)
+
+ # cache is used for energy (kwargs mess up the cache key!)
+ prop = calc.get_energy(pos)
+ assert calc._ncalcs == 1
+ assert isinstance(prop, Tensor)
+
+ # check reset
+ calc.cache.reset_all()
+ assert len(calc.cache.list_cached_properties()) == 0
+
+
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
+@pytest.mark.parametrize("dtype", [torch.float, torch.double])
+def test_polarizability(dtype: torch.dtype) -> None:
+ dd: DD = {"device": DEVICE, "dtype": dtype}
+
+ numbers = torch.tensor([3, 1], device=DEVICE)
+ positions = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]], **dd)
+ pos = positions.clone().requires_grad_(True)
+
+ options = dict(opts, **{"scf_mode": "full", "mixer": "anderson"})
+
+ field = torch.tensor([0, 0, 0], **dd, requires_grad=True)
+ efield = new_efield(field, **dd)
+
+ calc = AutogradCalculator(
+ numbers, GFN1_XTB, opts=options, interaction=efield, **dd
+ )
+ assert calc._ncalcs == 0
+
+ kwargs = {"use_functorch": True}
+
+ prop = calc.get_polarizability(pos, **kwargs)
+ assert calc._ncalcs == 1
+ assert isinstance(prop, Tensor)
+
+ # cache is used for same calc
+ prop = calc.get_polarizability(pos, **kwargs)
+ assert calc._ncalcs == 1
+ assert isinstance(prop, Tensor)
+
+ # cache is used for energy (kwargs mess up the cache key!)
+ prop = calc.get_energy(pos)
+ assert calc._ncalcs == 1
+ assert isinstance(prop, Tensor)
+
+ # check reset
+ calc.cache.reset_all()
+ assert len(calc.cache.list_cached_properties()) == 0
+
+
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
+@pytest.mark.parametrize("dtype", [torch.float, torch.double])
+def test_pol_deriv(dtype: torch.dtype) -> None:
+ dd: DD = {"device": DEVICE, "dtype": dtype}
+
+ numbers = torch.tensor([3, 1], device=DEVICE)
+ positions = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]], **dd)
+ pos = positions.clone().requires_grad_(True)
+
+ options = dict(opts, **{"scf_mode": "full", "mixer": "anderson"})
+
+ field = torch.tensor([0, 0, 0], **dd, requires_grad=True)
+ efield = new_efield(field, **dd)
+
+ calc = AutogradCalculator(
+ numbers, GFN1_XTB, opts=options, interaction=efield, **dd
+ )
+ assert calc._ncalcs == 0
+
+ kwargs = {"use_functorch": True}
+
+ prop = calc.get_pol_deriv(pos, **kwargs)
+ assert calc._ncalcs == 1
+ assert isinstance(prop, Tensor)
+
+ # cache is used for same calc
+ prop = calc.get_polarizability_derivatives(pos, **kwargs)
+ assert calc._ncalcs == 1
+ assert isinstance(prop, Tensor)
+
+ # cache is used for energy (kwargs mess up the cache key!)
+ prop = calc.get_energy(pos)
+ assert calc._ncalcs == 1
+ assert isinstance(prop, Tensor)
+
+ # check reset
+ calc.cache.reset_all()
+ assert len(calc.cache.list_cached_properties()) == 0
+
+
+@pytest.mark.skipif(not has_libcint, reason="libcint not available")
+@pytest.mark.parametrize("dtype", [torch.float, torch.double])
+def test_hyperpolarizability(dtype: torch.dtype) -> None:
+ dd: DD = {"device": DEVICE, "dtype": dtype}
+
+ numbers = torch.tensor([3, 1], device=DEVICE)
+ positions = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]], **dd)
+ pos = positions.clone().requires_grad_(True)
+
+ options = dict(opts, **{"scf_mode": "full", "mixer": "anderson"})
+
+ field = torch.tensor([0, 0, 0], **dd, requires_grad=True)
+ efield = new_efield(field, **dd)
+
+ calc = AutogradCalculator(
+ numbers, GFN1_XTB, opts=options, interaction=efield, **dd
+ )
+ assert calc._ncalcs == 0
+
+ kwargs = {"use_functorch": True}
+
+ prop = calc.get_hyperpolarizability(pos, **kwargs)
+ assert calc._ncalcs == 1
+ assert isinstance(prop, Tensor)
+
+ # cache is used for same calc
+ prop = calc.get_hyperpolarizability(pos, **kwargs)
+ assert calc._ncalcs == 1
+ assert isinstance(prop, Tensor)
+
+ # cache is used for energy (kwargs mess up the cache key!)
+ prop = calc.get_energy(pos)
+ assert calc._ncalcs == 1
+ assert isinstance(prop, Tensor)
+
+ # check reset
+ calc.cache.reset_all()
+ assert len(calc.cache.list_cached_properties()) == 0
+
+
@pytest.mark.skipif(not has_libcint, reason="libcint not available")
@pytest.mark.parametrize("dtype", [torch.float, torch.double])
def test_ir(dtype: torch.dtype) -> None:
diff --git a/test/test_config/test_main.py b/test/test_config/test_main.py
new file mode 100644
index 00000000..8bd96a85
--- /dev/null
+++ b/test/test_config/test_main.py
@@ -0,0 +1,107 @@
+# This file is part of dxtb.
+#
+# SPDX-Identifier: Apache-2.0
+# Copyright (C) 2024 Grimme Group
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Test integral configuration.
+"""
+
+from __future__ import annotations
+
+import pytest
+
+from dxtb._src.constants import defaults, labels
+from dxtb._src.exlibs.available import has_libcint
+from dxtb._src.typing import get_default_device, get_default_dtype
+from dxtb.config import Config as Cfg
+
+
+def test_default() -> None:
+ cfg = Cfg()
+ assert cfg.strict == defaults.STRICT
+ assert cfg.exclude == defaults.EXCLUDE
+ assert cfg.method == defaults.METHOD
+ assert cfg.grad == False
+ assert cfg.batch_mode == defaults.BATCH_MODE
+
+ assert cfg.ints.cutoff == defaults.INTCUTOFF
+ assert cfg.ints.driver == defaults.INTDRIVER
+ assert cfg.ints.level == defaults.INTLEVEL
+ assert cfg.ints.uplo == defaults.INTUPLO
+
+ assert cfg.anomaly == False
+ assert cfg.device == get_default_device()
+ assert cfg.dtype == get_default_dtype()
+
+ assert cfg.scf.maxiter == defaults.MAXITER
+ assert cfg.scf.mixer == defaults.MIXER
+ assert cfg.scf.damp == defaults.DAMP
+ assert cfg.scf.guess == defaults.GUESS
+ assert cfg.scf.scf_mode == defaults.SCF_MODE
+ assert cfg.scf.scp_mode == defaults.SCP_MODE
+ assert cfg.scf.x_atol == defaults.X_ATOL
+ assert cfg.scf.f_atol == defaults.F_ATOL
+ assert cfg.scf.force_convergence == False
+
+ assert cfg.scf.fermi.etemp == defaults.FERMI_ETEMP
+ assert cfg.scf.fermi.maxiter == defaults.FERMI_MAXITER
+ assert cfg.scf.fermi.thresh == defaults.FERMI_THRESH
+ assert cfg.scf.fermi.partition == defaults.FERMI_PARTITION
+
+ assert cfg.cache.enabled == defaults.CACHE_ENABLED
+ assert cfg.cache.store.hcore == defaults.CACHE_STORE_HCORE
+ assert cfg.cache.store.overlap == defaults.CACHE_STORE_OVERLAP
+ assert cfg.cache.store.dipole == defaults.CACHE_STORE_DIPOLE
+ assert cfg.cache.store.quadrupole == defaults.CACHE_STORE_QUADRUPOLE
+ assert cfg.cache.store.charges == defaults.CACHE_STORE_CHARGES
+ assert cfg.cache.store.coefficients == defaults.CACHE_STORE_COEFFICIENTS
+ assert cfg.cache.store.density == defaults.CACHE_STORE_DENSITY
+ assert cfg.cache.store.fock == defaults.CACHE_STORE_FOCK
+ assert cfg.cache.store.iterations == defaults.CACHE_STORE_ITERATIONS
+ assert cfg.cache.store.mo_energies == defaults.CACHE_STORE_MO_ENERGIES
+ assert cfg.cache.store.occupation == defaults.CACHE_STORE_OCCUPATIONS
+ assert cfg.cache.store.potential == defaults.CACHE_STORE_POTENTIAL
+
+ assert cfg.max_element == defaults.MAX_ELEMENT
+
+
+def test_method() -> None:
+ cfg = Cfg(method=labels.GFN1_XTB_STRS[0])
+ assert cfg.method == labels.GFN1_XTB
+
+ cfg = Cfg(method=labels.GFN1_XTB)
+ assert cfg.method == labels.GFN1_XTB
+
+ cfg = Cfg(method=labels.GFN2_XTB_STRS[0])
+ assert cfg.method == labels.GFN2_XTB
+
+ cfg = Cfg(method=labels.GFN2_XTB)
+ assert cfg.method == labels.GFN2_XTB
+
+
+def test_method_fail() -> None:
+ with pytest.raises(ValueError):
+ Cfg(method="invalid")
+
+ with pytest.raises(ValueError):
+ Cfg(method=-999)
+
+ with pytest.raises(TypeError):
+ Cfg(method=1.0) # type: ignore
+
+
+def test_fail_incompatibility() -> None:
+ with pytest.raises(RuntimeError):
+ Cfg(method=labels.GFN2_XTB, int_driver=labels.INTDRIVER_LEGACY)
From ec2e50b319c0f84943b08f86c475b196fa11ed5e Mon Sep 17 00:00:00 2001
From: marvinfriede <51965259+marvinfriede@users.noreply.github.com>
Date: Tue, 17 Sep 2024 07:40:54 -0500
Subject: [PATCH 10/12] Use labels as defaults, not strings
---
src/dxtb/_src/calculators/config/integral.py | 1 +
src/dxtb/_src/calculators/config/main.py | 2 +-
src/dxtb/_src/calculators/config/scf.py | 2 +-
src/dxtb/_src/constants/defaults.py | 12 ++++++------
test/test_config/test_main.py | 1 -
5 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/src/dxtb/_src/calculators/config/integral.py b/src/dxtb/_src/calculators/config/integral.py
index 4ae09545..c0cb8a04 100644
--- a/src/dxtb/_src/calculators/config/integral.py
+++ b/src/dxtb/_src/calculators/config/integral.py
@@ -109,6 +109,7 @@ def __init__(
labels.INTDRIVER_LIBCINT,
labels.INTDRIVER_ANALYTICAL,
labels.INTDRIVER_AUTOGRAD,
+ labels.INTDRIVER_LEGACY,
):
raise ValueError(f"Unknown integral driver '{driver}'.")
diff --git a/src/dxtb/_src/calculators/config/main.py b/src/dxtb/_src/calculators/config/main.py
index ed9f0723..802eed1c 100644
--- a/src/dxtb/_src/calculators/config/main.py
+++ b/src/dxtb/_src/calculators/config/main.py
@@ -103,7 +103,7 @@ def __init__(
dtype: torch.dtype = get_default_dtype(),
# SCF
maxiter: int = defaults.MAXITER,
- mixer: str = defaults.MIXER,
+ mixer: str | int = defaults.MIXER,
damp: float = defaults.DAMP,
guess: str | int = defaults.GUESS,
scf_mode: str | int = defaults.SCF_MODE,
diff --git a/src/dxtb/_src/calculators/config/scf.py b/src/dxtb/_src/calculators/config/scf.py
index 10c12c3d..fe071519 100644
--- a/src/dxtb/_src/calculators/config/scf.py
+++ b/src/dxtb/_src/calculators/config/scf.py
@@ -99,7 +99,7 @@ def __init__(
strict: bool = False,
guess: str | int = defaults.GUESS,
maxiter: int = defaults.MAXITER,
- mixer: str = defaults.MIXER,
+ mixer: str | int = defaults.MIXER,
damp: float = defaults.DAMP,
scf_mode: str | int = defaults.SCF_MODE,
scp_mode: str | int = defaults.SCP_MODE,
diff --git a/src/dxtb/_src/constants/defaults.py b/src/dxtb/_src/constants/defaults.py
index 449c53e3..ad90539b 100644
--- a/src/dxtb/_src/constants/defaults.py
+++ b/src/dxtb/_src/constants/defaults.py
@@ -52,7 +52,7 @@
EINSUM_OPTIMIZE = "greedy"
"""Optimization algorithm for `einsum`."""
-METHOD = "gfn1"
+METHOD = labels.GFN1_XTB
"""General method for calculation from the xtb family."""
METHOD_CHOICES = ["gfn1", "gfn1-xtb", "gfn2", "gfn2-xtb"]
@@ -118,7 +118,7 @@
# SCF settings
-GUESS = "eeq"
+GUESS = labels.GUESS_EEQ
"""Initial guess for orbital charges."""
GUESS_CHOICES = ["eeq", "sad"]
@@ -130,13 +130,13 @@
MAXITER = 100
"""Maximum number of SCF iterations."""
-MIXER = "broyden"
+MIXER = labels.MIXER_BROYDEN
"""SCF mixing scheme for convergence acceleration."""
MIXER_CHOICES = ["anderson", "broyden", "simple"]
"""List of possible choices for ``MIXER``."""
-SCF_MODE = "nonpure"
+SCF_MODE = labels.SCF_MODE_IMPLICIT_NON_PURE
"""
Whether to use full gradient tracking in SCF, make use of the implicit
function theorem as provided by ``xitorch.optimize.equilibrium``, or use the
@@ -156,7 +156,7 @@
]
"""List of possible choices for ``SCF_MODE``."""
-SCP_MODE = "potential"
+SCP_MODE = labels.SCP_MODE_POTENTIAL
"""
Type of self-consistent parameter, i.e., which quantity is converged in the SCF
iterations.
@@ -207,7 +207,7 @@
}
"""Convergence thresholds for different float data types."""
-FERMI_PARTITION = "equal"
+FERMI_PARTITION = labels.FERMI_PARTITION_EQUAL
"""Partitioning scheme for electronic free energy."""
FERMI_PARTITION_CHOICES = ["equal", "atomic"]
diff --git a/test/test_config/test_main.py b/test/test_config/test_main.py
index 8bd96a85..e57f117a 100644
--- a/test/test_config/test_main.py
+++ b/test/test_config/test_main.py
@@ -23,7 +23,6 @@
import pytest
from dxtb._src.constants import defaults, labels
-from dxtb._src.exlibs.available import has_libcint
from dxtb._src.typing import get_default_device, get_default_dtype
from dxtb.config import Config as Cfg
From 244378c2296b4da2c0b3945705daeec55c05e4c0 Mon Sep 17 00:00:00 2001
From: marvinfriede <51965259+marvinfriede@users.noreply.github.com>
Date: Tue, 17 Sep 2024 08:42:07 -0500
Subject: [PATCH 11/12] Fix tests
---
src/dxtb/_src/cli/driver.py | 4 ++--
test/test_cli/test_args.py | 4 ++--
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/src/dxtb/_src/cli/driver.py b/src/dxtb/_src/cli/driver.py
index 6ce29767..de4e9a68 100644
--- a/src/dxtb/_src/cli/driver.py
+++ b/src/dxtb/_src/cli/driver.py
@@ -188,10 +188,10 @@ def singlepoint(self) -> Result | Tensor:
if args.grad is True:
positions.requires_grad = True
- if args.method.lower() == "gfn1" or args.method.lower() == "gfn1-xtb":
+ if config.method == labels.GFN1_XTB:
# pylint: disable=import-outside-toplevel
from dxtb import GFN1_XTB as par
- elif args.method.lower() == "gfn2" or args.method.lower() == "gfn2-xtb":
+ elif config.method == labels.GFN2_XTB:
raise NotImplementedError("GFN2-xTB is not implemented yet.")
else:
raise ValueError(f"Unknown method '{args.method}'.")
diff --git a/test/test_cli/test_args.py b/test/test_cli/test_args.py
index 81b2f0f6..cc0f2ab7 100644
--- a/test/test_cli/test_args.py
+++ b/test/test_cli/test_args.py
@@ -48,7 +48,7 @@ def test_defaults() -> None:
assert isinstance(args.maxiter, int)
assert args.maxiter == defaults.MAXITER
- assert isinstance(args.guess, str)
+ assert isinstance(args.guess, int)
assert args.guess == defaults.GUESS
assert isinstance(args.fermi_etemp, float)
@@ -57,7 +57,7 @@ def test_defaults() -> None:
assert isinstance(args.fermi_maxiter, int)
assert args.fermi_maxiter == defaults.FERMI_MAXITER
- assert isinstance(args.fermi_partition, str)
+ assert isinstance(args.fermi_partition, int)
assert args.fermi_partition == defaults.FERMI_PARTITION
# integral settings
From 35d5fc2d61e3a6fdbc4a882f4d4fbf936678502b Mon Sep 17 00:00:00 2001
From: marvinfriede <51965259+marvinfriede@users.noreply.github.com>
Date: Tue, 17 Sep 2024 09:22:08 -0500
Subject: [PATCH 12/12] More fixes
---
test/test_config/test_main.py | 22 +++++++++++++++++-----
1 file changed, 17 insertions(+), 5 deletions(-)
diff --git a/test/test_config/test_main.py b/test/test_config/test_main.py
index e57f117a..d2ee38e7 100644
--- a/test/test_config/test_main.py
+++ b/test/test_config/test_main.py
@@ -23,6 +23,7 @@
import pytest
from dxtb._src.constants import defaults, labels
+from dxtb._src.exlibs.available import has_libcint
from dxtb._src.typing import get_default_device, get_default_dtype
from dxtb.config import Config as Cfg
@@ -36,7 +37,6 @@ def test_default() -> None:
assert cfg.batch_mode == defaults.BATCH_MODE
assert cfg.ints.cutoff == defaults.INTCUTOFF
- assert cfg.ints.driver == defaults.INTDRIVER
assert cfg.ints.level == defaults.INTLEVEL
assert cfg.ints.uplo == defaults.INTUPLO
@@ -75,6 +75,11 @@ def test_default() -> None:
assert cfg.max_element == defaults.MAX_ELEMENT
+ if has_libcint is True:
+ assert cfg.ints.driver == defaults.INTDRIVER
+ else:
+ assert cfg.ints.driver == labels.INTDRIVER_ANALYTICAL
+
def test_method() -> None:
cfg = Cfg(method=labels.GFN1_XTB_STRS[0])
@@ -83,11 +88,18 @@ def test_method() -> None:
cfg = Cfg(method=labels.GFN1_XTB)
assert cfg.method == labels.GFN1_XTB
- cfg = Cfg(method=labels.GFN2_XTB_STRS[0])
- assert cfg.method == labels.GFN2_XTB
+ if has_libcint is True:
+ cfg = Cfg(method=labels.GFN2_XTB)
+ assert cfg.method == labels.GFN2_XTB
+
+ cfg = Cfg(method=labels.GFN2_XTB_STRS[0])
+ assert cfg.method == labels.GFN2_XTB
+ else:
+ with pytest.raises(RuntimeError):
+ Cfg(method=labels.GFN2_XTB_STRS[0])
- cfg = Cfg(method=labels.GFN2_XTB)
- assert cfg.method == labels.GFN2_XTB
+ with pytest.raises(RuntimeError):
+ Cfg(method=labels.GFN2_XTB)
def test_method_fail() -> None: