diff --git a/seemps/cross/__init__.py b/seemps/cross/__init__.py
new file mode 100644
index 00000000..fb32630e
--- /dev/null
+++ b/seemps/cross/__init__.py
@@ -0,0 +1,2 @@
+from .cross import CrossStrategy, cross_interpolation, reorder_tensor
+from .mesh import *
diff --git a/seemps/cross/cross.py b/seemps/cross/cross.py
new file mode 100644
index 00000000..aade036e
--- /dev/null
+++ b/seemps/cross/cross.py
@@ -0,0 +1,384 @@
+from dataclasses import dataclass
+import numpy as np
+from copy import copy, deepcopy
+from typing import Callable, List, Optional, Tuple
+from .maxvol import maxvol_sqr, maxvol_rct
+from .mesh import Mesh
+from ..tools import log
+from ..state import MPS, random_mps
+
+
+@dataclass
+class CrossStrategy:
+    """Parameters for the Tensor Cross Interpolation algorithm.
+
+    Parameters
+    ----------
+    tol : float
+        Maximum error allowed by the algorithm.
+    maxiter : int
+        Maximum number of sweeps allowed by the algorithm.
+    maxrank : int
+        Maximum bond dimension of the MPS allowed by the algorithm.
+    mps_ordering : str
+        Site ordering of the underlying MPS (assumed binary). Either "A" or "B".
+    maxvol_sqr_tau : float
+        Sensitivity parameter for the square maxvol algorithm.
+    maxvol_sqr_maxiter : int
+        Maximum number of iterations for the square maxvol algorithm.
+    maxvol_rct_tau : float
+        Sensitivity parameter for the rectangular maxvol algorithm.
+    maxvol_rct_minrank : int
+        Minimum rank increase introduced by the rectangular maxvol algorithm.
+    maxvol_rct_maxrank : int
+        Maximum rank increase introduced by the rectangular maxvol algorithm.
+    error_type : str
+        Method used to measure the error of the algorithm. Either "sampling" or "norm".
+    """
+
+    tol: float = 1e-10
+    maxiter: int = 100
+    maxrank: int = 100
+    mps_ordering: str = "A"
+    maxvol_sqr_tau: float = 1.05
+    maxvol_sqr_maxiter: int = 100
+    maxvol_rct_tau: float = 1.10
+    maxvol_rct_minrank: int = 1
+    maxvol_rct_maxrank: int = 1
+    error_type: str = "sampling"
+
+
+@dataclass
+class Cross:
+    """Cross class.
+
+    This implements a data structure for the Tensor Cross Interpolation algorithm,
+    which encodes a function defined on a discretized mesh in a Matrix Product
+    State (MPS) by means of the skeleton decomposition.
+
+    Parameters
+    ----------
+    func : Callable
+        A multidimensional **vector-valued** function to be encoded in an MPS.
+    mesh : Mesh
+        A multidimensional discretized mesh on which the function is defined,
+        defining an implicit tensor.
+    mps : MPS
+        An initial MPS of the same size as the mesh that serves as a starting
+        approximation for the algorithm. It can have a 'binary' ([2] * dims * n)
+        or 'tt' ([2**n] * dims) structure.
+    cross_strategy : CrossStrategy
+        An object which contains the algorithm parameters.
+    """
+
+    func: Callable
+    mesh: Mesh
+    mps: MPS
+    cross_strategy: CrossStrategy
+
+    def __post_init__(self):
+        shape_mps = tuple(self.mps.physical_dimensions())
+        shape_mesh = self.mesh.shape()[:-1]
+        if np.prod(shape_mps) == np.prod(shape_mesh) and all(
+            dim == 2 for dim in shape_mps
+        ):
+            self.structure = "binary"
+        elif shape_mps == shape_mesh:
+            self.structure = "tt"
+        else:
+            raise ValueError("Non-matching mesh and initial MPS")
+        self.sites = len(self.mps)
+        self.qubits = [int(np.log2(s)) for s in shape_mesh]
+
+
+def _initialize(cross: Cross) -> None:
+    """Initializes the multi-indices and attributes of the Cross dataclass by executing
+    a presweep on the initial MPS that does not evaluate the function, using the square
+    maxvol algorithm without rank increments."""
+    cross.I_physical = [
+        np.arange(k, dtype=int).reshape(-1, 1) for k in cross.mps.physical_dimensions()
+    ]
+    cross.I_forward = [None for _ in range(cross.sites + 1)]
+    cross.I_backward = [None for _ in range(cross.sites + 1)]
+    cross.error = 1
+    cross.sweep = 0
+    cross.maxrank = 0
+
+    cross_initial = copy(cross)
+    cross_initial.cross_strategy = CrossStrategy(
+        maxvol_rct_minrank=0, maxvol_rct_maxrank=0
+    )
+
+    # Forward pass
+    R = np.ones((1, 1))
+    for j in range(cross.sites):
+        fiber = np.tensordot(R, cross.mps[j], 1)
+        cross.mps[j], cross.I_forward[j + 1], R = _skeleton(
+            fiber, cross_initial, j, ltr=True
+        )
+    cross.mps[cross.sites - 1] = np.tensordot(cross.mps[cross.sites - 1], R, 1)
+
+    # Backward pass
+    R = np.ones((1, 1))
+    for j in range(cross.sites - 1, -1, -1):
+        fiber = np.tensordot(cross.mps[j], R, 1)
+        cross.mps[j], cross.I_backward[j], R = _skeleton(
+            fiber, cross_initial, j, ltr=False
+        )
+    cross.mps[0] = np.tensordot(R, cross.mps[0], 1)
+
+
+def _sweep(cross: Cross) -> None:
+    """Runs a forward-backward sweep on the MPS that iteratively updates its tensors
+    and forward / backward multi-indices (pivots) by means of the skeleton decomposition.
+    """
+    # Forward pass
+    R = np.ones((1, 1))
+    for j in range(cross.sites):
+        fiber = _sample(cross, j)
+        cross.mps[j], cross.I_forward[j + 1], R = _skeleton(fiber, cross, j, ltr=True)
+    cross.mps[cross.sites - 1] = np.tensordot(cross.mps[cross.sites - 1], R, 1)
+
+    # Backward pass
+    R = np.ones((1, 1))
+    for j in range(cross.sites - 1, -1, -1):
+        fiber = _sample(cross, j)
+        cross.mps[j], cross.I_backward[j], R = _skeleton(fiber, cross, j, ltr=False)
+    cross.mps[0] = np.tensordot(R, cross.mps[0], 1)
+
+    cross.sweep += 1
+    cross.maxrank = max(cross.mps.bond_dimensions())
+
+
+# TODO: Clean and optimize
+def _skeleton(
+    fiber: np.ndarray,
+    cross: Cross,
+    j: int,
+    ltr: bool,
+) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
+    """Returns the skeleton decomposition of a tensor fiber using either the square
+    or rectangular (rank-increasing) maxvol algorithm.
+    """
+    r1, s, r2 = fiber.shape
+
+    # Reshape the fiber into a matrix
+    if ltr:
+        i_virtual = cross.I_forward[j]
+        fiber = np.reshape(fiber, (r1 * s, r2), order="F")
+    else:
+        i_virtual = cross.I_backward[j + 1]
+        fiber = np.reshape(fiber, (r1, s * r2), order="F").T
+
+    # Perform QR factorization
+    Q, R = np.linalg.qr(fiber)
+
+    # Perform maxvol decomposition on the Q-factor
+    if Q.shape[0] <= Q.shape[1]:
+        i_maxvol = np.arange(Q.shape[0], dtype=int)
+        Q_maxvol = np.eye(Q.shape[0], dtype=float)
+    elif cross.cross_strategy.maxvol_rct_maxrank == 0:
+        i_maxvol, Q_maxvol = maxvol_sqr(
+            Q,
+            k=cross.cross_strategy.maxvol_sqr_maxiter,
+            e=cross.cross_strategy.maxvol_sqr_tau,
+        )
+    else:
+        i_maxvol, Q_maxvol = maxvol_rct(
+            Q,
+            k=cross.cross_strategy.maxvol_sqr_maxiter,
+            e=cross.cross_strategy.maxvol_sqr_tau,
+            tau=cross.cross_strategy.maxvol_rct_tau,
+            min_r=cross.cross_strategy.maxvol_rct_minrank,
+            max_r=cross.cross_strategy.maxvol_rct_maxrank,
+        )
+
+    # Redefine the fiber as the decomposed Q-factor
+    i_physical = cross.I_physical[j]
+    if ltr:
+        i_physical_ext = np.kron(i_physical, np.ones((r1, 1), dtype=int))
+        fiber = np.reshape(Q_maxvol, (r1, s, -1), order="F")
+        R = Q[i_maxvol, :] @ R
+    else:
+        i_physical_ext = np.kron(np.ones((r2, 1), dtype=int), i_physical)
+        fiber = np.reshape(Q_maxvol.T, (-1, s, r2), order="F")
+        R = (Q[i_maxvol, :] @ R).T
+
+    # Redefine the maxvol indices in terms of the tensor multi-indices
+    if i_virtual is not None:
+        i_virtual_ext = (
+            np.kron(np.ones((s, 1), dtype=int), i_virtual)
+            if ltr
+            else np.kron(i_virtual, np.ones((s, 1), dtype=int))
+        )
+        i_physical_ext = (
+            np.hstack((i_virtual_ext, i_physical_ext))
+            if ltr
+            else np.hstack((i_physical_ext, i_virtual_ext))
+        )
+    i_maxvol = i_physical_ext[i_maxvol, :]
+
+    return fiber, i_maxvol, R
+
+
+def _sample(cross: Cross, j: int) -> np.ndarray:
+    """Returns a fiber of the implicit tensor along site j by evaluating the function
+    at the Kronecker product of the physical indices and the forward and backward
+    maxvol multi-indices (pivots) present at that site.
+    """
+    i_physical = cross.I_physical[j]
+    i_forward = cross.I_forward[j]
+    i_backward = cross.I_backward[j + 1]
+    r1 = i_forward.shape[0] if i_forward is not None else 1
+    s = i_physical.shape[0]
+    r2 = i_backward.shape[0] if i_backward is not None else 1
+    indices = np.kron(
+        np.kron(np.ones((r2, 1), dtype=int), i_physical),
+        np.ones((r1, 1), dtype=int),
+    )
+    if i_forward is not None:
+        indices = np.hstack(
+            (np.kron(np.ones((s * r2, 1), dtype=int), i_forward), indices)
+        )
+    if i_backward is not None:
+        indices = np.hstack(
+            (indices, np.kron(i_backward, np.ones((r1 * s, 1), dtype=int)))
+        )
+    fiber = _evaluate(cross, indices)
+    return fiber.reshape((r1, s, r2), order="F")
+
+
+def _evaluate(cross: Cross, indices: np.ndarray) -> np.ndarray:
+    """Evaluates the function at a tensor of indices."""
+    if cross.structure == "binary":
+        indices = _binary2decimal(
+            indices, cross.qubits, cross.cross_strategy.mps_ordering
+        )
+    return np.array([cross.func(cross.mesh[idx]) for idx in indices])
+
+
+# TODO: Clean and optimize
+def _binary2decimal(
+    indices: np.ndarray, qubits: List[int], mps_ordering: str
+) -> np.ndarray:
+    """Transforms a tensor of multi-indices from binary form to the decimal form
+    used to evaluate function values.
+    Follows the MPS ordering ("A" or "B") specified by the mps_ordering parameter
+    of the strategy."""
+
+    def bitlist_to_int(bitlist):
+        out = 0
+        for bit in bitlist:
+            out = (out << 1) | bit
+        return out
+
+    m = len(qubits)
+    decimal_indices = []
+    for idx, n in enumerate(qubits):
+        if mps_ordering == "A":
+            rng = np.arange(idx * n, (idx + 1) * n)
+        elif mps_ordering == "B":
+            rng = np.arange(idx, m * n, m)
+        else:
+            raise ValueError("Invalid mps_ordering")
+        decimal_ndx = bitlist_to_int(indices.T[rng])
+        decimal_indices.append(decimal_ndx)
+    decimal_indices = np.column_stack(decimal_indices)
+    return decimal_indices
+
+
+def _error_sampling(cross: Cross, sampling_points: int = 1000) -> float:
+    """Returns the algorithm error, given by comparing random samples with their exact values."""
+    if cross.sweep == 1:
+        cross.sampling_indices = np.vstack(
+            [
+                np.random.choice(k, sampling_points)
+                for k in cross.mps.physical_dimensions()
+            ]
+        ).T
+        cross.samples = _evaluate(cross, cross.sampling_indices)
+    Q = cross.mps[0][0, cross.sampling_indices[:, 0], :]
+    for i in range(1, cross.sites):
+        Q = np.einsum("kq,qkr->kr", Q, cross.mps[i][:, cross.sampling_indices[:, i], :])
+    error = np.linalg.norm(Q[:, 0] - cross.samples) / np.linalg.norm(cross.samples)
+    return error
+
+
+def _error_norm2(cross: Cross) -> float:
+    """Returns the algorithm error, given by the norm of the difference with respect
+    to the previous sweep."""
+    if cross.sweep == 1:
+        cross.mps_prev = deepcopy(cross.mps)
+        return 1
+    error = abs((cross.mps - cross.mps_prev).toMPS().norm())
+    cross.mps_prev = cross.mps
+    return error
+
+
+def _converged(cross: Cross) -> bool:
+    """Evaluates the convergence of the algorithm as defined by the strategy parameters."""
+    return (
+        cross.error < cross.cross_strategy.tol
+        or cross.sweep >= cross.cross_strategy.maxiter
+        or cross.maxrank >= cross.cross_strategy.maxrank
+    )
+
+
+def reorder_tensor(tensor: np.ndarray, qubits: List[int]) -> np.ndarray:
+    """Reorders an A-ordered tensor into a B-ordered tensor (and vice versa)."""
+    m = len(qubits)
+    shape_orig = tensor.shape
+    tensor = tensor.reshape([2] * sum(qubits))
+    axes = [np.arange(idx, m * n, m) for idx, n in enumerate(qubits)]
+    axes = [item for items in axes for item in items]
+    tensor = np.transpose(tensor, axes=axes)
+    return tensor.reshape(shape_orig)
+
+
+def cross_interpolation(
+    func: Callable,
+    mesh: Mesh,
+    mps: Optional[MPS] = None,
+    cross_strategy: CrossStrategy = CrossStrategy(),
+) -> MPS:
+    """Tensor Cross Interpolation algorithm.
+
+    This runs the Tensor Cross Interpolation algorithm to encode in a Matrix Product
+    State (MPS) a black-box tensor given by a vector-valued function and a
+    multidimensional mesh.
+
+    Parameters
+    ----------
+    func : Callable
+        A multidimensional **vector-valued** function to be encoded in MPS form.
+    mesh : Mesh
+        A multidimensional discretized mesh on which the function is defined.
+    mps : MPS
+        An initial MPS with the same dimensions as the mesh, used as the starting
+        approximation.
+    cross_strategy : CrossStrategy
+        An object which contains the algorithm parameters.
+    """
+    if mps is None:
+        if not all((s != 0) and (s & (s - 1) == 0) for s in mesh.shape()[:-1]):
+            raise ValueError("The mesh sizes must be powers of two")
+        sites = sum([int(np.log2(s)) for s in mesh.shape()[:-1]])
+        mps = random_mps([2] * sites, 1, rng=np.random.default_rng(42))
+
+    cross = Cross(func, mesh, mps, cross_strategy)
+    _initialize(cross)
+
+    while not _converged(cross):
+        _sweep(cross)
+        if cross_strategy.error_type == "sampling":
+            error_name = "Sampling error"
+            cross.error = _error_sampling(cross)
+        elif cross_strategy.error_type == "norm":
+            error_name = "Norm error"
+            cross.error = _error_norm2(cross)
+        else:
+            raise ValueError("Invalid error_type")
+
+        log(
+            f"Sweep {cross.sweep:<3} | "
+            + f"Max χ {cross.maxrank:>3} | "
+            + f"{error_name} {cross.error:.2E}"
+        )
+
+    return cross.mps
diff --git a/seemps/cross/maxvol.py b/seemps/cross/maxvol.py
new file mode 100644
index 00000000..e5201913
--- /dev/null
+++ b/seemps/cross/maxvol.py
@@ -0,0 +1,109 @@
+from typing import Tuple
+import numpy as np
+from scipy.linalg import lu, solve_triangular
+
+
+def maxvol_sqr(
+    A: np.ndarray, k: int = 100, e: float = 1.05
+) -> Tuple[np.ndarray, np.ndarray]:
+    """
+    Square maxvol algorithm.
+    Given a 'tall' matrix of size m x n (with more rows m than columns n), finds the
+    n rows that form a submatrix with approximately the largest volume (absolute
+    value of the determinant).
+
+    Parameters
+    ----------
+    A : np.ndarray
+        A tall matrix of size m x n (m > n) to be optimized.
+    k : int
+        The maximum number of iterations allowed for the algorithm to converge.
+    e : float
+        The sensitivity of the algorithm (e >= 1).
+
+    Output
+    ------
+    I : np.ndarray
+        An array with the indices of the n rows that approximate the submatrix with
+        the largest volume.
+    B : np.ndarray
+        The m x n matrix of coefficients B = A A[I, :]^{-1}, whose rows at the
+        chosen indices I form (approximately) the identity.
+    """
+    n, r = A.shape
+    if n <= r:
+        raise ValueError('Input matrix should be "tall"')
+    P, L, U = lu(A, check_finite=False)
+    I = P[:, :r].argmax(axis=0)
+    Q = solve_triangular(U, A.T, trans=1, check_finite=False)
+    B = solve_triangular(
+        L[:r, :], Q, trans=1, check_finite=False, unit_diagonal=True, lower=True
+    ).T
+    for _ in range(k):
+        i, j = np.divmod(np.abs(B).argmax(), r)
+        if np.abs(B[i, j]) <= e:
+            break
+        I[j] = i
+        bj = B[:, j]
+        bi = B[i, :].copy()
+        bi[j] -= 1.0
+        B -= np.outer(bj, bi / B[i, j])
+    return I, B
+
+
+def maxvol_rct(
+    A: np.ndarray,
+    k: int = 100,
+    e: float = 1.05,
+    tau: float = 1.10,
+    min_r: int = 0,
+    max_r: int = 1,
+) -> Tuple[np.ndarray, np.ndarray]:
+    """
+    Rectangular maxvol algorithm.
+    Given a 'tall' matrix of size m x n (with more rows m than columns n), finds the
+    n' > n rows that form a submatrix with approximately the largest volume (absolute
+    value of the determinant).
+
+    Parameters
+    ----------
+    A : np.ndarray
+        A tall matrix of size m x n (m > n) to be optimized.
+    k : int
+        The maximum number of iterations allowed for the square maxvol algorithm to converge.
+    e : float
+        The sensitivity of the square maxvol algorithm (e >= 1).
+    tau : float
+        The sensitivity of the rectangular maxvol algorithm (tau >= 1).
+    min_r : int
+        The minimum rank increment (number of added rows) introduced by the
+        rectangular maxvol algorithm.
+    max_r : int
+        The maximum rank increment (number of added rows) introduced by the
+        rectangular maxvol algorithm.
+
+    Output
+    ------
+    I : np.ndarray
+        An array with the indices of the n' rows that approximate the submatrix with
+        the largest volume.
+    B : np.ndarray
+        The m x n' matrix of coefficients, whose rows at the chosen indices I form
+        the identity.
+    """
+    n, r = A.shape
+    r_min = r + min_r
+    r_max = r + max_r if max_r is not None else n
+    r_max = min(r_max, n)
+    if r_min < r or r_min > r_max or r_max > n:
+        raise ValueError("Invalid minimum/maximum number of added rows")
+    I0, B = maxvol_sqr(A, k, e)
+    I = np.hstack([I0, np.zeros(r_max - r, dtype=I0.dtype)])
+    S = np.ones(n, dtype=int)
+    S[I0] = 0
+    F = S * np.linalg.norm(B, axis=1) ** 2
+    for j in range(r, r_max):
+        i = np.argmax(F)
+        if j >= r_min and F[i] <= tau**2:
+            break
+        I[j] = i
+        S[i] = 0
+        v = B.dot(B[i])
+        l = 1.0 / (1 + v[i])
+        B = np.hstack([B - l * np.outer(v, B[i]), l * v.reshape(-1, 1)])
+        F = S * (F - l * v * v)
+    I = I[: B.shape[1]]
+    B[I] = np.eye(B.shape[1], dtype=B.dtype)
+    return I, B
diff --git a/seemps/cross/mesh.py b/seemps/cross/mesh.py
new file mode 100644
index 00000000..0a2a8cda
--- /dev/null
+++ b/seemps/cross/mesh.py
@@ -0,0 +1,92 @@
+from abc import ABC, abstractmethod
+from typing import List, Tuple
+import numpy as np
+from itertools import product
+
+
+class Interval(ABC):
+    """Interval Abstract Base Class.
+
+    This abstracts an Interval object, which implicitly represents an interval
+    discretized along N points between two endpoints, start and stop.
+    """
+
+    def __init__(self, start: float, stop: float, size: int):
+        self.start = start
+        self.stop = stop
+        self.size = size
+
+    @abstractmethod
+    def __getitem__(self, idx: int) -> float:
+        if not (0 <= idx < self.size):
+            raise IndexError("Index out of range")
+
+    def to_vector(self) -> np.ndarray:
+        return np.array([self[idx] for idx in range(self.size)])
+
+
+class RegularClosedInterval(Interval):
+    """Equispaced discretization of the closed interval [start, stop]."""
+
+    def __init__(self, start: float, stop: float, size: int):
+        super().__init__(start, stop, size)
+        self.step = (stop - start) / (size - 1)
+
+    def __getitem__(self, idx: int) -> float:
+        super().__getitem__(idx)
+        return idx * self.step + self.start
+
+
+class RegularHalfOpenInterval(Interval):
+    """Equispaced discretization of the half-open interval [start, stop)."""
+
+    def __init__(self, start: float, stop: float, size: int):
+        super().__init__(start, stop, size)
+        self.step = (stop - start) / size
+
+    def __getitem__(self, idx: int) -> float:
+        super().__getitem__(idx)
+        return idx * self.step + self.start
+
+
+class ChebyshevZerosInterval(Interval):
+    """Irregular discretization given by an affine map between the zeros of the
+    N-th Chebyshev polynomial in [-1, 1] and the interval (start, stop)."""
+
+    def __init__(self, start: float, stop: float, size: int):
+        super().__init__(start, stop, size)
+
+    def __getitem__(self, idx: int) -> float:
+        super().__getitem__(idx)
+        zero = np.cos(np.pi * (2 * (self.size - idx) - 1) / (2 * self.size))
+        return (self.stop - self.start) * (zero + 1) / 2 + self.start
+
+
+class Mesh:
+    """Multidimensional mesh object.
+
+    This represents a multidimensional mesh, which can be understood as the
+    implicit tensor given by the Cartesian product of a collection of intervals.
+
+    Parameters
+    ----------
+    intervals : List[Interval]
+        A list of Interval objects representing the discretization along each dimension.
+    """
+
+    def __init__(self, intervals: List[Interval]):
+        self.intervals = intervals
+        self.dimension = len(intervals)
+
+    def __getitem__(self, indices: Tuple[int, ...]):
+        if len(indices) != self.dimension:
+            raise ValueError("Incorrect index size")
+        return np.array(
+            [interval[idx] for interval, idx in zip(self.intervals, indices)]
+        )
+
+    def shape(self):
+        return tuple(interval.size for interval in self.intervals) + (self.dimension,)
+
+    def to_tensor(self):
+        return np.array(list(product(*self.intervals))).reshape(self.shape())
diff --git a/tests/test_cross.py b/tests/test_cross.py
new file mode 100644
index 00000000..3cbb05e9
--- /dev/null
+++ b/tests/test_cross.py
@@ -0,0 +1,141 @@
+import numpy as np
+from seemps.cross import (
+    Mesh,
+    CrossStrategy,
+    cross_interpolation,
+    reorder_tensor,
+    RegularClosedInterval,
+    RegularHalfOpenInterval,
+    ChebyshevZerosInterval,
+)
+from seemps.state import MPS
+
+from .tools import TestCase
+
+"""
+Issues (TODO):
+    1. The simplification routine simplify() used to truncate the resulting
+    MPS changes it significantly and gives incorrect results.
+"""
+
+
+class TestCross(TestCase):
+    @staticmethod
+    def gaussian_setting(dims, structure="binary"):
+        a = -1
+        b = 1
+        n = 5
+        func = lambda x: np.exp(-np.sum(x**2))
+        intervals = [RegularHalfOpenInterval(a, b, 2**n) for _ in range(dims)]
+        mesh = Mesh(intervals)
+        mesh_tensor = mesh.to_tensor()
+        func_vector = np.apply_along_axis(func, -1, mesh_tensor).flatten()
+        if structure == "binary":
+            mps = MPS.from_vector(func_vector, [2] * (n * dims), normalize=False)
+        elif structure == "tt":
+            mps = MPS.from_vector(func_vector, [2**n] * dims, normalize=False)
+        return func, mesh, mps, func_vector
+
+    # 1D Gaussian
+    def test_cross_1d_from_random(self):
+        func, mesh, _, func_vector = self.gaussian_setting(1)
+        mps = cross_interpolation(func, mesh)
+        self.assertSimilar(func_vector, mps.to_vector())
+
+    def test_cross_1d_from_mps(self):
+        func, mesh, mps0, func_vector = self.gaussian_setting(1)
+        mps = cross_interpolation(func, mesh, mps=mps0)
+        self.assertSimilar(func_vector, mps.to_vector())
+
+    def test_cross_1d_with_measure_norm(self):
+        cross_strategy = CrossStrategy(error_type="norm")
+        func, mesh, _, func_vector = self.gaussian_setting(1)
+        mps = cross_interpolation(func, mesh, cross_strategy=cross_strategy)
+        self.assertSimilar(func_vector, mps.to_vector())
+
+    # FAILS (see Issues above)
+    # def test_cross_1d_simplified(self):
+    #     func, mesh, _, _ = self.gaussian_setting(1)
+    #     mps = cross_interpolation(func, mesh)
+    #     mps_simplified = simplify(mps)
+    #     self.assertSimilar(mps_simplified.to_vector(), mps.to_vector())
+
+    # 2D Gaussian
+    def test_cross_2d_from_random(self):
+        func, mesh, _, func_vector = self.gaussian_setting(2)
+        mps = cross_interpolation(func, mesh)
+        self.assertSimilar(func_vector, mps.to_vector())
+
+    def test_cross_2d_with_ordering_B(self):
+        cross_strategy = CrossStrategy(mps_ordering="B")
+        func, mesh, _, func_vector = self.gaussian_setting(2)
+        mps = cross_interpolation(func, mesh, cross_strategy=cross_strategy)
+        qubits = [int(np.log2(s)) for s in mesh.shape()[:-1]]
+        tensor = reorder_tensor(mps.to_vector(), qubits)
+        self.assertSimilar(func_vector, tensor)
+
+    def test_cross_2d_with_structure_tt(self):
+        func, mesh, mps0, func_vector = self.gaussian_setting(2, structure="tt")
+        mps = cross_interpolation(func, mesh, mps=mps0)
+        self.assertSimilar(func_vector, mps.to_vector())
+
+
+class TestMesh(TestCase):
+    def test_regular_closed_interval(self):
+        interval = RegularClosedInterval(0.0, 10, 11)
+        assert interval[5] == 5
+        vector = interval.to_vector()
+        assert np.all(vector == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]))
+
+    def test_regular_open_interval(self):
+        interval = RegularHalfOpenInterval(0.0, 10, 10)
+        assert interval[5] == 5
+        vector = interval.to_vector()
+        assert np.all(vector == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))
+
+    def test_chebyshev_zeros_interval(self):
+        interval = ChebyshevZerosInterval(-1.0, 1.0, 3)
+        value = np.sqrt(3) / 2
+        assert np.isclose(interval[0], -value)
+        vector = interval.to_vector()
+        assert np.allclose(vector, np.array([-value, 0, value]))
+
+    def test_rescaled_chebyshev_zeros_interval(self):
+        f = lambda x: 2 * x + 2  # Affine transformation
+        interval = ChebyshevZerosInterval(f(-1.0), f(1.0), 3)
+        value = np.sqrt(3) / 2
+        assert np.isclose(interval[0], f(-value))
+        vector = interval.to_vector()
+        assert np.allclose(vector, np.array([f(-value), f(0), f(value)]))
+
+    def test_regular_closed_mesh(self):
+        dimension = 5
+        intervals = [RegularClosedInterval(0.0, 10, 11) for _ in range(dimension)]
+        mesh = Mesh(intervals)
+        assert np.array_equal(mesh[1, 2, 3, 4, 5], np.array([1, 2, 3, 4, 5]))
+
+    def test_regular_half_open_mesh(self):
+        dimension = 5
+        intervals = [RegularHalfOpenInterval(0.0, 10, 10) for _ in range(dimension)]
+        mesh = Mesh(intervals)
+        assert np.array_equal(mesh[1, 2, 3, 4, 5], np.array([1, 2, 3, 4, 5]))
+
+    def test_chebyshev_zeros_mesh(self):
+        dimension = 3
+        f = lambda x: 2 * x + 2
+        value = np.sqrt(3) / 2
+        intervals = [
+            ChebyshevZerosInterval(f(-1.0), f(1.0), 3) for _ in range(dimension)
+        ]
+        mesh = Mesh(intervals)
+        assert np.allclose(mesh[0, 1, 2], np.array([f(-value), f(0), f(value)]))
+
+    def test_mesh_totensor(self):
+        dimension = 2
+        intervals = [RegularClosedInterval(0.0, 1.0, 2) for _ in range(dimension)]
+        mesh = Mesh(intervals)
+        tensor = mesh.to_tensor()
+        assert np.array_equal(tensor[0, 0], [0, 0])
+        assert np.array_equal(tensor[0, 1], [0, 1])
+        assert np.array_equal(tensor[1, 0], [1, 0])
+        assert np.array_equal(tensor[1, 1], [1, 1])
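
Reviewer note, not part of the patch: a minimal usage sketch of the API introduced above, mirroring the calls exercised in tests/test_cross.py. The Gaussian target, interval bounds, discretization size, and strategy values below are illustrative assumptions, not values taken from the diff.

    import numpy as np
    from seemps.cross import (
        Mesh,
        RegularHalfOpenInterval,
        CrossStrategy,
        cross_interpolation,
    )

    # 2D Gaussian sampled on 2**5 points per dimension (binary MPS with 10 sites)
    func = lambda x: np.exp(-np.sum(x**2))
    mesh = Mesh([RegularHalfOpenInterval(-1.0, 1.0, 2**5) for _ in range(2)])
    strategy = CrossStrategy(tol=1e-10, error_type="sampling")
    mps = cross_interpolation(func, mesh, cross_strategy=strategy)
    values = mps.to_vector()  # flattened tensor of function values on the mesh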