
Commit

Merge branch 'main' into main
PaulaGarciaMolina authored Jan 19, 2024
2 parents 119acc4 + a546da4 commit 3e50daa
Showing 4 changed files with 177 additions and 53 deletions.
104 changes: 83 additions & 21 deletions src/seemps/analysis/evolution.py
@@ -74,19 +74,35 @@ def euler(
     for i in range(maxiter):
         H_state = H.apply(state)
         E = H.expectation(state).real
-        if callback is not None:
-            callback(state)
         energies.append(E)
         if E < best_energy:
             best_energy, best_vector = E, state
+        if callback is not None:
+            callback(
+                state,
+                EvolutionResults(
+                    state=best_vector,
+                    energy=best_energy,
+                    trajectory=energies,
+                    Δβ=Δβ,
+                ),
+            )
         state = simplify(state - Δβ * H_state, strategy=normalization_strategy)
     H_state = H.apply(state)
     E = H.expectation(state).real
-    if callback is not None:
-        callback(state)
     energies.append(E)
     if E < best_energy:
         best_energy, best_vector = E, state
+    if callback is not None:
+        callback(
+            state,
+            EvolutionResults(
+                state=best_vector,
+                energy=best_energy,
+                trajectory=energies,
+                Δβ=Δβ,
+            ),
+        )
     β = Δβ * np.arange(maxiter + 1)
     return EvolutionResults(
         state=best_vector,
@@ -135,21 +151,37 @@ def improved_euler(
     for i in range(maxiter):
         H_state = H.apply(state)
         E = H.expectation(state).real
-        if callback is not None:
-            callback(state)
         energies.append(E)
         if E < best_energy:
             best_energy, best_vector = E, state
+        if callback is not None:
+            callback(
+                state,
+                EvolutionResults(
+                    state=best_vector,
+                    energy=best_energy,
+                    trajectory=energies,
+                    Δβ=Δβ,
+                ),
+            )
         k = simplify(2 * state - Δβ * H_state, strategy=strategy)
         Hk = H.apply(k)
         state = simplify(state - 0.5 * Δβ * Hk, strategy=normalization_strategy)
     H_state = H.apply(state)
     E = H.expectation(state).real
-    if callback is not None:
-        callback(state)
     energies.append(E)
     if E < best_energy:
         best_energy, best_vector = E, state
+    if callback is not None:
+        callback(
+            state,
+            EvolutionResults(
+                state=best_vector,
+                energy=best_energy,
+                trajectory=energies,
+                Δβ=Δβ,
+            ),
+        )
     β = Δβ * np.arange(maxiter + 1)
     return EvolutionResults(
         state=best_vector,
@@ -199,11 +231,19 @@ def runge_kutta(
     for i in range(maxiter):
         H_state = H.apply(state)
         E = H.expectation(state).real
-        if callback is not None:
-            callback(state)
         energies.append(E)
         if E < best_energy:
             best_energy, best_vector = E, state
+        if callback is not None:
+            callback(
+                state,
+                EvolutionResults(
+                    state=best_vector,
+                    energy=best_energy,
+                    trajectory=energies,
+                    Δβ=Δβ,
+                ),
+            )
         state2 = simplify(state - 0.5 * Δβ * H_state, strategy=strategy)
         H_state2 = H.apply(state2)
         state3 = simplify(state - 0.5 * Δβ * H_state2, strategy=strategy)
@@ -216,11 +256,19 @@
         )
     H_state = H.apply(state)
     E = H.expectation(state).real
-    if callback is not None:
-        callback(state)
     energies.append(E)
     if E < best_energy:
         best_energy, best_vector = E, state
+    if callback is not None:
+        callback(
+            state,
+            EvolutionResults(
+                state=best_vector,
+                energy=best_energy,
+                trajectory=energies,
+                Δβ=Δβ,
+            ),
+        )
     β = Δβ * np.arange(maxiter + 1)
     return EvolutionResults(
         state=best_vector,
@@ -275,8 +323,19 @@ def runge_kutta_fehlberg(
     while i < maxiter:
         H_state = H.apply(state)
         E = H.expectation(state).real
+        energies.append(E)
+        if E < best_energy:
+            best_energy, best_vector = E, state
         if callback is not None:
-            callback(state)
+            callback(
+                state,
+                EvolutionResults(
+                    state=best_vector,
+                    energy=best_energy,
+                    trajectory=energies,
+                    Δβ=Δβ,
+                ),
+            )
         k1 = -1 * H_state
         state2 = simplify(state + 0.25 * Δβ * k1, strategy=strategy)
         k2 = -1 * H.apply(state2)
@@ -339,31 +398,34 @@ def runge_kutta_fehlberg(
             )
         ).real
         if δ > tol_rk:
+            energies.pop()
             if callback is not None:
                 callback(None)
             Δβ = 0.9 * Δβ * (tol_rk / δ) ** (1 / 5)
         elif δ == 0:
             i += 1
-            energies.append(E)
-            if E < best_energy:
-                best_energy, best_vector = E, state
             state = state_ord5
             Δβs.append(Δβ)
         else:
             i += 1
-            energies.append(E)
-            if E < best_energy:
-                best_energy, best_vector = E, state
             state = state_ord5
             Δβs.append(Δβ)
             Δβ = 0.9 * Δβ * (tol_rk / δ) ** (1 / 5)
     H_state = H.apply(state)
     E = H.expectation(state).real
-    if callback is not None:
-        callback(state)
     energies.append(E)
     if E < best_energy:
         best_energy, best_vector = E, state
+    if callback is not None:
+        callback(
+            state,
+            EvolutionResults(
+                state=best_vector,
+                energy=best_energy,
+                trajectory=energies,
+                Δβ=Δβs,
+            ),
+        )
     β = [0]
     β_i = 0
     for Δβ_i in Δβs:
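Across all four integrators (euler, improved_euler, runge_kutta, runge_kutta_fehlberg) the change is the same: energies and the running best state are recorded before the callback fires, and the callback now receives the current state together with a partially filled EvolutionResults (best vector, best energy, the energy trajectory, and the step Δβ, or the list of accepted steps Δβs in the Fehlberg case). A minimal sketch of a callback compatible with this convention follows; the keyword signature of the integrators and the single-argument callback(None) call on rejected adaptive steps are read off this diff, not taken from documented API.

```python
from typing import Any, Optional


def monitor(state: Any, results: Optional[Any] = None) -> None:
    """Sketch of a callback for the new (state, EvolutionResults) convention.

    On rejected Runge-Kutta-Fehlberg steps the loop above still calls
    ``callback(None)`` with a single argument, hence the defaulted parameter.
    """
    if state is None or results is None:
        return  # rejected adaptive step: nothing new to report
    # results.trajectory holds the energies recorded so far and
    # results.energy the best energy seen, as built in the hunks above.
    print(
        f"step {len(results.trajectory):4d}: "
        f"E = {results.trajectory[-1]:.12f}, best = {results.energy:.12f}"
    )


# Hypothetical usage; the exact keyword signature of euler() is not shown here:
# results = euler(H, initial_state, maxiter=200, callback=monitor)
```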
41 changes: 33 additions & 8 deletions src/seemps/optimization/arnoldi.py
@@ -99,6 +99,7 @@ def arnoldi_eigh(
     maxiter: int = 100,
     nvectors: int = 10,
     tol: float = 1e-13,
+    tol_up: float = 1e-13,
     strategy: Strategy = DESCENT_STRATEGY,
     miniter: int = 1,
     callback: Optional[Callable] = None,
@@ -109,15 +110,25 @@
     arnoldi.add_vector(v0)
     v: MPS = operator @ v0  # type: ignore
     best_energy = arnoldi.H[0, 0].real
-    if callback is not None:
-        callback(arnoldi.V[0])
     variance = abs(scprod(v, v)) - best_energy * best_energy
     best_vector = v0
     energies: list[float] = [best_energy]
     variances: list[float] = [variance]
     last_eigenvalue = variance = np.Inf
     message = f"Exceeded maximum number of steps {maxiter}"
     converged = True
+    if callback is not None:
+        callback(
+            arnoldi.V[0],
+            OptimizeResults(
+                state=best_vector,
+                energy=best_energy,
+                converged=converged,
+                message=message,
+                trajectory=energies,
+                variances=variances,
+            ),
+        )
     for i in range(maxiter):
         v, success = arnoldi.add_vector(v)
         if not success and nvectors == 2:
@@ -137,19 +148,33 @@
                 eigenvalue - last_eigenvalue,
                 eigenvalue,
             )
+        v = operator @ v  # type: ignore
+        energy = arnoldi.H[0, 0].real
         if len(arnoldi.V) == 1:
             eigenvalue_change = energy - energies[-1]
             if (
-                eigenvalue_change >= abs(tol) or eigenvalue_change >= -abs(tol)
-            ) and i > miniter:
+                (eigenvalue_change > 0 and eigenvalue_change >= abs(tol_up))
+                or (eigenvalue_change < 0 and eigenvalue_change >= -abs(tol))
+                and i > miniter
+            ):
                 message = f"Eigenvalue converged within tolerance {tol}"
                 converged = True
                 break
-        v = operator @ v  # type: ignore
-        energy = arnoldi.H[0, 0].real
-        if callback is not None:
-            callback(arnoldi.V[0])
         energies.append(energy)
+        if energy < best_energy:
+            best_energy, best_vector = energy, arnoldi.V[0]
+        if callback is not None:
+            callback(
+                arnoldi.V[0],
+                OptimizeResults(
+                    state=best_vector,
+                    energy=best_energy,
+                    converged=converged,
+                    message=message,
+                    trajectory=energies,
+                    variances=variances,
+                ),
+            )
     return OptimizeResults(
         state=best_vector,
         energy=best_energy,
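The new tol_up argument decouples the stopping threshold for eigenvalue increases from the one used for stagnating decreases, and the callback now receives (state, OptimizeResults) with the running best vector, trajectory, and variances. The acceptance test can be read as the standalone predicate sketched below; names are illustrative, not part of the seemps API. Note that in the committed condition the trailing `and i > miniter` binds only to the second clause by Python operator precedence, while the sketch groups it over both clauses, which appears to be the intended reading.

```python
def eigenvalue_converged(
    change: float, tol: float, tol_up: float, i: int, miniter: int
) -> bool:
    # Stop when the eigenvalue rose by at least tol_up, or decreased by less
    # than tol (i.e. stalled), once more than miniter iterations have run.
    went_up = change > 0 and change >= abs(tol_up)
    stalled = change < 0 and change >= -abs(tol)
    return (went_up or stalled) and i > miniter
```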
35 changes: 29 additions & 6 deletions src/seemps/optimization/descent.py
@@ -48,6 +48,7 @@ def gradient_descent(
     state: MPS,
     maxiter=1000,
     tol: float = 1e-13,
+    tol_up: float = 1e-13,
     k_mean=10,
     tol_variance: float = 1e-14,
     strategy: Strategy = DESCENT_STRATEGY,
@@ -111,16 +112,28 @@ def energy_and_variance(state: MPS) -> tuple[MPS, float, float, float]:
     state = CanonicalMPS(state, normalize=True)
     for step in range(maxiter):
         H_state, E, variance, avg_H2 = energy_and_variance(state)
-        if callback is not None:
-            callback(state)
         if DEBUG:
             log(f"step = {step:5d}, energy = {E}, variance = {variance}")
         energies.append(E)
         variances.append(variance)
         if E < best_energy:
             best_energy, best_vector, _ = E, state, variance
-        E_mean: float = np.mean(energies[(-min(k_mean, len(energies)-1))-1 : -1])  # type: ignore
-        if E_mean - last_E_mean >= abs(tol) or E_mean - last_E_mean >= -abs(tol):
+        if callback is not None:
+            callback(
+                state,
+                OptimizeResults(
+                    state=best_vector,
+                    energy=best_energy,
+                    converged=converged,
+                    message=message,
+                    trajectory=energies,
+                    variances=variances,
+                ),
+            )
+        E_mean: float = np.mean(energies[(-min(k_mean, len(energies)-1))-1 : -1])  # type: ignore
+        if (E_mean - last_E_mean > 0 and E_mean - last_E_mean >= abs(tol_up)) or (
+            E_mean - last_E_mean < 0 and E_mean - last_E_mean >= -abs(tol)
+        ):
             message = f"Energy converged within tolerance {tol}"
             converged = True
             break
@@ -141,12 +154,22 @@ def energy_and_variance(state: MPS) -> tuple[MPS, float, float, float]:
     # which was already calculated
     if not converged:
         H_state, E, variance, _ = energy_and_variance(state)
-        if callback is not None:
-            callback(state)
         if E < best_energy:
             best_energy, best_vector, _ = E, state, variance
         energies.append(E)
         variances.append(variance)
+        if callback is not None:
+            callback(
+                state,
+                OptimizeResults(
+                    state=best_vector,
+                    energy=best_energy,
+                    converged=converged,
+                    message=message,
+                    trajectory=energies,
+                    variances=variances,
+                ),
+            )
     return OptimizeResults(
         state=best_vector,
         energy=best_energy,
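gradient_descent follows the same pattern: tol_up bounds upward drift of the k_mean-window energy average separately from tol, and the callback is invoked with (state, OptimizeResults) both inside the loop and at the final re-evaluation. A hedged usage sketch follows; the Hamiltonian and state names are placeholders for illustration, and only the new keyword and callback convention are taken from this diff.

```python
# Assumed import path; only the new keyword tol_up and the two-argument
# callback are taken from the diff above.
from seemps.optimization.descent import gradient_descent

trace = []


def record(state, results) -> None:
    # results is the OptimizeResults built in the hunks above.
    latest_variance = results.variances[-1] if results.variances else None
    trace.append((results.energy, latest_variance))


# Hypothetical call: H stands for an MPO Hamiltonian, psi0 for an MPS guess.
# final = gradient_descent(H, psi0, maxiter=500, tol=1e-13, tol_up=1e-10,
#                          callback=record)
```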