Revert "Merge branch 'main' into main"
This reverts commit 3e50daa, reversing
changes made to 119acc4.
PaulaGarciaMolina committed Jan 19, 2024
1 parent 6d7b7f7 commit 466822b
Showing 4 changed files with 53 additions and 177 deletions.
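The visible hunks all point the same way: the revert appears to drop the extra tol_up tolerance from the optimizers and to restore the single-argument callback convention, in which the callback receives only the current MPS instead of an intermediate EvolutionResults/OptimizeResults snapshot. The sketch below shows one way a caller could adapt to that restored convention; the factory and its energy bookkeeping are illustrative additions, not part of the repository, and rely only on the H.expectation(state) call that already appears in the diff.

def make_energy_logger(H):
    # Hypothetical helper (not in seemps): builds a single-argument callback
    # together with the list it fills with <psi|H|psi> at every call.
    log = []

    def callback(state):
        # With the reverted signature the callback no longer receives a
        # results object, so any bookkeeping has to happen here.
        if state is not None:
            log.append(H.expectation(state).real)

    return callback, log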
104 changes: 21 additions & 83 deletions src/seemps/analysis/evolution.py
@@ -74,35 +74,19 @@ def euler(
for i in range(maxiter):
H_state = H.apply(state)
E = H.expectation(state).real
if callback is not None:
callback(state)
energies.append(E)
if E < best_energy:
best_energy, best_vector = E, state
if callback is not None:
callback(
state,
EvolutionResults(
state=best_vector,
energy=best_energy,
trajectory=energies,
Δβ=Δβ,
),
)
state = simplify(state - Δβ * H_state, strategy=normalization_strategy)
H_state = H.apply(state)
E = H.expectation(state).real
if callback is not None:
callback(state)
energies.append(E)
if E < best_energy:
best_energy, best_vector = E, state
if callback is not None:
callback(
state,
EvolutionResults(
state=best_vector,
energy=best_energy,
trajectory=energies,
Δβ=Δβ,
),
)
β = Δβ * np.arange(maxiter + 1)
return EvolutionResults(
state=best_vector,
@@ -151,37 +135,21 @@ def improved_euler(
for i in range(maxiter):
H_state = H.apply(state)
E = H.expectation(state).real
if callback is not None:
callback(state)
energies.append(E)
if E < best_energy:
best_energy, best_vector = E, state
if callback is not None:
callback(
state,
EvolutionResults(
state=best_vector,
energy=best_energy,
trajectory=energies,
Δβ=Δβ,
),
)
k = simplify(2 * state - Δβ * H_state, strategy=strategy)
Hk = H.apply(k)
state = simplify(state - 0.5 * Δβ * Hk, strategy=normalization_strategy)
H_state = H.apply(state)
E = H.expectation(state).real
if callback is not None:
callback(state)
energies.append(E)
if E < best_energy:
best_energy, best_vector = E, state
if callback is not None:
callback(
state,
EvolutionResults(
state=best_vector,
energy=best_energy,
trajectory=energies,
Δβ=Δβ,
),
)
β = Δβ * np.arange(maxiter + 1)
return EvolutionResults(
state=best_vector,
@@ -231,19 +199,11 @@ def runge_kutta(
for i in range(maxiter):
H_state = H.apply(state)
E = H.expectation(state).real
if callback is not None:
callback(state)
energies.append(E)
if E < best_energy:
best_energy, best_vector = E, state
if callback is not None:
callback(
state,
EvolutionResults(
state=best_vector,
energy=best_energy,
trajectory=energies,
Δβ=Δβ,
),
)
state2 = simplify(state - 0.5 * Δβ * H_state, strategy=strategy)
H_state2 = H.apply(state2)
state3 = simplify(state - 0.5 * Δβ * H_state2, strategy=strategy)
@@ -256,19 +216,11 @@
)
H_state = H.apply(state)
E = H.expectation(state).real
if callback is not None:
callback(state)
energies.append(E)
if E < best_energy:
best_energy, best_vector = E, state
if callback is not None:
callback(
state,
EvolutionResults(
state=best_vector,
energy=best_energy,
trajectory=energies,
Δβ=Δβ,
),
)
β = Δβ * np.arange(maxiter + 1)
return EvolutionResults(
state=best_vector,
@@ -323,19 +275,8 @@ def runge_kutta_fehlberg(
while i < maxiter:
H_state = H.apply(state)
E = H.expectation(state).real
energies.append(E)
if E < best_energy:
best_energy, best_vector = E, state
if callback is not None:
callback(
state,
EvolutionResults(
state=best_vector,
energy=best_energy,
trajectory=energies,
Δβ=Δβ,
),
)
callback(state)
k1 = -1 * H_state
state2 = simplify(state + 0.25 * Δβ * k1, strategy=strategy)
k2 = -1 * H.apply(state2)
@@ -398,34 +339,31 @@ def runge_kutta_fehlberg(
)
).real
if δ > tol_rk:
energies.pop()
if callback is not None:
callback(None)
Δβ = 0.9 * Δβ * (tol_rk / δ) ** (1 / 5)
elif δ == 0:
i += 1
energies.append(E)
if E < best_energy:
best_energy, best_vector = E, state
state = state_ord5
Δβs.append(Δβ)
else:
i += 1
energies.append(E)
if E < best_energy:
best_energy, best_vector = E, state
state = state_ord5
Δβs.append(Δβ)
Δβ = 0.9 * Δβ * (tol_rk / δ) ** (1 / 5)
H_state = H.apply(state)
E = H.expectation(state).real
if callback is not None:
callback(state)
energies.append(E)
if E < best_energy:
best_energy, best_vector = E, state
if callback is not None:
callback(
state,
EvolutionResults(
state=best_vector,
energy=best_energy,
trajectory=energies,
Δβ=Δβs,
),
)
β = [0]
β_i = 0
for Δβ_i in Δβs:
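For reference, these are the update rules of the integrators touched above, transcribed from the lines that remain visible in the hunks; S[.] stands for the simplify(...) truncation with the corresponding strategy, and the middle stages of runge_kutta and runge_kutta_fehlberg are collapsed in the diff, so they are omitted rather than guessed at.

\begin{align*}
\text{euler:}\quad
  & |\psi_{k+1}\rangle = S\!\left[\,|\psi_k\rangle - \Delta\beta\, H|\psi_k\rangle\,\right]\\
\text{improved\_euler:}\quad
  & |k\rangle = S\!\left[\,2|\psi_k\rangle - \Delta\beta\, H|\psi_k\rangle\,\right],\qquad
    |\psi_{k+1}\rangle = S\!\left[\,|\psi_k\rangle - \tfrac{1}{2}\Delta\beta\, H|k\rangle\,\right]\\
\text{runge\_kutta (first stages):}\quad
  & |\psi^{(2)}\rangle = S\!\left[\,|\psi_k\rangle - \tfrac{1}{2}\Delta\beta\, H|\psi_k\rangle\,\right],\qquad
    |\psi^{(3)}\rangle = S\!\left[\,|\psi_k\rangle - \tfrac{1}{2}\Delta\beta\, H|\psi^{(2)}\rangle\,\right]\\
\text{runge\_kutta\_fehlberg (first stage and step control):}\quad
  & k_1 = -H|\psi_k\rangle,\qquad
    |\psi^{(2)}\rangle = S\!\left[\,|\psi_k\rangle + \tfrac{1}{4}\Delta\beta\, k_1\,\right],\qquad
    \Delta\beta \leftarrow 0.9\,\Delta\beta\,(\mathrm{tol}_{\mathrm{rk}}/\delta)^{1/5}
\end{align*}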
41 changes: 8 additions & 33 deletions src/seemps/optimization/arnoldi.py
@@ -99,7 +99,6 @@ def arnoldi_eigh(
maxiter: int = 100,
nvectors: int = 10,
tol: float = 1e-13,
tol_up: float = 1e-13,
strategy: Strategy = DESCENT_STRATEGY,
miniter: int = 1,
callback: Optional[Callable] = None,
@@ -110,25 +109,15 @@
arnoldi.add_vector(v0)
v: MPS = operator @ v0 # type: ignore
best_energy = arnoldi.H[0, 0].real
if callback is not None:
callback(arnoldi.V[0])
variance = abs(scprod(v, v)) - best_energy * best_energy
best_vector = v0
energies: list[float] = [best_energy]
variances: list[float] = [variance]
last_eigenvalue = variance = np.Inf
message = f"Exceeded maximum number of steps {maxiter}"
converged = True
if callback is not None:
callback(
arnoldi.V[0],
OptimizeResults(
state=best_vector,
energy=best_energy,
converged=converged,
message=message,
trajectory=energies,
variances=variances,
),
)
for i in range(maxiter):
v, success = arnoldi.add_vector(v)
if not success and nvectors == 2:
@@ -148,33 +137,19 @@
eigenvalue - last_eigenvalue,
eigenvalue,
)
v = operator @ v # type: ignore
energy = arnoldi.H[0, 0].real
if len(arnoldi.V) == 1:
eigenvalue_change = energy - energies[-1]
if (
(eigenvalue_change > 0 and eigenvalue_change >= abs(tol_up))
or (eigenvalue_change < 0 and eigenvalue_change >= -abs(tol))
and i > miniter
):
eigenvalue_change >= abs(tol) or eigenvalue_change >= -abs(tol)
) and i > miniter:
message = f"Eigenvalue converged within tolerance {tol}"
converged = True
break
v = operator @ v # type: ignore
energy = arnoldi.H[0, 0].real
if callback is not None:
callback(arnoldi.V[0])
energies.append(energy)
if energy < best_energy:
best_energy, best_vector = energy, arnoldi.V[0]
if callback is not None:
callback(
arnoldi.V[0],
OptimizeResults(
state=best_vector,
energy=best_energy,
converged=converged,
message=message,
trajectory=energies,
variances=variances,
),
)
return OptimizeResults(
state=best_vector,
energy=best_energy,
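As far as these hunks show, the net effect on arnoldi_eigh is to remove the separate tol_up tolerance and the results-object callback, and to fold the convergence check into one condition on the eigenvalue change. Pulled out into a standalone helper purely for illustration (the helper itself is not in the repository), the restored test reads:

def eigenvalue_converged(eigenvalue_change, tol, i, miniter):
    # Mirrors the retained test in arnoldi_eigh: any change at or above
    # -abs(tol) counts as converged, but only after miniter iterations.
    return (eigenvalue_change >= abs(tol)
            or eigenvalue_change >= -abs(tol)) and i > miniter

In particular, an energy that creeps upward is no longer measured against its own tol_up threshold; both directions of change are compared with the single tol.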
35 changes: 6 additions & 29 deletions src/seemps/optimization/descent.py
@@ -48,7 +48,6 @@ def gradient_descent(
state: MPS,
maxiter=1000,
tol: float = 1e-13,
tol_up: float = 1e-13,
k_mean=10,
tol_variance: float = 1e-14,
strategy: Strategy = DESCENT_STRATEGY,
@@ -112,28 +111,16 @@ def energy_and_variance(state: MPS) -> tuple[MPS, float, float, float]:
state = CanonicalMPS(state, normalize=True)
for step in range(maxiter):
H_state, E, variance, avg_H2 = energy_and_variance(state)
if callback is not None:
callback(state)
if DEBUG:
log(f"step = {step:5d}, energy = {E}, variance = {variance}")
energies.append(E)
variances.append(variance)
if E < best_energy:
best_energy, best_vector, _ = E, state, variance
if callback is not None:
callback(
state,
OptimizeResults(
state=best_vector,
energy=best_energy,
converged=converged,
message=message,
trajectory=energies,
variances=variances,
),
)
E_mean: float = np.mean(energies[(-min(k_mean, len(energies)-1))-1 : -1]) # type: ignore
if (E_mean - last_E_mean > 0 and E_mean - last_E_mean >= abs(tol_up)) or (
E_mean - last_E_mean < 0 and E_mean - last_E_mean >= -abs(tol)
):
E_mean: float = np.mean(energies[(-min(k_mean, len(energies)-1))-1 : -1]) # type: ignore
if E_mean - last_E_mean >= abs(tol) or E_mean - last_E_mean >= -abs(tol):
message = f"Energy converged within tolerance {tol}"
converged = True
break
@@ -154,22 +141,12 @@ def energy_and_variance(state: MPS) -> tuple[MPS, float, float, float]:
# which was already calculated
if not converged:
H_state, E, variance, _ = energy_and_variance(state)
if callback is not None:
callback(state)
if E < best_energy:
best_energy, best_vector, _ = E, state, variance
energies.append(E)
variances.append(variance)
if callback is not None:
callback(
state,
OptimizeResults(
state=best_vector,
energy=best_energy,
converged=converged,
message=message,
trajectory=energies,
variances=variances,
),
)
return OptimizeResults(
state=best_vector,
energy=best_energy,
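gradient_descent gets the same treatment: tol_up disappears and the stopping rule compares the change of a windowed mean of recent energies against the single tol. Below is a hedged sketch of that rule, reusing the slice from the retained line; the helper name and the guard for very short histories are additions of this sketch, not of the library.

import numpy as np

def mean_energy_converged(energies, last_E_mean, k_mean, tol):
    # Windowed mean over (at most) the last k_mean recorded energies,
    # as in the retained line of gradient_descent.
    if len(energies) < 2:
        return False, last_E_mean
    E_mean = float(np.mean(energies[-min(k_mean, len(energies) - 1) - 1 : -1]))
    change = E_mean - last_E_mean
    # Single-tolerance test restored by the revert (no tol_up).
    return (change >= abs(tol) or change >= -abs(tol)), E_mean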