From 19bd6ef67dada5544bf492aa7d6e3cfc2a9091c4 Mon Sep 17 00:00:00 2001 From: "Documenter.jl" Date: Thu, 28 Nov 2024 02:36:28 +0000 Subject: [PATCH] build based on 0c99d6e --- dev/.documenter-siteinfo.json | 2 +- dev/apireference/index.html | 108 +-- dev/changelog/index.html | 2 +- dev/examples/FAST_hydro_thermal/index.html | 8 +- .../FAST_production_management/index.html | 2 +- dev/examples/FAST_quickstart/index.html | 2 +- dev/examples/Hydro_thermal/index.html | 16 +- dev/examples/SDDP.log | 808 +++++++++--------- dev/examples/SDDP_0.0.log | 6 +- dev/examples/SDDP_0.0625.log | 6 +- dev/examples/SDDP_0.125.log | 6 +- dev/examples/SDDP_0.25.log | 6 +- dev/examples/SDDP_0.375.log | 6 +- dev/examples/SDDP_0.5.log | 6 +- dev/examples/SDDP_0.625.log | 6 +- dev/examples/SDDP_0.75.log | 6 +- dev/examples/SDDP_0.875.log | 6 +- dev/examples/SDDP_1.0.log | 6 +- .../index.html | 24 +- .../index.html | 18 +- .../index.html | 14 +- .../index.html | 14 +- .../agriculture_mccardle_farm/index.html | 2 +- dev/examples/air_conditioning/index.html | 14 +- .../air_conditioning_forward/index.html | 2 +- dev/examples/all_blacks/index.html | 8 +- .../asset_management_simple/index.html | 20 +- .../asset_management_stagewise/index.html | 24 +- dev/examples/belief/index.html | 24 +- dev/examples/biobjective_hydro/index.html | 62 +- dev/examples/booking_management/index.html | 2 +- dev/examples/generation_expansion/index.html | 30 +- dev/examples/hydro_valley/index.html | 2 +- .../infinite_horizon_hydro_thermal/index.html | 20 +- .../infinite_horizon_trivial/index.html | 12 +- dev/examples/no_strong_duality/index.html | 8 +- .../objective_state_newsvendor/index.html | 299 ++++--- dev/examples/sldp_example_one/index.html | 27 +- dev/examples/sldp_example_two/index.html | 38 +- dev/examples/stochastic_all_blacks/index.html | 10 +- dev/examples/the_farmers_problem/index.html | 10 +- dev/examples/vehicle_location/index.html | 2 +- dev/explanation/risk/index.html | 14 +- 
dev/explanation/theory_intro/index.html | 294 +++---- .../access_previous_variables/index.html | 2 +- .../index.html | 2 +- dev/guides/add_a_risk_measure/index.html | 16 +- dev/guides/add_integrality/index.html | 2 +- .../add_multidimensional_noise/index.html | 2 +- .../index.html | 2 +- dev/guides/choose_a_stopping_rule/index.html | 2 +- dev/guides/create_a_belief_state/index.html | 2 +- .../create_a_general_policy_graph/index.html | 2 +- dev/guides/debug_a_model/index.html | 2 +- .../index.html | 2 +- .../index.html | 2 +- dev/index.html | 2 +- dev/release_notes/index.html | 2 +- dev/tutorial/SDDP.log | 392 ++++----- dev/tutorial/arma/index.html | 53 +- dev/tutorial/convex.cuts.json | 2 +- dev/tutorial/decision_hazard/index.html | 2 +- .../example_milk_producer/08bbc6cb.svg | 625 -------------- .../example_milk_producer/293d6bc8.svg | 625 ++++++++++++++ .../example_milk_producer/51c96f86.svg | 544 ------------ .../example_milk_producer/77e8c057.svg | 544 ++++++++++++ .../{055226c1.svg => ee182ae1.svg} | 238 +++--- dev/tutorial/example_milk_producer/index.html | 68 +- dev/tutorial/example_newsvendor/120873e5.svg | 37 + dev/tutorial/example_newsvendor/31380ebe.svg | 98 --- dev/tutorial/example_newsvendor/75368895.svg | 37 - dev/tutorial/example_newsvendor/e6131d3b.svg | 100 +++ dev/tutorial/example_newsvendor/index.html | 180 ++-- dev/tutorial/example_reservoir/0e006628.svg | 86 ++ .../{c70d804a.svg => 3ebc5c2b.svg} | 76 +- .../{b2cec97b.svg => 6352eda2.svg} | 64 +- .../{f6caca7e.svg => 99239794.svg} | 76 +- .../{1e774439.svg => b237d57b.svg} | 64 +- .../{be9e547c.svg => b70cc688.svg} | 172 ++-- .../{6735fb31.svg => ccc1a9fc.svg} | 268 +++--- dev/tutorial/example_reservoir/e5433df1.svg | 86 -- dev/tutorial/example_reservoir/index.html | 99 +-- dev/tutorial/first_steps/index.html | 28 +- .../inventory/{1851bc25.svg => 273b59b4.svg} | 86 +- .../inventory/{98af5933.svg => 741393b0.svg} | 72 +- dev/tutorial/inventory/index.html | 67 +- 
dev/tutorial/markov_uncertainty/index.html | 10 +- dev/tutorial/mdps/index.html | 18 +- dev/tutorial/objective_states/index.html | 38 +- dev/tutorial/objective_uncertainty/index.html | 12 +- dev/tutorial/pglib_opf/index.html | 41 +- .../plotting/{69a2680c.svg => 07c31bb8.svg} | 130 +-- dev/tutorial/plotting/index.html | 10 +- dev/tutorial/spaghetti_plot.html | 2 +- dev/tutorial/warnings/index.html | 14 +- 95 files changed, 3555 insertions(+), 3553 deletions(-) delete mode 100644 dev/tutorial/example_milk_producer/08bbc6cb.svg create mode 100644 dev/tutorial/example_milk_producer/293d6bc8.svg delete mode 100644 dev/tutorial/example_milk_producer/51c96f86.svg create mode 100644 dev/tutorial/example_milk_producer/77e8c057.svg rename dev/tutorial/example_milk_producer/{055226c1.svg => ee182ae1.svg} (59%) create mode 100644 dev/tutorial/example_newsvendor/120873e5.svg delete mode 100644 dev/tutorial/example_newsvendor/31380ebe.svg delete mode 100644 dev/tutorial/example_newsvendor/75368895.svg create mode 100644 dev/tutorial/example_newsvendor/e6131d3b.svg create mode 100644 dev/tutorial/example_reservoir/0e006628.svg rename dev/tutorial/example_reservoir/{c70d804a.svg => 3ebc5c2b.svg} (84%) rename dev/tutorial/example_reservoir/{b2cec97b.svg => 6352eda2.svg} (85%) rename dev/tutorial/example_reservoir/{f6caca7e.svg => 99239794.svg} (84%) rename dev/tutorial/example_reservoir/{1e774439.svg => b237d57b.svg} (85%) rename dev/tutorial/example_reservoir/{be9e547c.svg => b70cc688.svg} (84%) rename dev/tutorial/example_reservoir/{6735fb31.svg => ccc1a9fc.svg} (72%) delete mode 100644 dev/tutorial/example_reservoir/e5433df1.svg rename dev/tutorial/inventory/{1851bc25.svg => 273b59b4.svg} (84%) rename dev/tutorial/inventory/{98af5933.svg => 741393b0.svg} (84%) rename dev/tutorial/plotting/{69a2680c.svg => 07c31bb8.svg} (84%) diff --git a/dev/.documenter-siteinfo.json b/dev/.documenter-siteinfo.json index c036e5788..c4a503314 100644 --- a/dev/.documenter-siteinfo.json +++ 
b/dev/.documenter-siteinfo.json @@ -1 +1 @@ -{"documenter":{"julia_version":"1.11.1","generation_timestamp":"2024-11-28T00:09:49","documenter_version":"1.8.0"}} \ No newline at end of file +{"documenter":{"julia_version":"1.11.1","generation_timestamp":"2024-11-28T02:36:17","documenter_version":"1.8.0"}} \ No newline at end of file diff --git a/dev/apireference/index.html b/dev/apireference/index.html index 9e0d785a7..b4a2254a6 100644 --- a/dev/apireference/index.html +++ b/dev/apireference/index.html @@ -25,7 +25,7 @@ Nodes {} Arcs - {}source
SDDP.add_nodeFunction
add_node(graph::Graph{T}, node::T) where {T}

Add a node to the graph graph.

Examples

julia> graph = SDDP.Graph(:root);
+ {}
source
SDDP.add_nodeFunction
add_node(graph::Graph{T}, node::T) where {T}

Add a node to the graph graph.

Examples

julia> graph = SDDP.Graph(:root);
 
 julia> SDDP.add_node(graph, :A)
 
@@ -45,7 +45,7 @@
 Nodes
  2
 Arcs
- {}
source
SDDP.add_edgeFunction
add_edge(graph::Graph{T}, edge::Pair{T, T}, probability::Float64) where {T}

Add an edge to the graph graph.

Examples

julia> graph = SDDP.Graph(0);
+ {}
source
SDDP.add_edgeFunction
add_edge(graph::Graph{T}, edge::Pair{T, T}, probability::Float64) where {T}

Add an edge to the graph graph.

Examples

julia> graph = SDDP.Graph(0);
 
 julia> SDDP.add_node(graph, 1)
 
@@ -69,7 +69,7 @@
 Nodes
  A
 Arcs
- root => A w.p. 1.0
source
SDDP.add_ambiguity_setFunction
add_ambiguity_set(
+ root => A w.p. 1.0
source
SDDP.add_ambiguity_setFunction
add_ambiguity_set(
     graph::Graph{T},
     set::Vector{T},
     lipschitz::Vector{Float64},
@@ -102,7 +102,7 @@
  2 => 3 w.p. 1.0
 Partitions
  {1, 2}
- {3}
source
add_ambiguity_set(graph::Graph{T}, set::Vector{T}, lipschitz::Float64)

Add set to the belief partition of graph.

lipschitz is a Lipschitz constant for each node in set. The Lipschitz constant is the maximum slope of the cost-to-go function with respect to the belief state associated with each node at any point in the state-space.

Examples

julia> graph = SDDP.LinearGraph(3);
+ {3}
source
add_ambiguity_set(graph::Graph{T}, set::Vector{T}, lipschitz::Float64)

Add set to the belief partition of graph.

lipschitz is a Lipschitz constant for each node in set. The Lipschitz constant is the maximum slope of the cost-to-go function with respect to the belief state associated with each node at any point in the state-space.

Examples

julia> graph = SDDP.LinearGraph(3);
 
 julia> SDDP.add_ambiguity_set(graph, [1, 2], 1e3)
 
@@ -121,7 +121,7 @@
  2 => 3 w.p. 1.0
 Partitions
  {1, 2}
- {3}
source
SDDP.LinearGraphFunction
LinearGraph(stages::Int)

Create a linear graph with stages number of nodes.

Examples

julia> graph = SDDP.LinearGraph(3)
+ {3}
source
SDDP.LinearGraphFunction
LinearGraph(stages::Int)

Create a linear graph with stages number of nodes.

Examples

julia> graph = SDDP.LinearGraph(3)
 Root
  0
 Nodes
@@ -131,7 +131,7 @@
 Arcs
  0 => 1 w.p. 1.0
  1 => 2 w.p. 1.0
- 2 => 3 w.p. 1.0
source
SDDP.MarkovianGraphFunction
MarkovianGraph(transition_matrices::Vector{Matrix{Float64}})

Construct a Markovian graph from the vector of transition matrices.

transition_matrices[t][i, j] gives the probability of transitioning from Markov state i in stage t - 1 to Markov state j in stage t.

The dimension of the first transition matrix should be (1, N), and transition_matrics[1][1, i] is the probability of transitioning from the root node to the Markov state i.

Examples

julia> graph = SDDP.MarkovianGraph([ones(1, 1), [0.5 0.5], [0.8 0.2; 0.2 0.8]])
+ 2 => 3 w.p. 1.0
source
SDDP.MarkovianGraphFunction
MarkovianGraph(transition_matrices::Vector{Matrix{Float64}})

Construct a Markovian graph from the vector of transition matrices.

transition_matrices[t][i, j] gives the probability of transitioning from Markov state i in stage t - 1 to Markov state j in stage t.

The dimension of the first transition matrix should be (1, N), and transition_matrics[1][1, i] is the probability of transitioning from the root node to the Markov state i.

Examples

julia> graph = SDDP.MarkovianGraph([ones(1, 1), [0.5 0.5], [0.8 0.2; 0.2 0.8]])
 Root
  (0, 1)
 Nodes
@@ -147,7 +147,7 @@
  (2, 1) => (3, 1) w.p. 0.8
  (2, 1) => (3, 2) w.p. 0.2
  (2, 2) => (3, 1) w.p. 0.2
- (2, 2) => (3, 2) w.p. 0.8
source
MarkovianGraph(;
+ (2, 2) => (3, 2) w.p. 0.8
source
MarkovianGraph(;
     stages::Int,
     transition_matrix::Matrix{Float64},
     root_node_transition::Vector{Float64},
@@ -175,11 +175,11 @@
  (2, 1) => (3, 1) w.p. 0.8
  (2, 1) => (3, 2) w.p. 0.2
  (2, 2) => (3, 1) w.p. 0.2
- (2, 2) => (3, 2) w.p. 0.8
source
MarkovianGraph(
+ (2, 2) => (3, 2) w.p. 0.8
source
MarkovianGraph(
     simulator::Function;
     budget::Union{Int,Vector{Int}},
     scenarios::Int = 1000,
-)

Construct a Markovian graph by fitting Markov chain to scenarios generated by simulator().

budget is the total number of nodes in the resulting Markov chain. This can either be specified as a single Int, in which case we will attempt to intelligently distributed the nodes between stages. Alternatively, budget can be a Vector{Int}, which details the number of Markov state to have in each stage.

source
SDDP.UnicyclicGraphFunction
UnicyclicGraph(discount_factor::Float64; num_nodes::Int = 1)

Construct a graph composed of num_nodes nodes that form a single cycle, with a probability of discount_factor of continuing the cycle.

Examples

julia> graph = SDDP.UnicyclicGraph(0.9; num_nodes = 2)
+)

Construct a Markovian graph by fitting Markov chain to scenarios generated by simulator().

budget is the total number of nodes in the resulting Markov chain. This can either be specified as a single Int, in which case we will attempt to intelligently distributed the nodes between stages. Alternatively, budget can be a Vector{Int}, which details the number of Markov state to have in each stage.

source
SDDP.UnicyclicGraphFunction
UnicyclicGraph(discount_factor::Float64; num_nodes::Int = 1)

Construct a graph composed of num_nodes nodes that form a single cycle, with a probability of discount_factor of continuing the cycle.

Examples

julia> graph = SDDP.UnicyclicGraph(0.9; num_nodes = 2)
 Root
  0
 Nodes
@@ -188,7 +188,7 @@
 Arcs
  0 => 1 w.p. 1.0
  1 => 2 w.p. 1.0
- 2 => 1 w.p. 0.9
source
SDDP.LinearPolicyGraphFunction
LinearPolicyGraph(builder::Function; stages::Int, kwargs...)

Create a linear policy graph with stages number of stages.

Keyword arguments

  • stages: the number of stages in the graph

  • kwargs: other keyword arguments are passed to SDDP.PolicyGraph.

Examples

julia> SDDP.LinearPolicyGraph(; stages = 2, lower_bound = 0.0) do sp, t
+ 2 => 1 w.p. 0.9
source
SDDP.LinearPolicyGraphFunction
LinearPolicyGraph(builder::Function; stages::Int, kwargs...)

Create a linear policy graph with stages number of stages.

Keyword arguments

  • stages: the number of stages in the graph

  • kwargs: other keyword arguments are passed to SDDP.PolicyGraph.

Examples

julia> SDDP.LinearPolicyGraph(; stages = 2, lower_bound = 0.0) do sp, t
     # ... build model ...
 end
 A policy graph with 2 nodes.
@@ -198,7 +198,7 @@
     # ... build model ...
 end
 A policy graph with 2 nodes.
-Node indices: 1, 2
source
SDDP.MarkovianPolicyGraphFunction
MarkovianPolicyGraph(
+Node indices: 1, 2
source
SDDP.MarkovianPolicyGraphFunction
MarkovianPolicyGraph(
     builder::Function;
     transition_matrices::Vector{Array{Float64,2}},
     kwargs...
@@ -215,7 +215,7 @@
     # ... build model ...
 end
 A policy graph with 5 nodes.
- Node indices: (1, 1), (2, 1), (2, 2), (3, 1), (3, 2)
source
SDDP.PolicyGraphType
PolicyGraph(
+ Node indices: (1, 1), (2, 1), (2, 2), (3, 1), (3, 2)
source
SDDP.PolicyGraphType
PolicyGraph(
     builder::Function,
     graph::Graph{T};
     sense::Symbol = :Min,
@@ -237,28 +237,28 @@
     optimizer = HiGHS.Optimizer,
 ) do subproblem, index
     # ... subproblem definitions ...
-end
source

Subproblem definition

SDDP.@stageobjectiveMacro
@stageobjective(subproblem, expr)

Set the stage-objective of subproblem to expr.

Examples

@stageobjective(subproblem, 2x + y)
source
SDDP.parameterizeFunction
parameterize(
+end
source

Subproblem definition

SDDP.@stageobjectiveMacro
@stageobjective(subproblem, expr)

Set the stage-objective of subproblem to expr.

Examples

@stageobjective(subproblem, 2x + y)
source
SDDP.parameterizeFunction
parameterize(
     modify::Function,
     subproblem::JuMP.Model,
     realizations::Vector{T},
     probability::Vector{Float64} = fill(1.0 / length(realizations))
 ) where {T}

Add a parameterization function modify to subproblem. The modify function takes one argument and modifies subproblem based on the realization of the noise sampled from realizations with corresponding probabilities probability.

In order to conduct an out-of-sample simulation, modify should accept arguments that are not in realizations (but still of type T).

Examples

SDDP.parameterize(subproblem, [1, 2, 3], [0.4, 0.3, 0.3]) do ω
     JuMP.set_upper_bound(x, ω)
-end
source
parameterize(node::Node, noise)

Parameterize node node with the noise noise.

source
SDDP.add_objective_stateFunction
add_objective_state(update::Function, subproblem::JuMP.Model; kwargs...)

Add an objective state variable to subproblem.

Required kwargs are:

  • initial_value: The initial value of the objective state variable at the root node.
  • lipschitz: The lipschitz constant of the objective state variable.

Setting a tight value for the lipschitz constant can significantly improve the speed of convergence.

Optional kwargs are:

  • lower_bound: A valid lower bound for the objective state variable. Can be -Inf.
  • upper_bound: A valid upper bound for the objective state variable. Can be +Inf.

Setting tight values for these optional variables can significantly improve the speed of convergence.

If the objective state is N-dimensional, each keyword argument must be an NTuple{N,Float64}. For example, initial_value = (0.0, 1.0).

source
SDDP.objective_stateFunction
objective_state(subproblem::JuMP.Model)

Return the current objective state of the problem.

Can only be called from SDDP.parameterize.

source
SDDP.NoiseType
Noise(support, probability)

An atom of a discrete random variable at the point of support support and associated probability probability.

source

Training the policy

SDDP.numerical_stability_reportFunction
numerical_stability_report(
+end
source
parameterize(node::Node, noise)

Parameterize node node with the noise noise.

source
SDDP.add_objective_stateFunction
add_objective_state(update::Function, subproblem::JuMP.Model; kwargs...)

Add an objective state variable to subproblem.

Required kwargs are:

  • initial_value: The initial value of the objective state variable at the root node.
  • lipschitz: The lipschitz constant of the objective state variable.

Setting a tight value for the lipschitz constant can significantly improve the speed of convergence.

Optional kwargs are:

  • lower_bound: A valid lower bound for the objective state variable. Can be -Inf.
  • upper_bound: A valid upper bound for the objective state variable. Can be +Inf.

Setting tight values for these optional variables can significantly improve the speed of convergence.

If the objective state is N-dimensional, each keyword argument must be an NTuple{N,Float64}. For example, initial_value = (0.0, 1.0).

source
SDDP.objective_stateFunction
objective_state(subproblem::JuMP.Model)

Return the current objective state of the problem.

Can only be called from SDDP.parameterize.

source
SDDP.NoiseType
Noise(support, probability)

An atom of a discrete random variable at the point of support support and associated probability probability.

source

Training the policy

SDDP.numerical_stability_reportFunction
numerical_stability_report(
     [io::IO = stdout,]
     model::PolicyGraph;
     by_node::Bool = false,
     print::Bool = true,
     warn::Bool = true,
-)

Print a report identifying possible numeric stability issues.

Keyword arguments

  • If by_node, print a report for each node in the graph.

  • If print, print to io.

  • If warn, warn if the coefficients may cause numerical issues.

source
SDDP.trainFunction
SDDP.train(model::PolicyGraph; kwargs...)

Train the policy for model.

Keyword arguments

  • iteration_limit::Int: number of iterations to conduct before termination.

  • time_limit::Float64: number of seconds to train before termination.

  • stoping_rules: a vector of SDDP.AbstractStoppingRules. Defaults to SimulationStoppingRule.

  • print_level::Int: control the level of printing to the screen. Defaults to 1. Set to 0 to disable all printing.

  • log_file::String: filepath at which to write a log of the training progress. Defaults to SDDP.log.

  • log_frequency::Int: control the frequency with which the logging is outputted (iterations/log). It must be at least 1. Defaults to 1.

  • log_every_seconds::Float64: control the frequency with which the logging is outputted (seconds/log). Defaults to 0.0.

  • log_every_iteration::Bool; over-rides log_frequency and log_every_seconds to force every iteration to be printed. Defaults to false.

  • run_numerical_stability_report::Bool: generate (and print) a numerical stability report prior to solve. Defaults to true.

  • refine_at_similar_nodes::Bool: if SDDP can detect that two nodes have the same children, it can cheaply add a cut discovered at one to the other. In almost all cases this should be set to true.

  • cut_deletion_minimum::Int: the minimum number of cuts to cache before deleting cuts from the subproblem. The impact on performance is solver specific; however, smaller values result in smaller subproblems (and therefore quicker solves), at the expense of more time spent performing cut selection.

  • risk_measure: the risk measure to use at each node. Defaults to Expectation.

  • root_node_risk_measure::AbstractRiskMeasure: the risk measure to use at the root node when computing the Bound column. Note that the choice of this option does not change the primal policy, and it applies only if the transition from the root node to the first stage is stochastic. Defaults to Expectation.

  • sampling_scheme: a sampling scheme to use on the forward pass of the algorithm. Defaults to InSampleMonteCarlo.

  • backward_sampling_scheme: a backward pass sampling scheme to use on the backward pass of the algorithm. Defaults to CompleteSampler.

  • cut_type: choose between SDDP.SINGLE_CUT and SDDP.MULTI_CUT versions of SDDP.

  • dashboard::Bool: open a visualization of the training over time. Defaults to false.

  • parallel_scheme::AbstractParallelScheme: specify a scheme for solving in parallel. Defaults to Threaded().

  • forward_pass::AbstractForwardPass: specify a scheme to use for the forward passes.

  • forward_pass_resampling_probability::Union{Nothing,Float64}: set to a value in (0, 1) to enable RiskAdjustedForwardPass. Defaults to nothing (disabled).

  • add_to_existing_cuts::Bool: set to true to allow training a model that was previously trained. Defaults to false.

  • duality_handler::AbstractDualityHandler: specify a duality handler to use when creating cuts.

  • post_iteration_callback::Function: a callback with the signature post_iteration_callback(::IterationResult) that is evaluated after each iteration of the algorithm.

There is also a special option for infinite horizon problems

  • cycle_discretization_delta: the maximum distance between states allowed on the forward pass. This is for advanced users only and needs to be used in conjunction with a different sampling_scheme.
source
SDDP.termination_statusFunction
termination_status(model::PolicyGraph)::Symbol

Query the reason why the training stopped.

source
SDDP.write_cuts_to_fileFunction
write_cuts_to_file(
+)

Print a report identifying possible numeric stability issues.

Keyword arguments

  • If by_node, print a report for each node in the graph.

  • If print, print to io.

  • If warn, warn if the coefficients may cause numerical issues.

source
SDDP.trainFunction
SDDP.train(model::PolicyGraph; kwargs...)

Train the policy for model.

Keyword arguments

  • iteration_limit::Int: number of iterations to conduct before termination.

  • time_limit::Float64: number of seconds to train before termination.

  • stoping_rules: a vector of SDDP.AbstractStoppingRules. Defaults to SimulationStoppingRule.

  • print_level::Int: control the level of printing to the screen. Defaults to 1. Set to 0 to disable all printing.

  • log_file::String: filepath at which to write a log of the training progress. Defaults to SDDP.log.

  • log_frequency::Int: control the frequency with which the logging is outputted (iterations/log). It must be at least 1. Defaults to 1.

  • log_every_seconds::Float64: control the frequency with which the logging is outputted (seconds/log). Defaults to 0.0.

  • log_every_iteration::Bool; over-rides log_frequency and log_every_seconds to force every iteration to be printed. Defaults to false.

  • run_numerical_stability_report::Bool: generate (and print) a numerical stability report prior to solve. Defaults to true.

  • refine_at_similar_nodes::Bool: if SDDP can detect that two nodes have the same children, it can cheaply add a cut discovered at one to the other. In almost all cases this should be set to true.

  • cut_deletion_minimum::Int: the minimum number of cuts to cache before deleting cuts from the subproblem. The impact on performance is solver specific; however, smaller values result in smaller subproblems (and therefore quicker solves), at the expense of more time spent performing cut selection.

  • risk_measure: the risk measure to use at each node. Defaults to Expectation.

  • root_node_risk_measure::AbstractRiskMeasure: the risk measure to use at the root node when computing the Bound column. Note that the choice of this option does not change the primal policy, and it applies only if the transition from the root node to the first stage is stochastic. Defaults to Expectation.

  • sampling_scheme: a sampling scheme to use on the forward pass of the algorithm. Defaults to InSampleMonteCarlo.

  • backward_sampling_scheme: a backward pass sampling scheme to use on the backward pass of the algorithm. Defaults to CompleteSampler.

  • cut_type: choose between SDDP.SINGLE_CUT and SDDP.MULTI_CUT versions of SDDP.

  • dashboard::Bool: open a visualization of the training over time. Defaults to false.

  • parallel_scheme::AbstractParallelScheme: specify a scheme for solving in parallel. Defaults to Threaded().

  • forward_pass::AbstractForwardPass: specify a scheme to use for the forward passes.

  • forward_pass_resampling_probability::Union{Nothing,Float64}: set to a value in (0, 1) to enable RiskAdjustedForwardPass. Defaults to nothing (disabled).

  • add_to_existing_cuts::Bool: set to true to allow training a model that was previously trained. Defaults to false.

  • duality_handler::AbstractDualityHandler: specify a duality handler to use when creating cuts.

  • post_iteration_callback::Function: a callback with the signature post_iteration_callback(::IterationResult) that is evaluated after each iteration of the algorithm.

There is also a special option for infinite horizon problems

  • cycle_discretization_delta: the maximum distance between states allowed on the forward pass. This is for advanced users only and needs to be used in conjunction with a different sampling_scheme.
source
SDDP.termination_statusFunction
termination_status(model::PolicyGraph)::Symbol

Query the reason why the training stopped.

source
SDDP.write_cuts_to_fileFunction
write_cuts_to_file(
     model::PolicyGraph{T},
     filename::String;
     kwargs...,
-) where {T}

Write the cuts that form the policy in model to filename in JSON format.

Keyword arguments

  • node_name_parser is a function which converts the name of each node into a string representation. It has the signature: node_name_parser(::T)::String.

  • write_only_selected_cuts write only the selected cuts to the json file. Defaults to false.

See also SDDP.read_cuts_from_file.

source
SDDP.read_cuts_from_fileFunction
read_cuts_from_file(
+) where {T}

Write the cuts that form the policy in model to filename in JSON format.

Keyword arguments

  • node_name_parser is a function which converts the name of each node into a string representation. It has the signature: node_name_parser(::T)::String.

  • write_only_selected_cuts write only the selected cuts to the json file. Defaults to false.

See also SDDP.read_cuts_from_file.

source
SDDP.read_cuts_from_fileFunction
read_cuts_from_file(
     model::PolicyGraph{T},
     filename::String;
     kwargs...,
-) where {T}

Read cuts (saved using SDDP.write_cuts_to_file) from filename into model.

Since T can be an arbitrary Julia type, the conversion to JSON is lossy. When reading, read_cuts_from_file only supports T=Int, T=NTuple{N, Int}, and T=Symbol. If you have manually created a policy graph with a different node type T, provide a function node_name_parser with the signature

Keyword arguments

  • node_name_parser(T, name::String)::T where {T} that returns the name of each node given the string name name. If node_name_parser returns nothing, those cuts are skipped.

  • cut_selection::Bool run or not the cut selection algorithm when adding the cuts to the model.

See also SDDP.write_cuts_to_file.

source
SDDP.write_log_to_csvFunction
write_log_to_csv(model::PolicyGraph, filename::String)

Write the log of the most recent training to a csv for post-analysis.

Assumes that the model has been trained via SDDP.train.

source
SDDP.set_numerical_difficulty_callbackFunction
set_numerical_difficulty_callback(
+) where {T}

Read cuts (saved using SDDP.write_cuts_to_file) from filename into model.

Since T can be an arbitrary Julia type, the conversion to JSON is lossy. When reading, read_cuts_from_file only supports T=Int, T=NTuple{N, Int}, and T=Symbol. If you have manually created a policy graph with a different node type T, provide a function node_name_parser with the signature

Keyword arguments

  • node_name_parser(T, name::String)::T where {T} that returns the name of each node given the string name name. If node_name_parser returns nothing, those cuts are skipped.

  • cut_selection::Bool run or not the cut selection algorithm when adding the cuts to the model.

See also SDDP.write_cuts_to_file.

source
SDDP.write_log_to_csvFunction
write_log_to_csv(model::PolicyGraph, filename::String)

Write the log of the most recent training to a csv for post-analysis.

Assumes that the model has been trained via SDDP.train.

source
SDDP.set_numerical_difficulty_callbackFunction
set_numerical_difficulty_callback(
     model::PolicyGraph,
     callback::Function,
 )

Set a callback function callback(::PolicyGraph, ::Node; require_dual::Bool) that is run when the optimizer terminates without finding a primal solution (and dual solution if require_dual is true).

Default callback

The default callback is a small variation of:

function callback(::PolicyGraph, node::Node; require_dual::Bool)
@@ -274,29 +274,29 @@
     end
     return
 end
-SDDP.set_numerical_difficulty_callback(model, callback)
source

Stopping rules

SDDP.AbstractStoppingRuleType
AbstractStoppingRule

The abstract type for the stopping-rule interface.

You need to define the following methods:

source
SDDP.stopping_rule_statusFunction
stopping_rule_status(::AbstractStoppingRule)::Symbol

Return a symbol describing the stopping rule.

source
SDDP.convergence_testFunction
convergence_test(
+SDDP.set_numerical_difficulty_callback(model, callback)
source

Stopping rules

SDDP.AbstractStoppingRuleType
AbstractStoppingRule

The abstract type for the stopping-rule interface.

You need to define the following methods:

source
SDDP.stopping_rule_statusFunction
stopping_rule_status(::AbstractStoppingRule)::Symbol

Return a symbol describing the stopping rule.

source
SDDP.convergence_testFunction
convergence_test(
     model::PolicyGraph,
     log::Vector{Log},
     ::AbstractStoppingRule,
-)::Bool

Return a Bool indicating if the algorithm should terminate the training.

source
SDDP.IterationLimitType
IterationLimit(limit::Int)

Teriminate the algorithm after limit number of iterations.

source
SDDP.TimeLimitType
TimeLimit(limit::Float64)

Teriminate the algorithm after limit seconds of computation.

source
SDDP.StatisticalType
Statistical(;
+)::Bool

Return a Bool indicating if the algorithm should terminate the training.

source
SDDP.IterationLimitType
IterationLimit(limit::Int)

Teriminate the algorithm after limit number of iterations.

source
SDDP.TimeLimitType
TimeLimit(limit::Float64)

Teriminate the algorithm after limit seconds of computation.

source
SDDP.StatisticalType
Statistical(;
     num_replications::Int,
     iteration_period::Int = 1,
     z_score::Float64 = 1.96,
     verbose::Bool = true,
     disable_warning::Bool = false,
-)

Perform an in-sample Monte Carlo simulation of the policy with num_replications replications every iteration_periods and terminate if the deterministic bound (lower if minimizing) falls into the confidence interval for the mean of the simulated cost.

If verbose = true, print the confidence interval.

If disable_warning = true, disable the warning telling you not to use this stopping rule (see below).

Why this stopping rule is not good

This stopping rule is one of the most common stopping rules seen in the literature. Don't follow the crowd. It is a poor choice for your model, and should be rarely used. Instead, you should use the default stopping rule, or use a fixed limit like a time or iteration limit.

To understand why this stopping rule is a bad idea, assume we have conducted num_replications simulations and the objectives are in a vector objectives::Vector{Float64}.

Our mean is μ = mean(objectives) and the half-width of the confidence interval is w = z_score * std(objectives) / sqrt(num_replications).

Many papers suggest terminating the algorithm once the deterministic bound (lower if minimizing, upper if maximizing) is contained within the confidence interval. That is, if μ - w <= bound <= μ + w. Even worse, some papers define an optimization gap of (μ + w) / bound (if minimizing) or (μ - w) / bound (if maximizing), and they terminate once the gap is less than a value like 1%.

Both of these approaches are misleading, and more often than not, they will result in terminating with a sub-optimal policy that performs worse than expected. There are two main reasons for this:

  1. The half-width depends on the number of replications. To reduce the computational cost, users are often tempted to choose a small number of replications. This increases the half-width and makes it more likely that the algorithm will stop early. But if we choose a large number of replications, then the computational cost is high, and we would have been better off to run a fixed number of iterations and use that computational time to run extra training iterations.
  2. The confidence interval assumes that the simulated values are normally distributed. In infinite horizon models, this is almost never the case. The distribution is usually closer to exponential or log-normal.

There is a third, more technical reason which relates to the conditional dependence of constructing multiple confidence intervals.

The default value of z_score = 1.96 corresponds to a 95% confidence interval. You should interpret the interval as "if we re-run this simulation 100 times, then the true mean will lie in the confidence interval 95 times out of 100." But if the bound is within the confidence interval, then we know the true mean cannot be better than the bound. Therefore, there is a more than 95% chance that the mean is within the interval.

A separate problem arises if we simulate, find that the bound is outside the confidence interval, keep training, and then re-simulate to compute a new confidence interval. Because we will terminate when the bound enters the confidence interval, the repeated construction of a confidence interval means that the unconditional probability that we terminate with a false positive is larger than 5% (there are now more chances that the sample mean is optimistic and that the confidence interval includes the bound but not the true mean). One fix is to simulate with a sequentially increasing number of replicates, so that the unconditional probability stays at 95%, but this runs into the problem of computational cost. For more information on sequential sampling, see, for example, Güzin Bayraksan, David P. Morton, (2011) A Sequential Sampling Procedure for Stochastic Programming. Operations Research 59(4):898-913.

source
SDDP.BoundStallingType
BoundStalling(num_previous_iterations::Int, tolerance::Float64)

Terminate the algorithm once the deterministic bound (lower if minimizing, upper if maximizing) fails to improve by more than tolerance in absolute terms for more than num_previous_iterations consecutive iterations, provided it has improved relative to the bound after the first iteration.

Checking for an improvement relative to the first iteration avoids early termination in a situation where the bound fails to improve for the first N iterations. This frequently happens in models with a large number of stages, where it takes time for the cuts to propagate backward enough to modify the bound of the root node.

source
SDDP.StoppingChainType
StoppingChain(rules::AbstractStoppingRule...)

Terminate once all of the rules are satisfied.

This stopping rule short-circuits, so subsequent rules are only tested if the previous rules pass.

Examples

A stopping rule that runs 100 iterations, then checks for the bound stalling:

StoppingChain(IterationLimit(100), BoundStalling(5, 0.1))
source
SDDP.SimulationStoppingRuleType
SimulationStoppingRule(;
+)

Perform an in-sample Monte Carlo simulation of the policy with num_replications replications every iteration_period iterations and terminate if the deterministic bound (lower if minimizing) falls into the confidence interval for the mean of the simulated cost.

If verbose = true, print the confidence interval.

If disable_warning = true, disable the warning telling you not to use this stopping rule (see below).

Why this stopping rule is not good

This stopping rule is one of the most common stopping rules seen in the literature. Don't follow the crowd. It is a poor choice for your model, and should be rarely used. Instead, you should use the default stopping rule, or use a fixed limit like a time or iteration limit.

To understand why this stopping rule is a bad idea, assume we have conducted num_replications simulations and the objectives are in a vector objectives::Vector{Float64}.

Our mean is μ = mean(objectives) and the half-width of the confidence interval is w = z_score * std(objectives) / sqrt(num_replications).

Many papers suggest terminating the algorithm once the deterministic bound (lower if minimizing, upper if maximizing) is contained within the confidence interval. That is, if μ - w <= bound <= μ + w. Even worse, some papers define an optimization gap of (μ + w) / bound (if minimizing) or (μ - w) / bound (if maximizing), and they terminate once the gap is less than a value like 1%.

Both of these approaches are misleading, and more often than not, they will result in terminating with a sub-optimal policy that performs worse than expected. There are two main reasons for this:

  1. The half-width depends on the number of replications. To reduce the computational cost, users are often tempted to choose a small number of replications. This increases the half-width and makes it more likely that the algorithm will stop early. But if we choose a large number of replications, then the computational cost is high, and we would have been better off to run a fixed number of iterations and use that computational time to run extra training iterations.
  2. The confidence interval assumes that the simulated values are normally distributed. In infinite horizon models, this is almost never the case. The distribution is usually closer to exponential or log-normal.

There is a third, more technical reason which relates to the conditional dependence of constructing multiple confidence intervals.

The default value of z_score = 1.96 corresponds to a 95% confidence interval. You should interpret the interval as "if we re-run this simulation 100 times, then the true mean will lie in the confidence interval 95 times out of 100." But if the bound is within the confidence interval, then we know the true mean cannot be better than the bound. Therefore, there is a more than 95% chance that the mean is within the interval.

A separate problem arises if we simulate, find that the bound is outside the confidence interval, keep training, and then re-simulate to compute a new confidence interval. Because we will terminate when the bound enters the confidence interval, the repeated construction of a confidence interval means that the unconditional probability that we terminate with a false positive is larger than 5% (there are now more chances that the sample mean is optimistic and that the confidence interval includes the bound but not the true mean). One fix is to simulate with a sequentially increasing number of replicates, so that the unconditional probability stays at 95%, but this runs into the problem of computational cost. For more information on sequential sampling, see, for example, Güzin Bayraksan, David P. Morton, (2011) A Sequential Sampling Procedure for Stochastic Programming. Operations Research 59(4):898-913.

source
SDDP.BoundStallingType
BoundStalling(num_previous_iterations::Int, tolerance::Float64)

Terminate the algorithm once the deterministic bound (lower if minimizing, upper if maximizing) fails to improve by more than tolerance in absolute terms for more than num_previous_iterations consecutive iterations, provided it has improved relative to the bound after the first iteration.

Checking for an improvement relative to the first iteration avoids early termination in a situation where the bound fails to improve for the first N iterations. This frequently happens in models with a large number of stages, where it takes time for the cuts to propagate backward enough to modify the bound of the root node.

source
SDDP.StoppingChainType
StoppingChain(rules::AbstractStoppingRule...)

Terminate once all of the rules are satisfied.

This stopping rule short-circuits, so subsequent rules are only tested if the previous rules pass.

Examples

A stopping rule that runs 100 iterations, then checks for the bound stalling:

StoppingChain(IterationLimit(100), BoundStalling(5, 0.1))
source
SDDP.SimulationStoppingRuleType
SimulationStoppingRule(;
     sampling_scheme::AbstractSamplingScheme = SDDP.InSampleMonteCarlo(),
     replications::Int = -1,
     period::Int = -1,
     distance_tol::Float64 = 1e-2,
     bound_tol::Float64 = 1e-4,
-)

Terminate the algorithm using a mix of heuristics. Unless you know otherwise, this is typically a good default.

Termination criteria

First, we check that the deterministic bound has stabilized. That is, over the last five iterations, the deterministic bound has changed by less than an absolute or relative tolerance of bound_tol.

Then, if we have not done one in the last period iterations, we perform a primal simulation of the policy using replications out-of-sample realizations from sampling_scheme. The realizations are stored and re-used in each simulation. From each simulation, we record the value of the stage objective. We terminate the policy if each of the trajectories in two consecutive simulations differ by less than distance_tol.

By default, replications and period are -1, and SDDP.jl will guess good values for these. Over-ride the default behavior by setting an appropriate value.

Example

SDDP.train(model; stopping_rules = [SimulationStoppingRule()])
source
SDDP.FirstStageStoppingRuleType
FirstStageStoppingRule(; atol::Float64 = 1e-3, iterations::Int = 50)

Terminate the algorithm when the outgoing values of the first-stage state variables have not changed by more than atol for iterations number of consecutive iterations.

Example

SDDP.train(model; stopping_rules = [FirstStageStoppingRule()])
source

Sampling schemes

SDDP.AbstractSamplingSchemeType
AbstractSamplingScheme

The abstract type for the sampling-scheme interface.

You need to define the following methods:

source
SDDP.sample_scenarioFunction
sample_scenario(graph::PolicyGraph{T}, ::AbstractSamplingScheme) where {T}

Sample a scenario from the policy graph graph based on the sampling scheme.

Returns ::Tuple{Vector{Tuple{T, <:Any}}, Bool}, where the first element is the scenario, and the second element is a Boolean flag indicating if the scenario was terminated due to the detection of a cycle.

The scenario is a list of tuples (type Vector{Tuple{T, <:Any}}) where the first component of each tuple is the index of the node, and the second component is the stagewise-independent noise term observed in that node.

source
SDDP.InSampleMonteCarloType
InSampleMonteCarlo(;
+)

Terminate the algorithm using a mix of heuristics. Unless you know otherwise, this is typically a good default.

Termination criteria

First, we check that the deterministic bound has stabilized. That is, over the last five iterations, the deterministic bound has changed by less than an absolute or relative tolerance of bound_tol.

Then, if we have not done one in the last period iterations, we perform a primal simulation of the policy using replications out-of-sample realizations from sampling_scheme. The realizations are stored and re-used in each simulation. From each simulation, we record the value of the stage objective. We terminate the policy if each of the trajectories in two consecutive simulations differ by less than distance_tol.

By default, replications and period are -1, and SDDP.jl will guess good values for these. Over-ride the default behavior by setting an appropriate value.

Example

SDDP.train(model; stopping_rules = [SimulationStoppingRule()])
source
SDDP.FirstStageStoppingRuleType
FirstStageStoppingRule(; atol::Float64 = 1e-3, iterations::Int = 50)

Terminate the algorithm when the outgoing values of the first-stage state variables have not changed by more than atol for iterations number of consecutive iterations.

Example

SDDP.train(model; stopping_rules = [FirstStageStoppingRule()])
source

Sampling schemes

SDDP.AbstractSamplingSchemeType
AbstractSamplingScheme

The abstract type for the sampling-scheme interface.

You need to define the following methods:

source
SDDP.sample_scenarioFunction
sample_scenario(graph::PolicyGraph{T}, ::AbstractSamplingScheme) where {T}

Sample a scenario from the policy graph graph based on the sampling scheme.

Returns ::Tuple{Vector{Tuple{T, <:Any}}, Bool}, where the first element is the scenario, and the second element is a Boolean flag indicating if the scenario was terminated due to the detection of a cycle.

The scenario is a list of tuples (type Vector{Tuple{T, <:Any}}) where the first component of each tuple is the index of the node, and the second component is the stagewise-independent noise term observed in that node.

source
SDDP.InSampleMonteCarloType
InSampleMonteCarlo(;
     max_depth::Int = 0,
     terminate_on_cycle::Function = false,
     terminate_on_dummy_leaf::Function = true,
     rollout_limit::Function = (i::Int) -> typemax(Int),
     initial_node::Any = nothing,
-)

A Monte Carlo sampling scheme using the in-sample data from the policy graph definition.

If terminate_on_cycle, terminate the forward pass once a cycle is detected. If max_depth > 0, return once max_depth nodes have been sampled. If terminate_on_dummy_leaf, terminate the forward pass with 1 - probability of sampling a child node.

Note that if terminate_on_cycle = false and terminate_on_dummy_leaf = false then max_depth must be set > 0.

Control which node the trajectories start from using initial_node. If it is left as nothing, the root node is used as the starting node.

You can use rollout_limit to set iteration specific depth limits. For example:

InSampleMonteCarlo(rollout_limit = i -> 2 * i)
source
SDDP.OutOfSampleMonteCarloType
OutOfSampleMonteCarlo(
+)

A Monte Carlo sampling scheme using the in-sample data from the policy graph definition.

If terminate_on_cycle, terminate the forward pass once a cycle is detected. If max_depth > 0, return once max_depth nodes have been sampled. If terminate_on_dummy_leaf, terminate the forward pass with 1 - probability of sampling a child node.

Note that if terminate_on_cycle = false and terminate_on_dummy_leaf = false then max_depth must be set > 0.

Control which node the trajectories start from using initial_node. If it is left as nothing, the root node is used as the starting node.

You can use rollout_limit to set iteration specific depth limits. For example:

InSampleMonteCarlo(rollout_limit = i -> 2 * i)
source
SDDP.OutOfSampleMonteCarloType
OutOfSampleMonteCarlo(
     f::Function,
     graph::PolicyGraph;
     use_insample_transition::Bool = false,
@@ -315,7 +315,7 @@
     end
 end

Given linear policy graph graph with T stages:

sampler = OutOfSampleMonteCarlo(graph, use_insample_transition=true) do node
     return [SDDP.Noise(node, 0.3), SDDP.Noise(node + 1, 0.7)]
-end
source
SDDP.HistoricalType
Historical(
+end
source
SDDP.HistoricalType
Historical(
     scenarios::Vector{Vector{Tuple{T,S}}},
     probability::Vector{Float64};
     terminate_on_cycle::Bool = false,
@@ -326,17 +326,17 @@
         [(1, 1.0), (2, 0.0), (3, 0.0)]
     ],
     [0.2, 0.5, 0.3],
-)
source
Historical(
+)
source
Historical(
     scenarios::Vector{Vector{Tuple{T,S}}};
     terminate_on_cycle::Bool = false,
 ) where {T,S}

A deterministic sampling scheme that iterates through the vector of provided scenarios.

Examples

Historical([
     [(1, 0.5), (2, 1.0), (3, 0.5)],
     [(1, 0.5), (2, 0.0), (3, 1.0)],
     [(1, 1.0), (2, 0.0), (3, 0.0)],
-])
source
Historical(
+])
source
Historical(
     scenario::Vector{Tuple{T,S}};
     terminate_on_cycle::Bool = false,
-) where {T,S}

A deterministic sampling scheme that always samples scenario.

Examples

Historical([(1, 0.5), (2, 1.5), (3, 0.75)])
source
SDDP.PSRSamplingSchemeType
PSRSamplingScheme(N::Int; sampling_scheme = InSampleMonteCarlo())

A sampling scheme with N scenarios, similar to how PSR does it.

source
SDDP.SimulatorSamplingSchemeType
SimulatorSamplingScheme(simulator::Function)

Create a sampling scheme based on a univariate scenario generator simulator, which returns a Vector{Float64} when called with no arguments like simulator().

This sampling scheme must be used with a Markovian graph constructed from the same simulator.

The sample space for SDDP.parameterize must be a tuple with 1 or 2 values, where the first value is the Markov state and the second value is the random variable for the current node. If the node is deterministic, use Ω = [(markov_state,)].

This sampling scheme generates a new scenario by calling simulator(), and then picking the sequence of nodes in the Markovian graph that is closest to the new trajectory.

Example

julia> using SDDP
+) where {T,S}

A deterministic sampling scheme that always samples scenario.

Examples

Historical([(1, 0.5), (2, 1.5), (3, 0.75)])
source
SDDP.PSRSamplingSchemeType
PSRSamplingScheme(N::Int; sampling_scheme = InSampleMonteCarlo())

A sampling scheme with N scenarios, similar to how PSR does it.

source
SDDP.SimulatorSamplingSchemeType
SimulatorSamplingScheme(simulator::Function)

Create a sampling scheme based on a univariate scenario generator simulator, which returns a Vector{Float64} when called with no arguments like simulator().

This sampling scheme must be used with a Markovian graph constructed from the same simulator.

The sample space for SDDP.parameterize must be a tuple with 1 or 2 values, where the first value is the Markov state and the second value is the random variable for the current node. If the node is deterministic, use Ω = [(markov_state,)].

This sampling scheme generates a new scenario by calling simulator(), and then picking the sequence of nodes in the Markovian graph that is closest to the new trajectory.

Example

julia> using SDDP
 
 julia> import HiGHS
 
@@ -368,50 +368,50 @@
            iteration_limit = 10,
            sampling_scheme = SDDP.SimulatorSamplingScheme(simulator),
        )
-
source

Parallel schemes

SDDP.AbstractParallelSchemeType
AbstractParallelScheme

Abstract type for different parallelism schemes.

source
SDDP.SerialType
Serial()

Run SDDP in serial mode.

source
SDDP.ThreadedType
Threaded()

Run SDDP in multi-threaded mode.

Use julia --threads N to start Julia with N threads. In most cases, you should pick N to be the number of physical cores on your machine.

Danger

This plug-in is experimental, and parts of SDDP.jl may not be threadsafe. If you encounter any problems or crashes, please open a GitHub issue.

Example

SDDP.train(model; parallel_scheme = SDDP.Threaded())
-SDDP.simulate(model; parallel_scheme = SDDP.Threaded())
source
SDDP.AsynchronousType
Asynchronous(
+
source

Parallel schemes

SDDP.AbstractParallelSchemeType
AbstractParallelScheme

Abstract type for different parallelism schemes.

source
SDDP.SerialType
Serial()

Run SDDP in serial mode.

source
SDDP.ThreadedType
Threaded()

Run SDDP in multi-threaded mode.

Use julia --threads N to start Julia with N threads. In most cases, you should pick N to be the number of physical cores on your machine.

Danger

This plug-in is experimental, and parts of SDDP.jl may not be threadsafe. If you encounter any problems or crashes, please open a GitHub issue.

Example

SDDP.train(model; parallel_scheme = SDDP.Threaded())
+SDDP.simulate(model; parallel_scheme = SDDP.Threaded())
source
SDDP.AsynchronousType
Asynchronous(
     [init_callback::Function,]
     slave_pids::Vector{Int} = workers();
     use_master::Bool = true,
-)

Run SDDP in asynchronous mode on workers with pids slave_pids.

After initializing the models on each worker, call init_callback(model). Note that init_callback is run locally on the worker and not on the master thread.

If use_master is true, iterations are also conducted on the master process.

source
Asynchronous(
+)

Run SDDP in asynchronous mode on workers with pids slave_pids.

After initializing the models on each worker, call init_callback(model). Note that init_callback is run locally on the worker and not on the master thread.

If use_master is true, iterations are also conducted on the master process.

source
Asynchronous(
     solver::Any,
     slave_pids::Vector{Int} = workers();
     use_master::Bool = true,
-)

Run SDDP in asynchronous mode on workers with pids slave_pids.

Set the optimizer on each worker by calling JuMP.set_optimizer(model, solver).

source

Forward passes

SDDP.AbstractForwardPassType
AbstractForwardPass

Abstract type for different forward passes.

source
SDDP.DefaultForwardPassType
DefaultForwardPass(; include_last_node::Bool = true)

The default forward pass.

If include_last_node = false and the sample terminated due to a cycle, then the last node (which forms the cycle) is omitted. This can be useful option to set when training, but it comes at the cost of not knowing which node formed the cycle (if there are multiple possibilities).

source
SDDP.RevisitingForwardPassType
RevisitingForwardPass(
+)

Run SDDP in asynchronous mode on workers with pids slave_pids.

Set the optimizer on each worker by calling JuMP.set_optimizer(model, solver).

source

Forward passes

SDDP.AbstractForwardPassType
AbstractForwardPass

Abstract type for different forward passes.

source
SDDP.DefaultForwardPassType
DefaultForwardPass(; include_last_node::Bool = true)

The default forward pass.

If include_last_node = false and the sample terminated due to a cycle, then the last node (which forms the cycle) is omitted. This can be useful option to set when training, but it comes at the cost of not knowing which node formed the cycle (if there are multiple possibilities).

source
SDDP.RevisitingForwardPassType
RevisitingForwardPass(
     period::Int = 500;
     sub_pass::AbstractForwardPass = DefaultForwardPass(),
-)

A forward pass scheme that generate period new forward passes (using sub_pass), then revisits all previously explored forward passes. This can be useful to encourage convergence at a diversity of points in the state-space.

Set period = typemax(Int) to disable.

For example, if period = 2, then the forward passes will be revisited as follows: 1, 2, 1, 2, 3, 4, 1, 2, 3, 4, 5, 6, 1, 2, ....

source
SDDP.RiskAdjustedForwardPassType
RiskAdjustedForwardPass(;
+)

A forward pass scheme that generate period new forward passes (using sub_pass), then revisits all previously explored forward passes. This can be useful to encourage convergence at a diversity of points in the state-space.

Set period = typemax(Int) to disable.

For example, if period = 2, then the forward passes will be revisited as follows: 1, 2, 1, 2, 3, 4, 1, 2, 3, 4, 5, 6, 1, 2, ....

source
SDDP.RiskAdjustedForwardPassType
RiskAdjustedForwardPass(;
     forward_pass::AbstractForwardPass,
     risk_measure::AbstractRiskMeasure,
     resampling_probability::Float64,
     rejection_count::Int = 5,
-)

A forward pass that resamples a previous forward pass with resampling_probability probability, and otherwise samples a new forward pass using forward_pass.

The forward pass to revisit is chosen based on the risk-adjusted (using risk_measure) probability of the cumulative stage objectives.

Note that this objective corresponds to the first time we visited the trajectory. Subsequent visits may have improved things, but we don't have the mechanisms in-place to update it. Therefore, remove the forward pass from resampling consideration after rejection_count revisits.

source
SDDP.AlternativeForwardPassType
AlternativeForwardPass(
+)

A forward pass that resamples a previous forward pass with resampling_probability probability, and otherwise samples a new forward pass using forward_pass.

The forward pass to revisit is chosen based on the risk-adjusted (using risk_measure) probability of the cumulative stage objectives.

Note that this objective corresponds to the first time we visited the trajectory. Subsequent visits may have improved things, but we don't have the mechanisms in-place to update it. Therefore, remove the forward pass from resampling consideration after rejection_count revisits.

source
SDDP.AlternativeForwardPassType
AlternativeForwardPass(
     forward_model::SDDP.PolicyGraph{T};
     forward_pass::AbstractForwardPass = DefaultForwardPass(),
-)

A forward pass that simulates using forward_model, which may be different to the model used in the backwards pass.

When using this forward pass, you should almost always pass SDDP.AlternativePostIterationCallback to the post_iteration_callback argument of SDDP.train.

This forward pass is most useful when the forward_model is non-convex and we use a convex approximation of the model in the backward pass.

For example, in optimal power flow models, we can use an AC-OPF formulation as the forward_model and a DC-OPF formulation as the backward model.

For more details see the paper:

Rosemberg, A., and Street, A., and Garcia, J.D., and Valladão, D.M., and Silva, T., and Dowson, O. (2021). Assessing the cost of network simplifications in long-term hydrothermal dispatch planning models. IEEE Transactions on Sustainable Energy. 13(1), 196-206.

source
SDDP.AlternativePostIterationCallbackType
AlternativePostIterationCallback(forward_model::PolicyGraph)

A post-iteration callback that should be used whenever SDDP.AlternativeForwardPass is used.

source
SDDP.RegularizedForwardPassType
RegularizedForwardPass(;
+)

A forward pass that simulates using forward_model, which may be different to the model used in the backwards pass.

When using this forward pass, you should almost always pass SDDP.AlternativePostIterationCallback to the post_iteration_callback argument of SDDP.train.

This forward pass is most useful when the forward_model is non-convex and we use a convex approximation of the model in the backward pass.

For example, in optimal power flow models, we can use an AC-OPF formulation as the forward_model and a DC-OPF formulation as the backward model.

For more details see the paper:

Rosemberg, A., and Street, A., and Garcia, J.D., and Valladão, D.M., and Silva, T., and Dowson, O. (2021). Assessing the cost of network simplifications in long-term hydrothermal dispatch planning models. IEEE Transactions on Sustainable Energy. 13(1), 196-206.

source
SDDP.AlternativePostIterationCallbackType
AlternativePostIterationCallback(forward_model::PolicyGraph)

A post-iteration callback that should be used whenever SDDP.AlternativeForwardPass is used.

source
SDDP.RegularizedForwardPassType
RegularizedForwardPass(;
     rho::Float64 = 0.05,
     forward_pass::AbstractForwardPass = DefaultForwardPass(),
-)

A forward pass that regularizes the outgoing first-stage state variables with an L-infty trust-region constraint about the previous iteration's solution. Specifically, the bounds of the outgoing state variable x are updated from (l, u) to max(l, x^k - rho * (u - l)) <= x <= min(u, x^k + rho * (u - l)), where x^k is the optimal solution of x in the previous iteration. On the first iteration, the value of the state at the root node is used.

By default, rho is set to 5%, which seems to work well empirically.

Pass a different forward_pass to control the forward pass within the regularized forward pass.

This forward pass is largely intended to be used for investment problems in which the first stage makes a series of capacity decisions that then influence the rest of the graph. An error is thrown if the first stage problem is not deterministic, and states are silently skipped if they do not have finite bounds.

source

Risk Measures

SDDP.AbstractRiskMeasureType
AbstractRiskMeasure

The abstract type for the risk measure interface.

You need to define the following methods:

source
SDDP.adjust_probabilityFunction
adjust_probability(
+)

A forward pass that regularizes the outgoing first-stage state variables with an L-infty trust-region constraint about the previous iteration's solution. Specifically, the bounds of the outgoing state variable x are updated from (l, u) to max(l, x^k - rho * (u - l)) <= x <= min(u, x^k + rho * (u - l)), where x^k is the optimal solution of x in the previous iteration. On the first iteration, the value of the state at the root node is used.

By default, rho is set to 5%, which seems to work well empirically.

Pass a different forward_pass to control the forward pass within the regularized forward pass.

This forward pass is largely intended to be used for investment problems in which the first stage makes a series of capacity decisions that then influence the rest of the graph. An error is thrown if the first stage problem is not deterministic, and states are silently skipped if they do not have finite bounds.

source

Risk Measures

SDDP.AbstractRiskMeasureType
AbstractRiskMeasure

The abstract type for the risk measure interface.

You need to define the following methods:

source
SDDP.adjust_probabilityFunction
adjust_probability(
     measure::Expectation
     risk_adjusted_probability::Vector{Float64},
     original_probability::Vector{Float64},
     noise_support::Vector{Noise{T}},
     objective_realizations::Vector{Float64},
     is_minimization::Bool,
-) where {T}
source

Duality handlers

SDDP.AbstractDualityHandlerType
AbstractDualityHandler

The abstract type for the duality handler interface.

source
SDDP.ContinuousConicDualityType
ContinuousConicDuality()

Compute dual variables in the backward pass using conic duality, relaxing any binary or integer restrictions as necessary.

Theory

Given the problem

min Cᵢ(x̄, u, w) + θᵢ
+) where {T}
source

Duality handlers

SDDP.AbstractDualityHandlerType
AbstractDualityHandler

The abstract type for the duality handler interface.

source
SDDP.ContinuousConicDualityType
ContinuousConicDuality()

Compute dual variables in the backward pass using conic duality, relaxing any binary or integer restrictions as necessary.

Theory

Given the problem

min Cᵢ(x̄, u, w) + θᵢ
  st (x̄, x′, u) in Xᵢ(w) ∩ S
     x̄ - x == 0          [λ]

where S ⊆ ℝ×ℤ, we relax integrality and using conic duality to solve for λ in the problem:

min Cᵢ(x̄, u, w) + θᵢ
  st (x̄, x′, u) in Xᵢ(w)
-    x̄ - x == 0          [λ]
source
SDDP.LagrangianDualityType
LagrangianDuality(;
+    x̄ - x == 0          [λ]
source
SDDP.LagrangianDualityType
LagrangianDuality(;
     method::LocalImprovementSearch.AbstractSearchMethod =
         LocalImprovementSearch.BFGS(100),
 )

Obtain dual variables in the backward pass using Lagrangian duality.

Arguments

  • method: the LocalImprovementSearch method for maximizing the Lagrangian dual problem.

Theory

Given the problem

min Cᵢ(x̄, u, w) + θᵢ
  st (x̄, x′, u) in Xᵢ(w) ∩ S
     x̄ - x == 0          [λ]

where S ⊆ ℝ×ℤ, we solve the problem max L(λ), where:

L(λ) = min Cᵢ(x̄, u, w) + θᵢ - λ' h(x̄)
-        st (x̄, x′, u) in Xᵢ(w) ∩ S

and where h(x̄) = x̄ - x.

source
SDDP.StrengthenedConicDualityType
StrengthenedConicDuality()

Obtain dual variables in the backward pass using strengthened conic duality.

Theory

Given the problem

min Cᵢ(x̄, u, w) + θᵢ
+        st (x̄, x′, u) in Xᵢ(w) ∩ S

and where h(x̄) = x̄ - x.

source
SDDP.StrengthenedConicDualityType
StrengthenedConicDuality()

Obtain dual variables in the backward pass using strengthened conic duality.

Theory

Given the problem

min Cᵢ(x̄, u, w) + θᵢ
  st (x̄, x′, u) in Xᵢ(w) ∩ S
     x̄ - x == 0          [λ]

we first obtain an estimate for λ using ContinuousConicDuality.

Then, we evaluate the Lagrangian function:

L(λ) = min Cᵢ(x̄, u, w) + θᵢ - λ' (x̄ - x`)
-        st (x̄, x′, u) in Xᵢ(w) ∩ S

to obtain a better estimate of the intercept.

source
SDDP.BanditDualityType
BanditDuality()

Formulates the problem of choosing a duality handler as a multi-armed bandit problem. The arms to choose between are:

Our problem isn't a typical multi-armed bandit for two reasons:

  1. The reward distribution is non-stationary (each arm converges to 0 as it keeps getting pulled).
  2. The distribution of rewards is dependent on the history of the arms that were chosen.

We choose a very simple heuristic: pick the arm with the best mean + 1 standard deviation. That should ensure we consistently pick the arm with the best likelihood of improving the value function.

In future, we should consider discounting the rewards of earlier iterations, and focus more on the more-recent rewards.

source

Simulating the policy

SDDP.simulateFunction
simulate(
+        st (x̄, x′, u) in Xᵢ(w) ∩ S

to obtain a better estimate of the intercept.

source
SDDP.BanditDualityType
BanditDuality()

Formulates the problem of choosing a duality handler as a multi-armed bandit problem. The arms to choose between are:

Our problem isn't a typical multi-armed bandit for two reasons:

  1. The reward distribution is non-stationary (each arm converges to 0 as it keeps getting pulled).
  2. The distribution of rewards is dependent on the history of the arms that were chosen.

We choose a very simple heuristic: pick the arm with the best mean + 1 standard deviation. That should ensure we consistently pick the arm with the best likelihood of improving the value function.

In future, we should consider discounting the rewards of earlier iterations, and focus more on the more-recent rewards.

source

Simulating the policy

SDDP.simulateFunction
simulate(
     model::PolicyGraph,
     number_replications::Int = 1,
     variables::Vector{Symbol} = Symbol[];
@@ -426,65 +426,65 @@
     custom_recorders = Dict{Symbol, Function}(
         :constraint_dual => sp -> JuMP.dual(sp[:my_constraint])
     )
-)

The value of the dual in the first stage of the second replication can be accessed as:

simulation_results[2][1][:constraint_dual]
source
SDDP.calculate_boundFunction
SDDP.calculate_bound(
+)

The value of the dual in the first stage of the second replication can be accessed as:

simulation_results[2][1][:constraint_dual]
source
SDDP.calculate_boundFunction
SDDP.calculate_bound(
     model::PolicyGraph,
     state::Dict{Symbol,Float64} = model.initial_root_state;
     risk_measure::AbstractRiskMeasure = Expectation(),
-)

Calculate the lower bound (if minimizing, otherwise upper bound) of the problem model at the point state, assuming the risk measure at the root node is risk_measure.

source
SDDP.add_all_cutsFunction
add_all_cuts(model::PolicyGraph)

Add all cuts that may have been deleted back into the model.

Explanation

During the solve, SDDP.jl may decide to remove cuts for a variety of reasons.

These can include cuts that define the optimal value function, particularly around the extremes of the state-space (e.g., reservoirs empty).

This function ensures that all cuts discovered are added back into the model.

You should call this after train and before simulate.

source

Decision rules

SDDP.DecisionRuleType
DecisionRule(model::PolicyGraph{T}; node::T)

Create a decision rule for node node in model.

Example

rule = SDDP.DecisionRule(model; node = 1)
source
SDDP.evaluateFunction
evaluate(
+)

Calculate the lower bound (if minimizing, otherwise upper bound) of the problem model at the point state, assuming the risk measure at the root node is risk_measure.

source
SDDP.add_all_cutsFunction
add_all_cuts(model::PolicyGraph)

Add all cuts that may have been deleted back into the model.

Explanation

During the solve, SDDP.jl may decide to remove cuts for a variety of reasons.

These can include cuts that define the optimal value function, particularly around the extremes of the state-space (e.g., reservoirs empty).

This function ensures that all cuts discovered are added back into the model.

You should call this after train and before simulate.

source

Decision rules

SDDP.DecisionRuleType
DecisionRule(model::PolicyGraph{T}; node::T)

Create a decision rule for node node in model.

Example

rule = SDDP.DecisionRule(model; node = 1)
source
SDDP.evaluateFunction
evaluate(
     rule::DecisionRule;
     incoming_state::Dict{Symbol,Float64},
     noise = nothing,
     controls_to_record = Symbol[],
-)

Evaluate the decision rule rule at the point described by the incoming_state and noise.

If the node is deterministic, omit the noise argument.

Pass a list of symbols to controls_to_record to save the optimal primal solution corresponding to the names registered in the model.

source
evaluate(
+)

Evaluate the decision rule rule at the point described by the incoming_state and noise.

If the node is deterministic, omit the noise argument.

Pass a list of symbols to controls_to_record to save the optimal primal solution corresponding to the names registered in the model.

source
evaluate(
     V::ValueFunction,
     point::Dict{Union{Symbol,String},<:Real}
     objective_state = nothing,
     belief_state = nothing
-)

Evaluate the value function V at point in the state-space.

Returns a tuple containing the height of the function, and the subgradient w.r.t. the convex state-variables.

Examples

evaluate(V, Dict(:volume => 1.0))

If the state variable is constructed like @variable(sp, volume[1:4] >= 0, SDDP.State, initial_value = 0.0), use [i] to index the state variable:

evaluate(V, Dict(Symbol("volume[1]") => 1.0))

You can also use strings or symbols for the keys.

evaluate(V, Dict("volume[1]" => 1))
source
evaluate(V::ValueFunction{Nothing, Nothing}; kwargs...)

Evaluate the value function V at the point in the state-space specified by kwargs.

Examples

evaluate(V; volume = 1)
source
evaluate(
+)

Evaluate the value function V at point in the state-space.

Returns a tuple containing the height of the function, and the subgradient w.r.t. the convex state-variables.

Examples

evaluate(V, Dict(:volume => 1.0))

If the state variable is constructed like @variable(sp, volume[1:4] >= 0, SDDP.State, initial_value = 0.0), use [i] to index the state variable:

evaluate(V, Dict(Symbol("volume[1]") => 1.0))

You can also use strings or symbols for the keys.

evaluate(V, Dict("volume[1]" => 1))
source
evaluate(V::ValueFunction{Nothing, Nothing}; kwargs...)

Evaluate the value function V at the point in the state-space specified by kwargs.

Examples

evaluate(V; volume = 1)
source
evaluate(
     model::PolicyGraph{T},
     validation_scenarios::ValidationScenarios{T,S},
 ) where {T,S}

Evaluate the performance of the policy contained in model after a call to train on the scenarios specified by validation_scenarios.

Examples

model, validation_scenarios = read_from_file("my_model.sof.json")
 train(model; iteration_limit = 100)
-simulations = evaluate(model, validation_scenarios)
source

Visualizing the policy

SDDP.SpaghettiPlotType
SDDP.SpaghettiPlot(; stages, scenarios)

Initialize a new SpaghettiPlot with stages stages and scenarios number of replications.

source
SDDP.add_spaghettiFunction
SDDP.add_spaghetti(data_function::Function, plt::SpaghettiPlot; kwargs...)

Description

Add a new figure to the SpaghettiPlot plt, where the y-value of the scenario-th line when x = stage is given by data_function(plt.simulations[scenario][stage]).

Keyword arguments

  • xlabel: set the xaxis label
  • ylabel: set the yaxis label
  • title: set the title of the plot
  • ymin: set the minimum y value
  • ymax: set the maximum y value
  • cumulative: plot the additive accumulation of the value across the stages
  • interpolate: interpolation method for lines between stages.

Defaults to "linear"; see the d3 docs for all options.

Examples

simulations = simulate(model, 10)
+simulations = evaluate(model, validation_scenarios)
source

Visualizing the policy

SDDP.SpaghettiPlotType
SDDP.SpaghettiPlot(; stages, scenarios)

Initialize a new SpaghettiPlot with stages stages and scenarios number of replications.

source
SDDP.add_spaghettiFunction
SDDP.add_spaghetti(data_function::Function, plt::SpaghettiPlot; kwargs...)

Description

Add a new figure to the SpaghettiPlot plt, where the y-value of the scenario-th line when x = stage is given by data_function(plt.simulations[scenario][stage]).

Keyword arguments

  • xlabel: set the xaxis label
  • ylabel: set the yaxis label
  • title: set the title of the plot
  • ymin: set the minimum y value
  • ymax: set the maximum y value
  • cumulative: plot the additive accumulation of the value across the stages
  • interpolate: interpolation method for lines between stages.

Defaults to "linear"; see the d3 docs for all options.

Examples

simulations = simulate(model, 10)
 plt = SDDP.spaghetti_plot(simulations)
 SDDP.add_spaghetti(plt; title = "Stage objective") do data
     return data[:stage_objective]
-end
source
SDDP.publication_plotFunction
SDDP.publication_plot(
+end
source
SDDP.publication_plotFunction
SDDP.publication_plot(
     data_function, simulations;
     quantile = [0.0, 0.1, 0.25, 0.5, 0.75, 0.9, 1.0],
     kwargs...)

Create a Plots.jl recipe plot of the simulations.

See Plots.jl for the list of keyword arguments.

Examples

SDDP.publication_plot(simulations; title = "My title") do data
     return data[:stage_objective]
-end
source
SDDP.ValueFunctionType
ValueFunction

A representation of the value function. SDDP.jl uses the following unique representation of the value function that is undocumented in the literature.

It supports three types of state variables:

  1. x - convex "resource" states
  2. b - concave "belief" states
  3. y - concave "objective" states

In addition, we have three types of cuts:

  1. Single-cuts (also called "average" cuts in the literature), which involve the risk-adjusted expectation of the cost-to-go.
  2. Multi-cuts, which use a different cost-to-go term for each realization w.
  3. Risk-cuts, which correspond to the facets of the dual interpretation of a coherent risk measure.

Therefore, ValueFunction returns a JuMP model of the following form:

V(x, b, y) = min: μᵀb + νᵀy + θ
+end
source
SDDP.ValueFunctionType
ValueFunction

A representation of the value function. SDDP.jl uses the following unique representation of the value function that is undocumented in the literature.

It supports three types of state variables:

  1. x - convex "resource" states
  2. b - concave "belief" states
  3. y - concave "objective" states

In addition, we have three types of cuts:

  1. Single-cuts (also called "average" cuts in the literature), which involve the risk-adjusted expectation of the cost-to-go.
  2. Multi-cuts, which use a different cost-to-go term for each realization w.
  3. Risk-cuts, which correspond to the facets of the dual interpretation of a coherent risk measure.

Therefore, ValueFunction returns a JuMP model of the following form:

V(x, b, y) = min: μᵀb + νᵀy + θ
              s.t. # "Single" / "Average" cuts
                   μᵀb(j) + νᵀy(j) + θ >= α(j) + xᵀβ(j), ∀ j ∈ J
                   # "Multi" cuts
                   μᵀb(k) + νᵀy(k) + φ(w) >= α(k, w) + xᵀβ(k, w), ∀w ∈ Ω, k ∈ K
                   # "Risk-set" cuts
-                  θ ≥ Σ{p(k, w) * φ(w)}_w - μᵀb(k) - νᵀy(k), ∀ k ∈ K
source
SDDP.evaluateMethod
evaluate(
+                  θ ≥ Σ{p(k, w) * φ(w)}_w - μᵀb(k) - νᵀy(k), ∀ k ∈ K
source
SDDP.evaluateMethod
evaluate(
     V::ValueFunction,
     point::Dict{Union{Symbol,String},<:Real}
     objective_state = nothing,
     belief_state = nothing
-)

Evaluate the value function V at point in the state-space.

Returns a tuple containing the height of the function, and the subgradient w.r.t. the convex state-variables.

Examples

evaluate(V, Dict(:volume => 1.0))

If the state variable is constructed like @variable(sp, volume[1:4] >= 0, SDDP.State, initial_value = 0.0), use [i] to index the state variable:

evaluate(V, Dict(Symbol("volume[1]") => 1.0))

You can also use strings or symbols for the keys.

evaluate(V, Dict("volume[1]" => 1))
source
SDDP.plotFunction
plot(plt::SpaghettiPlot[, filename::String]; open::Bool = true)

Write the SpaghettiPlot plot plt to filename. If filename is not given, it will be saved to a temporary directory. If open = true, then a browser window will be opened to display the resulting HTML file.

source

Debugging the model

SDDP.write_subproblem_to_fileFunction
write_subproblem_to_file(
+)

Evaluate the value function V at point in the state-space.

Returns a tuple containing the height of the function, and the subgradient w.r.t. the convex state-variables.

Examples

evaluate(V, Dict(:volume => 1.0))

If the state variable is constructed like @variable(sp, volume[1:4] >= 0, SDDP.State, initial_value = 0.0), use [i] to index the state variable:

evaluate(V, Dict(Symbol("volume[1]") => 1.0))

You can also use strings or symbols for the keys.

evaluate(V, Dict("volume[1]" => 1))
source
SDDP.plotFunction
plot(plt::SpaghettiPlot[, filename::String]; open::Bool = true)

Write the SpaghettiPlot plot plt to filename. If filename is not given, it will be saved to a temporary directory. If open = true, then a browser window will be opened to display the resulting HTML file.

source

Debugging the model

SDDP.write_subproblem_to_fileFunction
write_subproblem_to_file(
     node::Node,
     filename::String;
     throw_error::Bool = false,
-)

Write the subproblem contained in node to the file filename.

The throw_error is an argument used internally by SDDP.jl. If set, an error will be thrown.

Example

SDDP.write_subproblem_to_file(model[1], "subproblem_1.lp")
source
SDDP.deterministic_equivalentFunction
deterministic_equivalent(
+)

Write the subproblem contained in node to the file filename.

The throw_error is an argument used internally by SDDP.jl. If set, an error will be thrown.

Example

SDDP.write_subproblem_to_file(model[1], "subproblem_1.lp")
source
SDDP.deterministic_equivalentFunction
deterministic_equivalent(
     pg::PolicyGraph{T},
     optimizer = nothing;
     time_limit::Union{Real,Nothing} = 60.0,
-)

Form a JuMP model that represents the deterministic equivalent of the problem.

Examples

deterministic_equivalent(model)
deterministic_equivalent(model, HiGHS.Optimizer)
source

StochOptFormat

SDDP.write_to_fileFunction
write_to_file(
+)

Form a JuMP model that represents the deterministic equivalent of the problem.

Examples

deterministic_equivalent(model)
deterministic_equivalent(model, HiGHS.Optimizer)
source

StochOptFormat

SDDP.write_to_fileFunction
write_to_file(
     model::PolicyGraph,
     filename::String;
     compression::MOI.FileFormats.AbstractCompressionScheme =
         MOI.FileFormats.AutomaticCompression(),
     kwargs...
-)

Write model to filename in the StochOptFormat file format.

Pass an argument to compression to override the default of automatically detecting the file compression to use based on the extension of filename.

See Base.write(::IO, ::PolicyGraph) for information on the keyword arguments that can be provided.

Warning

This function is experimental. See the full warning in Base.write(::IO, ::PolicyGraph).

Examples

write_to_file(model, "my_model.sof.json"; validation_scenarios = 10)
source
SDDP.read_from_fileFunction
read_from_file(
+)

Write model to filename in the StochOptFormat file format.

Pass an argument to compression to override the default of automatically detecting the file compression to use based on the extension of filename.

See Base.write(::IO, ::PolicyGraph) for information on the keyword arguments that can be provided.

Warning

This function is experimental. See the full warning in Base.write(::IO, ::PolicyGraph).

Examples

write_to_file(model, "my_model.sof.json"; validation_scenarios = 10)
source
SDDP.read_from_fileFunction
read_from_file(
     filename::String;
     compression::MOI.FileFormats.AbstractCompressionScheme =
         MOI.FileFormats.AutomaticCompression(),
     kwargs...
-)::Tuple{PolicyGraph, ValidationScenarios}

Return a tuple containing a PolicyGraph object and a ValidationScenarios read from filename in the StochOptFormat file format.

Pass an argument to compression to override the default of automatically detecting the file compression to use based on the extension of filename.

See Base.read(::IO, ::Type{PolicyGraph}) for information on the keyword arguments that can be provided.

Warning

This function is experimental. See the full warning in Base.read(::IO, ::Type{PolicyGraph}).

Examples

model, validation_scenarios = read_from_file("my_model.sof.json")
source
Base.writeMethod
Base.write(
+)::Tuple{PolicyGraph, ValidationScenarios}

Return a tuple containing a PolicyGraph object and a ValidationScenarios read from filename in the StochOptFormat file format.

Pass an argument to compression to override the default of automatically detecting the file compression to use based on the extension of filename.

See Base.read(::IO, ::Type{PolicyGraph}) for information on the keyword arguments that can be provided.

Warning

This function is experimental. See the full warning in Base.read(::IO, ::Type{PolicyGraph}).

Examples

model, validation_scenarios = read_from_file("my_model.sof.json")
source
Base.writeMethod
Base.write(
     io::IO,
     model::PolicyGraph;
     validation_scenarios::Union{Nothing,Int,ValidationScenarios} = nothing,
@@ -500,15 +500,15 @@
         date = "2020-07-20",
         description = "Example problem for the SDDP.jl documentation",
     )
-end
source
Base.readMethod
Base.read(
+end
source
Base.readMethod
Base.read(
     io::IO,
     ::Type{PolicyGraph};
     bound::Float64 = 1e6,
 )::Tuple{PolicyGraph,ValidationScenarios}

Return a tuple containing a PolicyGraph object and a ValidationScenarios read from io in the StochOptFormat file format.

See also: evaluate.

Compatibility

Warning

This function is experimental. Things may change between commits. You should not rely on this functionality as a long-term file format (yet).

In addition to potential changes to the underlying format, only a subset of possible modifications are supported. These include:

  • Additive random variables in the constraints or in the objective
  • Multiplicative random variables in the objective

If your model uses something other than this, this function may throw an error or silently build a non-convex model.

Examples

open("my_model.sof.json", "r") do io
     model, validation_scenarios = read(io, PolicyGraph)
-end
source
SDDP.evaluateMethod
evaluate(
+end
source
SDDP.evaluateMethod
evaluate(
     model::PolicyGraph{T},
     validation_scenarios::ValidationScenarios{T,S},
 ) where {T,S}

Evaluate the performance of the policy contained in model after a call to train on the scenarios specified by validation_scenarios.

Examples

model, validation_scenarios = read_from_file("my_model.sof.json")
 train(model; iteration_limit = 100)
-simulations = evaluate(model, validation_scenarios)
source
SDDP.ValidationScenariosType
ValidationScenario{T,S}(scenarios::Vector{ValidationScenario{T,S}})

An AbstractSamplingScheme based on a vector of scenarios.

Each scenario is a vector of Tuple{T, S} where the first element is the node to visit and the second element is the realization of the stagewise-independent noise term. Pass nothing if the node is deterministic.

source
SDDP.ValidationScenarioType
ValidationScenario{T,S}(scenario::Vector{Tuple{T,S}})

A single scenario for testing.

See also: ValidationScenarios.

source
+simulations = evaluate(model, validation_scenarios)source
SDDP.ValidationScenariosType
ValidationScenario{T,S}(scenarios::Vector{ValidationScenario{T,S}})

An AbstractSamplingScheme based on a vector of scenarios.

Each scenario is a vector of Tuple{T, S} where the first element is the node to visit and the second element is the realization of the stagewise-independent noise term. Pass nothing if the node is deterministic.

source
SDDP.ValidationScenarioType
ValidationScenario{T,S}(scenario::Vector{Tuple{T,S}})

A single scenario for testing.

See also: ValidationScenarios.

source
diff --git a/dev/changelog/index.html b/dev/changelog/index.html index 93a735f45..301efa56f 100644 --- a/dev/changelog/index.html +++ b/dev/changelog/index.html @@ -3,4 +3,4 @@ function gtag(){dataLayer.push(arguments);} gtag('js', new Date()); gtag('config', 'G-HZQQDVMPZW', {'page_path': location.pathname + location.search + location.hash}); -

Release notes

The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.

v1.10.1 (November 28, 2024)

Fixed

Other

  • Documentation updates (#801)

v1.10.0 (November 19, 2024)

Added

  • Added root_node_risk_measure keyword to train (#804)

Fixed

  • Fixed a bug with cut sharing in a graph with zero-probability arcs (#797)

Other

v1.9.0 (October 17, 2024)

Added

Fixed

  • Fixed the tests to skip threading tests if running in serial (#770)
  • Fixed BanditDuality to handle the case where the standard deviation is NaN (#779)
  • Fixed an error when lagged state variables are encountered in MSPFormat (#786)
  • Fixed publication_plot with replications of different lengths (#788)
  • Fixed CTRL+C interrupting the code at unsafe points (#789)

Other

  • Documentation improvements (#771) (#772)
  • Updated printing because of changes in JuMP (#773)

v1.8.1 (August 5, 2024)

Fixed

  • Fixed various issues with SDDP.Threaded() (#761)
  • Fixed a deprecation warning for sorting a dictionary (#763)

Other

  • Updated copyright notices (#762)
  • Updated .JuliaFormatter.toml (#764)

v1.8.0 (July 24, 2024)

Added

  • Added SDDP.Threaded(), which is an experimental parallel scheme that supports solving problems using multiple threads. Some parts of SDDP.jl may not be thread-safe, and this can cause incorrect results, segfaults, or other errors. Please use with care and report any issues by opening a GitHub issue. (#758)

Other

  • Documentation improvements and fixes (#747) (#759)

v1.7.0 (June 4, 2024)

Added

  • Added sample_backward_noise_terms_with_state for creating backward pass sampling schemes that depend on the current primal state. (#742) (Thanks @arthur-brigatto)

Fixed

  • Fixed error message when publication_plot has non-finite data (#738)

Other

  • Updated the logo constructor (#730)

v1.6.7 (February 1, 2024)

Fixed

  • Fixed non-constant state dimension in the MSPFormat reader (#695)
  • Fixed SimulatorSamplingScheme for deterministic nodes (#710)
  • Fixed line search in BFGS (#711)
  • Fixed handling of NEARLY_FEASIBLE_POINT status (#726)

Other

  • Documentation improvements (#692) (#694) (#706) (#716) (#727)
  • Updated to StochOptFormat v1.0 (#705)
  • Added an experimental OuterApproximation algorithm (#709)
  • Updated .gitignore (#717)
  • Added code for MDP paper (#720) (#721)
  • Added Google analytics (#723)

v1.6.6 (September 29, 2023)

Other

v1.6.5 (September 25, 2023)

Fixed

Other

  • Updated tutorials (#677) (#678) (#682) (#683)
  • Fixed documentation preview (#679)

v1.6.4 (September 23, 2023)

Fixed

Other

  • Documentation updates (#658) (#666) (#671)
  • Switch to GitHub action for deploying docs (#668) (#670)
  • Update to Documenter@1 (#669)

v1.6.3 (September 8, 2023)

Fixed

  • Fixed default stopping rule with iteration_limit or time_limit set (#662)

Other

  • Various documentation improvements (#651) (#657) (#659) (#660)

v1.6.2 (August 24, 2023)

Fixed

  • MSPFormat now detects and exploits stagewise independent lattices (#653)
  • Fixed set_optimizer for models read from file (#654)

Other

  • Fixed typo in pglib_opf.jl (#647)
  • Fixed documentation build and added color (#652)

v1.6.1 (July 20, 2023)

Fixed

  • Fixed bugs in MSPFormat reader (#638) (#639)

Other

  • Clarified OutOfSampleMonteCarlo docstring (#643)

v1.6.0 (July 3, 2023)

Added

Other

v1.5.1 (June 30, 2023)

This release contains a number of minor code changes, but it has a large impact on the content that is printed to screen. In particular, we now log periodically, instead of each iteration, and a "good" stopping rule is used as the default if none are specified. Try using SDDP.train(model) to see the difference.

Other

  • Fixed various typos in the documentation (#617)
  • Fixed printing test after changes in JuMP (#618)
  • Set SimulationStoppingRule as the default stopping rule (#619)
  • Changed the default logging frequency. Pass log_every_seconds = 0.0 to train to revert to the old behavior. (#620)
  • Added example usage with Distributions.jl (@slwu89) (#622)
  • Removed the numerical issue @warn (#627)
  • Improved the quality of docstrings (#630)

v1.5.0 (May 14, 2023)

Added

  • Added the ability to use a different model for the forward pass. This is a novel feature that lets you train better policies when the model is non-convex or does not have a well-defined dual. See the Alternative forward models tutorial in which we train convex and non-convex formulations of the optimal power flow problem. (#611)

Other

  • Updated missing changelog entries (#608)
  • Removed global variables (#610)
  • Converted the Options struct to keyword arguments. This struct was a private implementation detail, but the change is breaking if you developed an extension to SDDP that touched these internals. (#612)
  • Fixed some typos (#613)

v1.4.0 (May 8, 2023)

Added

Fixed

  • Fixed parsing of some MSPFormat files (#602) (#604)
  • Fixed printing in header (#605)

v1.3.0 (May 3, 2023)

Added

  • Added experimental support for SDDP.MSPFormat.read_from_file (#593)

Other

  • Updated to StochOptFormat v0.3 (#600)

v1.2.1 (May 1, 2023)

Fixed

  • Fixed log_every_seconds (#597)

v1.2.0 (May 1, 2023)

Added

Other

  • Tweaked how the log is printed (#588)
  • Updated to StochOptFormat v0.2 (#592)

v1.1.4 (April 10, 2023)

Fixed

  • Logs are now flushed every iteration (#584)

Other

  • Added docstrings to various functions (#581)
  • Minor documentation updates (#580)
  • Clarified integrality documentation (#582)
  • Updated the README (#585)
  • Number of numerical issues is now printed to the log (#586)

v1.1.3 (April 2, 2023)

Other

v1.1.2 (March 18, 2023)

Other

v1.1.1 (March 16, 2023)

Other

  • Fixed email in Project.toml
  • Added notebook to documentation tutorials (#571)

v1.1.0 (January 12, 2023)

Added

v1.0.0 (January 3, 2023)

Although we're bumping MAJOR version, this is a non-breaking release. Going forward:

  • New features will bump the MINOR version
  • Bug fixes, maintenance, and documentation updates will bump the PATCH version
  • We will support only the Long Term Support (currently v1.6.7) and the latest patch (currently v1.8.4) releases of Julia. Updates to the LTS version will bump the MINOR version
  • Updates to the compat bounds of package dependencies will bump the PATCH version.

We do not intend any breaking changes to the public API, which would require a new MAJOR release. The public API is everything defined in the documentation. Anything not in the documentation is considered private and may change in any PATCH release.

Added

Other

  • Updated Plotting tools to use live plots (#563)
  • Added vale as a linter (#565)
  • Improved documentation for initializing a parallel scheme (#566)

v0.4.9 (January 3, 2023)

Added

Other

  • Added tutorial on Markov Decision Processes (#556)
  • Added two-stage newsvendor tutorial (#557)
  • Refactored the layout of the documentation (#554) (#555)
  • Updated copyright to 2023 (#558)
  • Fixed errors in the documentation (#561)

v0.4.8 (December 19, 2022)

Added

Fixed

  • Reverted then fixed (#531) because it failed to account for problems with integer variables (#546) (#551)

v0.4.7 (December 17, 2022)

Added

  • Added initial_node support to InSampleMonteCarlo and OutOfSampleMonteCarlo (#535)

Fixed

  • Rethrow InterruptException when solver is interrupted (#534)
  • Fixed numerical recovery when we need dual solutions (#531) (Thanks @bfpc)
  • Fixed re-using the dashboard = true option between solves (#538)
  • Fixed bug when no @stageobjective is set (now defaults to 0.0) (#539)
  • Fixed errors thrown when invalid inputs are provided to add_objective_state (#540)

Other

  • Drop support for Julia versions prior to 1.6 (#533)
  • Updated versions of dependencies (#522) (#533)
  • Switched to HiGHS in the documentation and tests (#533)
  • Added license headers (#519)
  • Fixed link in air conditioning example (#521) (Thanks @conema)
  • Clarified variable naming in deterministic equivalent (#525) (Thanks @lucasprocessi)
  • Added this change log (#536)
  • Cuts are now written to model.cuts.json when numerical instability is discovered. This can aid debugging because it allows you to reload the cuts as of the iteration that caused the numerical issue (#537)

v0.4.6 (March 25, 2022)

Other

  • Updated to JuMP v1.0 (#517)

v0.4.5 (March 9, 2022)

Fixed

  • Fixed issue with set_silent in a subproblem (#510)

Other

  • Fixed many typos (#500) (#501) (#506) (#511) (Thanks @bfpc)
  • Update to JuMP v0.23 (#514)
  • Added auto-regressive tutorial (#507)

v0.4.4 (December 11, 2021)

Added

  • Added BanditDuality (#471)
  • Added benchmark scripts (#475) (#476) (#490)
  • write_cuts_to_file now saves visited states (#468)

Fixed

  • Fixed BoundStalling in a deterministic policy (#470) (#474)
  • Fixed magnitude warning with zero coefficients (#483)

Other

  • Improvements to LagrangianDuality (#481) (#482) (#487)
  • Improvements to StrengthenedConicDuality (#486)
  • Switch to functional form for the tests (#478)
  • Fixed typos (#472) (Thanks @vfdev-5)
  • Update to JuMP v0.22 (#498)

v0.4.3 (August 31, 2021)

Added

  • Added biobjective solver (#462)
  • Added forward_pass_callback (#466)

Other

  • Update tutorials and documentation (#459) (#465)
  • Organize how paper materials are stored (#464)

v0.4.2 (August 24, 2021)

Fixed

  • Fixed a bug in Lagrangian duality (#457)

v0.4.1 (August 23, 2021)

Other

  • Minor changes to our implementation of LagrangianDuality (#454) (#455)

v0.4.0 (August 17, 2021)

Breaking

  • A large refactoring for how we handle stochastic integer programs. This added support for things like SDDP.ContinuousConicDuality and SDDP.LagrangianDuality. It was breaking because we removed the integrality_handler argument to PolicyGraph. (#449) (#453)

Other

  • Documentation improvements (#447) (#448) (#450)

v0.3.17 (July 6, 2021)

Added

Other

  • Display more model attributes (#438)
  • Documentation improvements (#433) (#437) (#439)

v0.3.16 (June 17, 2021)

Added

Other

  • Update risk measure docstrings (#418)

v0.3.15 (June 1, 2021)

Added

Fixed

  • Fixed scoping bug in SDDP.@stageobjective (#407)
  • Fixed a bug when the initial point is infeasible (#411)
  • Set subproblems to silent by default (#409)

Other

  • Add JuliaFormatter (#412)
  • Documentation improvements (#406) (#408)

v0.3.14 (March 30, 2021)

Fixed

  • Fixed O(N^2) behavior in get_same_children (#393)

v0.3.13 (March 27, 2021)

Fixed

  • Fixed bug in print.jl
  • Fixed compat of Reexport (#388)

v0.3.12 (March 22, 2021)

Added

  • Added problem statistics to header (#385) (#386)

Fixed

  • Fixed subtypes in visualization (#384)

v0.3.11 (March 22, 2021)

Fixed

  • Fixed constructor in direct mode (#383)

Other

  • Fix documentation (#379)

v0.3.10 (February 23, 2021)

Fixed

  • Fixed seriescolor in publication plot (#376)

v0.3.9 (February 20, 2021)

Added

  • Add option to simulate with different incoming state (#372)
  • Added warning for cuts with high dynamic range (#373)

Fixed

  • Fixed seriesalpha in publication plot (#375)

v0.3.8 (January 19, 2021)

Other

  • Documentation improvements (#367) (#369) (#370)

v0.3.7 (January 8, 2021)

Other

  • Documentation improvements (#362) (#363) (#365) (#366)
  • Bump copyright (#364)

v0.3.6 (December 17, 2020)

Other

  • Fix typos (#358)
  • Collapse navigation bar in docs (#359)
  • Update TagBot.yml (#361)

v0.3.5 (November 18, 2020)

Other

  • Update citations (#348)
  • Switch to GitHub actions (#355)

v0.3.4 (August 25, 2020)

Added

  • Added non-uniform distributionally robust risk measure (#328)
  • Added numerical recovery functions (#330)
  • Added experimental StochOptFormat (#332) (#336) (#337) (#341) (#343) (#344)
  • Added entropic risk measure (#347)

Other

  • Documentation improvements (#327) (#333) (#339) (#340)

v0.3.3 (June 19, 2020)

Added

  • Added asynchronous support for price and belief states (#325)
  • Added ForwardPass plug-in system (#320)

Fixed

  • Fix check for probabilities in Markovian graph (#322)

v0.3.2 (April 6, 2020)

Added

Other

  • Improve error message in deterministic equivalent (#312)
  • Update to RecipesBase 1.0 (#313)

v0.3.1 (February 26, 2020)

Fixed

  • Fixed filename in integrality_handlers.jl (#304)

v0.3.0 (February 20, 2020)

Breaking

  • Breaking changes to update to JuMP v0.21 (#300).

v0.2.4 (February 7, 2020)

Added

  • Added a counter for the number of total subproblem solves (#301)

Other

  • Update formatter (#298)
  • Added tests (#299)

v0.2.3 (January 24, 2020)

Added

  • Added support for convex risk measures (#294)

Fixed

  • Fixed bug when subproblem is infeasible (#296)
  • Fixed bug in deterministic equivalent (#297)

Other

  • Added example from IJOC paper (#293)

v0.2.2 (January 10, 2020)

Fixed

  • Fixed flakey time limit in tests (#291)

Other

  • Removed MathOptFormat.jl (#289)
  • Update copyright (#290)

v0.2.1 (December 19, 2019)

Added

  • Added support for approximating a Markov lattice (#282) (#285)
  • Add tools for visualizing the value function (#272) (#286)
  • Write .mof.json files on error (#284)

Other

  • Improve documentation (#281) (#283)
  • Update tests for Julia 1.3 (#287)

v0.2.0 (December 16, 2019)

This version added the asynchronous parallel implementation with a few minor breaking changes in how we iterated internally. It didn't break basic user-facing models, only implementations that implemented some of the extension features. It probably could have been a v1.1 release.

Added

  • Added asynchronous parallel implementation (#277)
  • Added roll-out algorithm for cyclic graphs (#279)

Other

  • Improved error messages in PolicyGraph (#271)
  • Added JuliaFormatter (#273) (#276)
  • Fixed compat bounds (#274) (#278)
  • Added documentation for simulating non-standard graphs (#280)

v0.1.0 (October 17, 2019)

A complete rewrite of SDDP.jl based on the policy graph framework. This was essentially a new package. It has minimal code in common with the previous implementation.

Development started on September 28, 2018 in Kokako.jl, and the code was merged into SDDP.jl on March 14, 2019.

The pull request SDDP.jl#180 lists the 29 issues that the rewrite closed.

v0.0.1 (April 18, 2018)

Initial release. Development had been underway since January 22, 2016 in the StochDualDynamicProgram.jl repository. The last development commit there was April 5, 2017. Work then continued in this repository for a year before the first tagged release.

+

Release notes

The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.

v1.10.1 (November 28, 2024)

Fixed

Other

  • Documentation updates (#801)

v1.10.0 (November 19, 2024)

Added

  • Added root_node_risk_measure keyword to train (#804)

Fixed

  • Fixed a bug with cut sharing in a graph with zero-probability arcs (#797)

Other

v1.9.0 (October 17, 2024)

Added

Fixed

  • Fixed the tests to skip threading tests if running in serial (#770)
  • Fixed BanditDuality to handle the case where the standard deviation is NaN (#779)
  • Fixed an error when lagged state variables are encountered in MSPFormat (#786)
  • Fixed publication_plot with replications of different lengths (#788)
  • Fixed CTRL+C interrupting the code at unsafe points (#789)

Other

  • Documentation improvements (#771) (#772)
  • Updated printing because of changes in JuMP (#773)

v1.8.1 (August 5, 2024)

Fixed

  • Fixed various issues with SDDP.Threaded() (#761)
  • Fixed a deprecation warning for sorting a dictionary (#763)

Other

  • Updated copyright notices (#762)
  • Updated .JuliaFormatter.toml (#764)

v1.8.0 (July 24, 2024)

Added

  • Added SDDP.Threaded(), which is an experimental parallel scheme that supports solving problems using multiple threads. Some parts of SDDP.jl may not be thread-safe, and this can cause incorrect results, segfaults, or other errors. Please use with care and report any issues by opening a GitHub issue. (#758)

Other

  • Documentation improvements and fixes (#747) (#759)

v1.7.0 (June 4, 2024)

Added

  • Added sample_backward_noise_terms_with_state for creating backward pass sampling schemes that depend on the current primal state. (#742) (Thanks @arthur-brigatto)

Fixed

  • Fixed error message when publication_plot has non-finite data (#738)

Other

  • Updated the logo constructor (#730)

v1.6.7 (February 1, 2024)

Fixed

  • Fixed non-constant state dimension in the MSPFormat reader (#695)
  • Fixed SimulatorSamplingScheme for deterministic nodes (#710)
  • Fixed line search in BFGS (#711)
  • Fixed handling of NEARLY_FEASIBLE_POINT status (#726)

Other

  • Documentation improvements (#692) (#694) (#706) (#716) (#727)
  • Updated to StochOptFormat v1.0 (#705)
  • Added an experimental OuterApproximation algorithm (#709)
  • Updated .gitignore (#717)
  • Added code for MDP paper (#720) (#721)
  • Added Google analytics (#723)

v1.6.6 (September 29, 2023)

Other

v1.6.5 (September 25, 2023)

Fixed

Other

  • Updated tutorials (#677) (#678) (#682) (#683)
  • Fixed documentation preview (#679)

v1.6.4 (September 23, 2023)

Fixed

Other

  • Documentation updates (#658) (#666) (#671)
  • Switch to GitHub action for deploying docs (#668) (#670)
  • Update to Documenter@1 (#669)

v1.6.3 (September 8, 2023)

Fixed

  • Fixed default stopping rule with iteration_limit or time_limit set (#662)

Other

  • Various documentation improvements (#651) (#657) (#659) (#660)

v1.6.2 (August 24, 2023)

Fixed

  • MSPFormat now detects and exploits stagewise independent lattices (#653)
  • Fixed set_optimizer for models read from file (#654)

Other

  • Fixed typo in pglib_opf.jl (#647)
  • Fixed documentation build and added color (#652)

v1.6.1 (July 20, 2023)

Fixed

  • Fixed bugs in MSPFormat reader (#638) (#639)

Other

  • Clarified OutOfSampleMonteCarlo docstring (#643)

v1.6.0 (July 3, 2023)

Added

Other

v1.5.1 (June 30, 2023)

This release contains a number of minor code changes, but it has a large impact on the content that is printed to screen. In particular, we now log periodically, instead of each iteration, and a "good" stopping rule is used as the default if none are specified. Try using SDDP.train(model) to see the difference.

Other

  • Fixed various typos in the documentation (#617)
  • Fixed printing test after changes in JuMP (#618)
  • Set SimulationStoppingRule as the default stopping rule (#619)
  • Changed the default logging frequency. Pass log_every_seconds = 0.0 to train to revert to the old behavior. (#620)
  • Added example usage with Distributions.jl (@slwu89) (#622)
  • Removed the numerical issue @warn (#627)
  • Improved the quality of docstrings (#630)

v1.5.0 (May 14, 2023)

Added

  • Added the ability to use a different model for the forward pass. This is a novel feature that lets you train better policies when the model is non-convex or does not have a well-defined dual. See the Alternative forward models tutorial in which we train convex and non-convex formulations of the optimal power flow problem. (#611)

Other

  • Updated missing changelog entries (#608)
  • Removed global variables (#610)
  • Converted the Options struct to keyword arguments. This struct was a private implementation detail, but the change is breaking if you developed an extension to SDDP that touched these internals. (#612)
  • Fixed some typos (#613)

v1.4.0 (May 8, 2023)

Added

Fixed

  • Fixed parsing of some MSPFormat files (#602) (#604)
  • Fixed printing in header (#605)

v1.3.0 (May 3, 2023)

Added

  • Added experimental support for SDDP.MSPFormat.read_from_file (#593)

Other

  • Updated to StochOptFormat v0.3 (#600)

v1.2.1 (May 1, 2023)

Fixed

  • Fixed log_every_seconds (#597)

v1.2.0 (May 1, 2023)

Added

Other

  • Tweaked how the log is printed (#588)
  • Updated to StochOptFormat v0.2 (#592)

v1.1.4 (April 10, 2023)

Fixed

  • Logs are now flushed every iteration (#584)

Other

  • Added docstrings to various functions (#581)
  • Minor documentation updates (#580)
  • Clarified integrality documentation (#582)
  • Updated the README (#585)
  • Number of numerical issues is now printed to the log (#586)

v1.1.3 (April 2, 2023)

Other

v1.1.2 (March 18, 2023)

Other

v1.1.1 (March 16, 2023)

Other

  • Fixed email in Project.toml
  • Added notebook to documentation tutorials (#571)

v1.1.0 (January 12, 2023)

Added

v1.0.0 (January 3, 2023)

Although we're bumping the MAJOR version, this is a non-breaking release. Going forward:

  • New features will bump the MINOR version
  • Bug fixes, maintenance, and documentation updates will bump the PATCH version
  • We will support only the Long Term Support (currently v1.6.7) and the latest patch (currently v1.8.4) releases of Julia. Updates to the LTS version will bump the MINOR version
  • Updates to the compat bounds of package dependencies will bump the PATCH version.

We do not intend any breaking changes to the public API, which would require a new MAJOR release. The public API is everything defined in the documentation. Anything not in the documentation is considered private and may change in any PATCH release.

Added

Other

  • Updated Plotting tools to use live plots (#563)
  • Added vale as a linter (#565)
  • Improved documentation for initializing a parallel scheme (#566)

v0.4.9 (January 3, 2023)

Added

Other

  • Added tutorial on Markov Decision Processes (#556)
  • Added two-stage newsvendor tutorial (#557)
  • Refactored the layout of the documentation (#554) (#555)
  • Updated copyright to 2023 (#558)
  • Fixed errors in the documentation (#561)

v0.4.8 (December 19, 2022)

Added

Fixed

  • Reverted then fixed (#531) because it failed to account for problems with integer variables (#546) (#551)

v0.4.7 (December 17, 2022)

Added

  • Added initial_node support to InSampleMonteCarlo and OutOfSampleMonteCarlo (#535)

Fixed

  • Rethrow InterruptException when solver is interrupted (#534)
  • Fixed numerical recovery when we need dual solutions (#531) (Thanks @bfpc)
  • Fixed re-using the dashboard = true option between solves (#538)
  • Fixed bug when no @stageobjective is set (now defaults to 0.0) (#539)
  • Fixed errors thrown when invalid inputs are provided to add_objective_state (#540)

Other

  • Drop support for Julia versions prior to 1.6 (#533)
  • Updated versions of dependencies (#522) (#533)
  • Switched to HiGHS in the documentation and tests (#533)
  • Added license headers (#519)
  • Fixed link in air conditioning example (#521) (Thanks @conema)
  • Clarified variable naming in deterministic equivalent (#525) (Thanks @lucasprocessi)
  • Added this change log (#536)
  • Cuts are now written to model.cuts.json when numerical instability is discovered. This can aid debugging because it allows you to reload the cuts as of the iteration that caused the numerical issue (#537)

v0.4.6 (March 25, 2022)

Other

  • Updated to JuMP v1.0 (#517)

v0.4.5 (March 9, 2022)

Fixed

  • Fixed issue with set_silent in a subproblem (#510)

Other

  • Fixed many typos (#500) (#501) (#506) (#511) (Thanks @bfpc)
  • Update to JuMP v0.23 (#514)
  • Added auto-regressive tutorial (#507)

v0.4.4 (December 11, 2021)

Added

  • Added BanditDuality (#471)
  • Added benchmark scripts (#475) (#476) (#490)
  • write_cuts_to_file now saves visited states (#468)

Fixed

  • Fixed BoundStalling in a deterministic policy (#470) (#474)
  • Fixed magnitude warning with zero coefficients (#483)

Other

  • Improvements to LagrangianDuality (#481) (#482) (#487)
  • Improvements to StrengthenedConicDuality (#486)
  • Switch to functional form for the tests (#478)
  • Fixed typos (#472) (Thanks @vfdev-5)
  • Update to JuMP v0.22 (#498)

v0.4.3 (August 31, 2021)

Added

  • Added biobjective solver (#462)
  • Added forward_pass_callback (#466)

Other

  • Update tutorials and documentation (#459) (#465)
  • Organize how paper materials are stored (#464)

v0.4.2 (August 24, 2021)

Fixed

  • Fixed a bug in Lagrangian duality (#457)

v0.4.1 (August 23, 2021)

Other

  • Minor changes to our implementation of LagrangianDuality (#454) (#455)

v0.4.0 (August 17, 2021)

Breaking

  • A large refactoring for how we handle stochastic integer programs. This added support for things like SDDP.ContinuousConicDuality and SDDP.LagrangianDuality. It was breaking because we removed the integrality_handler argument to PolicyGraph. (#449) (#453)

Other

  • Documentation improvements (#447) (#448) (#450)

v0.3.17 (July 6, 2021)

Added

Other

  • Display more model attributes (#438)
  • Documentation improvements (#433) (#437) (#439)

v0.3.16 (June 17, 2021)

Added

Other

  • Update risk measure docstrings (#418)

v0.3.15 (June 1, 2021)

Added

Fixed

  • Fixed scoping bug in SDDP.@stageobjective (#407)
  • Fixed a bug when the initial point is infeasible (#411)
  • Set subproblems to silent by default (#409)

Other

  • Add JuliaFormatter (#412)
  • Documentation improvements (#406) (#408)

v0.3.14 (March 30, 2021)

Fixed

  • Fixed O(N^2) behavior in get_same_children (#393)

v0.3.13 (March 27, 2021)

Fixed

  • Fixed bug in print.jl
  • Fixed compat of Reexport (#388)

v0.3.12 (March 22, 2021)

Added

  • Added problem statistics to header (#385) (#386)

Fixed

  • Fixed subtypes in visualization (#384)

v0.3.11 (March 22, 2021)

Fixed

  • Fixed constructor in direct mode (#383)

Other

  • Fix documentation (#379)

v0.3.10 (February 23, 2021)

Fixed

  • Fixed seriescolor in publication plot (#376)

v0.3.9 (February 20, 2021)

Added

  • Add option to simulate with different incoming state (#372)
  • Added warning for cuts with high dynamic range (#373)

Fixed

  • Fixed seriesalpha in publication plot (#375)

v0.3.8 (January 19, 2021)

Other

  • Documentation improvements (#367) (#369) (#370)

v0.3.7 (January 8, 2021)

Other

  • Documentation improvements (#362) (#363) (#365) (#366)
  • Bump copyright (#364)

v0.3.6 (December 17, 2020)

Other

  • Fix typos (#358)
  • Collapse navigation bar in docs (#359)
  • Update TagBot.yml (#361)

v0.3.5 (November 18, 2020)

Other

  • Update citations (#348)
  • Switch to GitHub actions (#355)

v0.3.4 (August 25, 2020)

Added

  • Added non-uniform distributionally robust risk measure (#328)
  • Added numerical recovery functions (#330)
  • Added experimental StochOptFormat (#332) (#336) (#337) (#341) (#343) (#344)
  • Added entropic risk measure (#347)

Other

  • Documentation improvements (#327) (#333) (#339) (#340)

v0.3.3 (June 19, 2020)

Added

  • Added asynchronous support for price and belief states (#325)
  • Added ForwardPass plug-in system (#320)

Fixed

  • Fix check for probabilities in Markovian graph (#322)

v0.3.2 (April 6, 2020)

Added

Other

  • Improve error message in deterministic equivalent (#312)
  • Update to RecipesBase 1.0 (#313)

v0.3.1 (February 26, 2020)

Fixed

  • Fixed filename in integrality_handlers.jl (#304)

v0.3.0 (February 20, 2020)

Breaking

  • Breaking changes to update to JuMP v0.21 (#300).

v0.2.4 (February 7, 2020)

Added

  • Added a counter for the number of total subproblem solves (#301)

Other

  • Update formatter (#298)
  • Added tests (#299)

v0.2.3 (January 24, 2020)

Added

  • Added support for convex risk measures (#294)

Fixed

  • Fixed bug when subproblem is infeasible (#296)
  • Fixed bug in deterministic equivalent (#297)

Other

  • Added example from IJOC paper (#293)

v0.2.2 (January 10, 2020)

Fixed

  • Fixed flakey time limit in tests (#291)

Other

  • Removed MathOptFormat.jl (#289)
  • Update copyright (#290)

v0.2.1 (December 19, 2019)

Added

  • Added support for approximating a Markov lattice (#282) (#285)
  • Add tools for visualizing the value function (#272) (#286)
  • Write .mof.json files on error (#284)

Other

  • Improve documentation (#281) (#283)
  • Update tests for Julia 1.3 (#287)

v0.2.0 (December 16, 2019)

This version added the asynchronous parallel implementation with a few minor breaking changes in how we iterated internally. It didn't break basic user-facing models, only implementations that implemented some of the extension features. It probably could have been a v1.1 release.

Added

  • Added asynchronous parallel implementation (#277)
  • Added roll-out algorithm for cyclic graphs (#279)

Other

  • Improved error messages in PolicyGraph (#271)
  • Added JuliaFormatter (#273) (#276)
  • Fixed compat bounds (#274) (#278)
  • Added documentation for simulating non-standard graphs (#280)

v0.1.0 (October 17, 2019)

A complete rewrite of SDDP.jl based on the policy graph framework. This was essentially a new package. It has minimal code in common with the previous implementation.

Development started on September 28, 2018 in Kokako.jl, and the code was merged into SDDP.jl on March 14, 2019.

The pull request SDDP.jl#180 lists the 29 issues that the rewrite closed.

v0.0.1 (April 18, 2018)

Initial release. Development had been underway since January 22, 2016 in the StochDualDynamicProgram.jl repository. The last development commit there was April 5, 2017. Work then continued in this repository for a year before the first tagged release.

diff --git a/dev/examples/FAST_hydro_thermal/index.html b/dev/examples/FAST_hydro_thermal/index.html index 8cba8acd5..1f17216ec 100644 --- a/dev/examples/FAST_hydro_thermal/index.html +++ b/dev/examples/FAST_hydro_thermal/index.html @@ -66,13 +66,13 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 0.000000e+00 -1.000000e+01 2.931833e-03 5 1 - 20 0.000000e+00 -1.000000e+01 1.506186e-02 104 1 + 1 0.000000e+00 -1.000000e+01 2.671957e-03 5 1 + 20 0.000000e+00 -1.000000e+01 1.492906e-02 104 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 1.506186e-02 +total time (s) : 1.492906e-02 total solves : 104 best bound : -1.000000e+01 simulation ci : -9.000000e+00 ± 4.474009e+00 numeric issues : 0 -------------------------------------------------------------------- +------------------------------------------------------------------- diff --git a/dev/examples/FAST_production_management/index.html b/dev/examples/FAST_production_management/index.html index d723ef365..7ac87fedb 100644 --- a/dev/examples/FAST_production_management/index.html +++ b/dev/examples/FAST_production_management/index.html @@ -35,4 +35,4 @@ end fast_production_management(; cut_type = SDDP.SINGLE_CUT) -fast_production_management(; cut_type = SDDP.MULTI_CUT)
Test Passed
+fast_production_management(; cut_type = SDDP.MULTI_CUT)
Test Passed
diff --git a/dev/examples/FAST_quickstart/index.html b/dev/examples/FAST_quickstart/index.html index ad47215f6..9197f4c2d 100644 --- a/dev/examples/FAST_quickstart/index.html +++ b/dev/examples/FAST_quickstart/index.html @@ -33,4 +33,4 @@ @test SDDP.calculate_bound(model) == -2 end -fast_quickstart()
Test Passed
+fast_quickstart()
Test Passed
diff --git a/dev/examples/Hydro_thermal/index.html b/dev/examples/Hydro_thermal/index.html index 6e75d29ba..f68438002 100644 --- a/dev/examples/Hydro_thermal/index.html +++ b/dev/examples/Hydro_thermal/index.html @@ -59,15 +59,15 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 2.390000e+02 6.304440e+01 1.043270e-01 183 1 - 31 8.517170e+02 2.346450e+02 1.124316e+00 7701 1 - 53 2.848993e+02 2.362264e+02 2.153164e+00 13899 1 - 72 6.195245e+02 2.363946e+02 3.252110e+00 19692 1 - 83 1.925059e+02 2.364242e+02 4.276050e+00 24345 1 - 100 1.135002e+02 2.364293e+02 4.771086e+00 26640 1 + 1 2.390000e+02 6.304440e+01 1.108370e-01 183 1 + 31 8.517170e+02 2.346450e+02 1.133871e+00 7701 1 + 53 2.848993e+02 2.362264e+02 2.177198e+00 13899 1 + 71 2.110123e+02 2.363880e+02 3.185251e+00 19101 1 + 80 5.565011e+02 2.364217e+02 4.203508e+00 23652 1 + 100 1.135002e+02 2.364293e+02 4.920945e+00 26640 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 4.771086e+00 +total time (s) : 4.920945e+00 total solves : 26640 best bound : 2.364293e+02 simulation ci : 2.593398e+02 ± 5.186931e+01 @@ -75,4 +75,4 @@ -------------------------------------------------------------------

Simulating the policy

After training, we can simulate the policy using SDDP.simulate.

sims = SDDP.simulate(model, 100, [:g_t])
 mu = round(mean([s[1][:g_t] for s in sims]); digits = 2)
 println("On average, $(mu) units of thermal are used in the first stage.")
On average, 1.71 units of thermal are used in the first stage.

Extracting the water values

Finally, we can use SDDP.ValueFunction and SDDP.evaluate to obtain and evaluate the value function at different points in the state-space. Note that since we are minimizing, the price has a negative sign: each additional unit of water leads to a decrease in the expected long-run cost.

V = SDDP.ValueFunction(model[1])
-cost, price = SDDP.evaluate(V; x = 10)
(233.55074662683333, Dict(:x => -0.6602685305287201))
+cost, price = SDDP.evaluate(V; x = 10)
(233.55074662683333, Dict(:x => -0.6602685305287201))
diff --git a/dev/examples/SDDP.log b/dev/examples/SDDP.log index ffd45854c..79125b56b 100644 --- a/dev/examples/SDDP.log +++ b/dev/examples/SDDP.log @@ -25,11 +25,11 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 0.000000e+00 -1.000000e+01 2.931833e-03 5 1 - 20 0.000000e+00 -1.000000e+01 1.506186e-02 104 1 + 1 0.000000e+00 -1.000000e+01 2.671957e-03 5 1 + 20 0.000000e+00 -1.000000e+01 1.492906e-02 104 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 1.506186e-02 +total time (s) : 1.492906e-02 total solves : 104 best bound : -1.000000e+01 simulation ci : -9.000000e+00 ± 4.474009e+00 @@ -61,17 +61,17 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 5 -2.396000e+01 -2.396000e+01 7.242918e-03 52 1 - 10 -4.260000e+01 -2.396000e+01 1.089501e-02 92 1 - 15 -4.260000e+01 -2.396000e+01 1.464987e-02 132 1 - 20 -4.260000e+01 -2.396000e+01 1.874304e-02 172 1 - 25 -2.396000e+01 -2.396000e+01 2.396584e-02 224 1 - 30 -4.260000e+01 -2.396000e+01 2.865195e-02 264 1 - 35 -2.396000e+01 -2.396000e+01 3.365898e-02 304 1 - 40 -2.396000e+01 -2.396000e+01 3.894401e-02 344 1 + 5 -2.396000e+01 -2.396000e+01 7.586956e-03 52 1 + 10 -4.260000e+01 -2.396000e+01 1.142001e-02 92 1 + 15 -4.260000e+01 -2.396000e+01 1.534605e-02 132 1 + 20 -4.260000e+01 -2.396000e+01 1.950407e-02 172 1 + 25 -2.396000e+01 -2.396000e+01 2.481008e-02 224 1 + 30 -4.260000e+01 -2.396000e+01 2.948308e-02 264 1 + 35 -2.396000e+01 -2.396000e+01 3.444910e-02 304 1 + 40 -2.396000e+01 -2.396000e+01 3.982902e-02 344 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 
3.894401e-02 +total time (s) : 3.982902e-02 total solves : 344 best bound : -2.396000e+01 simulation ci : -2.660914e+01 ± 3.908038e+00 @@ -81,21 +81,21 @@ numeric issues : 0 ──────────────────────────────────────────────────────────────────────────────── Time Allocations ─────────────────────── ──────────────────────── - Tot / % measured: 46.8ms / 73.5% 32.8MiB / 20.6% + Tot / % measured: 92.9ms / 37.8% 32.9MiB / 20.7% Section ncalls time %tot avg alloc %tot avg ──────────────────────────────────────────────────────────────────────────────── -backward_pass 40 21.3ms 61.8% 531μs 5.80MiB 85.6% 148KiB - solve_subproblem 160 11.8ms 34.3% 73.7μs 871KiB 12.6% 5.44KiB - get_dual_solution 160 590μs 1.7% 3.69μs 190KiB 2.7% 1.19KiB - prepare_backward... 160 26.3μs 0.1% 165ns 0.00B 0.0% 0.00B -forward_pass 40 7.86ms 22.9% 197μs 768KiB 11.1% 19.2KiB - solve_subproblem 120 6.98ms 20.3% 58.2μs 588KiB 8.5% 4.90KiB - get_dual_solution 120 65.3μs 0.2% 544ns 16.9KiB 0.2% 144B - sample_scenario 40 125μs 0.4% 3.13μs 24.5KiB 0.4% 628B -calculate_bound 40 5.24ms 15.2% 131μs 224KiB 3.2% 5.61KiB - get_dual_solution 40 32.7μs 0.1% 818ns 5.62KiB 0.1% 144B -get_dual_solution 36 17.8μs 0.1% 495ns 5.06KiB 0.1% 144B +backward_pass 40 21.5ms 61.2% 537μs 5.82MiB 85.7% 149KiB + solve_subproblem 160 12.5ms 35.6% 78.1μs 871KiB 12.5% 5.44KiB + get_dual_solution 160 584μs 1.7% 3.65μs 190KiB 2.7% 1.19KiB + prepare_backward... 
160 26.2μs 0.1% 164ns 0.00B 0.0% 0.00B +forward_pass 40 8.25ms 23.5% 206μs 768KiB 11.0% 19.2KiB + solve_subproblem 120 7.37ms 21.0% 61.4μs 588KiB 8.4% 4.90KiB + get_dual_solution 120 68.3μs 0.2% 569ns 16.9KiB 0.2% 144B + sample_scenario 40 137μs 0.4% 3.43μs 24.5KiB 0.4% 628B +calculate_bound 40 5.37ms 15.3% 134μs 224KiB 3.2% 5.61KiB + get_dual_solution 40 30.0μs 0.1% 751ns 5.62KiB 0.1% 144B +get_dual_solution 36 19.0μs 0.1% 527ns 5.06KiB 0.1% 144B ──────────────────────────────────────────────────────────────────────────────── ------------------------------------------------------------------- @@ -123,17 +123,17 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 5 -5.320000e+00 -2.396000e+01 6.703854e-03 52 1 - 10 -5.320000e+00 -2.396000e+01 1.079798e-02 92 1 - 15 -2.396000e+01 -2.396000e+01 1.530695e-02 132 1 - 20 -5.320000e+00 -2.396000e+01 2.040386e-02 172 1 - 25 -4.260000e+01 -2.396000e+01 2.694488e-02 224 1 - 30 -2.396000e+01 -2.396000e+01 3.322196e-02 264 1 - 35 -2.396000e+01 -2.396000e+01 7.716489e-02 304 1 - 40 -2.396000e+01 -2.396000e+01 8.779001e-02 344 1 + 5 -5.320000e+00 -2.396000e+01 7.218122e-03 52 1 + 10 -5.320000e+00 -2.396000e+01 1.132917e-02 92 1 + 15 -2.396000e+01 -2.396000e+01 1.589417e-02 132 1 + 20 -5.320000e+00 -2.396000e+01 2.092409e-02 172 1 + 25 -4.260000e+01 -2.396000e+01 2.759600e-02 224 1 + 30 -2.396000e+01 -2.396000e+01 3.376913e-02 264 1 + 35 -2.396000e+01 -2.396000e+01 4.043603e-02 304 1 + 40 -2.396000e+01 -2.396000e+01 4.783297e-02 344 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 8.779001e-02 +total time (s) : 4.783297e-02 total solves : 344 best bound : -2.396000e+01 simulation ci : -1.957570e+01 ± 3.890802e+00 @@ -143,21 +143,21 @@ numeric issues : 0 
──────────────────────────────────────────────────────────────────────────────── Time Allocations ─────────────────────── ──────────────────────── - Tot / % measured: 92.5ms / 54.1% 38.8MiB / 33.0% + Tot / % measured: 52.0ms / 83.6% 38.7MiB / 32.8% Section ncalls time %tot avg alloc %tot avg ──────────────────────────────────────────────────────────────────────────────── -backward_pass 40 35.0ms 70.0% 876μs 11.8MiB 92.4% 303KiB - solve_subproblem 160 13.5ms 27.0% 84.6μs 872KiB 6.7% 5.45KiB - get_dual_solution 160 662μs 1.3% 4.14μs 190KiB 1.4% 1.19KiB - prepare_backward... 160 31.6μs 0.1% 197ns 0.00B 0.0% 0.00B -forward_pass 40 8.75ms 17.5% 219μs 768KiB 5.9% 19.2KiB - solve_subproblem 120 7.69ms 15.4% 64.1μs 588KiB 4.5% 4.90KiB - get_dual_solution 120 78.0μs 0.2% 650ns 16.9KiB 0.1% 144B - sample_scenario 40 155μs 0.3% 3.87μs 24.2KiB 0.2% 620B -calculate_bound 40 6.23ms 12.5% 156μs 226KiB 1.7% 5.66KiB - get_dual_solution 40 34.5μs 0.1% 862ns 5.62KiB 0.0% 144B -get_dual_solution 36 21.6μs 0.0% 601ns 5.06KiB 0.0% 144B +backward_pass 40 29.6ms 68.2% 741μs 11.7MiB 92.3% 300KiB + solve_subproblem 160 12.5ms 28.8% 78.3μs 872KiB 6.7% 5.45KiB + get_dual_solution 160 595μs 1.4% 3.72μs 190KiB 1.5% 1.19KiB + prepare_backward... 
160 26.7μs 0.1% 167ns 0.00B 0.0% 0.00B +forward_pass 40 8.01ms 18.4% 200μs 768KiB 5.9% 19.2KiB + solve_subproblem 120 7.12ms 16.4% 59.4μs 588KiB 4.5% 4.90KiB + get_dual_solution 120 67.7μs 0.2% 564ns 16.9KiB 0.1% 144B + sample_scenario 40 131μs 0.3% 3.28μs 24.2KiB 0.2% 620B +calculate_bound 40 5.76ms 13.3% 144μs 226KiB 1.7% 5.66KiB + get_dual_solution 40 31.2μs 0.1% 781ns 5.62KiB 0.0% 144B +get_dual_solution 36 19.1μs 0.0% 531ns 5.06KiB 0.0% 144B ──────────────────────────────────────────────────────────────────────────────── ------------------------------------------------------------------- @@ -185,49 +185,49 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 0.000000e+00 -2.500000e+00 2.048969e-03 5 1 - 2 -1.500000e+00 -2.000000e+00 3.030062e-03 14 1 - 3 -1.000000e+00 -2.000000e+00 3.526926e-03 19 1 - 4 -1.000000e+00 -2.000000e+00 4.147053e-03 24 1 - 5 -2.000000e+00 -2.000000e+00 4.786015e-03 29 1 - 6 -2.000000e+00 -2.000000e+00 5.370855e-03 34 1 - 7 -2.000000e+00 -2.000000e+00 5.939960e-03 39 1 - 8 -2.000000e+00 -2.000000e+00 6.516933e-03 44 1 - 9 -2.000000e+00 -2.000000e+00 7.114887e-03 49 1 - 10 -2.000000e+00 -2.000000e+00 7.731915e-03 54 1 - 11 -2.000000e+00 -2.000000e+00 8.327007e-03 59 1 - 12 -2.000000e+00 -2.000000e+00 8.938074e-03 64 1 - 13 -2.000000e+00 -2.000000e+00 9.543896e-03 69 1 - 14 -2.000000e+00 -2.000000e+00 1.015902e-02 74 1 - 15 -2.000000e+00 -2.000000e+00 1.076698e-02 79 1 - 16 -2.000000e+00 -2.000000e+00 1.148295e-02 84 1 - 17 -2.000000e+00 -2.000000e+00 1.212287e-02 89 1 - 18 -2.000000e+00 -2.000000e+00 1.274490e-02 94 1 - 19 -2.000000e+00 -2.000000e+00 1.335692e-02 99 1 - 20 -2.000000e+00 -2.000000e+00 1.399088e-02 104 1 - 21 -2.000000e+00 -2.000000e+00 1.494288e-02 113 1 - 22 -2.000000e+00 -2.000000e+00 1.558685e-02 118 1 - 23 -2.000000e+00 -2.000000e+00 1.628304e-02 123 1 - 
24 -2.000000e+00 -2.000000e+00 4.097295e-02 128 1 - 25 -2.000000e+00 -2.000000e+00 4.169893e-02 133 1 - 26 -2.000000e+00 -2.000000e+00 4.238105e-02 138 1 - 27 -2.000000e+00 -2.000000e+00 4.305196e-02 143 1 - 28 -2.000000e+00 -2.000000e+00 4.376602e-02 148 1 - 29 -2.000000e+00 -2.000000e+00 4.444885e-02 153 1 - 30 -2.000000e+00 -2.000000e+00 4.511595e-02 158 1 - 31 -2.000000e+00 -2.000000e+00 4.581308e-02 163 1 - 32 -2.000000e+00 -2.000000e+00 4.652691e-02 168 1 - 33 -2.000000e+00 -2.000000e+00 4.728103e-02 173 1 - 34 -2.000000e+00 -2.000000e+00 4.801106e-02 178 1 - 35 -2.000000e+00 -2.000000e+00 4.871202e-02 183 1 - 36 -2.000000e+00 -2.000000e+00 4.941297e-02 188 1 - 37 -2.000000e+00 -2.000000e+00 5.012608e-02 193 1 - 38 -2.000000e+00 -2.000000e+00 5.089688e-02 198 1 - 39 -2.000000e+00 -2.000000e+00 5.164194e-02 203 1 - 40 -2.000000e+00 -2.000000e+00 5.243397e-02 208 1 + 1 0.000000e+00 -2.500000e+00 2.017021e-03 5 1 + 2 -1.500000e+00 -2.000000e+00 3.000975e-03 14 1 + 3 -1.000000e+00 -2.000000e+00 3.490925e-03 19 1 + 4 -1.000000e+00 -2.000000e+00 4.055023e-03 24 1 + 5 -2.000000e+00 -2.000000e+00 4.667044e-03 29 1 + 6 -2.000000e+00 -2.000000e+00 5.240917e-03 34 1 + 7 -2.000000e+00 -2.000000e+00 5.825043e-03 39 1 + 8 -2.000000e+00 -2.000000e+00 6.390095e-03 44 1 + 9 -2.000000e+00 -2.000000e+00 6.998062e-03 49 1 + 10 -2.000000e+00 -2.000000e+00 7.586002e-03 54 1 + 11 -2.000000e+00 -2.000000e+00 8.188963e-03 59 1 + 12 -2.000000e+00 -2.000000e+00 8.788109e-03 64 1 + 13 -2.000000e+00 -2.000000e+00 9.386063e-03 69 1 + 14 -2.000000e+00 -2.000000e+00 9.989023e-03 74 1 + 15 -2.000000e+00 -2.000000e+00 1.064396e-02 79 1 + 16 -2.000000e+00 -2.000000e+00 1.125908e-02 84 1 + 17 -2.000000e+00 -2.000000e+00 1.187396e-02 89 1 + 18 -2.000000e+00 -2.000000e+00 1.248312e-02 94 1 + 19 -2.000000e+00 -2.000000e+00 1.310706e-02 99 1 + 20 -2.000000e+00 -2.000000e+00 1.372790e-02 104 1 + 21 -2.000000e+00 -2.000000e+00 1.470494e-02 113 1 + 22 -2.000000e+00 -2.000000e+00 1.535606e-02 118 1 + 
23 -2.000000e+00 -2.000000e+00 1.599693e-02 123 1 + 24 -2.000000e+00 -2.000000e+00 1.664305e-02 128 1 + 25 -2.000000e+00 -2.000000e+00 1.729298e-02 133 1 + 26 -2.000000e+00 -2.000000e+00 1.794791e-02 138 1 + 27 -2.000000e+00 -2.000000e+00 1.864409e-02 143 1 + 28 -2.000000e+00 -2.000000e+00 1.932406e-02 148 1 + 29 -2.000000e+00 -2.000000e+00 1.999593e-02 153 1 + 30 -2.000000e+00 -2.000000e+00 2.066994e-02 158 1 + 31 -2.000000e+00 -2.000000e+00 2.134299e-02 163 1 + 32 -2.000000e+00 -2.000000e+00 2.202010e-02 168 1 + 33 -2.000000e+00 -2.000000e+00 2.274394e-02 173 1 + 34 -2.000000e+00 -2.000000e+00 2.345490e-02 178 1 + 35 -2.000000e+00 -2.000000e+00 2.416205e-02 183 1 + 36 -2.000000e+00 -2.000000e+00 2.486491e-02 188 1 + 37 -2.000000e+00 -2.000000e+00 2.561307e-02 193 1 + 38 -2.000000e+00 -2.000000e+00 2.632594e-02 198 1 + 39 -2.000000e+00 -2.000000e+00 4.851890e-02 203 1 + 40 -2.000000e+00 -2.000000e+00 4.927993e-02 208 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 5.243397e-02 +total time (s) : 4.927993e-02 total solves : 208 best bound : -2.000000e+00 simulation ci : -1.887500e+00 ± 1.189300e-01 @@ -259,15 +259,15 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 2.390000e+02 6.304440e+01 1.043270e-01 183 1 - 31 8.517170e+02 2.346450e+02 1.124316e+00 7701 1 - 53 2.848993e+02 2.362264e+02 2.153164e+00 13899 1 - 72 6.195245e+02 2.363946e+02 3.252110e+00 19692 1 - 83 1.925059e+02 2.364242e+02 4.276050e+00 24345 1 - 100 1.135002e+02 2.364293e+02 4.771086e+00 26640 1 + 1 2.390000e+02 6.304440e+01 1.108370e-01 183 1 + 31 8.517170e+02 2.346450e+02 1.133871e+00 7701 1 + 53 2.848993e+02 2.362264e+02 2.177198e+00 13899 1 + 71 2.110123e+02 2.363880e+02 3.185251e+00 19101 1 + 80 5.565011e+02 2.364217e+02 4.203508e+00 23652 1 + 100 1.135002e+02 
2.364293e+02 4.920945e+00 26640 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 4.771086e+00 +total time (s) : 4.920945e+00 total solves : 26640 best bound : 2.364293e+02 simulation ci : 2.593398e+02 ± 5.186931e+01 @@ -300,19 +300,19 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 -3.878303e+00 -4.434982e+00 1.919680e-01 1400 1 - 20 -4.262885e+00 -4.399265e+00 3.110759e-01 2800 1 - 30 -3.075162e+00 -4.382527e+00 4.385228e-01 4200 1 - 40 -3.761147e+00 -4.369587e+00 5.714748e-01 5600 1 - 50 -4.323162e+00 -4.362199e+00 7.641728e-01 7000 1 - 60 -3.654943e+00 -4.358401e+00 9.058180e-01 8400 1 - 70 -4.010883e+00 -4.357368e+00 1.048839e+00 9800 1 - 80 -4.314412e+00 -4.355714e+00 1.196367e+00 11200 1 - 90 -4.542422e+00 -4.353708e+00 1.348074e+00 12600 1 - 100 -4.178952e+00 -4.351685e+00 1.497844e+00 14000 1 + 10 -3.878303e+00 -4.434982e+00 1.942959e-01 1400 1 + 20 -4.262885e+00 -4.399265e+00 3.131590e-01 2800 1 + 30 -3.075162e+00 -4.382527e+00 4.379020e-01 4200 1 + 40 -3.761147e+00 -4.369587e+00 5.726449e-01 5600 1 + 50 -4.323162e+00 -4.362199e+00 7.124569e-01 7000 1 + 60 -3.654943e+00 -4.358401e+00 8.545969e-01 8400 1 + 70 -4.010883e+00 -4.357368e+00 9.996049e-01 9800 1 + 80 -4.314412e+00 -4.355714e+00 1.203137e+00 11200 1 + 90 -4.542422e+00 -4.353708e+00 1.356603e+00 12600 1 + 100 -4.178952e+00 -4.351685e+00 1.505242e+00 14000 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 1.497844e+00 +total time (s) : 1.505242e+00 total solves : 14000 best bound : -4.351685e+00 simulation ci : -4.246786e+00 ± 8.703997e-02 @@ -344,16 +344,16 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid 
------------------------------------------------------------------- - 10 -1.573154e+00 -1.474247e+00 6.948113e-02 1050 1 - 20 -1.346690e+00 -1.471483e+00 1.082630e-01 1600 1 - 30 -1.308031e+00 -1.471307e+00 1.922801e-01 2650 1 - 40 -1.401200e+00 -1.471167e+00 2.350020e-01 3200 1 - 50 -1.557483e+00 -1.471097e+00 3.224430e-01 4250 1 - 60 -1.534169e+00 -1.471075e+00 3.679681e-01 4800 1 - 65 -1.689864e+00 -1.471075e+00 3.907061e-01 5075 1 + 10 -1.573154e+00 -1.474247e+00 6.942701e-02 1050 1 + 20 -1.346690e+00 -1.471483e+00 1.086791e-01 1600 1 + 30 -1.308031e+00 -1.471307e+00 1.911941e-01 2650 1 + 40 -1.401200e+00 -1.471167e+00 2.329950e-01 3200 1 + 50 -1.557483e+00 -1.471097e+00 3.202279e-01 4250 1 + 60 -1.534169e+00 -1.471075e+00 3.649149e-01 4800 1 + 65 -1.689864e+00 -1.471075e+00 3.875370e-01 5075 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 3.907061e-01 +total time (s) : 3.875370e-01 total solves : 5075 best bound : -1.471075e+00 simulation ci : -1.484094e+00 ± 4.058993e-02 @@ -387,14 +387,14 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 3.455904e+05 3.147347e+05 8.068085e-03 54 1 - 20 3.336455e+05 3.402383e+05 1.415706e-02 104 1 - 30 3.337559e+05 3.403155e+05 2.132511e-02 158 1 - 40 3.337559e+05 3.403155e+05 2.832103e-02 208 1 - 48 3.337559e+05 3.403155e+05 3.439808e-02 248 1 + 10 3.455904e+05 3.147347e+05 8.111954e-03 54 1 + 20 3.336455e+05 3.402383e+05 1.425099e-02 104 1 + 30 3.337559e+05 3.403155e+05 2.154803e-02 158 1 + 40 3.337559e+05 3.403155e+05 2.868700e-02 208 1 + 48 3.337559e+05 3.403155e+05 3.480482e-02 248 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 3.439808e-02 +total time (s) : 3.480482e-02 total solves : 248 best bound : 3.403155e+05 
simulation ci : 1.351676e+08 ± 1.785770e+08 @@ -429,14 +429,14 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 4.403329e+05 3.509666e+05 1.356816e-02 92 1 - 20 4.055335e+05 4.054833e+05 2.453518e-02 172 1 - 30 3.959476e+05 4.067125e+05 3.774214e-02 264 1 - 40 3.959476e+05 4.067125e+05 5.097198e-02 344 1 - 47 3.959476e+05 4.067125e+05 6.104016e-02 400 1 + 10 4.403329e+05 3.509666e+05 1.326609e-02 92 1 + 20 4.055335e+05 4.054833e+05 2.928090e-02 172 1 + 30 3.959476e+05 4.067125e+05 4.300404e-02 264 1 + 40 3.959476e+05 4.067125e+05 5.655909e-02 344 1 + 47 3.959476e+05 4.067125e+05 6.679392e-02 400 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 6.104016e-02 +total time (s) : 6.679392e-02 total solves : 400 best bound : 4.067125e+05 simulation ci : 2.695623e+07 ± 3.645336e+07 @@ -470,11 +470,11 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 8.316000e+03 0.000000e+00 9.398389e-02 14 1 - 40 4.716000e+03 4.074139e+03 2.243450e-01 776 1 + 1 8.316000e+03 0.000000e+00 9.359312e-02 14 1 + 40 4.716000e+03 4.074139e+03 2.206481e-01 776 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 2.243450e-01 +total time (s) : 2.206481e-01 total solves : 776 best bound : 4.074139e+03 simulation ci : 4.477341e+03 ± 6.593738e+02 @@ -507,11 +507,11 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1L 7.000000e+04 6.166667e+04 5.635211e-01 8 1 - 40L 5.500000e+04 
6.250000e+04 8.101661e-01 344 1 + 1L 7.000000e+04 6.166667e+04 5.657871e-01 8 1 + 40L 5.500000e+04 6.250000e+04 8.762000e-01 344 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 8.101661e-01 +total time (s) : 8.762000e-01 total solves : 344 best bound : 6.250000e+04 simulation ci : 6.091250e+04 ± 6.325667e+03 @@ -544,11 +544,11 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 3.000000e+04 6.250000e+04 3.736019e-03 8 1 - 20 6.000000e+04 6.250000e+04 4.383206e-02 172 1 + 1 3.000000e+04 6.250000e+04 3.748894e-03 8 1 + 20 6.000000e+04 6.250000e+04 5.618000e-02 172 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 4.383206e-02 +total time (s) : 5.618000e-02 total solves : 172 best bound : 6.250000e+04 simulation ci : 5.675000e+04 ± 6.792430e+03 @@ -580,11 +580,11 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 7.000000e+04 6.250000e+04 5.366087e-03 5 1 - 10 4.000000e+04 6.250000e+04 1.994109e-02 50 1 + 1 7.000000e+04 6.250000e+04 5.796909e-03 5 1 + 10 4.000000e+04 6.250000e+04 2.139091e-02 50 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 1.994109e-02 +total time (s) : 2.139091e-02 total solves : 50 best bound : 6.250000e+04 simulation ci : 6.300000e+04 ± 1.505505e+04 @@ -617,11 +617,11 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1L 6.000000e+00 9.000000e+00 3.800106e-02 6 1 
- 20L 9.000000e+00 9.000000e+00 7.868314e-02 123 1 + 1L 6.000000e+00 9.000000e+00 3.926110e-02 6 1 + 20L 9.000000e+00 9.000000e+00 7.914114e-02 123 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 7.868314e-02 +total time (s) : 7.914114e-02 total solves : 123 best bound : 9.000000e+00 simulation ci : 8.850000e+00 ± 2.940000e-01 @@ -653,17 +653,17 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 5 -5.684342e-14 1.184830e+00 1.329207e-02 87 1 - 10 5.012507e+01 1.508277e+00 1.996207e-02 142 1 - 15 -1.428571e+00 1.514085e+00 2.714801e-02 197 1 - 20 7.105427e-14 1.514085e+00 3.475404e-02 252 1 - 25 -3.979039e-13 1.514085e+00 8.906603e-02 339 1 - 30 -1.428571e+00 1.514085e+00 9.759498e-02 394 1 - 35 -1.428571e+00 1.514085e+00 1.068540e-01 449 1 - 40 0.000000e+00 1.514085e+00 1.408000e-01 504 1 + 5 -5.684342e-14 1.184830e+00 1.335406e-02 87 1 + 10 5.012507e+01 1.508277e+00 1.991916e-02 142 1 + 15 -1.428571e+00 1.514085e+00 2.718306e-02 197 1 + 20 7.105427e-14 1.514085e+00 3.494310e-02 252 1 + 25 -3.979039e-13 1.514085e+00 9.148598e-02 339 1 + 30 -1.428571e+00 1.514085e+00 1.001492e-01 394 1 + 35 -1.428571e+00 1.514085e+00 1.093709e-01 449 1 + 40 0.000000e+00 1.514085e+00 1.191351e-01 504 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 1.408000e-01 +total time (s) : 1.191351e-01 total solves : 504 best bound : 1.514085e+00 simulation ci : 2.863132e+00 ± 6.778637e+00 @@ -695,14 +695,14 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 1.100409e+00 1.301856e+00 1.554060e-01 278 1 - 20 1.263098e+01 1.278410e+00 
1.754270e-01 428 1 - 30 -5.003795e+01 1.278410e+00 2.089329e-01 706 1 - 40 6.740000e+00 1.278410e+00 2.321758e-01 856 1 - 44 1.111084e+01 1.278410e+00 2.419429e-01 916 1 + 10 1.100409e+00 1.301856e+00 1.572721e-01 278 1 + 20 1.263098e+01 1.278410e+00 1.776161e-01 428 1 + 30 -5.003795e+01 1.278410e+00 2.127352e-01 706 1 + 40 6.740000e+00 1.278410e+00 2.364390e-01 856 1 + 44 1.111084e+01 1.278410e+00 2.464862e-01 916 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 2.419429e-01 +total time (s) : 2.464862e-01 total solves : 916 best bound : 1.278410e+00 simulation ci : 4.090025e+00 ± 5.358375e+00 @@ -734,13 +734,13 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 2.007061e+00 1.281639e+00 3.613997e-02 278 1 - 20 1.426676e+01 1.278410e+00 6.434894e-02 428 1 - 30 1.522212e+00 1.278410e+00 1.093080e-01 706 1 - 40 -4.523775e+01 1.278410e+00 1.482601e-01 856 1 + 10 2.007061e+00 1.281639e+00 3.626704e-02 278 1 + 20 1.426676e+01 1.278410e+00 6.425691e-02 428 1 + 30 1.522212e+00 1.278410e+00 1.093450e-01 706 1 + 40 -4.523775e+01 1.278410e+00 1.479840e-01 856 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 1.482601e-01 +total time (s) : 1.479840e-01 total solves : 856 best bound : 1.278410e+00 simulation ci : 1.019480e+00 ± 6.246418e+00 @@ -774,19 +774,19 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 4.787277e+00 9.346930e+00 1.381464e+00 900 1 - 20 6.374753e+00 1.361934e+01 1.551260e+00 1720 1 - 30 2.813321e+01 1.651297e+01 1.913003e+00 3036 1 - 40 1.654759e+01 1.632970e+01 2.283337e+00 4192 1 - 50 
3.570941e+00 1.846889e+01 2.542172e+00 5020 1 - 60 1.087425e+01 1.890254e+01 2.836630e+00 5808 1 - 70 9.381610e+00 1.940320e+01 3.133666e+00 6540 1 - 80 5.648731e+01 1.962435e+01 3.355766e+00 7088 1 - 90 3.879273e+01 1.981008e+01 3.854946e+00 8180 1 - 100 7.870187e+00 1.997117e+01 4.082399e+00 8664 1 + 10 4.787277e+00 9.346930e+00 1.425595e+00 900 1 + 20 6.374753e+00 1.361934e+01 1.597725e+00 1720 1 + 30 2.813321e+01 1.651297e+01 1.927750e+00 3036 1 + 40 1.654759e+01 1.632970e+01 2.297534e+00 4192 1 + 50 3.570941e+00 1.846889e+01 2.570108e+00 5020 1 + 60 1.087425e+01 1.890254e+01 2.947895e+00 5808 1 + 70 9.381610e+00 1.940320e+01 3.244324e+00 6540 1 + 80 5.648731e+01 1.962435e+01 4.050954e+00 7088 1 + 90 3.879273e+01 1.981008e+01 4.613095e+00 8180 1 + 100 7.870187e+00 1.997117e+01 4.943725e+00 8664 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 4.082399e+00 +total time (s) : 4.943725e+00 total solves : 8664 best bound : 1.997117e+01 simulation ci : 2.275399e+01 ± 4.541987e+00 @@ -821,17 +821,17 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 5 9.000000e+00 9.002950e+00 1.336401e-01 235 1 - 10 4.000000e+00 9.002950e+00 1.538761e-01 310 1 - 15 4.000000e+00 9.002950e+00 1.748371e-01 385 1 - 20 4.000000e+00 9.002950e+00 1.959031e-01 460 1 - 25 1.000000e+01 9.002950e+00 2.689159e-01 695 1 - 30 5.000000e+00 9.002950e+00 2.912011e-01 770 1 - 35 1.000000e+01 9.002950e+00 3.147759e-01 845 1 - 40 5.000000e+00 9.002950e+00 3.384390e-01 920 1 + 5 9.000000e+00 9.002950e+00 1.384249e-01 235 1 + 10 4.000000e+00 9.002950e+00 1.591420e-01 310 1 + 15 4.000000e+00 9.002950e+00 1.806428e-01 385 1 + 20 4.000000e+00 9.002950e+00 2.022069e-01 460 1 + 25 1.000000e+01 9.002950e+00 2.772689e-01 695 1 + 30 5.000000e+00 9.002950e+00 2.999430e-01 770 1 + 35 
1.000000e+01 9.002950e+00 3.237059e-01 845 1 + 40 5.000000e+00 9.002950e+00 3.480070e-01 920 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 3.384390e-01 +total time (s) : 3.480070e-01 total solves : 920 best bound : 9.002950e+00 simulation ci : 6.375000e+00 ± 7.930178e-01 @@ -866,15 +866,15 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 1.000000e+01 6.868919e+00 1.042249e-01 510 1 - 20 2.000000e+00 6.834387e+00 1.568151e-01 720 1 - 30 1.200000e+01 6.834387e+00 2.995811e-01 1230 1 - 40 7.000000e+00 6.823805e+00 3.526468e-01 1440 1 - 50 7.000000e+00 6.823805e+00 4.986689e-01 1950 1 - 60 5.000000e+00 6.823805e+00 5.530329e-01 2160 1 + 10 1.000000e+01 6.868919e+00 1.176000e-01 510 1 + 20 2.000000e+00 6.834387e+00 1.721110e-01 720 1 + 30 1.200000e+01 6.834387e+00 3.190739e-01 1230 1 + 40 7.000000e+00 6.823805e+00 3.739879e-01 1440 1 + 50 7.000000e+00 6.823805e+00 5.250859e-01 1950 1 + 60 5.000000e+00 6.823805e+00 5.812900e-01 2160 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 5.530329e-01 +total time (s) : 5.812900e-01 total solves : 2160 best bound : 6.823805e+00 simulation ci : 6.183333e+00 ± 6.258900e-01 @@ -908,15 +908,15 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 2.549668e+06 2.078257e+06 5.127652e-01 920 1 - 20 5.494568e+05 2.078257e+06 7.066481e-01 1340 1 - 30 4.985879e+04 2.078257e+06 1.253876e+00 2260 1 - 40 3.799447e+06 2.078257e+06 1.449975e+00 2680 1 - 50 1.049867e+06 2.078257e+06 1.994528e+00 3600 1 - 60 3.985191e+04 2.078257e+06 2.250975e+00 4020 1 + 10 2.549668e+06 2.078257e+06 
5.411520e-01 920 1 + 20 5.494568e+05 2.078257e+06 7.411749e-01 1340 1 + 30 4.985879e+04 2.078257e+06 1.292261e+00 2260 1 + 40 3.799447e+06 2.078257e+06 1.495686e+00 2680 1 + 50 1.049867e+06 2.078257e+06 2.063266e+00 3600 1 + 60 3.985191e+04 2.078257e+06 2.269838e+00 4020 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 2.250975e+00 +total time (s) : 2.269838e+00 total solves : 4020 best bound : 2.078257e+06 simulation ci : 2.031697e+06 ± 3.922745e+05 @@ -950,15 +950,15 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10L 4.986663e+04 2.079119e+06 9.420180e-01 920 1 - 20L 3.799878e+06 2.079330e+06 1.653278e+00 1340 1 - 30L 3.003923e+04 2.079457e+06 2.755824e+00 2260 1 - 40L 5.549882e+06 2.079457e+06 3.543915e+00 2680 1 - 50L 2.799466e+06 2.079457e+06 4.709486e+00 3600 1 - 60L 3.549880e+06 2.079457e+06 5.470320e+00 4020 1 + 10L 4.986663e+04 2.079119e+06 9.979272e-01 920 1 + 20L 3.799878e+06 2.079330e+06 1.750540e+00 1340 1 + 30L 3.003923e+04 2.079457e+06 2.912281e+00 2260 1 + 40L 5.549882e+06 2.079457e+06 3.757357e+00 2680 1 + 50L 2.799466e+06 2.079457e+06 4.981082e+00 3600 1 + 60L 3.549880e+06 2.079457e+06 5.788100e+00 4020 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 5.470320e+00 +total time (s) : 5.788100e+00 total solves : 4020 best bound : 2.079457e+06 simulation ci : 2.352204e+06 ± 5.377531e+05 @@ -990,13 +990,13 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 100 2.500000e+01 1.188965e+02 7.887051e-01 1946 1 - 200 2.500000e+01 1.191634e+02 1.009564e+00 3920 1 - 300 0.000000e+00 1.191666e+02 
1.224198e+00 5902 1 - 330 2.500000e+01 1.191667e+02 1.266861e+00 6224 1 + 100 2.500000e+01 1.188965e+02 8.310049e-01 1946 1 + 200 2.500000e+01 1.191634e+02 1.044859e+00 3920 1 + 300 0.000000e+00 1.191666e+02 1.265158e+00 5902 1 + 330 2.500000e+01 1.191667e+02 1.308159e+00 6224 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 1.266861e+00 +total time (s) : 1.308159e+00 total solves : 6224 best bound : 1.191667e+02 simulation ci : 2.158333e+01 ± 3.290252e+00 @@ -1028,12 +1028,12 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 100 0.000000e+00 1.191285e+02 2.892599e-01 2874 1 - 200 2.500000e+00 1.191666e+02 5.641530e-01 4855 1 - 282 7.500000e+00 1.191667e+02 6.984270e-01 5733 1 + 100 0.000000e+00 1.191285e+02 2.998888e-01 2874 1 + 200 2.500000e+00 1.191666e+02 5.349660e-01 4855 1 + 282 7.500000e+00 1.191667e+02 6.698999e-01 5733 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 6.984270e-01 +total time (s) : 6.698999e-01 total solves : 5733 best bound : 1.191667e+02 simulation ci : 2.104610e+01 ± 3.492245e+00 @@ -1064,13 +1064,13 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 4.000000e+00 1.997089e+01 7.021093e-02 1204 1 - 20 8.000000e+00 2.000000e+01 9.124708e-02 1420 1 - 30 1.600000e+01 2.000000e+01 1.582351e-01 2628 1 - 40 8.000000e+00 2.000000e+01 1.809099e-01 2834 1 + 10 4.000000e+00 1.997089e+01 1.078000e-01 1204 1 + 20 8.000000e+00 2.000000e+01 1.287360e-01 1420 1 + 30 1.600000e+01 2.000000e+01 1.973190e-01 2628 1 + 40 8.000000e+00 2.000000e+01 2.192059e-01 2834 1 
------------------------------------------------------------------- status : simulation_stopping -total time (s) : 1.809099e-01 +total time (s) : 2.192059e-01 total solves : 2834 best bound : 2.000000e+01 simulation ci : 1.625000e+01 ± 4.766381e+00 @@ -1101,11 +1101,11 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 1.000000e+00 1.500000e+00 1.583099e-03 3 1 - 40 4.000000e+00 2.000000e+00 4.327106e-02 578 1 + 1 1.000000e+00 1.500000e+00 1.635075e-03 3 1 + 40 4.000000e+00 2.000000e+00 4.523492e-02 578 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 4.327106e-02 +total time (s) : 4.523492e-02 total solves : 578 best bound : 2.000000e+00 simulation ci : 1.950000e+00 ± 5.568095e-01 @@ -1138,137 +1138,135 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 5.250000e+00 4.888859e+00 1.687591e-01 1350 1 - 20 4.350000e+00 4.105855e+00 2.892880e-01 2700 1 - 30 5.000000e+00 4.100490e+00 3.864150e-01 4050 1 - 40 3.500000e+00 4.097376e+00 4.918370e-01 5400 1 - 50 5.250000e+00 4.095859e+00 6.017079e-01 6750 1 - 60 3.643750e+00 4.093342e+00 7.160020e-01 8100 1 - 70 2.643750e+00 4.091818e+00 8.286331e-01 9450 1 - 80 5.087500e+00 4.091591e+00 9.450080e-01 10800 1 - 90 5.062500e+00 4.091309e+00 1.061172e+00 12150 1 - 100 4.843750e+00 4.087004e+00 1.186036e+00 13500 1 - 110 3.437500e+00 4.086094e+00 1.311120e+00 14850 1 - 120 3.375000e+00 4.085926e+00 1.437098e+00 16200 1 - 130 5.025000e+00 4.085866e+00 1.565217e+00 17550 1 - 140 5.000000e+00 4.085734e+00 1.693743e+00 18900 1 - 150 3.500000e+00 4.085655e+00 1.824377e+00 20250 1 - 160 4.281250e+00 4.085454e+00 1.953747e+00 21600 1 - 170 
4.562500e+00 4.085425e+00 2.085516e+00 22950 1 - 180 5.768750e+00 4.085425e+00 2.216371e+00 24300 1 - 190 3.468750e+00 4.085359e+00 2.353678e+00 25650 1 - 200 4.131250e+00 4.085225e+00 2.489963e+00 27000 1 - 210 4.512500e+00 4.085157e+00 2.662310e+00 28350 1 - 220 4.900000e+00 4.085153e+00 2.796681e+00 29700 1 - 230 4.025000e+00 4.085134e+00 2.934796e+00 31050 1 - 240 4.468750e+00 4.085116e+00 3.078804e+00 32400 1 - 250 4.062500e+00 4.085075e+00 3.218615e+00 33750 1 - 260 4.875000e+00 4.085037e+00 3.361501e+00 35100 1 - 270 3.850000e+00 4.085011e+00 3.505550e+00 36450 1 - 280 4.912500e+00 4.084992e+00 3.648949e+00 37800 1 - 290 2.987500e+00 4.084986e+00 3.797585e+00 39150 1 - 300 3.825000e+00 4.084957e+00 3.948527e+00 40500 1 - 310 3.250000e+00 4.084911e+00 4.099108e+00 41850 1 - 320 3.600000e+00 4.084896e+00 4.247181e+00 43200 1 - 330 3.925000e+00 4.084896e+00 4.385317e+00 44550 1 - 340 4.500000e+00 4.084893e+00 4.531839e+00 45900 1 - 350 5.000000e+00 4.084891e+00 4.678074e+00 47250 1 - 360 3.075000e+00 4.084866e+00 4.823599e+00 48600 1 - 370 3.500000e+00 4.084861e+00 4.976336e+00 49950 1 - 380 3.356250e+00 4.084857e+00 5.132696e+00 51300 1 - 390 5.500000e+00 4.084846e+00 5.293799e+00 52650 1 - 400 4.475000e+00 4.084846e+00 5.444030e+00 54000 1 - 410 3.750000e+00 4.084843e+00 5.597501e+00 55350 1 - 420 3.687500e+00 4.084843e+00 5.778766e+00 56700 1 - 430 4.337500e+00 4.084825e+00 5.939407e+00 58050 1 - 440 5.750000e+00 4.084825e+00 6.085826e+00 59400 1 - 450 4.925000e+00 4.084792e+00 6.249531e+00 60750 1 - 460 3.600000e+00 4.084792e+00 6.406852e+00 62100 1 - 470 4.387500e+00 4.084792e+00 6.560811e+00 63450 1 - 480 4.000000e+00 4.084792e+00 6.725306e+00 64800 1 - 490 2.975000e+00 4.084788e+00 6.882730e+00 66150 1 - 500 3.125000e+00 4.084788e+00 7.038350e+00 67500 1 - 510 4.250000e+00 4.084788e+00 7.202558e+00 68850 1 - 520 4.512500e+00 4.084786e+00 7.356405e+00 70200 1 - 530 3.875000e+00 4.084786e+00 7.520775e+00 71550 1 - 540 4.387500e+00 4.084781e+00 7.685258e+00 
72900 1 - 550 5.281250e+00 4.084780e+00 7.851898e+00 74250 1 - 560 4.650000e+00 4.084780e+00 8.006791e+00 75600 1 - 570 3.062500e+00 4.084780e+00 8.165572e+00 76950 1 - 580 3.187500e+00 4.084780e+00 8.318219e+00 78300 1 - 590 3.812500e+00 4.084780e+00 8.467485e+00 79650 1 - 600 3.637500e+00 4.084774e+00 8.628129e+00 81000 1 - 610 3.950000e+00 4.084765e+00 8.814753e+00 82350 1 - 620 4.625000e+00 4.084760e+00 8.973007e+00 83700 1 - 630 4.218750e+00 4.084760e+00 9.134514e+00 85050 1 - 640 3.025000e+00 4.084755e+00 9.300601e+00 86400 1 - 650 2.993750e+00 4.084751e+00 9.454259e+00 87750 1 - 660 3.262500e+00 4.084746e+00 9.613243e+00 89100 1 - 670 3.625000e+00 4.084746e+00 9.777580e+00 90450 1 - 680 2.981250e+00 4.084746e+00 9.943775e+00 91800 1 - 690 4.187500e+00 4.084746e+00 1.010451e+01 93150 1 - 700 4.500000e+00 4.084746e+00 1.026283e+01 94500 1 - 710 3.225000e+00 4.084746e+00 1.042250e+01 95850 1 - 720 4.375000e+00 4.084746e+00 1.058527e+01 97200 1 - 730 2.650000e+00 4.084746e+00 1.075253e+01 98550 1 - 740 3.250000e+00 4.084746e+00 1.091233e+01 99900 1 - 750 4.725000e+00 4.084746e+00 1.108550e+01 101250 1 - 760 3.375000e+00 4.084746e+00 1.125864e+01 102600 1 - 770 5.375000e+00 4.084746e+00 1.142763e+01 103950 1 - 780 4.068750e+00 4.084746e+00 1.159928e+01 105300 1 - 790 4.412500e+00 4.084746e+00 1.179781e+01 106650 1 - 800 4.350000e+00 4.084746e+00 1.197417e+01 108000 1 - 810 5.887500e+00 4.084746e+00 1.214956e+01 109350 1 - 820 4.912500e+00 4.084746e+00 1.231838e+01 110700 1 - 830 4.387500e+00 4.084746e+00 1.248129e+01 112050 1 - 840 3.675000e+00 4.084746e+00 1.265265e+01 113400 1 - 850 5.375000e+00 4.084746e+00 1.282129e+01 114750 1 - 860 3.562500e+00 4.084746e+00 1.299733e+01 116100 1 - 870 3.075000e+00 4.084746e+00 1.317557e+01 117450 1 - 880 3.625000e+00 4.084746e+00 1.334710e+01 118800 1 - 890 2.937500e+00 4.084746e+00 1.351453e+01 120150 1 - 900 4.450000e+00 4.084746e+00 1.369199e+01 121500 1 - 910 4.200000e+00 4.084746e+00 1.386539e+01 122850 1 - 920 
3.687500e+00 4.084746e+00 1.404455e+01 124200 1 - 930 4.725000e+00 4.084746e+00 1.422308e+01 125550 1 - 940 4.018750e+00 4.084746e+00 1.439535e+01 126900 1 - 950 4.675000e+00 4.084746e+00 1.456329e+01 128250 1 - 960 3.375000e+00 4.084746e+00 1.475449e+01 129600 1 - 970 3.812500e+00 4.084746e+00 1.491792e+01 130950 1 - 980 3.112500e+00 4.084746e+00 1.508757e+01 132300 1 - 990 3.600000e+00 4.084746e+00 1.525910e+01 133650 1 - 1000 5.500000e+00 4.084746e+00 1.543728e+01 135000 1 - 1010 3.187500e+00 4.084746e+00 1.560918e+01 136350 1 - 1020 4.900000e+00 4.084746e+00 1.578396e+01 137700 1 - 1030 3.637500e+00 4.084746e+00 1.597247e+01 139050 1 - 1040 3.975000e+00 4.084746e+00 1.614785e+01 140400 1 - 1050 4.750000e+00 4.084746e+00 1.632423e+01 141750 1 - 1060 4.437500e+00 4.084746e+00 1.652192e+01 143100 1 - 1070 5.000000e+00 4.084746e+00 1.670562e+01 144450 1 - 1080 4.143750e+00 4.084746e+00 1.689072e+01 145800 1 - 1090 5.625000e+00 4.084746e+00 1.708490e+01 147150 1 - 1100 3.475000e+00 4.084746e+00 1.726644e+01 148500 1 - 1110 4.156250e+00 4.084746e+00 1.745366e+01 149850 1 - 1120 4.450000e+00 4.084746e+00 1.763757e+01 151200 1 - 1130 3.312500e+00 4.084741e+00 1.782470e+01 152550 1 - 1140 5.375000e+00 4.084741e+00 1.800408e+01 153900 1 - 1150 4.800000e+00 4.084737e+00 1.819346e+01 155250 1 - 1160 3.300000e+00 4.084737e+00 1.837392e+01 156600 1 - 1170 4.356250e+00 4.084737e+00 1.855866e+01 157950 1 - 1180 3.900000e+00 4.084737e+00 1.874676e+01 159300 1 - 1190 4.450000e+00 4.084737e+00 1.893841e+01 160650 1 - 1200 5.156250e+00 4.084737e+00 1.914838e+01 162000 1 - 1210 4.500000e+00 4.084737e+00 1.932547e+01 163350 1 - 1220 4.875000e+00 4.084737e+00 1.952324e+01 164700 1 - 1230 4.000000e+00 4.084737e+00 1.970620e+01 166050 1 - 1240 4.062500e+00 4.084737e+00 1.989083e+01 167400 1 - 1246 3.000000e+00 4.084737e+00 2.000504e+01 168210 1 + 10 5.250000e+00 4.888859e+00 1.716678e-01 1350 1 + 20 4.350000e+00 4.105855e+00 2.587609e-01 2700 1 + 30 5.000000e+00 4.100490e+00 
3.567028e-01 4050 1 + 40 3.500000e+00 4.097376e+00 4.626410e-01 5400 1 + 50 5.250000e+00 4.095859e+00 5.744269e-01 6750 1 + 60 3.643750e+00 4.093342e+00 6.895239e-01 8100 1 + 70 2.643750e+00 4.091818e+00 8.075178e-01 9450 1 + 80 5.087500e+00 4.091591e+00 9.248099e-01 10800 1 + 90 5.062500e+00 4.091309e+00 1.043386e+00 12150 1 + 100 4.843750e+00 4.087004e+00 1.171260e+00 13500 1 + 110 3.437500e+00 4.086094e+00 1.298146e+00 14850 1 + 120 3.375000e+00 4.085926e+00 1.466470e+00 16200 1 + 130 5.025000e+00 4.085866e+00 1.596104e+00 17550 1 + 140 5.000000e+00 4.085734e+00 1.725039e+00 18900 1 + 150 3.500000e+00 4.085655e+00 1.858264e+00 20250 1 + 160 4.281250e+00 4.085454e+00 1.986735e+00 21600 1 + 170 4.562500e+00 4.085425e+00 2.118574e+00 22950 1 + 180 5.768750e+00 4.085425e+00 2.250475e+00 24300 1 + 190 3.468750e+00 4.085359e+00 2.390340e+00 25650 1 + 200 4.131250e+00 4.085225e+00 2.527694e+00 27000 1 + 210 4.512500e+00 4.085157e+00 2.662507e+00 28350 1 + 220 4.900000e+00 4.085153e+00 2.801828e+00 29700 1 + 230 4.025000e+00 4.085134e+00 2.941956e+00 31050 1 + 240 4.468750e+00 4.085116e+00 3.087785e+00 32400 1 + 250 4.062500e+00 4.085075e+00 3.228552e+00 33750 1 + 260 4.875000e+00 4.085037e+00 3.371179e+00 35100 1 + 270 3.850000e+00 4.085011e+00 3.514160e+00 36450 1 + 280 4.912500e+00 4.084992e+00 3.658795e+00 37800 1 + 290 2.987500e+00 4.084986e+00 3.809290e+00 39150 1 + 300 3.825000e+00 4.084957e+00 3.960671e+00 40500 1 + 310 3.250000e+00 4.084911e+00 4.111480e+00 41850 1 + 320 3.600000e+00 4.084896e+00 4.262457e+00 43200 1 + 330 3.925000e+00 4.084896e+00 4.402288e+00 44550 1 + 340 4.500000e+00 4.084893e+00 4.550622e+00 45900 1 + 350 5.000000e+00 4.084891e+00 4.735421e+00 47250 1 + 360 3.075000e+00 4.084866e+00 4.884727e+00 48600 1 + 370 3.500000e+00 4.084861e+00 5.037859e+00 49950 1 + 380 3.356250e+00 4.084857e+00 5.191349e+00 51300 1 + 390 5.500000e+00 4.084846e+00 5.357842e+00 52650 1 + 400 4.475000e+00 4.084846e+00 5.517082e+00 54000 1 + 410 3.750000e+00 
4.084843e+00 5.675294e+00 55350 1 + 420 3.687500e+00 4.084843e+00 5.839933e+00 56700 1 + 430 4.337500e+00 4.084825e+00 6.008952e+00 58050 1 + 440 5.750000e+00 4.084825e+00 6.159086e+00 59400 1 + 450 4.925000e+00 4.084792e+00 6.328141e+00 60750 1 + 460 3.600000e+00 4.084792e+00 6.491750e+00 62100 1 + 470 4.387500e+00 4.084792e+00 6.648771e+00 63450 1 + 480 4.000000e+00 4.084792e+00 6.817950e+00 64800 1 + 490 2.975000e+00 4.084788e+00 6.977072e+00 66150 1 + 500 3.125000e+00 4.084788e+00 7.132193e+00 67500 1 + 510 4.250000e+00 4.084788e+00 7.302382e+00 68850 1 + 520 4.512500e+00 4.084786e+00 7.458969e+00 70200 1 + 530 3.875000e+00 4.084786e+00 7.624709e+00 71550 1 + 540 4.387500e+00 4.084781e+00 7.789676e+00 72900 1 + 550 5.281250e+00 4.084780e+00 7.960666e+00 74250 1 + 560 4.650000e+00 4.084780e+00 8.120844e+00 75600 1 + 570 3.062500e+00 4.084780e+00 8.310673e+00 76950 1 + 580 3.187500e+00 4.084780e+00 8.465287e+00 78300 1 + 590 3.812500e+00 4.084780e+00 8.616217e+00 79650 1 + 600 3.637500e+00 4.084774e+00 8.781291e+00 81000 1 + 610 3.950000e+00 4.084765e+00 8.943795e+00 82350 1 + 620 4.625000e+00 4.084760e+00 9.109773e+00 83700 1 + 630 4.218750e+00 4.084760e+00 9.278603e+00 85050 1 + 640 3.025000e+00 4.084755e+00 9.447766e+00 86400 1 + 650 2.993750e+00 4.084751e+00 9.614077e+00 87750 1 + 660 3.262500e+00 4.084746e+00 9.779014e+00 89100 1 + 670 3.625000e+00 4.084746e+00 9.949214e+00 90450 1 + 680 2.981250e+00 4.084746e+00 1.013076e+01 91800 1 + 690 4.187500e+00 4.084746e+00 1.030210e+01 93150 1 + 700 4.500000e+00 4.084746e+00 1.046893e+01 94500 1 + 710 3.225000e+00 4.084746e+00 1.064120e+01 95850 1 + 720 4.375000e+00 4.084746e+00 1.081753e+01 97200 1 + 730 2.650000e+00 4.084746e+00 1.100032e+01 98550 1 + 740 3.250000e+00 4.084746e+00 1.117048e+01 99900 1 + 750 4.725000e+00 4.084746e+00 1.135055e+01 101250 1 + 760 3.375000e+00 4.084746e+00 1.155643e+01 102600 1 + 770 5.375000e+00 4.084746e+00 1.173506e+01 103950 1 + 780 4.068750e+00 4.084746e+00 1.191593e+01 105300 1 
+ 790 4.412500e+00 4.084746e+00 1.210244e+01 106650 1 + 800 4.350000e+00 4.084746e+00 1.228223e+01 108000 1 + 810 5.887500e+00 4.084746e+00 1.246085e+01 109350 1 + 820 4.912500e+00 4.084746e+00 1.263127e+01 110700 1 + 830 4.387500e+00 4.084746e+00 1.279979e+01 112050 1 + 840 3.675000e+00 4.084746e+00 1.297574e+01 113400 1 + 850 5.375000e+00 4.084746e+00 1.314960e+01 114750 1 + 860 3.562500e+00 4.084746e+00 1.332984e+01 116100 1 + 870 3.075000e+00 4.084746e+00 1.351357e+01 117450 1 + 880 3.625000e+00 4.084746e+00 1.368602e+01 118800 1 + 890 2.937500e+00 4.084746e+00 1.385575e+01 120150 1 + 900 4.450000e+00 4.084746e+00 1.403830e+01 121500 1 + 910 4.200000e+00 4.084746e+00 1.422259e+01 122850 1 + 920 3.687500e+00 4.084746e+00 1.440789e+01 124200 1 + 930 4.725000e+00 4.084746e+00 1.461609e+01 125550 1 + 940 4.018750e+00 4.084746e+00 1.479063e+01 126900 1 + 950 4.675000e+00 4.084746e+00 1.497216e+01 128250 1 + 960 3.375000e+00 4.084746e+00 1.514355e+01 129600 1 + 970 3.812500e+00 4.084746e+00 1.531842e+01 130950 1 + 980 3.112500e+00 4.084746e+00 1.549276e+01 132300 1 + 990 3.600000e+00 4.084746e+00 1.566755e+01 133650 1 + 1000 5.500000e+00 4.084746e+00 1.584500e+01 135000 1 + 1010 3.187500e+00 4.084746e+00 1.601833e+01 136350 1 + 1020 4.900000e+00 4.084746e+00 1.619413e+01 137700 1 + 1030 3.637500e+00 4.084746e+00 1.638374e+01 139050 1 + 1040 3.975000e+00 4.084746e+00 1.656010e+01 140400 1 + 1050 4.750000e+00 4.084746e+00 1.673823e+01 141750 1 + 1060 4.437500e+00 4.084746e+00 1.693251e+01 143100 1 + 1070 5.000000e+00 4.084746e+00 1.713577e+01 144450 1 + 1080 4.143750e+00 4.084746e+00 1.732494e+01 145800 1 + 1090 5.625000e+00 4.084746e+00 1.750154e+01 147150 1 + 1100 3.475000e+00 4.084746e+00 1.768629e+01 148500 1 + 1110 4.156250e+00 4.084746e+00 1.787517e+01 149850 1 + 1120 4.450000e+00 4.084746e+00 1.806196e+01 151200 1 + 1130 3.312500e+00 4.084741e+00 1.824955e+01 152550 1 + 1140 5.375000e+00 4.084741e+00 1.842986e+01 153900 1 + 1150 4.800000e+00 4.084737e+00 
1.862470e+01 155250 1 + 1160 3.300000e+00 4.084737e+00 1.880885e+01 156600 1 + 1170 4.356250e+00 4.084737e+00 1.899097e+01 157950 1 + 1180 3.900000e+00 4.084737e+00 1.918029e+01 159300 1 + 1190 4.450000e+00 4.084737e+00 1.939933e+01 160650 1 + 1200 5.156250e+00 4.084737e+00 1.959207e+01 162000 1 + 1210 4.500000e+00 4.084737e+00 1.977063e+01 163350 1 + 1220 4.875000e+00 4.084737e+00 1.996918e+01 164700 1 + 1222 4.562500e+00 4.084737e+00 2.000839e+01 164970 1 ------------------------------------------------------------------- status : time_limit -total time (s) : 2.000504e+01 -total solves : 168210 +total time (s) : 2.000839e+01 +total solves : 164970 best bound : 4.084737e+00 -simulation ci : 4.071445e+00 ± 4.036229e-02 +simulation ci : 4.071580e+00 ± 4.071235e-02 numeric issues : 0 ------------------------------------------------------------------- @@ -1298,29 +1296,30 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 4.512500e+00 4.066874e+00 1.945758e-01 1350 1 - 20 5.062500e+00 4.040569e+00 5.647199e-01 2700 1 - 30 4.968750e+00 4.039400e+00 1.092341e+00 4050 1 - 40 4.125000e+00 4.039286e+00 1.748799e+00 5400 1 - 50 3.925000e+00 4.039078e+00 3.279388e+00 6750 1 - 60 3.875000e+00 4.039004e+00 4.238859e+00 8100 1 - 70 3.918750e+00 4.039008e+00 5.358448e+00 9450 1 - 80 3.600000e+00 4.038911e+00 6.510171e+00 10800 1 - 90 4.250000e+00 4.038874e+00 7.829152e+00 12150 1 - 100 5.400000e+00 4.038820e+00 9.207392e+00 13500 1 - 110 3.000000e+00 4.038795e+00 1.072500e+01 14850 1 - 120 3.000000e+00 4.038812e+00 1.232206e+01 16200 1 - 130 2.993750e+00 4.038782e+00 1.403743e+01 17550 1 - 140 4.406250e+00 4.038770e+00 1.594878e+01 18900 1 - 150 5.625000e+00 4.038777e+00 1.789192e+01 20250 1 - 160 3.081250e+00 4.038772e+00 1.990137e+01 21600 1 - 161 4.875000e+00 4.038772e+00 2.012187e+01 21735 1 + 10 4.925000e+00 
6.001820e+00 1.997499e-01 1350 1 + 20 4.056250e+00 5.577813e+00 6.041179e-01 2700 1 + 30 3.000000e+00 4.731415e+00 1.068949e+00 4050 1 + 40 5.025000e+00 4.043470e+00 1.588734e+00 5400 1 + 50 4.250000e+00 4.039874e+00 2.212946e+00 6750 1 + 60 4.312500e+00 4.039177e+00 2.971758e+00 8100 1 + 70 4.525000e+00 4.039054e+00 3.933827e+00 9450 1 + 80 3.687500e+00 4.039051e+00 5.006665e+00 10800 1 + 90 2.987500e+00 4.038970e+00 6.187553e+00 12150 1 + 100 3.225000e+00 4.038843e+00 7.497448e+00 13500 1 + 110 4.500000e+00 4.038843e+00 8.892059e+00 14850 1 + 120 5.750000e+00 4.038813e+00 1.041892e+01 16200 1 + 130 3.700000e+00 4.038777e+00 1.202767e+01 17550 1 + 140 3.800000e+00 4.038777e+00 1.373855e+01 18900 1 + 150 2.687500e+00 4.038777e+00 1.553139e+01 20250 1 + 160 4.737500e+00 4.038777e+00 1.758916e+01 21600 1 + 170 4.550000e+00 4.038777e+00 1.976712e+01 22950 1 + 172 3.050000e+00 4.038777e+00 2.014962e+01 23220 1 ------------------------------------------------------------------- status : time_limit -total time (s) : 2.012187e+01 -total solves : 21735 -best bound : 4.038772e+00 -simulation ci : 4.072826e+00 ± 1.208714e-01 +total time (s) : 2.014962e+01 +total solves : 23220 +best bound : 4.038777e+00 +simulation ci : 4.085411e+00 ± 1.137697e-01 numeric issues : 0 ------------------------------------------------------------------- @@ -1351,21 +1350,20 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 2.987639e+00 1.163194e+00 3.866601e-01 1680 1 - 20 2.445919e+00 1.164572e+00 4.845541e-01 2560 1 - 30 3.191503e+00 1.164572e+00 8.795722e-01 4240 1 - 40 2.867005e+00 1.164617e+00 9.841540e-01 5120 1 - 50 2.788348e+00 1.164617e+00 1.385857e+00 6800 1 - 60 3.120693e+00 1.166901e+00 1.493642e+00 7680 1 - 70 2.863623e+00 1.166901e+00 1.895165e+00 9360 1 - 80 3.556423e+00 1.166901e+00 2.014104e+00 10240 1 - 84 
3.052539e+00 1.166901e+00 2.057870e+00 10592 1 + 10 2.738921e+00 1.165036e+00 3.904660e-01 1680 1 + 20 3.573852e+00 1.165415e+00 4.874790e-01 2560 1 + 30 3.448800e+00 1.167299e+00 8.816559e-01 4240 1 + 40 2.671985e+00 1.167299e+00 9.822218e-01 5120 1 + 50 3.608689e+00 1.167299e+00 1.380473e+00 6800 1 + 60 2.737611e+00 1.167299e+00 1.482815e+00 7680 1 + 70 3.529174e+00 1.167299e+00 1.894601e+00 9360 1 + 80 3.538601e+00 1.167299e+00 2.025845e+00 10240 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 2.057870e+00 -total solves : 10592 -best bound : 1.166901e+00 -simulation ci : 3.160067e+00 ± 7.484453e-02 +total time (s) : 2.025845e+00 +total solves : 10240 +best bound : 1.167299e+00 +simulation ci : 3.210392e+00 ± 9.260012e-02 numeric issues : 0 ------------------------------------------------------------------- @@ -1397,16 +1395,16 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 -4.700000e+01 -5.809615e+01 3.250408e-02 78 1 - 20 -9.800000e+01 -5.809615e+01 6.497502e-02 148 1 - 30 -9.800000e+01 -5.809615e+01 1.073740e-01 226 1 - 40 -4.000000e+01 -5.809615e+01 1.444240e-01 296 1 + 10 -9.800000e+01 -5.809615e+01 3.199697e-02 78 1 + 20 -4.000000e+01 -5.809615e+01 6.564403e-02 148 1 + 30 -9.800000e+01 -5.809615e+01 1.043379e-01 226 1 + 40 -4.700000e+01 -5.809615e+01 1.380351e-01 296 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 1.444240e-01 +total time (s) : 1.380351e-01 total solves : 296 best bound : -5.809615e+01 -simulation ci : -5.676250e+01 ± 8.129290e+00 +simulation ci : -4.968750e+01 ± 6.340715e+00 numeric issues : 0 ------------------------------------------------------------------- @@ -1438,16 +1436,16 @@ numerical stability report 
------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 -9.800000e+01 -6.196125e+01 4.038405e-02 138 1 - 20 -8.200000e+01 -6.196125e+01 7.790899e-02 258 1 - 30 -4.000000e+01 -6.196125e+01 1.283491e-01 396 1 - 40 -9.800000e+01 -6.196125e+01 1.703582e-01 516 1 + 10 -4.700000e+01 -6.196125e+01 3.930807e-02 138 1 + 20 -4.000000e+01 -6.196125e+01 7.583189e-02 258 1 + 30 -4.700000e+01 -6.196125e+01 1.254940e-01 396 1 + 40 -4.700000e+01 -6.196125e+01 1.631250e-01 516 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 1.703582e-01 +total time (s) : 1.631250e-01 total solves : 516 best bound : -6.196125e+01 -simulation ci : -5.646250e+01 ± 6.296744e+00 +simulation ci : -5.131250e+01 ± 5.474059e+00 numeric issues : 0 ------------------------------------------------------------------- @@ -1479,16 +1477,16 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 -7.900000e+01 -6.546793e+01 7.706094e-02 462 1 - 20 -7.500000e+01 -6.546793e+01 1.395438e-01 852 1 - 30 -7.500000e+01 -6.546793e+01 2.574949e-01 1314 1 - 40 -6.300000e+01 -6.546793e+01 3.206749e-01 1704 1 + 10 -6.300000e+01 -6.546793e+01 7.825494e-02 462 1 + 20 -5.600000e+01 -6.546793e+01 1.369660e-01 852 1 + 30 -4.000000e+01 -6.546793e+01 2.524149e-01 1314 1 + 40 -7.000000e+01 -6.546793e+01 3.119290e-01 1704 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 3.206749e-01 +total time (s) : 3.119290e-01 total solves : 1704 best bound : -6.546793e+01 -simulation ci : -6.596250e+01 ± 4.258497e+00 +simulation ci : -6.001250e+01 ± 5.827677e+00 numeric issues : 0 
------------------------------------------------------------------- @@ -1519,14 +1517,14 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1L 6.000000e+00 1.200000e+01 4.188991e-02 11 1 - 40L 6.000000e+00 8.000000e+00 4.264100e-01 602 1 + 1L 9.000000e+00 1.422222e+01 4.241514e-02 11 1 + 40L 1.200000e+01 8.000000e+00 5.433002e-01 602 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 4.264100e-01 +total time (s) : 5.433002e-01 total solves : 602 best bound : 8.000000e+00 -simulation ci : 8.100000e+00 ± 9.225303e-01 +simulation ci : 7.725000e+00 ± 8.123457e-01 numeric issues : 0 ------------------------------------------------------------------- @@ -1557,14 +1555,14 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 -9.800000e+04 4.922260e+05 8.616400e-02 6 1 - 40 4.882000e+04 1.083900e+05 1.159940e-01 240 1 + 1 -9.800000e+04 4.922260e+05 8.789086e-02 6 1 + 40 4.882000e+04 1.083900e+05 1.170979e-01 240 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 1.159940e-01 +total time (s) : 1.170979e-01 total solves : 240 best bound : 1.083900e+05 -simulation ci : 9.479342e+04 ± 1.998027e+04 +simulation ci : 9.422433e+04 ± 2.039856e+04 numeric issues : 0 ------------------------------------------------------------------- diff --git a/dev/examples/SDDP_0.0.log b/dev/examples/SDDP_0.0.log index cbde0f3cd..ff2fc275b 100644 --- a/dev/examples/SDDP_0.0.log +++ b/dev/examples/SDDP_0.0.log @@ -19,11 +19,11 @@ subproblem structure ------------------------------------------------------------------- iteration simulation bound time (s) 
solves pid ------------------------------------------------------------------- - 1 0.000000e+00 0.000000e+00 8.840084e-03 36 1 - 10 0.000000e+00 0.000000e+00 2.838302e-02 360 1 + 1 0.000000e+00 0.000000e+00 9.399176e-03 36 1 + 10 0.000000e+00 0.000000e+00 2.939820e-02 360 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 2.838302e-02 +total time (s) : 2.939820e-02 total solves : 360 best bound : 0.000000e+00 simulation ci : 0.000000e+00 ± 0.000000e+00 diff --git a/dev/examples/SDDP_0.0625.log b/dev/examples/SDDP_0.0625.log index 0c0a1a97a..46ca941d8 100644 --- a/dev/examples/SDDP_0.0625.log +++ b/dev/examples/SDDP_0.0625.log @@ -20,11 +20,11 @@ subproblem structure ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 3.437500e+01 5.937500e+01 3.201008e-03 3375 1 - 10 3.750000e+01 5.938557e+01 3.138804e-02 3699 1 + 1 3.437500e+01 5.937500e+01 3.830910e-03 3375 1 + 10 3.750000e+01 5.938557e+01 3.281999e-02 3699 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 3.138804e-02 +total time (s) : 3.281999e-02 total solves : 3699 best bound : 5.938557e+01 simulation ci : 5.906250e+01 ± 1.352595e+01 diff --git a/dev/examples/SDDP_0.125.log b/dev/examples/SDDP_0.125.log index 0e8eb8877..d4e27777f 100644 --- a/dev/examples/SDDP_0.125.log +++ b/dev/examples/SDDP_0.125.log @@ -20,11 +20,11 @@ subproblem structure ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 1.675000e+02 1.129545e+02 2.846956e-03 1891 1 - 10 1.362500e+02 1.129771e+02 3.016996e-02 2215 1 + 1 1.675000e+02 1.129545e+02 3.082991e-03 1891 1 + 10 1.362500e+02 1.129771e+02 3.142214e-02 2215 1 
------------------------------------------------------------------- status : iteration_limit -total time (s) : 3.016996e-02 +total time (s) : 3.142214e-02 total solves : 2215 best bound : 1.129771e+02 simulation ci : 1.176375e+02 ± 1.334615e+01 diff --git a/dev/examples/SDDP_0.25.log b/dev/examples/SDDP_0.25.log index 6651ea6e2..b7a77afa8 100644 --- a/dev/examples/SDDP_0.25.log +++ b/dev/examples/SDDP_0.25.log @@ -20,11 +20,11 @@ subproblem structure ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 1.887500e+02 1.995243e+02 2.810001e-03 1149 1 - 10 2.962500e+02 2.052855e+02 3.052688e-02 1473 1 + 1 1.887500e+02 1.995243e+02 2.953053e-03 1149 1 + 10 2.962500e+02 2.052855e+02 3.094411e-02 1473 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 3.052688e-02 +total time (s) : 3.094411e-02 total solves : 1473 best bound : 2.052855e+02 simulation ci : 2.040201e+02 ± 3.876873e+01 diff --git a/dev/examples/SDDP_0.375.log b/dev/examples/SDDP_0.375.log index 4a932e08d..475611be6 100644 --- a/dev/examples/SDDP_0.375.log +++ b/dev/examples/SDDP_0.375.log @@ -20,11 +20,11 @@ subproblem structure ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 2.562500e+02 2.788373e+02 3.228903e-03 2262 1 - 10 2.375000e+02 2.795671e+02 5.227590e-02 2586 1 + 1 2.562500e+02 2.788373e+02 3.308058e-03 2262 1 + 10 2.375000e+02 2.795671e+02 3.411508e-02 2586 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 5.227590e-02 +total time (s) : 3.411508e-02 total solves : 2586 best bound : 2.795671e+02 simulation ci : 2.375000e+02 ± 3.099032e+01 diff --git a/dev/examples/SDDP_0.5.log b/dev/examples/SDDP_0.5.log 
index f9840d37d..0a08af092 100644 --- a/dev/examples/SDDP_0.5.log +++ b/dev/examples/SDDP_0.5.log @@ -20,11 +20,11 @@ subproblem structure ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 4.850000e+02 3.349793e+02 3.066063e-03 778 1 - 10 3.550000e+02 3.468286e+02 6.175089e-02 1102 1 + 1 4.850000e+02 3.349793e+02 3.223181e-03 778 1 + 10 3.550000e+02 3.468286e+02 3.217816e-02 1102 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 6.175089e-02 +total time (s) : 3.217816e-02 total solves : 1102 best bound : 3.468286e+02 simulation ci : 3.948309e+02 ± 7.954180e+01 diff --git a/dev/examples/SDDP_0.625.log b/dev/examples/SDDP_0.625.log index fbaa2bcf5..e661064ab 100644 --- a/dev/examples/SDDP_0.625.log +++ b/dev/examples/SDDP_0.625.log @@ -20,11 +20,11 @@ subproblem structure ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 3.812500e+02 4.072952e+02 3.688812e-03 2633 1 - 10 5.818750e+02 4.080500e+02 3.585982e-02 2957 1 + 1 3.812500e+02 4.072952e+02 3.736019e-03 2633 1 + 10 5.818750e+02 4.080500e+02 3.661990e-02 2957 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 3.585982e-02 +total time (s) : 3.661990e-02 total solves : 2957 best bound : 4.080500e+02 simulation ci : 4.235323e+02 ± 1.029245e+02 diff --git a/dev/examples/SDDP_0.75.log b/dev/examples/SDDP_0.75.log index 95049b6bc..21adca4c0 100644 --- a/dev/examples/SDDP_0.75.log +++ b/dev/examples/SDDP_0.75.log @@ -20,11 +20,11 @@ subproblem structure ------------------------------------------------------------------- iteration simulation bound time (s) solves pid 
------------------------------------------------------------------- - 1 3.737500e+02 4.626061e+02 3.319979e-03 1520 1 - 10 2.450000e+02 4.658509e+02 3.421998e-02 1844 1 + 1 3.737500e+02 4.626061e+02 3.531933e-03 1520 1 + 10 2.450000e+02 4.658509e+02 3.666282e-02 1844 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 3.421998e-02 +total time (s) : 3.666282e-02 total solves : 1844 best bound : 4.658509e+02 simulation ci : 3.907376e+02 ± 9.045105e+01 diff --git a/dev/examples/SDDP_0.875.log b/dev/examples/SDDP_0.875.log index 1d2b8cb04..be0742b2f 100644 --- a/dev/examples/SDDP_0.875.log +++ b/dev/examples/SDDP_0.875.log @@ -20,11 +20,11 @@ subproblem structure ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 8.525000e+02 5.197742e+02 3.482103e-03 3004 1 - 10 4.493750e+02 5.211793e+02 3.725910e-02 3328 1 + 1 8.525000e+02 5.197742e+02 3.575087e-03 3004 1 + 10 4.493750e+02 5.211793e+02 3.753400e-02 3328 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 3.725910e-02 +total time (s) : 3.753400e-02 total solves : 3328 best bound : 5.211793e+02 simulation ci : 5.268125e+02 ± 1.227709e+02 diff --git a/dev/examples/SDDP_1.0.log b/dev/examples/SDDP_1.0.log index e4ec16f8b..a868589d5 100644 --- a/dev/examples/SDDP_1.0.log +++ b/dev/examples/SDDP_1.0.log @@ -20,11 +20,11 @@ subproblem structure ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 6.750000e+02 5.500000e+02 2.835035e-03 407 1 - 10 4.500000e+02 5.733959e+02 3.007984e-02 731 1 + 1 6.750000e+02 5.500000e+02 2.856970e-03 407 1 + 10 4.500000e+02 5.733959e+02 3.068495e-02 731 1 
------------------------------------------------------------------- status : iteration_limit -total time (s) : 3.007984e-02 +total time (s) : 3.068495e-02 total solves : 731 best bound : 5.733959e+02 simulation ci : 5.000000e+02 ± 1.079583e+02 diff --git a/dev/examples/StochDynamicProgramming.jl_multistock/index.html b/dev/examples/StochDynamicProgramming.jl_multistock/index.html index e8511feba..bd06b1046 100644 --- a/dev/examples/StochDynamicProgramming.jl_multistock/index.html +++ b/dev/examples/StochDynamicProgramming.jl_multistock/index.html @@ -80,21 +80,21 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 -3.878303e+00 -4.434982e+00 1.919680e-01 1400 1 - 20 -4.262885e+00 -4.399265e+00 3.110759e-01 2800 1 - 30 -3.075162e+00 -4.382527e+00 4.385228e-01 4200 1 - 40 -3.761147e+00 -4.369587e+00 5.714748e-01 5600 1 - 50 -4.323162e+00 -4.362199e+00 7.641728e-01 7000 1 - 60 -3.654943e+00 -4.358401e+00 9.058180e-01 8400 1 - 70 -4.010883e+00 -4.357368e+00 1.048839e+00 9800 1 - 80 -4.314412e+00 -4.355714e+00 1.196367e+00 11200 1 - 90 -4.542422e+00 -4.353708e+00 1.348074e+00 12600 1 - 100 -4.178952e+00 -4.351685e+00 1.497844e+00 14000 1 + 10 -3.878303e+00 -4.434982e+00 1.942959e-01 1400 1 + 20 -4.262885e+00 -4.399265e+00 3.131590e-01 2800 1 + 30 -3.075162e+00 -4.382527e+00 4.379020e-01 4200 1 + 40 -3.761147e+00 -4.369587e+00 5.726449e-01 5600 1 + 50 -4.323162e+00 -4.362199e+00 7.124569e-01 7000 1 + 60 -3.654943e+00 -4.358401e+00 8.545969e-01 8400 1 + 70 -4.010883e+00 -4.357368e+00 9.996049e-01 9800 1 + 80 -4.314412e+00 -4.355714e+00 1.203137e+00 11200 1 + 90 -4.542422e+00 -4.353708e+00 1.356603e+00 12600 1 + 100 -4.178952e+00 -4.351685e+00 1.505242e+00 14000 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 1.497844e+00 +total time (s) : 1.505242e+00 total solves : 14000 
best bound : -4.351685e+00 simulation ci : -4.246786e+00 ± 8.703997e-02 numeric issues : 0 -------------------------------------------------------------------- +------------------------------------------------------------------- diff --git a/dev/examples/StochDynamicProgramming.jl_stock/index.html b/dev/examples/StochDynamicProgramming.jl_stock/index.html index a6ff2ed7d..891906a79 100644 --- a/dev/examples/StochDynamicProgramming.jl_stock/index.html +++ b/dev/examples/StochDynamicProgramming.jl_stock/index.html @@ -57,18 +57,18 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 -1.573154e+00 -1.474247e+00 6.948113e-02 1050 1 - 20 -1.346690e+00 -1.471483e+00 1.082630e-01 1600 1 - 30 -1.308031e+00 -1.471307e+00 1.922801e-01 2650 1 - 40 -1.401200e+00 -1.471167e+00 2.350020e-01 3200 1 - 50 -1.557483e+00 -1.471097e+00 3.224430e-01 4250 1 - 60 -1.534169e+00 -1.471075e+00 3.679681e-01 4800 1 - 65 -1.689864e+00 -1.471075e+00 3.907061e-01 5075 1 + 10 -1.573154e+00 -1.474247e+00 6.942701e-02 1050 1 + 20 -1.346690e+00 -1.471483e+00 1.086791e-01 1600 1 + 30 -1.308031e+00 -1.471307e+00 1.911941e-01 2650 1 + 40 -1.401200e+00 -1.471167e+00 2.329950e-01 3200 1 + 50 -1.557483e+00 -1.471097e+00 3.202279e-01 4250 1 + 60 -1.534169e+00 -1.471075e+00 3.649149e-01 4800 1 + 65 -1.689864e+00 -1.471075e+00 3.875370e-01 5075 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 3.907061e-01 +total time (s) : 3.875370e-01 total solves : 5075 best bound : -1.471075e+00 simulation ci : -1.484094e+00 ± 4.058993e-02 numeric issues : 0 -------------------------------------------------------------------- +------------------------------------------------------------------- diff --git a/dev/examples/StructDualDynProg.jl_prob5.2_2stages/index.html 
b/dev/examples/StructDualDynProg.jl_prob5.2_2stages/index.html index e14861ce9..351f2921d 100644 --- a/dev/examples/StructDualDynProg.jl_prob5.2_2stages/index.html +++ b/dev/examples/StructDualDynProg.jl_prob5.2_2stages/index.html @@ -85,16 +85,16 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 3.455904e+05 3.147347e+05 8.068085e-03 54 1 - 20 3.336455e+05 3.402383e+05 1.415706e-02 104 1 - 30 3.337559e+05 3.403155e+05 2.132511e-02 158 1 - 40 3.337559e+05 3.403155e+05 2.832103e-02 208 1 - 48 3.337559e+05 3.403155e+05 3.439808e-02 248 1 + 10 3.455904e+05 3.147347e+05 8.111954e-03 54 1 + 20 3.336455e+05 3.402383e+05 1.425099e-02 104 1 + 30 3.337559e+05 3.403155e+05 2.154803e-02 158 1 + 40 3.337559e+05 3.403155e+05 2.868700e-02 208 1 + 48 3.337559e+05 3.403155e+05 3.480482e-02 248 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 3.439808e-02 +total time (s) : 3.480482e-02 total solves : 248 best bound : 3.403155e+05 simulation ci : 1.351676e+08 ± 1.785770e+08 numeric issues : 0 -------------------------------------------------------------------- +------------------------------------------------------------------- diff --git a/dev/examples/StructDualDynProg.jl_prob5.2_3stages/index.html b/dev/examples/StructDualDynProg.jl_prob5.2_3stages/index.html index 516909f8f..87657a83a 100644 --- a/dev/examples/StructDualDynProg.jl_prob5.2_3stages/index.html +++ b/dev/examples/StructDualDynProg.jl_prob5.2_3stages/index.html @@ -81,16 +81,16 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 4.403329e+05 3.509666e+05 1.356816e-02 92 1 - 20 4.055335e+05 4.054833e+05 2.453518e-02 172 1 - 30 3.959476e+05 4.067125e+05 3.774214e-02 264 1 - 40 
3.959476e+05 4.067125e+05 5.097198e-02 344 1 - 47 3.959476e+05 4.067125e+05 6.104016e-02 400 1 + 10 4.403329e+05 3.509666e+05 1.326609e-02 92 1 + 20 4.055335e+05 4.054833e+05 2.928090e-02 172 1 + 30 3.959476e+05 4.067125e+05 4.300404e-02 264 1 + 40 3.959476e+05 4.067125e+05 5.655909e-02 344 1 + 47 3.959476e+05 4.067125e+05 6.679392e-02 400 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 6.104016e-02 +total time (s) : 6.679392e-02 total solves : 400 best bound : 4.067125e+05 simulation ci : 2.695623e+07 ± 3.645336e+07 numeric issues : 0 -------------------------------------------------------------------- +------------------------------------------------------------------- diff --git a/dev/examples/agriculture_mccardle_farm/index.html b/dev/examples/agriculture_mccardle_farm/index.html index 7100457c5..82f3f7bf6 100644 --- a/dev/examples/agriculture_mccardle_farm/index.html +++ b/dev/examples/agriculture_mccardle_farm/index.html @@ -124,4 +124,4 @@ @test SDDP.calculate_bound(model) ≈ 4074.1391 atol = 1e-5 end -test_mccardle_farm_model()
Test Passed
+test_mccardle_farm_model()
Test Passed
diff --git a/dev/examples/air_conditioning/index.html b/dev/examples/air_conditioning/index.html index 90ba15d9e..2e02d9a35 100644 --- a/dev/examples/air_conditioning/index.html +++ b/dev/examples/air_conditioning/index.html @@ -76,11 +76,11 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1L 7.000000e+04 6.166667e+04 5.635211e-01 8 1 - 40L 5.500000e+04 6.250000e+04 8.101661e-01 344 1 + 1L 7.000000e+04 6.166667e+04 5.657871e-01 8 1 + 40L 5.500000e+04 6.250000e+04 8.762000e-01 344 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 8.101661e-01 +total time (s) : 8.762000e-01 total solves : 344 best bound : 6.250000e+04 simulation ci : 6.091250e+04 ± 6.325667e+03 @@ -115,11 +115,11 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 3.000000e+04 6.250000e+04 3.736019e-03 8 1 - 20 6.000000e+04 6.250000e+04 4.383206e-02 172 1 + 1 3.000000e+04 6.250000e+04 3.748894e-03 8 1 + 20 6.000000e+04 6.250000e+04 5.618000e-02 172 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 4.383206e-02 +total time (s) : 5.618000e-02 total solves : 172 best bound : 6.250000e+04 simulation ci : 5.675000e+04 ± 6.792430e+03 @@ -127,4 +127,4 @@ ------------------------------------------------------------------- Lower bound is: 62500.0 -With first stage solutions 200.0 (production) and 100.0 (stored_production). +With first stage solutions 200.0 (production) and 100.0 (stored_production). 
diff --git a/dev/examples/air_conditioning_forward/index.html b/dev/examples/air_conditioning_forward/index.html index a74fc1411..b408e2c8c 100644 --- a/dev/examples/air_conditioning_forward/index.html +++ b/dev/examples/air_conditioning_forward/index.html @@ -37,4 +37,4 @@ iteration_limit = 10, ) Test.@test isapprox(SDDP.calculate_bound(non_convex), 62_500.0, atol = 0.1) -Test.@test isapprox(SDDP.calculate_bound(convex), 62_500.0, atol = 0.1)
Test Passed
+Test.@test isapprox(SDDP.calculate_bound(convex), 62_500.0, atol = 0.1)
Test Passed
diff --git a/dev/examples/all_blacks/index.html b/dev/examples/all_blacks/index.html index 4c31fb6f8..b0138e659 100644 --- a/dev/examples/all_blacks/index.html +++ b/dev/examples/all_blacks/index.html @@ -61,13 +61,13 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1L 6.000000e+00 9.000000e+00 3.800106e-02 6 1 - 20L 9.000000e+00 9.000000e+00 7.868314e-02 123 1 + 1L 6.000000e+00 9.000000e+00 3.926110e-02 6 1 + 20L 9.000000e+00 9.000000e+00 7.914114e-02 123 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 7.868314e-02 +total time (s) : 7.914114e-02 total solves : 123 best bound : 9.000000e+00 simulation ci : 8.850000e+00 ± 2.940000e-01 numeric issues : 0 -------------------------------------------------------------------- +------------------------------------------------------------------- diff --git a/dev/examples/asset_management_simple/index.html b/dev/examples/asset_management_simple/index.html index 2dfbc6f22..ab8c5b2f9 100644 --- a/dev/examples/asset_management_simple/index.html +++ b/dev/examples/asset_management_simple/index.html @@ -74,19 +74,19 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 5 -5.684342e-14 1.184830e+00 1.329207e-02 87 1 - 10 5.012507e+01 1.508277e+00 1.996207e-02 142 1 - 15 -1.428571e+00 1.514085e+00 2.714801e-02 197 1 - 20 7.105427e-14 1.514085e+00 3.475404e-02 252 1 - 25 -3.979039e-13 1.514085e+00 8.906603e-02 339 1 - 30 -1.428571e+00 1.514085e+00 9.759498e-02 394 1 - 35 -1.428571e+00 1.514085e+00 1.068540e-01 449 1 - 40 0.000000e+00 1.514085e+00 1.408000e-01 504 1 + 5 -5.684342e-14 1.184830e+00 1.335406e-02 87 1 + 10 5.012507e+01 1.508277e+00 1.991916e-02 142 1 + 15 -1.428571e+00 1.514085e+00 
2.718306e-02 197 1 + 20 7.105427e-14 1.514085e+00 3.494310e-02 252 1 + 25 -3.979039e-13 1.514085e+00 9.148598e-02 339 1 + 30 -1.428571e+00 1.514085e+00 1.001492e-01 394 1 + 35 -1.428571e+00 1.514085e+00 1.093709e-01 449 1 + 40 0.000000e+00 1.514085e+00 1.191351e-01 504 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 1.408000e-01 +total time (s) : 1.191351e-01 total solves : 504 best bound : 1.514085e+00 simulation ci : 2.863132e+00 ± 6.778637e+00 numeric issues : 0 -------------------------------------------------------------------- +------------------------------------------------------------------- diff --git a/dev/examples/asset_management_stagewise/index.html b/dev/examples/asset_management_stagewise/index.html index 45e5094d2..5de73ac9f 100644 --- a/dev/examples/asset_management_stagewise/index.html +++ b/dev/examples/asset_management_stagewise/index.html @@ -91,14 +91,14 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 1.100409e+00 1.301856e+00 1.554060e-01 278 1 - 20 1.263098e+01 1.278410e+00 1.754270e-01 428 1 - 30 -5.003795e+01 1.278410e+00 2.089329e-01 706 1 - 40 6.740000e+00 1.278410e+00 2.321758e-01 856 1 - 44 1.111084e+01 1.278410e+00 2.419429e-01 916 1 + 10 1.100409e+00 1.301856e+00 1.572721e-01 278 1 + 20 1.263098e+01 1.278410e+00 1.776161e-01 428 1 + 30 -5.003795e+01 1.278410e+00 2.127352e-01 706 1 + 40 6.740000e+00 1.278410e+00 2.364390e-01 856 1 + 44 1.111084e+01 1.278410e+00 2.464862e-01 916 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 2.419429e-01 +total time (s) : 2.464862e-01 total solves : 916 best bound : 1.278410e+00 simulation ci : 4.090025e+00 ± 5.358375e+00 @@ -130,15 +130,15 @@ ------------------------------------------------------------------- iteration 
simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 2.007061e+00 1.281639e+00 3.613997e-02 278 1 - 20 1.426676e+01 1.278410e+00 6.434894e-02 428 1 - 30 1.522212e+00 1.278410e+00 1.093080e-01 706 1 - 40 -4.523775e+01 1.278410e+00 1.482601e-01 856 1 + 10 2.007061e+00 1.281639e+00 3.626704e-02 278 1 + 20 1.426676e+01 1.278410e+00 6.425691e-02 428 1 + 30 1.522212e+00 1.278410e+00 1.093450e-01 706 1 + 40 -4.523775e+01 1.278410e+00 1.479840e-01 856 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 1.482601e-01 +total time (s) : 1.479840e-01 total solves : 856 best bound : 1.278410e+00 simulation ci : 1.019480e+00 ± 6.246418e+00 numeric issues : 0 -------------------------------------------------------------------- +------------------------------------------------------------------- diff --git a/dev/examples/belief/index.html b/dev/examples/belief/index.html index 20854c918..1d7940a2e 100644 --- a/dev/examples/belief/index.html +++ b/dev/examples/belief/index.html @@ -94,21 +94,21 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 4.787277e+00 9.346930e+00 1.381464e+00 900 1 - 20 6.374753e+00 1.361934e+01 1.551260e+00 1720 1 - 30 2.813321e+01 1.651297e+01 1.913003e+00 3036 1 - 40 1.654759e+01 1.632970e+01 2.283337e+00 4192 1 - 50 3.570941e+00 1.846889e+01 2.542172e+00 5020 1 - 60 1.087425e+01 1.890254e+01 2.836630e+00 5808 1 - 70 9.381610e+00 1.940320e+01 3.133666e+00 6540 1 - 80 5.648731e+01 1.962435e+01 3.355766e+00 7088 1 - 90 3.879273e+01 1.981008e+01 3.854946e+00 8180 1 - 100 7.870187e+00 1.997117e+01 4.082399e+00 8664 1 + 10 4.787277e+00 9.346930e+00 1.425595e+00 900 1 + 20 6.374753e+00 1.361934e+01 1.597725e+00 1720 1 + 30 2.813321e+01 1.651297e+01 1.927750e+00 3036 1 + 40 1.654759e+01 1.632970e+01 
2.297534e+00 4192 1 + 50 3.570941e+00 1.846889e+01 2.570108e+00 5020 1 + 60 1.087425e+01 1.890254e+01 2.947895e+00 5808 1 + 70 9.381610e+00 1.940320e+01 3.244324e+00 6540 1 + 80 5.648731e+01 1.962435e+01 4.050954e+00 7088 1 + 90 3.879273e+01 1.981008e+01 4.613095e+00 8180 1 + 100 7.870187e+00 1.997117e+01 4.943725e+00 8664 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 4.082399e+00 +total time (s) : 4.943725e+00 total solves : 8664 best bound : 1.997117e+01 simulation ci : 2.275399e+01 ± 4.541987e+00 numeric issues : 0 -------------------------------------------------------------------- +------------------------------------------------------------------- diff --git a/dev/examples/biobjective_hydro/index.html b/dev/examples/biobjective_hydro/index.html index 6e3c2c398..007aea608 100644 --- a/dev/examples/biobjective_hydro/index.html +++ b/dev/examples/biobjective_hydro/index.html @@ -80,11 +80,11 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 0.000000e+00 0.000000e+00 8.840084e-03 36 1 - 10 0.000000e+00 0.000000e+00 2.838302e-02 360 1 + 1 0.000000e+00 0.000000e+00 9.399176e-03 36 1 + 10 0.000000e+00 0.000000e+00 2.939820e-02 360 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 2.838302e-02 +total time (s) : 2.939820e-02 total solves : 360 best bound : 0.000000e+00 simulation ci : 0.000000e+00 ± 0.000000e+00 @@ -113,11 +113,11 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 6.750000e+02 5.500000e+02 2.835035e-03 407 1 - 10 4.500000e+02 5.733959e+02 3.007984e-02 731 1 + 1 6.750000e+02 5.500000e+02 2.856970e-03 407 1 + 10 4.500000e+02 5.733959e+02 3.068495e-02 
731 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 3.007984e-02 +total time (s) : 3.068495e-02 total solves : 731 best bound : 5.733959e+02 simulation ci : 5.000000e+02 ± 1.079583e+02 @@ -146,11 +146,11 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 4.850000e+02 3.349793e+02 3.066063e-03 778 1 - 10 3.550000e+02 3.468286e+02 6.175089e-02 1102 1 + 1 4.850000e+02 3.349793e+02 3.223181e-03 778 1 + 10 3.550000e+02 3.468286e+02 3.217816e-02 1102 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 6.175089e-02 +total time (s) : 3.217816e-02 total solves : 1102 best bound : 3.468286e+02 simulation ci : 3.948309e+02 ± 7.954180e+01 @@ -179,11 +179,11 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 1.887500e+02 1.995243e+02 2.810001e-03 1149 1 - 10 2.962500e+02 2.052855e+02 3.052688e-02 1473 1 + 1 1.887500e+02 1.995243e+02 2.953053e-03 1149 1 + 10 2.962500e+02 2.052855e+02 3.094411e-02 1473 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 3.052688e-02 +total time (s) : 3.094411e-02 total solves : 1473 best bound : 2.052855e+02 simulation ci : 2.040201e+02 ± 3.876873e+01 @@ -212,11 +212,11 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 3.737500e+02 4.626061e+02 3.319979e-03 1520 1 - 10 2.450000e+02 4.658509e+02 3.421998e-02 1844 1 + 1 3.737500e+02 4.626061e+02 3.531933e-03 1520 1 + 10 2.450000e+02 4.658509e+02 3.666282e-02 1844 1 
------------------------------------------------------------------- status : iteration_limit -total time (s) : 3.421998e-02 +total time (s) : 3.666282e-02 total solves : 1844 best bound : 4.658509e+02 simulation ci : 3.907376e+02 ± 9.045105e+01 @@ -245,11 +245,11 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 1.675000e+02 1.129545e+02 2.846956e-03 1891 1 - 10 1.362500e+02 1.129771e+02 3.016996e-02 2215 1 + 1 1.675000e+02 1.129545e+02 3.082991e-03 1891 1 + 10 1.362500e+02 1.129771e+02 3.142214e-02 2215 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 3.016996e-02 +total time (s) : 3.142214e-02 total solves : 2215 best bound : 1.129771e+02 simulation ci : 1.176375e+02 ± 1.334615e+01 @@ -278,11 +278,11 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 2.562500e+02 2.788373e+02 3.228903e-03 2262 1 - 10 2.375000e+02 2.795671e+02 5.227590e-02 2586 1 + 1 2.562500e+02 2.788373e+02 3.308058e-03 2262 1 + 10 2.375000e+02 2.795671e+02 3.411508e-02 2586 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 5.227590e-02 +total time (s) : 3.411508e-02 total solves : 2586 best bound : 2.795671e+02 simulation ci : 2.375000e+02 ± 3.099032e+01 @@ -311,11 +311,11 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 3.812500e+02 4.072952e+02 3.688812e-03 2633 1 - 10 5.818750e+02 4.080500e+02 3.585982e-02 2957 1 + 1 3.812500e+02 4.072952e+02 3.736019e-03 2633 1 + 10 5.818750e+02 4.080500e+02 3.661990e-02 2957 1 
------------------------------------------------------------------- status : iteration_limit -total time (s) : 3.585982e-02 +total time (s) : 3.661990e-02 total solves : 2957 best bound : 4.080500e+02 simulation ci : 4.235323e+02 ± 1.029245e+02 @@ -344,11 +344,11 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 8.525000e+02 5.197742e+02 3.482103e-03 3004 1 - 10 4.493750e+02 5.211793e+02 3.725910e-02 3328 1 + 1 8.525000e+02 5.197742e+02 3.575087e-03 3004 1 + 10 4.493750e+02 5.211793e+02 3.753400e-02 3328 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 3.725910e-02 +total time (s) : 3.753400e-02 total solves : 3328 best bound : 5.211793e+02 simulation ci : 5.268125e+02 ± 1.227709e+02 @@ -377,13 +377,13 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 3.437500e+01 5.937500e+01 3.201008e-03 3375 1 - 10 3.750000e+01 5.938557e+01 3.138804e-02 3699 1 + 1 3.437500e+01 5.937500e+01 3.830910e-03 3375 1 + 10 3.750000e+01 5.938557e+01 3.281999e-02 3699 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 3.138804e-02 +total time (s) : 3.281999e-02 total solves : 3699 best bound : 5.938557e+01 simulation ci : 5.906250e+01 ± 1.352595e+01 numeric issues : 0 -------------------------------------------------------------------- +------------------------------------------------------------------- diff --git a/dev/examples/booking_management/index.html b/dev/examples/booking_management/index.html index 80fe99ee1..65d5dd7b3 100644 --- a/dev/examples/booking_management/index.html +++ b/dev/examples/booking_management/index.html @@ -96,4 +96,4 @@ end end 
-booking_management(SDDP.ContinuousConicDuality())
Test Passed

New version of HiGHS stalls booking_management(SDDP.LagrangianDuality())

+booking_management(SDDP.ContinuousConicDuality())
Test Passed

New version of HiGHS stalls booking_management(SDDP.LagrangianDuality())

diff --git a/dev/examples/generation_expansion/index.html b/dev/examples/generation_expansion/index.html index 2c28993de..58572224f 100644 --- a/dev/examples/generation_expansion/index.html +++ b/dev/examples/generation_expansion/index.html @@ -115,15 +115,15 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 2.549668e+06 2.078257e+06 5.127652e-01 920 1 - 20 5.494568e+05 2.078257e+06 7.066481e-01 1340 1 - 30 4.985879e+04 2.078257e+06 1.253876e+00 2260 1 - 40 3.799447e+06 2.078257e+06 1.449975e+00 2680 1 - 50 1.049867e+06 2.078257e+06 1.994528e+00 3600 1 - 60 3.985191e+04 2.078257e+06 2.250975e+00 4020 1 + 10 2.549668e+06 2.078257e+06 5.411520e-01 920 1 + 20 5.494568e+05 2.078257e+06 7.411749e-01 1340 1 + 30 4.985879e+04 2.078257e+06 1.292261e+00 2260 1 + 40 3.799447e+06 2.078257e+06 1.495686e+00 2680 1 + 50 1.049867e+06 2.078257e+06 2.063266e+00 3600 1 + 60 3.985191e+04 2.078257e+06 2.269838e+00 4020 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 2.250975e+00 +total time (s) : 2.269838e+00 total solves : 4020 best bound : 2.078257e+06 simulation ci : 2.031697e+06 ± 3.922745e+05 @@ -157,17 +157,17 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10L 4.986663e+04 2.079119e+06 9.420180e-01 920 1 - 20L 3.799878e+06 2.079330e+06 1.653278e+00 1340 1 - 30L 3.003923e+04 2.079457e+06 2.755824e+00 2260 1 - 40L 5.549882e+06 2.079457e+06 3.543915e+00 2680 1 - 50L 2.799466e+06 2.079457e+06 4.709486e+00 3600 1 - 60L 3.549880e+06 2.079457e+06 5.470320e+00 4020 1 + 10L 4.986663e+04 2.079119e+06 9.979272e-01 920 1 + 20L 3.799878e+06 2.079330e+06 1.750540e+00 1340 1 + 30L 3.003923e+04 2.079457e+06 2.912281e+00 2260 1 + 40L 5.549882e+06 
2.079457e+06 3.757357e+00 2680 1 + 50L 2.799466e+06 2.079457e+06 4.981082e+00 3600 1 + 60L 3.549880e+06 2.079457e+06 5.788100e+00 4020 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 5.470320e+00 +total time (s) : 5.788100e+00 total solves : 4020 best bound : 2.079457e+06 simulation ci : 2.352204e+06 ± 5.377531e+05 numeric issues : 0 -------------------------------------------------------------------- +------------------------------------------------------------------- diff --git a/dev/examples/hydro_valley/index.html b/dev/examples/hydro_valley/index.html index adea50fa8..82aad5de7 100644 --- a/dev/examples/hydro_valley/index.html +++ b/dev/examples/hydro_valley/index.html @@ -280,4 +280,4 @@ ### = $835 end -test_hydro_valley_model()
Test Passed
+test_hydro_valley_model()
Test Passed
diff --git a/dev/examples/infinite_horizon_hydro_thermal/index.html b/dev/examples/infinite_horizon_hydro_thermal/index.html index ea5ed0bb6..18b11a3f0 100644 --- a/dev/examples/infinite_horizon_hydro_thermal/index.html +++ b/dev/examples/infinite_horizon_hydro_thermal/index.html @@ -93,13 +93,13 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 100 2.500000e+01 1.188965e+02 7.887051e-01 1946 1 - 200 2.500000e+01 1.191634e+02 1.009564e+00 3920 1 - 300 0.000000e+00 1.191666e+02 1.224198e+00 5902 1 - 330 2.500000e+01 1.191667e+02 1.266861e+00 6224 1 + 100 2.500000e+01 1.188965e+02 8.310049e-01 1946 1 + 200 2.500000e+01 1.191634e+02 1.044859e+00 3920 1 + 300 0.000000e+00 1.191666e+02 1.265158e+00 5902 1 + 330 2.500000e+01 1.191667e+02 1.308159e+00 6224 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 1.266861e+00 +total time (s) : 1.308159e+00 total solves : 6224 best bound : 1.191667e+02 simulation ci : 2.158333e+01 ± 3.290252e+00 @@ -132,16 +132,16 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 100 0.000000e+00 1.191285e+02 2.892599e-01 2874 1 - 200 2.500000e+00 1.191666e+02 5.641530e-01 4855 1 - 282 7.500000e+00 1.191667e+02 6.984270e-01 5733 1 + 100 0.000000e+00 1.191285e+02 2.998888e-01 2874 1 + 200 2.500000e+00 1.191666e+02 5.349660e-01 4855 1 + 282 7.500000e+00 1.191667e+02 6.698999e-01 5733 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 6.984270e-01 +total time (s) : 6.698999e-01 total solves : 5733 best bound : 1.191667e+02 simulation ci : 2.104610e+01 ± 3.492245e+00 numeric issues : 0 
------------------------------------------------------------------- -Confidence_interval = 116.06 ± 13.65 +Confidence_interval = 116.06 ± 13.65 diff --git a/dev/examples/infinite_horizon_trivial/index.html b/dev/examples/infinite_horizon_trivial/index.html index 469716254..9a85dd24b 100644 --- a/dev/examples/infinite_horizon_trivial/index.html +++ b/dev/examples/infinite_horizon_trivial/index.html @@ -49,15 +49,15 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 4.000000e+00 1.997089e+01 7.021093e-02 1204 1 - 20 8.000000e+00 2.000000e+01 9.124708e-02 1420 1 - 30 1.600000e+01 2.000000e+01 1.582351e-01 2628 1 - 40 8.000000e+00 2.000000e+01 1.809099e-01 2834 1 + 10 4.000000e+00 1.997089e+01 1.078000e-01 1204 1 + 20 8.000000e+00 2.000000e+01 1.287360e-01 1420 1 + 30 1.600000e+01 2.000000e+01 1.973190e-01 2628 1 + 40 8.000000e+00 2.000000e+01 2.192059e-01 2834 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 1.809099e-01 +total time (s) : 2.192059e-01 total solves : 2834 best bound : 2.000000e+01 simulation ci : 1.625000e+01 ± 4.766381e+00 numeric issues : 0 -------------------------------------------------------------------- +------------------------------------------------------------------- diff --git a/dev/examples/no_strong_duality/index.html b/dev/examples/no_strong_duality/index.html index 6ee895f51..f0cc0672e 100644 --- a/dev/examples/no_strong_duality/index.html +++ b/dev/examples/no_strong_duality/index.html @@ -48,13 +48,13 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 1.000000e+00 1.500000e+00 1.583099e-03 3 1 - 40 4.000000e+00 2.000000e+00 4.327106e-02 578 1 + 1 1.000000e+00 1.500000e+00 1.635075e-03 3 1 + 40 
4.000000e+00 2.000000e+00 4.523492e-02 578 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 4.327106e-02 +total time (s) : 4.523492e-02 total solves : 578 best bound : 2.000000e+00 simulation ci : 1.950000e+00 ± 5.568095e-01 numeric issues : 0 -------------------------------------------------------------------- +------------------------------------------------------------------- diff --git a/dev/examples/objective_state_newsvendor/index.html b/dev/examples/objective_state_newsvendor/index.html index 4fdacd4a8..244991c83 100644 --- a/dev/examples/objective_state_newsvendor/index.html +++ b/dev/examples/objective_state_newsvendor/index.html @@ -93,137 +93,135 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 5.250000e+00 4.888859e+00 1.687591e-01 1350 1 - 20 4.350000e+00 4.105855e+00 2.892880e-01 2700 1 - 30 5.000000e+00 4.100490e+00 3.864150e-01 4050 1 - 40 3.500000e+00 4.097376e+00 4.918370e-01 5400 1 - 50 5.250000e+00 4.095859e+00 6.017079e-01 6750 1 - 60 3.643750e+00 4.093342e+00 7.160020e-01 8100 1 - 70 2.643750e+00 4.091818e+00 8.286331e-01 9450 1 - 80 5.087500e+00 4.091591e+00 9.450080e-01 10800 1 - 90 5.062500e+00 4.091309e+00 1.061172e+00 12150 1 - 100 4.843750e+00 4.087004e+00 1.186036e+00 13500 1 - 110 3.437500e+00 4.086094e+00 1.311120e+00 14850 1 - 120 3.375000e+00 4.085926e+00 1.437098e+00 16200 1 - 130 5.025000e+00 4.085866e+00 1.565217e+00 17550 1 - 140 5.000000e+00 4.085734e+00 1.693743e+00 18900 1 - 150 3.500000e+00 4.085655e+00 1.824377e+00 20250 1 - 160 4.281250e+00 4.085454e+00 1.953747e+00 21600 1 - 170 4.562500e+00 4.085425e+00 2.085516e+00 22950 1 - 180 5.768750e+00 4.085425e+00 2.216371e+00 24300 1 - 190 3.468750e+00 4.085359e+00 2.353678e+00 25650 1 - 200 4.131250e+00 4.085225e+00 2.489963e+00 27000 1 - 210 4.512500e+00 4.085157e+00 
2.662310e+00 28350 1 - 220 4.900000e+00 4.085153e+00 2.796681e+00 29700 1 - 230 4.025000e+00 4.085134e+00 2.934796e+00 31050 1 - 240 4.468750e+00 4.085116e+00 3.078804e+00 32400 1 - 250 4.062500e+00 4.085075e+00 3.218615e+00 33750 1 - 260 4.875000e+00 4.085037e+00 3.361501e+00 35100 1 - 270 3.850000e+00 4.085011e+00 3.505550e+00 36450 1 - 280 4.912500e+00 4.084992e+00 3.648949e+00 37800 1 - 290 2.987500e+00 4.084986e+00 3.797585e+00 39150 1 - 300 3.825000e+00 4.084957e+00 3.948527e+00 40500 1 - 310 3.250000e+00 4.084911e+00 4.099108e+00 41850 1 - 320 3.600000e+00 4.084896e+00 4.247181e+00 43200 1 - 330 3.925000e+00 4.084896e+00 4.385317e+00 44550 1 - 340 4.500000e+00 4.084893e+00 4.531839e+00 45900 1 - 350 5.000000e+00 4.084891e+00 4.678074e+00 47250 1 - 360 3.075000e+00 4.084866e+00 4.823599e+00 48600 1 - 370 3.500000e+00 4.084861e+00 4.976336e+00 49950 1 - 380 3.356250e+00 4.084857e+00 5.132696e+00 51300 1 - 390 5.500000e+00 4.084846e+00 5.293799e+00 52650 1 - 400 4.475000e+00 4.084846e+00 5.444030e+00 54000 1 - 410 3.750000e+00 4.084843e+00 5.597501e+00 55350 1 - 420 3.687500e+00 4.084843e+00 5.778766e+00 56700 1 - 430 4.337500e+00 4.084825e+00 5.939407e+00 58050 1 - 440 5.750000e+00 4.084825e+00 6.085826e+00 59400 1 - 450 4.925000e+00 4.084792e+00 6.249531e+00 60750 1 - 460 3.600000e+00 4.084792e+00 6.406852e+00 62100 1 - 470 4.387500e+00 4.084792e+00 6.560811e+00 63450 1 - 480 4.000000e+00 4.084792e+00 6.725306e+00 64800 1 - 490 2.975000e+00 4.084788e+00 6.882730e+00 66150 1 - 500 3.125000e+00 4.084788e+00 7.038350e+00 67500 1 - 510 4.250000e+00 4.084788e+00 7.202558e+00 68850 1 - 520 4.512500e+00 4.084786e+00 7.356405e+00 70200 1 - 530 3.875000e+00 4.084786e+00 7.520775e+00 71550 1 - 540 4.387500e+00 4.084781e+00 7.685258e+00 72900 1 - 550 5.281250e+00 4.084780e+00 7.851898e+00 74250 1 - 560 4.650000e+00 4.084780e+00 8.006791e+00 75600 1 - 570 3.062500e+00 4.084780e+00 8.165572e+00 76950 1 - 580 3.187500e+00 4.084780e+00 8.318219e+00 78300 1 - 590 
3.812500e+00 4.084780e+00 8.467485e+00 79650 1 - 600 3.637500e+00 4.084774e+00 8.628129e+00 81000 1 - 610 3.950000e+00 4.084765e+00 8.814753e+00 82350 1 - 620 4.625000e+00 4.084760e+00 8.973007e+00 83700 1 - 630 4.218750e+00 4.084760e+00 9.134514e+00 85050 1 - 640 3.025000e+00 4.084755e+00 9.300601e+00 86400 1 - 650 2.993750e+00 4.084751e+00 9.454259e+00 87750 1 - 660 3.262500e+00 4.084746e+00 9.613243e+00 89100 1 - 670 3.625000e+00 4.084746e+00 9.777580e+00 90450 1 - 680 2.981250e+00 4.084746e+00 9.943775e+00 91800 1 - 690 4.187500e+00 4.084746e+00 1.010451e+01 93150 1 - 700 4.500000e+00 4.084746e+00 1.026283e+01 94500 1 - 710 3.225000e+00 4.084746e+00 1.042250e+01 95850 1 - 720 4.375000e+00 4.084746e+00 1.058527e+01 97200 1 - 730 2.650000e+00 4.084746e+00 1.075253e+01 98550 1 - 740 3.250000e+00 4.084746e+00 1.091233e+01 99900 1 - 750 4.725000e+00 4.084746e+00 1.108550e+01 101250 1 - 760 3.375000e+00 4.084746e+00 1.125864e+01 102600 1 - 770 5.375000e+00 4.084746e+00 1.142763e+01 103950 1 - 780 4.068750e+00 4.084746e+00 1.159928e+01 105300 1 - 790 4.412500e+00 4.084746e+00 1.179781e+01 106650 1 - 800 4.350000e+00 4.084746e+00 1.197417e+01 108000 1 - 810 5.887500e+00 4.084746e+00 1.214956e+01 109350 1 - 820 4.912500e+00 4.084746e+00 1.231838e+01 110700 1 - 830 4.387500e+00 4.084746e+00 1.248129e+01 112050 1 - 840 3.675000e+00 4.084746e+00 1.265265e+01 113400 1 - 850 5.375000e+00 4.084746e+00 1.282129e+01 114750 1 - 860 3.562500e+00 4.084746e+00 1.299733e+01 116100 1 - 870 3.075000e+00 4.084746e+00 1.317557e+01 117450 1 - 880 3.625000e+00 4.084746e+00 1.334710e+01 118800 1 - 890 2.937500e+00 4.084746e+00 1.351453e+01 120150 1 - 900 4.450000e+00 4.084746e+00 1.369199e+01 121500 1 - 910 4.200000e+00 4.084746e+00 1.386539e+01 122850 1 - 920 3.687500e+00 4.084746e+00 1.404455e+01 124200 1 - 930 4.725000e+00 4.084746e+00 1.422308e+01 125550 1 - 940 4.018750e+00 4.084746e+00 1.439535e+01 126900 1 - 950 4.675000e+00 4.084746e+00 1.456329e+01 128250 1 - 960 3.375000e+00 
4.084746e+00 1.475449e+01 129600 1 - 970 3.812500e+00 4.084746e+00 1.491792e+01 130950 1 - 980 3.112500e+00 4.084746e+00 1.508757e+01 132300 1 - 990 3.600000e+00 4.084746e+00 1.525910e+01 133650 1 - 1000 5.500000e+00 4.084746e+00 1.543728e+01 135000 1 - 1010 3.187500e+00 4.084746e+00 1.560918e+01 136350 1 - 1020 4.900000e+00 4.084746e+00 1.578396e+01 137700 1 - 1030 3.637500e+00 4.084746e+00 1.597247e+01 139050 1 - 1040 3.975000e+00 4.084746e+00 1.614785e+01 140400 1 - 1050 4.750000e+00 4.084746e+00 1.632423e+01 141750 1 - 1060 4.437500e+00 4.084746e+00 1.652192e+01 143100 1 - 1070 5.000000e+00 4.084746e+00 1.670562e+01 144450 1 - 1080 4.143750e+00 4.084746e+00 1.689072e+01 145800 1 - 1090 5.625000e+00 4.084746e+00 1.708490e+01 147150 1 - 1100 3.475000e+00 4.084746e+00 1.726644e+01 148500 1 - 1110 4.156250e+00 4.084746e+00 1.745366e+01 149850 1 - 1120 4.450000e+00 4.084746e+00 1.763757e+01 151200 1 - 1130 3.312500e+00 4.084741e+00 1.782470e+01 152550 1 - 1140 5.375000e+00 4.084741e+00 1.800408e+01 153900 1 - 1150 4.800000e+00 4.084737e+00 1.819346e+01 155250 1 - 1160 3.300000e+00 4.084737e+00 1.837392e+01 156600 1 - 1170 4.356250e+00 4.084737e+00 1.855866e+01 157950 1 - 1180 3.900000e+00 4.084737e+00 1.874676e+01 159300 1 - 1190 4.450000e+00 4.084737e+00 1.893841e+01 160650 1 - 1200 5.156250e+00 4.084737e+00 1.914838e+01 162000 1 - 1210 4.500000e+00 4.084737e+00 1.932547e+01 163350 1 - 1220 4.875000e+00 4.084737e+00 1.952324e+01 164700 1 - 1230 4.000000e+00 4.084737e+00 1.970620e+01 166050 1 - 1240 4.062500e+00 4.084737e+00 1.989083e+01 167400 1 - 1246 3.000000e+00 4.084737e+00 2.000504e+01 168210 1 + 10 5.250000e+00 4.888859e+00 1.716678e-01 1350 1 + 20 4.350000e+00 4.105855e+00 2.587609e-01 2700 1 + 30 5.000000e+00 4.100490e+00 3.567028e-01 4050 1 + 40 3.500000e+00 4.097376e+00 4.626410e-01 5400 1 + 50 5.250000e+00 4.095859e+00 5.744269e-01 6750 1 + 60 3.643750e+00 4.093342e+00 6.895239e-01 8100 1 + 70 2.643750e+00 4.091818e+00 8.075178e-01 9450 1 + 80 
5.087500e+00 4.091591e+00 9.248099e-01 10800 1 + 90 5.062500e+00 4.091309e+00 1.043386e+00 12150 1 + 100 4.843750e+00 4.087004e+00 1.171260e+00 13500 1 + 110 3.437500e+00 4.086094e+00 1.298146e+00 14850 1 + 120 3.375000e+00 4.085926e+00 1.466470e+00 16200 1 + 130 5.025000e+00 4.085866e+00 1.596104e+00 17550 1 + 140 5.000000e+00 4.085734e+00 1.725039e+00 18900 1 + 150 3.500000e+00 4.085655e+00 1.858264e+00 20250 1 + 160 4.281250e+00 4.085454e+00 1.986735e+00 21600 1 + 170 4.562500e+00 4.085425e+00 2.118574e+00 22950 1 + 180 5.768750e+00 4.085425e+00 2.250475e+00 24300 1 + 190 3.468750e+00 4.085359e+00 2.390340e+00 25650 1 + 200 4.131250e+00 4.085225e+00 2.527694e+00 27000 1 + 210 4.512500e+00 4.085157e+00 2.662507e+00 28350 1 + 220 4.900000e+00 4.085153e+00 2.801828e+00 29700 1 + 230 4.025000e+00 4.085134e+00 2.941956e+00 31050 1 + 240 4.468750e+00 4.085116e+00 3.087785e+00 32400 1 + 250 4.062500e+00 4.085075e+00 3.228552e+00 33750 1 + 260 4.875000e+00 4.085037e+00 3.371179e+00 35100 1 + 270 3.850000e+00 4.085011e+00 3.514160e+00 36450 1 + 280 4.912500e+00 4.084992e+00 3.658795e+00 37800 1 + 290 2.987500e+00 4.084986e+00 3.809290e+00 39150 1 + 300 3.825000e+00 4.084957e+00 3.960671e+00 40500 1 + 310 3.250000e+00 4.084911e+00 4.111480e+00 41850 1 + 320 3.600000e+00 4.084896e+00 4.262457e+00 43200 1 + 330 3.925000e+00 4.084896e+00 4.402288e+00 44550 1 + 340 4.500000e+00 4.084893e+00 4.550622e+00 45900 1 + 350 5.000000e+00 4.084891e+00 4.735421e+00 47250 1 + 360 3.075000e+00 4.084866e+00 4.884727e+00 48600 1 + 370 3.500000e+00 4.084861e+00 5.037859e+00 49950 1 + 380 3.356250e+00 4.084857e+00 5.191349e+00 51300 1 + 390 5.500000e+00 4.084846e+00 5.357842e+00 52650 1 + 400 4.475000e+00 4.084846e+00 5.517082e+00 54000 1 + 410 3.750000e+00 4.084843e+00 5.675294e+00 55350 1 + 420 3.687500e+00 4.084843e+00 5.839933e+00 56700 1 + 430 4.337500e+00 4.084825e+00 6.008952e+00 58050 1 + 440 5.750000e+00 4.084825e+00 6.159086e+00 59400 1 + 450 4.925000e+00 4.084792e+00 6.328141e+00 
60750 1 + 460 3.600000e+00 4.084792e+00 6.491750e+00 62100 1 + 470 4.387500e+00 4.084792e+00 6.648771e+00 63450 1 + 480 4.000000e+00 4.084792e+00 6.817950e+00 64800 1 + 490 2.975000e+00 4.084788e+00 6.977072e+00 66150 1 + 500 3.125000e+00 4.084788e+00 7.132193e+00 67500 1 + 510 4.250000e+00 4.084788e+00 7.302382e+00 68850 1 + 520 4.512500e+00 4.084786e+00 7.458969e+00 70200 1 + 530 3.875000e+00 4.084786e+00 7.624709e+00 71550 1 + 540 4.387500e+00 4.084781e+00 7.789676e+00 72900 1 + 550 5.281250e+00 4.084780e+00 7.960666e+00 74250 1 + 560 4.650000e+00 4.084780e+00 8.120844e+00 75600 1 + 570 3.062500e+00 4.084780e+00 8.310673e+00 76950 1 + 580 3.187500e+00 4.084780e+00 8.465287e+00 78300 1 + 590 3.812500e+00 4.084780e+00 8.616217e+00 79650 1 + 600 3.637500e+00 4.084774e+00 8.781291e+00 81000 1 + 610 3.950000e+00 4.084765e+00 8.943795e+00 82350 1 + 620 4.625000e+00 4.084760e+00 9.109773e+00 83700 1 + 630 4.218750e+00 4.084760e+00 9.278603e+00 85050 1 + 640 3.025000e+00 4.084755e+00 9.447766e+00 86400 1 + 650 2.993750e+00 4.084751e+00 9.614077e+00 87750 1 + 660 3.262500e+00 4.084746e+00 9.779014e+00 89100 1 + 670 3.625000e+00 4.084746e+00 9.949214e+00 90450 1 + 680 2.981250e+00 4.084746e+00 1.013076e+01 91800 1 + 690 4.187500e+00 4.084746e+00 1.030210e+01 93150 1 + 700 4.500000e+00 4.084746e+00 1.046893e+01 94500 1 + 710 3.225000e+00 4.084746e+00 1.064120e+01 95850 1 + 720 4.375000e+00 4.084746e+00 1.081753e+01 97200 1 + 730 2.650000e+00 4.084746e+00 1.100032e+01 98550 1 + 740 3.250000e+00 4.084746e+00 1.117048e+01 99900 1 + 750 4.725000e+00 4.084746e+00 1.135055e+01 101250 1 + 760 3.375000e+00 4.084746e+00 1.155643e+01 102600 1 + 770 5.375000e+00 4.084746e+00 1.173506e+01 103950 1 + 780 4.068750e+00 4.084746e+00 1.191593e+01 105300 1 + 790 4.412500e+00 4.084746e+00 1.210244e+01 106650 1 + 800 4.350000e+00 4.084746e+00 1.228223e+01 108000 1 + 810 5.887500e+00 4.084746e+00 1.246085e+01 109350 1 + 820 4.912500e+00 4.084746e+00 1.263127e+01 110700 1 + 830 4.387500e+00 
4.084746e+00 1.279979e+01 112050 1 + 840 3.675000e+00 4.084746e+00 1.297574e+01 113400 1 + 850 5.375000e+00 4.084746e+00 1.314960e+01 114750 1 + 860 3.562500e+00 4.084746e+00 1.332984e+01 116100 1 + 870 3.075000e+00 4.084746e+00 1.351357e+01 117450 1 + 880 3.625000e+00 4.084746e+00 1.368602e+01 118800 1 + 890 2.937500e+00 4.084746e+00 1.385575e+01 120150 1 + 900 4.450000e+00 4.084746e+00 1.403830e+01 121500 1 + 910 4.200000e+00 4.084746e+00 1.422259e+01 122850 1 + 920 3.687500e+00 4.084746e+00 1.440789e+01 124200 1 + 930 4.725000e+00 4.084746e+00 1.461609e+01 125550 1 + 940 4.018750e+00 4.084746e+00 1.479063e+01 126900 1 + 950 4.675000e+00 4.084746e+00 1.497216e+01 128250 1 + 960 3.375000e+00 4.084746e+00 1.514355e+01 129600 1 + 970 3.812500e+00 4.084746e+00 1.531842e+01 130950 1 + 980 3.112500e+00 4.084746e+00 1.549276e+01 132300 1 + 990 3.600000e+00 4.084746e+00 1.566755e+01 133650 1 + 1000 5.500000e+00 4.084746e+00 1.584500e+01 135000 1 + 1010 3.187500e+00 4.084746e+00 1.601833e+01 136350 1 + 1020 4.900000e+00 4.084746e+00 1.619413e+01 137700 1 + 1030 3.637500e+00 4.084746e+00 1.638374e+01 139050 1 + 1040 3.975000e+00 4.084746e+00 1.656010e+01 140400 1 + 1050 4.750000e+00 4.084746e+00 1.673823e+01 141750 1 + 1060 4.437500e+00 4.084746e+00 1.693251e+01 143100 1 + 1070 5.000000e+00 4.084746e+00 1.713577e+01 144450 1 + 1080 4.143750e+00 4.084746e+00 1.732494e+01 145800 1 + 1090 5.625000e+00 4.084746e+00 1.750154e+01 147150 1 + 1100 3.475000e+00 4.084746e+00 1.768629e+01 148500 1 + 1110 4.156250e+00 4.084746e+00 1.787517e+01 149850 1 + 1120 4.450000e+00 4.084746e+00 1.806196e+01 151200 1 + 1130 3.312500e+00 4.084741e+00 1.824955e+01 152550 1 + 1140 5.375000e+00 4.084741e+00 1.842986e+01 153900 1 + 1150 4.800000e+00 4.084737e+00 1.862470e+01 155250 1 + 1160 3.300000e+00 4.084737e+00 1.880885e+01 156600 1 + 1170 4.356250e+00 4.084737e+00 1.899097e+01 157950 1 + 1180 3.900000e+00 4.084737e+00 1.918029e+01 159300 1 + 1190 4.450000e+00 4.084737e+00 1.939933e+01 160650 1 
+ 1200 5.156250e+00 4.084737e+00 1.959207e+01 162000 1 + 1210 4.500000e+00 4.084737e+00 1.977063e+01 163350 1 + 1220 4.875000e+00 4.084737e+00 1.996918e+01 164700 1 + 1222 4.562500e+00 4.084737e+00 2.000839e+01 164970 1 ------------------------------------------------------------------- status : time_limit -total time (s) : 2.000504e+01 -total solves : 168210 +total time (s) : 2.000839e+01 +total solves : 164970 best bound : 4.084737e+00 -simulation ci : 4.071445e+00 ± 4.036229e-02 +simulation ci : 4.071580e+00 ± 4.071235e-02 numeric issues : 0 ------------------------------------------------------------------- @@ -253,28 +251,29 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 4.512500e+00 4.066874e+00 1.945758e-01 1350 1 - 20 5.062500e+00 4.040569e+00 5.647199e-01 2700 1 - 30 4.968750e+00 4.039400e+00 1.092341e+00 4050 1 - 40 4.125000e+00 4.039286e+00 1.748799e+00 5400 1 - 50 3.925000e+00 4.039078e+00 3.279388e+00 6750 1 - 60 3.875000e+00 4.039004e+00 4.238859e+00 8100 1 - 70 3.918750e+00 4.039008e+00 5.358448e+00 9450 1 - 80 3.600000e+00 4.038911e+00 6.510171e+00 10800 1 - 90 4.250000e+00 4.038874e+00 7.829152e+00 12150 1 - 100 5.400000e+00 4.038820e+00 9.207392e+00 13500 1 - 110 3.000000e+00 4.038795e+00 1.072500e+01 14850 1 - 120 3.000000e+00 4.038812e+00 1.232206e+01 16200 1 - 130 2.993750e+00 4.038782e+00 1.403743e+01 17550 1 - 140 4.406250e+00 4.038770e+00 1.594878e+01 18900 1 - 150 5.625000e+00 4.038777e+00 1.789192e+01 20250 1 - 160 3.081250e+00 4.038772e+00 1.990137e+01 21600 1 - 161 4.875000e+00 4.038772e+00 2.012187e+01 21735 1 + 10 4.925000e+00 6.001820e+00 1.997499e-01 1350 1 + 20 4.056250e+00 5.577813e+00 6.041179e-01 2700 1 + 30 3.000000e+00 4.731415e+00 1.068949e+00 4050 1 + 40 5.025000e+00 4.043470e+00 1.588734e+00 5400 1 + 50 4.250000e+00 4.039874e+00 2.212946e+00 6750 1 + 60 4.312500e+00 4.039177e+00 
2.971758e+00 8100 1 + 70 4.525000e+00 4.039054e+00 3.933827e+00 9450 1 + 80 3.687500e+00 4.039051e+00 5.006665e+00 10800 1 + 90 2.987500e+00 4.038970e+00 6.187553e+00 12150 1 + 100 3.225000e+00 4.038843e+00 7.497448e+00 13500 1 + 110 4.500000e+00 4.038843e+00 8.892059e+00 14850 1 + 120 5.750000e+00 4.038813e+00 1.041892e+01 16200 1 + 130 3.700000e+00 4.038777e+00 1.202767e+01 17550 1 + 140 3.800000e+00 4.038777e+00 1.373855e+01 18900 1 + 150 2.687500e+00 4.038777e+00 1.553139e+01 20250 1 + 160 4.737500e+00 4.038777e+00 1.758916e+01 21600 1 + 170 4.550000e+00 4.038777e+00 1.976712e+01 22950 1 + 172 3.050000e+00 4.038777e+00 2.014962e+01 23220 1 ------------------------------------------------------------------- status : time_limit -total time (s) : 2.012187e+01 -total solves : 21735 -best bound : 4.038772e+00 -simulation ci : 4.072826e+00 ± 1.208714e-01 +total time (s) : 2.014962e+01 +total solves : 23220 +best bound : 4.038777e+00 +simulation ci : 4.085411e+00 ± 1.137697e-01 numeric issues : 0 -------------------------------------------------------------------- +------------------------------------------------------------------- diff --git a/dev/examples/sldp_example_one/index.html b/dev/examples/sldp_example_one/index.html index 1d02e594a..29848a8b7 100644 --- a/dev/examples/sldp_example_one/index.html +++ b/dev/examples/sldp_example_one/index.html @@ -65,20 +65,19 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 2.987639e+00 1.163194e+00 3.866601e-01 1680 1 - 20 2.445919e+00 1.164572e+00 4.845541e-01 2560 1 - 30 3.191503e+00 1.164572e+00 8.795722e-01 4240 1 - 40 2.867005e+00 1.164617e+00 9.841540e-01 5120 1 - 50 2.788348e+00 1.164617e+00 1.385857e+00 6800 1 - 60 3.120693e+00 1.166901e+00 1.493642e+00 7680 1 - 70 2.863623e+00 1.166901e+00 1.895165e+00 9360 1 - 80 3.556423e+00 1.166901e+00 2.014104e+00 10240 1 - 84 
3.052539e+00 1.166901e+00 2.057870e+00 10592 1 + 10 2.738921e+00 1.165036e+00 3.904660e-01 1680 1 + 20 3.573852e+00 1.165415e+00 4.874790e-01 2560 1 + 30 3.448800e+00 1.167299e+00 8.816559e-01 4240 1 + 40 2.671985e+00 1.167299e+00 9.822218e-01 5120 1 + 50 3.608689e+00 1.167299e+00 1.380473e+00 6800 1 + 60 2.737611e+00 1.167299e+00 1.482815e+00 7680 1 + 70 3.529174e+00 1.167299e+00 1.894601e+00 9360 1 + 80 3.538601e+00 1.167299e+00 2.025845e+00 10240 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 2.057870e+00 -total solves : 10592 -best bound : 1.166901e+00 -simulation ci : 3.160067e+00 ± 7.484453e-02 +total time (s) : 2.025845e+00 +total solves : 10240 +best bound : 1.167299e+00 +simulation ci : 3.210392e+00 ± 9.260012e-02 numeric issues : 0 -------------------------------------------------------------------- +------------------------------------------------------------------- diff --git a/dev/examples/sldp_example_two/index.html b/dev/examples/sldp_example_two/index.html index 7dd526e03..e5f4d2f84 100644 --- a/dev/examples/sldp_example_two/index.html +++ b/dev/examples/sldp_example_two/index.html @@ -92,16 +92,16 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 -4.700000e+01 -5.809615e+01 3.250408e-02 78 1 - 20 -9.800000e+01 -5.809615e+01 6.497502e-02 148 1 - 30 -9.800000e+01 -5.809615e+01 1.073740e-01 226 1 - 40 -4.000000e+01 -5.809615e+01 1.444240e-01 296 1 + 10 -9.800000e+01 -5.809615e+01 3.199697e-02 78 1 + 20 -4.000000e+01 -5.809615e+01 6.564403e-02 148 1 + 30 -9.800000e+01 -5.809615e+01 1.043379e-01 226 1 + 40 -4.700000e+01 -5.809615e+01 1.380351e-01 296 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 1.444240e-01 +total time (s) : 1.380351e-01 total solves : 296 best bound : 
-5.809615e+01 -simulation ci : -5.676250e+01 ± 8.129290e+00 +simulation ci : -4.968750e+01 ± 6.340715e+00 numeric issues : 0 ------------------------------------------------------------------- @@ -133,16 +133,16 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 -9.800000e+01 -6.196125e+01 4.038405e-02 138 1 - 20 -8.200000e+01 -6.196125e+01 7.790899e-02 258 1 - 30 -4.000000e+01 -6.196125e+01 1.283491e-01 396 1 - 40 -9.800000e+01 -6.196125e+01 1.703582e-01 516 1 + 10 -4.700000e+01 -6.196125e+01 3.930807e-02 138 1 + 20 -4.000000e+01 -6.196125e+01 7.583189e-02 258 1 + 30 -4.700000e+01 -6.196125e+01 1.254940e-01 396 1 + 40 -4.700000e+01 -6.196125e+01 1.631250e-01 516 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 1.703582e-01 +total time (s) : 1.631250e-01 total solves : 516 best bound : -6.196125e+01 -simulation ci : -5.646250e+01 ± 6.296744e+00 +simulation ci : -5.131250e+01 ± 5.474059e+00 numeric issues : 0 ------------------------------------------------------------------- @@ -174,15 +174,15 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 10 -7.900000e+01 -6.546793e+01 7.706094e-02 462 1 - 20 -7.500000e+01 -6.546793e+01 1.395438e-01 852 1 - 30 -7.500000e+01 -6.546793e+01 2.574949e-01 1314 1 - 40 -6.300000e+01 -6.546793e+01 3.206749e-01 1704 1 + 10 -6.300000e+01 -6.546793e+01 7.825494e-02 462 1 + 20 -5.600000e+01 -6.546793e+01 1.369660e-01 852 1 + 30 -4.000000e+01 -6.546793e+01 2.524149e-01 1314 1 + 40 -7.000000e+01 -6.546793e+01 3.119290e-01 1704 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 3.206749e-01 +total time (s) : 3.119290e-01 total solves : 
1704 best bound : -6.546793e+01 -simulation ci : -6.596250e+01 ± 4.258497e+00 +simulation ci : -6.001250e+01 ± 5.827677e+00 numeric issues : 0 -------------------------------------------------------------------- +------------------------------------------------------------------- diff --git a/dev/examples/stochastic_all_blacks/index.html b/dev/examples/stochastic_all_blacks/index.html index 00828f179..98344c629 100644 --- a/dev/examples/stochastic_all_blacks/index.html +++ b/dev/examples/stochastic_all_blacks/index.html @@ -77,13 +77,13 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1L 6.000000e+00 1.200000e+01 4.188991e-02 11 1 - 40L 6.000000e+00 8.000000e+00 4.264100e-01 602 1 + 1L 9.000000e+00 1.422222e+01 4.241514e-02 11 1 + 40L 1.200000e+01 8.000000e+00 5.433002e-01 602 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 4.264100e-01 +total time (s) : 5.433002e-01 total solves : 602 best bound : 8.000000e+00 -simulation ci : 8.100000e+00 ± 9.225303e-01 +simulation ci : 7.725000e+00 ± 8.123457e-01 numeric issues : 0 -------------------------------------------------------------------- +------------------------------------------------------------------- diff --git a/dev/examples/the_farmers_problem/index.html b/dev/examples/the_farmers_problem/index.html index 2a62ee15d..2d34e5849 100644 --- a/dev/examples/the_farmers_problem/index.html +++ b/dev/examples/the_farmers_problem/index.html @@ -125,13 +125,13 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 -9.800000e+04 4.922260e+05 8.616400e-02 6 1 - 40 4.882000e+04 1.083900e+05 1.159940e-01 240 1 + 1 -9.800000e+04 4.922260e+05 8.789086e-02 6 1 + 40 4.882000e+04 1.083900e+05 
1.170979e-01 240 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 1.159940e-01 +total time (s) : 1.170979e-01 total solves : 240 best bound : 1.083900e+05 -simulation ci : 9.479342e+04 ± 1.998027e+04 +simulation ci : 9.422433e+04 ± 2.039856e+04 numeric issues : 0 --------------------------------------------------------------------

Checking the policy

Birge and Louveaux report that the optimal objective value is $108,390. Check that we got the correct solution using SDDP.calculate_bound:

@assert isapprox(SDDP.calculate_bound(model), 108_390.0, atol = 0.1)
+-------------------------------------------------------------------

Checking the policy

Birge and Louveaux report that the optimal objective value is $108,390. Check that we got the correct solution using SDDP.calculate_bound:

@assert isapprox(SDDP.calculate_bound(model), 108_390.0, atol = 0.1)
diff --git a/dev/examples/vehicle_location/index.html b/dev/examples/vehicle_location/index.html index dd579d175..5e24769e2 100644 --- a/dev/examples/vehicle_location/index.html +++ b/dev/examples/vehicle_location/index.html @@ -108,4 +108,4 @@ end # TODO(odow): find out why this fails -# vehicle_location_model(SDDP.ContinuousConicDuality())
vehicle_location_model (generic function with 1 method)
+# vehicle_location_model(SDDP.ContinuousConicDuality())
vehicle_location_model (generic function with 1 method)
diff --git a/dev/explanation/risk/index.html b/dev/explanation/risk/index.html index bf4fb0572..4d3e4376e 100644 --- a/dev/explanation/risk/index.html +++ b/dev/explanation/risk/index.html @@ -512,18 +512,18 @@ | | Visiting node 2 | | | Z = [1.0, 2.0, 3.0, 4.0] | | | p = [0.3333333333333333, 0.3333333333333333, 0.3333333333333333] -| | | q = [0.8432391442060109, 0.07838042789699455, 0.07838042789699455] -| | | α = 0.5556945523744657 -| | | Adding cut : 126.48587163090163 volume_out + cost_to_go ≥ 18972.32505008287 +| | | q = [0.3333333333333333, 0.3333333333333333, 0.3333333333333333] +| | | α = 5.551115123125783e-17 +| | | Adding cut : cost_to_go ≥ -5.551115123125783e-17 | | Visiting node 1 | | | Z = [1.0, 2.0, 3.0, 4.0] | | | p = [0.3333333333333333, 0.3333333333333333, 0.3333333333333333] | | | q = [1.0, 0.0, 0.0] | | | α = 1.0986122886681098 -| | | Adding cut : 100 volume_out + cost_to_go ≥ 29998.641399238186 +| | | Adding cut : 100 volume_out + cost_to_go ≥ 29998.594667538695 | Finished iteration -| | lower_bound = 14998.641399238184 -Upper bound = 9749.712112798506 ± 825.4751311354909

Finally, evaluate the decision rule:

evaluate_policy(
+| | lower_bound = 14998.594667538693
+Upper bound = 10049.61344155826 ± 918.5736577708174

Finally, evaluate the decision rule:

evaluate_policy(
     model;
     node = 1,
     incoming_state = Dict(:volume => 150.0),
@@ -536,4 +536,4 @@
   :volume_in          => 150.0
   :thermal_generation => 125.0
   :hydro_generation   => 25.0
-  :cost_to_go         => 9998.64
Info

For this trivial example, the risk-averse policy isn't very different from the policy obtained using the expectation risk-measure. If you try it on some bigger/more interesting problems, you should see the expected cost increase, and the upper tail of the policy decrease.

+ :cost_to_go => 9998.59
Info

For this trivial example, the risk-averse policy isn't very different from the policy obtained using the expectation risk-measure. If you try it on some bigger/more interesting problems, you should see the expected cost increase, and the upper tail of the policy decrease.

diff --git a/dev/explanation/theory_intro/index.html b/dev/explanation/theory_intro/index.html index 42d99b294..9e730fa33 100644 --- a/dev/explanation/theory_intro/index.html +++ b/dev/explanation/theory_intro/index.html @@ -202,7 +202,7 @@ println("ω = ", sample_uncertainty(model.nodes[1].uncertainty)) end
ω = 0.0
 ω = 0.0
-ω = 0.0

It's also going to be useful to define a function that generates a random walk through the nodes of the graph:

function sample_next_node(model::PolicyGraph, current::Int)
+ω = 100.0

It's also going to be useful to define a function that generates a random walk through the nodes of the graph:

function sample_next_node(model::PolicyGraph, current::Int)
     if length(model.arcs[current]) == 0
         # No outgoing arcs!
         return nothing
@@ -275,15 +275,15 @@
     return trajectory, simulation_cost
 end
forward_pass (generic function with 2 methods)

Let's take a look at one forward pass:

trajectory, simulation_cost = forward_pass(model);
| Forward Pass
 | | Visiting node 1
-| | | ω = 100.0
+| | | ω = 0.0
 | | | x = Dict(:volume => 200.0)
-| | | x′ = Dict(:volume => 0.0)
+| | | x′ = Dict(:volume => 50.0)
 | | | C(x, u, ω) = 0.0
 | | Visiting node 2
-| | | ω = 0.0
-| | | x = Dict(:volume => 0.0)
+| | | ω = 100.0
+| | | x = Dict(:volume => 50.0)
 | | | x′ = Dict(:volume => 0.0)
-| | | C(x, u, ω) = 15000.0
+| | | C(x, u, ω) = 0.0
 | | Visiting node 3
 | | | ω = 100.0
 | | | x = Dict(:volume => 0.0)
@@ -382,18 +382,18 @@
 end
train (generic function with 1 method)

Using our model we defined earlier, we can go:

train(model; iteration_limit = 3, replications = 100)
Starting iteration 1
 | Forward Pass
 | | Visiting node 1
-| | | ω = 100.0
+| | | ω = 50.0
 | | | x = Dict(:volume => 200.0)
-| | | x′ = Dict(:volume => 0.0)
+| | | x′ = Dict(:volume => 100.0)
 | | | C(x, u, ω) = 0.0
 | | Visiting node 2
-| | | ω = 0.0
-| | | x = Dict(:volume => 0.0)
-| | | x′ = Dict(:volume => 0.0)
-| | | C(x, u, ω) = 15000.0
+| | | ω = 100.0
+| | | x = Dict(:volume => 100.0)
+| | | x′ = Dict(:volume => 50.0)
+| | | C(x, u, ω) = 0.0
 | | Visiting node 3
-| | | ω = 50.0
-| | | x = Dict(:volume => 0.0)
+| | | ω = 0.0
+| | | x = Dict(:volume => 50.0)
 | | | x′ = Dict(:volume => 0.0)
 | | | C(x, u, ω) = 15000.0
 | Backward pass
@@ -401,54 +401,54 @@
 | | | Skipping node because the cost-to-go is 0
 | | Visiting node 2
 | | | Solving φ = 0.0
-| | | | V = 22500.0
+| | | | V = 15000.0
 | | | | dVdx′ = Dict(:volume => -150.0)
 | | | Solving φ = 50.0
-| | | | V = 15000.0
+| | | | V = 7500.0
 | | | | dVdx′ = Dict(:volume => -150.0)
 | | | Solving φ = 100.0
-| | | | V = 7500.0
+| | | | V = 0.0
 | | | | dVdx′ = Dict(:volume => -150.0)
 | | | Adding cut : 150 volume_out + cost_to_go ≥ 15000
 | | Visiting node 1
 | | | Solving φ = 0.0
-| | | | V = 30000.0
-| | | | dVdx′ = Dict(:volume => -150.0)
+| | | | V = 15000.0
+| | | | dVdx′ = Dict(:volume => -100.0)
 | | | Solving φ = 50.0
-| | | | V = 22500.0
-| | | | dVdx′ = Dict(:volume => -150.0)
+| | | | V = 10000.0
+| | | | dVdx′ = Dict(:volume => -100.0)
 | | | Solving φ = 100.0
-| | | | V = 15000.0
-| | | | dVdx′ = Dict(:volume => -150.0)
-| | | Adding cut : 150 volume_out + cost_to_go ≥ 22500
+| | | | V = 5000.0
+| | | | dVdx′ = Dict(:volume => -100.0)
+| | | Adding cut : 99.99999999999999 volume_out + cost_to_go ≥ 20000
 | Finished iteration
-| | lower_bound = 2500.0
+| | lower_bound = 5000.000000000002
 Starting iteration 2
 | Forward Pass
 | | Visiting node 1
-| | | ω = 50.0
+| | | ω = 0.0
 | | | x = Dict(:volume => 200.0)
-| | | x′ = Dict(:volume => 150.0)
-| | | C(x, u, ω) = 2500.0
+| | | x′ = Dict(:volume => 200.00000000000003)
+| | | C(x, u, ω) = 7500.000000000002
 | | Visiting node 2
-| | | ω = 0.0
-| | | x = Dict(:volume => 150.0)
-| | | x′ = Dict(:volume => 100.0)
-| | | C(x, u, ω) = 10000.0
-| | Visiting node 3
 | | | ω = 50.0
-| | | x = Dict(:volume => 100.0)
-| | | x′ = Dict(:volume => 0.0)
+| | | x = Dict(:volume => 200.00000000000003)
+| | | x′ = Dict(:volume => 99.99999999999997)
+| | | C(x, u, ω) = -4.850638409455618e-12
+| | Visiting node 3
+| | | ω = 100.0
+| | | x = Dict(:volume => 99.99999999999997)
+| | | x′ = Dict(:volume => 49.99999999999997)
 | | | C(x, u, ω) = 0.0
 | Backward pass
 | | Visiting node 3
 | | | Skipping node because the cost-to-go is 0
 | | Visiting node 2
 | | | Solving φ = 0.0
-| | | | V = 7500.0
+| | | | V = 7500.000000000005
 | | | | dVdx′ = Dict(:volume => -150.0)
 | | | Solving φ = 50.0
-| | | | V = 0.0
+| | | | V = 4.263256414560601e-12
 | | | | dVdx′ = Dict(:volume => -150.0)
 | | | Solving φ = 100.0
 | | | | V = 0.0
@@ -456,34 +456,34 @@
 | | | Adding cut : 100 volume_out + cost_to_go ≥ 12500
 | | Visiting node 1
 | | | Solving φ = 0.0
-| | | | V = 12499.999999999998
+| | | | V = 7499.999999999995
 | | | | dVdx′ = Dict(:volume => -100.0)
 | | | Solving φ = 50.0
-| | | | V = 7499.999999999998
+| | | | V = 2499.999999999996
 | | | | dVdx′ = Dict(:volume => -100.0)
 | | | Solving φ = 100.0
-| | | | V = 2499.9999999999986
-| | | | dVdx′ = Dict(:volume => -100.0)
-| | | Adding cut : 99.99999999999999 volume_out + cost_to_go ≥ 22499.999999999996
+| | | | V = 0.0
+| | | | dVdx′ = Dict(:volume => 0.0)
+| | | Adding cut : 66.66666666666666 volume_out + cost_to_go ≥ 16666.666666666664
 | Finished iteration
-| | lower_bound = 7499.999999999998
+| | lower_bound = 8333.333333333332
 Starting iteration 3
 | Forward Pass
 | | Visiting node 1
-| | | ω = 50.0
+| | | ω = 0.0
 | | | x = Dict(:volume => 200.0)
 | | | x′ = Dict(:volume => 200.0)
-| | | C(x, u, ω) = 4999.999999999998
+| | | C(x, u, ω) = 7500.0
 | | Visiting node 2
 | | | ω = 50.0
 | | | x = Dict(:volume => 200.0)
 | | | x′ = Dict(:volume => 124.99999999999997)
-| | | C(x, u, ω) = 2499.9999999999986
+| | | C(x, u, ω) = 2500.0
 | | Visiting node 3
-| | | ω = 0.0
+| | | ω = 50.0
 | | | x = Dict(:volume => 124.99999999999997)
 | | | x′ = Dict(:volume => 0.0)
-| | | C(x, u, ω) = 3750.000000000004
+| | | C(x, u, ω) = 0.0
 | Backward pass
 | | Visiting node 3
 | | | Skipping node because the cost-to-go is 0
@@ -512,7 +512,7 @@
 | Finished iteration
 | | lower_bound = 8333.333333333332
 Termination status: iteration limit
-Upper bound = 8625.0 ± 842.6106317675305

Success! We trained a policy for a finite horizon multistage stochastic program using stochastic dual dynamic programming.

Implementation: evaluating the policy

A final step is the ability to evaluate the policy at a given point.

function evaluate_policy(
+Upper bound = 8625.0 ± 990.7678636269748

Success! We trained a policy for a finite horizon multistage stochastic program using stochastic dual dynamic programming.

Implementation: evaluating the policy

A final step is the ability to evaluate the policy at a given point.

function evaluate_policy(
     model::PolicyGraph;
     node::Int,
     incoming_state::Dict{Symbol,Float64},
@@ -556,30 +556,20 @@
 

Then, train a policy:

train(model; iteration_limit = 3, replications = 100)
Starting iteration 1
 | Forward Pass
 | | Visiting node 1
-| | | ω = 0.0
+| | | ω = 100.0
 | | | x = Dict(:volume => 200.0)
-| | | x′ = Dict(:volume => 50.0)
+| | | x′ = Dict(:volume => 0.0)
 | | | C(x, u, ω) = 0.0
 | | Visiting node 2
 | | | ω = 0.0
-| | | x = Dict(:volume => 50.0)
+| | | x = Dict(:volume => 0.0)
 | | | x′ = Dict(:volume => 0.0)
-| | | C(x, u, ω) = 10000.0
+| | | C(x, u, ω) = 15000.0
 | | Visiting node 3
 | | | ω = 0.0
 | | | x = Dict(:volume => 0.0)
 | | | x′ = Dict(:volume => 0.0)
 | | | C(x, u, ω) = 22500.0
-| | Visiting node 2
-| | | ω = 100.0
-| | | x = Dict(:volume => 0.0)
-| | | x′ = Dict(:volume => 0.0)
-| | | C(x, u, ω) = 5000.0
-| | Visiting node 3
-| | | ω = 50.0
-| | | x = Dict(:volume => 0.0)
-| | | x′ = Dict(:volume => 0.0)
-| | | C(x, u, ω) = 15000.0
 | Backward pass
 | | Visiting node 3
 | | | Solving φ = 0.0
@@ -603,7 +593,7 @@
 | | | | V = 12500.0
 | | | | dVdx′ = Dict(:volume => -150.0)
 | | | Adding cut : 150 volume_out + cost_to_go ≥ 20000
-| | Visiting node 3
+| | Visiting node 1
 | | | Solving φ = 0.0
 | | | | V = 35000.0
 | | | | dVdx′ = Dict(:volume => -150.0)
@@ -613,139 +603,149 @@
 | | | Solving φ = 100.0
 | | | | V = 20000.0
 | | | | dVdx′ = Dict(:volume => -150.0)
-| | | Adding cut : 75 volume_out + cost_to_go ≥ 13750
-| | Visiting node 2
-| | | Solving φ = 0.0
-| | | | V = 36250.0
-| | | | dVdx′ = Dict(:volume => -150.0)
-| | | Solving φ = 50.0
-| | | | V = 28750.0
-| | | | dVdx′ = Dict(:volume => -150.0)
-| | | Solving φ = 100.0
-| | | | V = 21250.0
-| | | | dVdx′ = Dict(:volume => -150.0)
-| | | Adding cut : 150 volume_out + cost_to_go ≥ 28749.999999999996
-| | Visiting node 1
-| | | Solving φ = 0.0
-| | | | V = 36250.0
-| | | | dVdx′ = Dict(:volume => -150.0)
-| | | Solving φ = 50.0
-| | | | V = 28749.999999999996
-| | | | dVdx′ = Dict(:volume => -150.0)
-| | | Solving φ = 100.0
-| | | | V = 21249.999999999996
-| | | | dVdx′ = Dict(:volume => -150.0)
-| | | Adding cut : 150 volume_out + cost_to_go ≥ 36250
+| | | Adding cut : 150 volume_out + cost_to_go ≥ 27500
 | Finished iteration
-| | lower_bound = 11249.999999999998
+| | lower_bound = 4166.666666666667
 Starting iteration 2
 | Forward Pass
 | | Visiting node 1
 | | | ω = 50.0
 | | | x = Dict(:volume => 200.0)
-| | | x′ = Dict(:volume => 200.0)
-| | | C(x, u, ω) = 4999.999999999997
+| | | x′ = Dict(:volume => 183.33333333333334)
+| | | C(x, u, ω) = 4166.666666666667
 | | Visiting node 2
-| | | ω = 0.0
-| | | x = Dict(:volume => 200.0)
-| | | x′ = Dict(:volume => 191.66666666666663)
-| | | C(x, u, ω) = 14166.666666666662
+| | | ω = 100.0
+| | | x = Dict(:volume => 183.33333333333334)
+| | | x′ = Dict(:volume => 133.33333333333334)
+| | | C(x, u, ω) = -2.8421709430404007e-12
 | | Visiting node 3
 | | | ω = 100.0
-| | | x = Dict(:volume => 191.66666666666663)
-| | | x′ = Dict(:volume => 141.66666666666663)
+| | | x = Dict(:volume => 133.33333333333334)
+| | | x′ = Dict(:volume => 83.33333333333334)
 | | | C(x, u, ω) = 0.0
+| | Visiting node 2
+| | | ω = 0.0
+| | | x = Dict(:volume => 83.33333333333334)
+| | | x′ = Dict(:volume => 83.33333333333333)
+| | | C(x, u, ω) = 15000.0
+| | Visiting node 3
+| | | ω = 50.0
+| | | x = Dict(:volume => 83.33333333333333)
+| | | x′ = Dict(:volume => 0.0)
+| | | C(x, u, ω) = 2500.0000000000027
 | Backward pass
 | | Visiting node 3
 | | | Solving φ = 0.0
-| | | | V = 22500.000000000004
+| | | | V = 35000.0
 | | | | dVdx′ = Dict(:volume => -150.0)
 | | | Solving φ = 50.0
-| | | | V = 15000.000000000004
+| | | | V = 27500.0
 | | | | dVdx′ = Dict(:volume => -150.0)
 | | | Solving φ = 100.0
-| | | | V = 10000.000000000004
-| | | | dVdx′ = Dict(:volume => -100.0)
-| | | Adding cut : 66.66666666666666 volume_out + cost_to_go ≥ 17361.11111111111
+| | | | V = 20000.0
+| | | | dVdx′ = Dict(:volume => -150.0)
+| | | Adding cut : 75 volume_out + cost_to_go ≥ 13750
+| | Visiting node 2
+| | | Solving φ = 0.0
+| | | | V = 23750.0
+| | | | dVdx′ = Dict(:volume => -150.0)
+| | | Solving φ = 50.0
+| | | | V = 16250.000000000004
+| | | | dVdx′ = Dict(:volume => -150.0)
+| | | Solving φ = 100.0
+| | | | V = 11250.0
+| | | | dVdx′ = Dict(:volume => -75.0)
+| | | Adding cut : 125 volume_out + cost_to_go ≥ 27499.999999999996
+| | Visiting node 3
+| | | Solving φ = 0.0
+| | | | V = 32083.33333333333
+| | | | dVdx′ = Dict(:volume => -125.0)
+| | | Solving φ = 50.0
+| | | | V = 25833.33333333333
+| | | | dVdx′ = Dict(:volume => -125.0)
+| | | Solving φ = 100.0
+| | | | V = 19583.33333333333
+| | | | dVdx′ = Dict(:volume => -125.0)
+| | | Adding cut : 62.5 volume_out + cost_to_go ≥ 18125
 | | Visiting node 2
 | | | Solving φ = 0.0
-| | | | V = 14583.333333333336
-| | | | dVdx′ = Dict(:volume => -66.66666666666666)
+| | | | V = 20625.0
+| | | | dVdx′ = Dict(:volume => -150.0)
 | | | Solving φ = 50.0
-| | | | V = 11250.000000000004
-| | | | dVdx′ = Dict(:volume => -66.66666666666666)
+| | | | V = 16041.666666666668
+| | | | dVdx′ = Dict(:volume => -62.5)
 | | | Solving φ = 100.0
-| | | | V = 7916.666666666668
-| | | | dVdx′ = Dict(:volume => -66.66666666666666)
-| | | Adding cut : 66.66666666666666 volume_out + cost_to_go ≥ 24027.777777777774
+| | | | V = 12916.666666666668
+| | | | dVdx′ = Dict(:volume => -62.5)
+| | | Adding cut : 91.66666666666666 volume_out + cost_to_go ≥ 28749.999999999996
 | | Visiting node 1
 | | | Solving φ = 0.0
-| | | | V = 20916.666666666664
-| | | | dVdx′ = Dict(:volume => -100.0)
+| | | | V = 25694.444444444438
+| | | | dVdx′ = Dict(:volume => -91.66666666666666)
 | | | Solving φ = 50.0
-| | | | V = 17361.11111111111
-| | | | dVdx′ = Dict(:volume => -66.66666666666666)
+| | | | V = 21111.111111111117
+| | | | dVdx′ = Dict(:volume => -91.66666666666666)
 | | | Solving φ = 100.0
-| | | | V = 14027.77777777778
-| | | | dVdx′ = Dict(:volume => -66.66666666666666)
-| | | Adding cut : 77.77777777777776 volume_out + cost_to_go ≥ 32990.74074074073
+| | | | V = 16527.77777777777
+| | | | dVdx′ = Dict(:volume => -91.66666666666666)
+| | | Adding cut : 91.66666666666666 volume_out + cost_to_go ≥ 37916.666666666664
 | Finished iteration
-| | lower_bound = 22435.185185185182
+| | lower_bound = 24583.333333333336
 Starting iteration 3
 | Forward Pass
 | | Visiting node 1
 | | | ω = 0.0
 | | | x = Dict(:volume => 200.0)
 | | | x′ = Dict(:volume => 200.0)
-| | | C(x, u, ω) = 7500.0
+| | | C(x, u, ω) = 7500.000000000004
 | | Visiting node 2
-| | | ω = 0.0
+| | | ω = 100.0
 | | | x = Dict(:volume => 200.0)
-| | | x′ = Dict(:volume => 56.66666666666664)
-| | | C(x, u, ω) = 666.6666666666642
+| | | x′ = Dict(:volume => 150.0)
+| | | C(x, u, ω) = 0.0
 | | Visiting node 3
 | | | ω = 0.0
-| | | x = Dict(:volume => 56.66666666666664)
-| | | x′ = Dict(:volume => 0.0)
-| | | C(x, u, ω) = 14000.000000000004
+| | | x = Dict(:volume => 150.0)
+| | | x′ = Dict(:volume => -0.0)
+| | | C(x, u, ω) = 0.0
 | Backward pass
 | | Visiting node 3
 | | | Solving φ = 0.0
-| | | | V = 43749.99999999999
-| | | | dVdx′ = Dict(:volume => -150.0)
+| | | | V = 43750.0
+| | | | dVdx′ = Dict(:volume => -100.00000000000001)
 | | | Solving φ = 50.0
-| | | | V = 36249.99999999999
-| | | | dVdx′ = Dict(:volume => -150.0)
+| | | | V = 38750.0
+| | | | dVdx′ = Dict(:volume => -100.00000000000001)
 | | | Solving φ = 100.0
-| | | | V = 30916.666666666664
-| | | | dVdx′ = Dict(:volume => -100.0)
-| | | Adding cut : 66.66666666666666 volume_out + cost_to_go ≥ 18486.11111111111
+| | | | V = 33750.0
+| | | | dVdx′ = Dict(:volume => -100.00000000000001)
+| | | Adding cut : 50 volume_out + cost_to_go ≥ 19375
 | | Visiting node 2
 | | | Solving φ = 0.0
-| | | | V = 32486.111111111113
-| | | | dVdx′ = Dict(:volume => -150.0)
+| | | | V = 19375.0
+| | | | dVdx′ = Dict(:volume => -50.0)
 | | | Solving φ = 50.0
-| | | | V = 24986.111111111113
-| | | | dVdx′ = Dict(:volume => -150.0)
+| | | | V = 16875.0
+| | | | dVdx′ = Dict(:volume => -50.0)
 | | | Solving φ = 100.0
-| | | | V = 18041.666666666668
-| | | | dVdx′ = Dict(:volume => -66.66666666666666)
-| | | Adding cut : 122.22222222222221 volume_out + cost_to_go ≥ 32097.22222222222
+| | | | V = 14375.0
+| | | | dVdx′ = Dict(:volume => -50.0)
+| | | Adding cut : 49.99999999999999 volume_out + cost_to_go ≥ 24375
 | | Visiting node 1
 | | | Solving φ = 0.0
-| | | | V = 23869.444444444445
-| | | | dVdx′ = Dict(:volume => -100.0)
+| | | | V = 24166.666666666664
+| | | | dVdx′ = Dict(:volume => -91.66666666666666)
 | | | Solving φ = 50.0
-| | | | V = 18869.444444444445
-| | | | dVdx′ = Dict(:volume => -100.0)
+| | | | V = 19583.33333333333
+| | | | dVdx′ = Dict(:volume => -91.66666666666666)
 | | | Solving φ = 100.0
-| | | | V = 14027.777777777774
-| | | | dVdx′ = Dict(:volume => -66.66666666666666)
-| | | Adding cut : 88.88888888888887 volume_out + cost_to_go ≥ 36700
+| | | | V = 16875.0
+| | | | dVdx′ = Dict(:volume => -49.99999999999999)
+| | | Adding cut : 77.77777777777777 volume_out + cost_to_go ≥ 35763.88888888888
 | Finished iteration
-| | lower_bound = 23922.222222222226
+| | lower_bound = 25208.333333333325
 Termination status: iteration limit
-Upper bound = 33660.75 ± 7404.1257539635335

Success! We trained a policy for an infinite horizon multistage stochastic program using stochastic dual dynamic programming. Note how some of the forward passes are different lengths!

evaluate_policy(
+Upper bound = 36975.0 ± 8654.928098715747

Success! We trained a policy for an infinite horizon multistage stochastic program using stochastic dual dynamic programming. Note how some of the forward passes are different lengths!

evaluate_policy(
     model;
     node = 3,
     incoming_state = Dict(:volume => 100.0),
@@ -758,4 +758,4 @@
   :volume_in          => 100.0
   :thermal_generation => 40.0
   :hydro_generation   => 110.0
-  :cost_to_go         => 18486.1
+ :cost_to_go => 19375.0 diff --git a/dev/guides/access_previous_variables/index.html b/dev/guides/access_previous_variables/index.html index 80f07b3a8..b481d05af 100644 --- a/dev/guides/access_previous_variables/index.html +++ b/dev/guides/access_previous_variables/index.html @@ -98,4 +98,4 @@ end end endA policy graph with 20 nodes. - Node indices: 1, ..., 20 + Node indices: 1, ..., 20 diff --git a/dev/guides/add_a_multidimensional_state_variable/index.html b/dev/guides/add_a_multidimensional_state_variable/index.html index 24e9be7c8..3728bb17d 100644 --- a/dev/guides/add_a_multidimensional_state_variable/index.html +++ b/dev/guides/add_a_multidimensional_state_variable/index.html @@ -19,4 +19,4 @@ end; Lower bound of outgoing x is: 0.0 Lower bound of outgoing y[1] is: 1.0 -Lower bound of outgoing z[3, :B] is: 3.0 +Lower bound of outgoing z[3, :B] is: 3.0 diff --git a/dev/guides/add_a_risk_measure/index.html b/dev/guides/add_a_risk_measure/index.html index d0f661312..cca11d6bb 100644 --- a/dev/guides/add_a_risk_measure/index.html +++ b/dev/guides/add_a_risk_measure/index.html @@ -40,7 +40,7 @@ 0.0 0.0 0.0 - 0.0

Expectation

SDDP.ExpectationType
Expectation()

The Expectation risk measure. Identical to taking the expectation with respect to the nominal distribution.

source
julia> using SDDP
julia> SDDP.adjust_probability( + 0.0

Expectation

SDDP.ExpectationType
Expectation()

The Expectation risk measure. Identical to taking the expectation with respect to the nominal distribution.

source
julia> using SDDP
julia> SDDP.adjust_probability( SDDP.Expectation(), risk_adjusted_probability, nominal_probability, @@ -51,7 +51,7 @@ 0.1 0.2 0.3 - 0.4

SDDP.Expectation is the default risk measure in SDDP.jl.

Worst-case

SDDP.WorstCaseType
WorstCase()

The worst-case risk measure. Places all of the probability weight on the worst outcome.

source
julia> SDDP.adjust_probability(
+ 0.4

SDDP.Expectation is the default risk measure in SDDP.jl.

Worst-case

SDDP.WorstCaseType
WorstCase()

The worst-case risk measure. Places all of the probability weight on the worst outcome.

source
julia> SDDP.adjust_probability(
            SDDP.WorstCase(),
            risk_adjusted_probability,
            nominal_probability,
@@ -62,7 +62,7 @@
  0.0
  0.0
  1.0
- 0.0

Average value at risk (AV@R)

SDDP.AVaRType
AVaR(β)

The average value at risk (AV@R) risk measure.

Computes the expectation of the β fraction of worst outcomes. β must be in [0, 1]. When β=1, this is equivalent to the Expectation risk measure. When β=0, this is equivalent to the WorstCase risk measure.

AV@R is also known as the conditional value at risk (CV@R) or expected shortfall.

source
julia> SDDP.adjust_probability(
+ 0.0

Average value at risk (AV@R)

SDDP.AVaRType
AVaR(β)

The average value at risk (AV@R) risk measure.

Computes the expectation of the β fraction of worst outcomes. β must be in [0, 1]. When β=1, this is equivalent to the Expectation risk measure. When β=0, this is equivalent to the WorstCase risk measure.

AV@R is also known as the conditional value at risk (CV@R) or expected shortfall.

source
julia> SDDP.adjust_probability(
            SDDP.AVaR(0.5),
            risk_adjusted_probability,
            nominal_probability,
@@ -84,10 +84,10 @@
  0.05
  0.1
  0.65
- 0.2

As a special case, the SDDP.EAVaR risk-measure is a convex combination of SDDP.Expectation and SDDP.AVaR:

julia> SDDP.EAVaR(beta=0.25, lambda=0.4)A convex combination of 0.4 * SDDP.Expectation() + 0.6 * SDDP.AVaR(0.25)
SDDP.EAVaRFunction
EAVaR(;lambda=1.0, beta=1.0)

A risk measure that is a convex combination of Expectation and Average Value @ Risk (also called Conditional Value @ Risk).

    λ * E[x] + (1 - λ) * AV@R(β)[x]

Keyword Arguments

  • lambda: Convex weight on the expectation ((1-lambda) weight is put on the AV@R component. Increasing values of lambda are less risk averse (more weight on expectation).

  • beta: The quantile at which to calculate the Average Value @ Risk. Increasing values of beta are less risk averse. If beta=0, then the AV@R component is the worst case risk measure.

source

Distributionally robust

SDDP.jl supports two types of distributionally robust risk measures: the modified Χ² method of Philpott et al. (2018), and a method based on the Wasserstein distance metric.

Modified Chi-squared

SDDP.ModifiedChiSquaredType
ModifiedChiSquared(radius::Float64; minimum_std=1e-5)

The distributionally robust SDDP risk measure of Philpott, A., de Matos, V., Kapelevich, L. Distributionally robust SDDP. Computational Management Science (2018) 165:431-454.

Explanation

In a Distributionally Robust Optimization (DRO) approach, we modify the probabilities we associate with all future scenarios so that the resulting probability distribution is the "worst case" probability distribution, in some sense.

In each backward pass we will compute a worst case probability distribution vector p. We compute p so that:

p ∈ argmax p'z
+ 0.2

As a special case, the SDDP.EAVaR risk-measure is a convex combination of SDDP.Expectation and SDDP.AVaR:

julia> SDDP.EAVaR(beta=0.25, lambda=0.4)A convex combination of 0.4 * SDDP.Expectation() + 0.6 * SDDP.AVaR(0.25)
SDDP.EAVaRFunction
EAVaR(;lambda=1.0, beta=1.0)

A risk measure that is a convex combination of Expectation and Average Value @ Risk (also called Conditional Value @ Risk).

    λ * E[x] + (1 - λ) * AV@R(β)[x]

Keyword Arguments

  • lambda: Convex weight on the expectation ((1-lambda) weight is put on the AV@R component. Increasing values of lambda are less risk averse (more weight on expectation).

  • beta: The quantile at which to calculate the Average Value @ Risk. Increasing values of beta are less risk averse. If beta=0, then the AV@R component is the worst case risk measure.

source

Distributionally robust

SDDP.jl supports two types of distributionally robust risk measures: the modified Χ² method of Philpott et al. (2018), and a method based on the Wasserstein distance metric.

Modified Chi-squared

SDDP.ModifiedChiSquaredType
ModifiedChiSquared(radius::Float64; minimum_std=1e-5)

The distributionally robust SDDP risk measure of Philpott, A., de Matos, V., Kapelevich, L. Distributionally robust SDDP. Computational Management Science (2018) 165:431-454.

Explanation

In a Distributionally Robust Optimization (DRO) approach, we modify the probabilities we associate with all future scenarios so that the resulting probability distribution is the "worst case" probability distribution, in some sense.

In each backward pass we will compute a worst case probability distribution vector p. We compute p so that:

p ∈ argmax p'z
       s.t. [r; p - a] in SecondOrderCone()
            sum(p) == 1
-           p >= 0

where

  1. z is a vector of future costs. We assume that our aim is to minimize future cost p'z. If we maximize reward, we would have p ∈ argmin{p'z}.
  2. a is the uniform distribution
  3. r is a user specified radius - the larger the radius, the more conservative the policy.

Notes

The largest radius that will work with S scenarios is sqrt((S-1)/S).

If the uncorrected standard deviation of the objective realizations is less than minimum_std, then the risk-measure will default to Expectation().

This code was contributed by Lea Kapelevich.

source
julia> SDDP.adjust_probability(
+           p >= 0

where

  1. z is a vector of future costs. We assume that our aim is to minimize future cost p'z. If we maximize reward, we would have p ∈ argmin{p'z}.
  2. a is the uniform distribution
  3. r is a user specified radius - the larger the radius, the more conservative the policy.

Notes

The largest radius that will work with S scenarios is sqrt((S-1)/S).

If the uncorrected standard deviation of the objective realizations is less than minimum_std, then the risk-measure will default to Expectation().

This code was contributed by Lea Kapelevich.

source
julia> SDDP.adjust_probability(
            SDDP.ModifiedChiSquared(0.5),
            risk_adjusted_probability,
            [0.25, 0.25, 0.25, 0.25],
@@ -98,7 +98,7 @@
  0.3333333333333333
  0.044658198738520394
  0.6220084679281462
- 0.0

Wasserstein

SDDP.WassersteinType
Wasserstein(norm::Function, solver_factory; alpha::Float64)

A distributionally-robust risk measure based on the Wasserstein distance.

As alpha increases, the measure becomes more risk-averse. When alpha=0, the measure is equivalent to the expectation operator. As alpha increases, the measure approaches the Worst-case risk measure.

source
julia> import HiGHS
julia> SDDP.adjust_probability( + 0.0

Wasserstein

SDDP.WassersteinType
Wasserstein(norm::Function, solver_factory; alpha::Float64)

A distributionally-robust risk measure based on the Wasserstein distance.

As alpha increases, the measure becomes more risk-averse. When alpha=0, the measure is equivalent to the expectation operator. As alpha increases, the measure approaches the Worst-case risk measure.

source
julia> import HiGHS
julia> SDDP.adjust_probability( SDDP.Wasserstein(HiGHS.Optimizer; alpha=0.5) do x, y return abs(x - y) end, @@ -113,7 +113,7 @@ 0.7999999999999999 -0.0

Entropic

SDDP.EntropicType
Entropic(γ::Float64)

The entropic risk measure as described by:

Dowson, O., Morton, D.P. & Pagnoncelli, B.K. Incorporating convex risk
 measures into multistage stochastic programming algorithms. Annals of
-Operations Research (2022). [doi](https://doi.org/10.1007/s10479-022-04977-w).

As γ increases, the measure becomes more risk-averse.

source
julia> SDDP.adjust_probability(
+Operations Research (2022). [doi](https://doi.org/10.1007/s10479-022-04977-w).

As γ increases, the measure becomes more risk-averse.

source
julia> SDDP.adjust_probability(
            SDDP.Entropic(0.1),
            risk_adjusted_probability,
            nominal_probability,
@@ -124,4 +124,4 @@
  0.1100296362588547
  0.19911786395979578
  0.3648046623591841
- 0.3260478374221655
+ 0.3260478374221655 diff --git a/dev/guides/add_integrality/index.html b/dev/guides/add_integrality/index.html index ceec64102..6f61da998 100644 --- a/dev/guides/add_integrality/index.html +++ b/dev/guides/add_integrality/index.html @@ -25,4 +25,4 @@ \max\limits_{\lambda}\min\limits_{\bar{x}, x^\prime, u} \;\; & C_i(\bar{x}, u, \omega) + \mathbb{E}_{j \in i^+, \varphi \in \Omega_j}[V_j(x^\prime, \varphi)] - \lambda^\top(\bar{x} - x)\\ & x^\prime = T_i(\bar{x}, u, \omega) \\ & u \in U_i(\bar{x}, \omega) -\end{aligned}\]

You can use Lagrangian duality in SDDP.jl by passing SDDP.LagrangianDuality to the duality_handler argument of SDDP.train.

Compared with linear programming duality, the Lagrangian problem is difficult to solve because it requires the solution of many mixed-integer programs instead of a single linear program. This is one reason why "SDDiP" has poor performance.

Convergence

The second part to SDDiP is a very tightly scoped claim: if all of the state variables are binary and the algorithm uses Lagrangian duality to compute a subgradient, then it will converge to an optimal policy.

In many cases, papers claim to "do SDDiP," but they have state variables which are not binary. In these cases, the algorithm is not guaranteed to converge to a globally optimal policy.

One work-around that has been suggested is to discretize the state variables into a set of binary state variables. However, this leads to a large number of binary state variables, which is another reason why "SDDiP" has poor performance.

In general, we recommend that you introduce integer variables into your model without fear of the consequences, and that you treat the resulting policy as a good heuristic, rather than an attempt to find a globally optimal policy.

+\end{aligned}\]

You can use Lagrangian duality in SDDP.jl by passing SDDP.LagrangianDuality to the duality_handler argument of SDDP.train.

Compared with linear programming duality, the Lagrangian problem is difficult to solve because it requires the solution of many mixed-integer programs instead of a single linear program. This is one reason why "SDDiP" has poor performance.

Convergence

The second part to SDDiP is a very tightly scoped claim: if all of the state variables are binary and the algorithm uses Lagrangian duality to compute a subgradient, then it will converge to an optimal policy.

In many cases, papers claim to "do SDDiP," but they have state variables which are not binary. In these cases, the algorithm is not guaranteed to converge to a globally optimal policy.

One work-around that has been suggested is to discretize the state variables into a set of binary state variables. However, this leads to a large number of binary state variables, which is another reason why "SDDiP" has poor performance.

In general, we recommend that you introduce integer variables into your model without fear of the consequences, and that you treat the resulting policy as a good heuristic, rather than an attempt to find a globally optimal policy.

diff --git a/dev/guides/add_multidimensional_noise/index.html b/dev/guides/add_multidimensional_noise/index.html index 5826b2361..3f4e44cb8 100644 --- a/dev/guides/add_multidimensional_noise/index.html +++ b/dev/guides/add_multidimensional_noise/index.html @@ -81,4 +81,4 @@ julia> SDDP.simulate(model, 1); ω is: [54, 38, 19] ω is: [43, 3, 13] -ω is: [43, 4, 17] +ω is: [43, 4, 17] diff --git a/dev/guides/add_noise_in_the_constraint_matrix/index.html b/dev/guides/add_noise_in_the_constraint_matrix/index.html index 28a2bff2f..69fe0c90f 100644 --- a/dev/guides/add_noise_in_the_constraint_matrix/index.html +++ b/dev/guides/add_noise_in_the_constraint_matrix/index.html @@ -20,4 +20,4 @@ julia> SDDP.simulate(model, 1); emissions : x_out <= 1 emissions : 0.2 x_out <= 1 -emissions : 0.5 x_out <= 1
Note

JuMP will normalize constraints by moving all variables to the left-hand side. Thus, @constraint(model, 0 <= 1 - x.out) becomes x.out <= 1. JuMP.set_normalized_coefficient sets the coefficient on the normalized constraint.

+emissions : 0.5 x_out <= 1
Note

JuMP will normalize constraints by moving all variables to the left-hand side. Thus, @constraint(model, 0 <= 1 - x.out) becomes x.out <= 1. JuMP.set_normalized_coefficient sets the coefficient on the normalized constraint.

diff --git a/dev/guides/choose_a_stopping_rule/index.html b/dev/guides/choose_a_stopping_rule/index.html index a75145f1f..63fddc34c 100644 --- a/dev/guides/choose_a_stopping_rule/index.html +++ b/dev/guides/choose_a_stopping_rule/index.html @@ -21,4 +21,4 @@ stopping_rules = [ SDDP.StoppingChain(SDDP.BoundStalling(10, 1e-4), SDDP.TimeLimit(100.0)), ], -)

See Stopping rules for a list of stopping rules supported by SDDP.jl.

+)

See Stopping rules for a list of stopping rules supported by SDDP.jl.

diff --git a/dev/guides/create_a_belief_state/index.html b/dev/guides/create_a_belief_state/index.html index 265d62d8a..1a7b16aec 100644 --- a/dev/guides/create_a_belief_state/index.html +++ b/dev/guides/create_a_belief_state/index.html @@ -34,4 +34,4 @@ (1, 2) => (2, 2) w.p. 0.2 Partitions {(1, 1), (1, 2)} - {(2, 1), (2, 2)} + {(2, 1), (2, 2)} diff --git a/dev/guides/create_a_general_policy_graph/index.html b/dev/guides/create_a_general_policy_graph/index.html index 6e8a3d840..080d34217 100644 --- a/dev/guides/create_a_general_policy_graph/index.html +++ b/dev/guides/create_a_general_policy_graph/index.html @@ -110,4 +110,4 @@ @variable(subproblem, x >= 0, SDDP.State, initial_value = 1) @constraint(subproblem, x.out <= x.in) @stageobjective(subproblem, price * x.out) -end +end diff --git a/dev/guides/debug_a_model/index.html b/dev/guides/debug_a_model/index.html index 4b9bb9366..9d114bf34 100644 --- a/dev/guides/debug_a_model/index.html +++ b/dev/guides/debug_a_model/index.html @@ -68,4 +68,4 @@ julia> optimize!(det_equiv) julia> objective_value(det_equiv) --5.472500000000001
Warning

The deterministic equivalent scales poorly with problem size. Only use this on small problems!

+-5.472500000000001
Warning

The deterministic equivalent scales poorly with problem size. Only use this on small problems!

diff --git a/dev/guides/improve_computational_performance/index.html b/dev/guides/improve_computational_performance/index.html index cd5296880..42a08e376 100644 --- a/dev/guides/improve_computational_performance/index.html +++ b/dev/guides/improve_computational_performance/index.html @@ -45,4 +45,4 @@ env = Gurobi.Env() set_optimizer(m, () -> Gurobi.Optimizer(env)) end, -) +) diff --git a/dev/guides/simulate_using_a_different_sampling_scheme/index.html b/dev/guides/simulate_using_a_different_sampling_scheme/index.html index 21b02a1ec..96c1349bc 100644 --- a/dev/guides/simulate_using_a_different_sampling_scheme/index.html +++ b/dev/guides/simulate_using_a_different_sampling_scheme/index.html @@ -165,4 +165,4 @@ ], [0.3, 0.7], ) -A Historical sampler with 2 scenarios sampled probabilistically.
Tip

Your sample space doesn't have to be a NamedTuple. It can be any Julia type! Use a Vector if that is easier, or define your own struct.

+A Historical sampler with 2 scenarios sampled probabilistically.
Tip

Your sample space doesn't have to be a NamedTuple. It can be any Julia type! Use a Vector if that is easier, or define your own struct.

diff --git a/dev/index.html b/dev/index.html index 82a6bc64f..45eb32523 100644 --- a/dev/index.html +++ b/dev/index.html @@ -47,4 +47,4 @@ journal = {Annals of Operations Research}, author = {Dowson, O. and Morton, D.P. and Pagnoncelli, B.K.}, year = {2022}, -}

Here is an earlier preprint.

+}

Here is an earlier preprint.

diff --git a/dev/release_notes/index.html b/dev/release_notes/index.html index 30111c76d..0b08f52e0 100644 --- a/dev/release_notes/index.html +++ b/dev/release_notes/index.html @@ -3,4 +3,4 @@ function gtag(){dataLayer.push(arguments);} gtag('js', new Date()); gtag('config', 'G-HZQQDVMPZW', {'page_path': location.pathname + location.search + location.hash}); -

Release notes

The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.

v1.10.1 (November 28, 2024)

Fixed

Other

  • Documentation updates (#801)

v1.10.0 (November 19, 2024)

Added

  • Added root_node_risk_measure keyword to train (#804)

Fixed

  • Fixed a bug with cut sharing in a graph with zero-probability arcs (#797)

Other

v1.9.0 (October 17, 2024)

Added

Fixed

  • Fixed the tests to skip threading tests if running in serial (#770)
  • Fixed BanditDuality to handle the case where the standard deviation is NaN (#779)
  • Fixed an error when lagged state variables are encountered in MSPFormat (#786)
  • Fixed publication_plot with replications of different lengths (#788)
  • Fixed CTRL+C interrupting the code at unsafe points (#789)

Other

  • Documentation improvements (#771) (#772)
  • Updated printing because of changes in JuMP (#773)

v1.8.1 (August 5, 2024)

Fixed

  • Fixed various issues with SDDP.Threaded() (#761)
  • Fixed a deprecation warning for sorting a dictionary (#763)

Other

  • Updated copyright notices (#762)
  • Updated .JuliaFormatter.toml (#764)

v1.8.0 (July 24, 2024)

Added

  • Added SDDP.Threaded(), which is an experimental parallel scheme that supports solving problems using multiple threads. Some parts of SDDP.jl may not be thread-safe, and this can cause incorrect results, segfaults, or other errors. Please use with care and report any issues by opening a GitHub issue. (#758)

Other

  • Documentation improvements and fixes (#747) (#759)

v1.7.0 (June 4, 2024)

Added

  • Added sample_backward_noise_terms_with_state for creating backward pass sampling schemes that depend on the current primal state. (#742) (Thanks @arthur-brigatto)

Fixed

  • Fixed error message when publication_plot has non-finite data (#738)

Other

  • Updated the logo constructor (#730)

v1.6.7 (February 1, 2024)

Fixed

  • Fixed non-constant state dimension in the MSPFormat reader (#695)
  • Fixed SimulatorSamplingScheme for deterministic nodes (#710)
  • Fixed line search in BFGS (#711)
  • Fixed handling of NEARLY_FEASIBLE_POINT status (#726)

Other

  • Documentation improvements (#692) (#694) (#706) (#716) (#727)
  • Updated to StochOptFormat v1.0 (#705)
  • Added an experimental OuterApproximation algorithm (#709)
  • Updated .gitignore (#717)
  • Added code for MDP paper (#720) (#721)
  • Added Google analytics (#723)

v1.6.6 (September 29, 2023)

Other

v1.6.5 (September 25, 2023)

Fixed

Other

v1.6.4 (September 23, 2023)

Fixed

Other

  • Documentation updates (#658) (#666) (#671)
  • Switch to GitHub action for deploying docs (#668) (#670)
  • Update to Documenter@1 (#669)

v1.6.3 (September 8, 2023)

Fixed

  • Fixed default stopping rule with iteration_limit or time_limit set (#662)

Other

v1.6.2 (August 24, 2023)

Fixed

  • MSPFormat now detect and exploit stagewise independent lattices (#653)
  • Fixed set_optimizer for models read from file (#654)

Other

  • Fixed typo in pglib_opf.jl (#647)
  • Fixed documentation build and added color (#652)

v1.6.1 (July 20, 2023)

Fixed

  • Fixed bugs in MSPFormat reader (#638) (#639)

Other

  • Clarified OutOfSampleMonteCarlo docstring (#643)

v1.6.0 (July 3, 2023)

Added

Other

v1.5.1 (June 30, 2023)

This release contains a number of minor code changes, but it has a large impact on the content that is printed to screen. In particular, we now log periodically, instead of each iteration, and a "good" stopping rule is used as the default if none are specified. Try using SDDP.train(model) to see the difference.

Other

  • Fixed various typos in the documentation (#617)
  • Fixed printing test after changes in JuMP (#618)
  • Set SimulationStoppingRule as the default stopping rule (#619)
  • Changed the default logging frequency. Pass log_every_seconds = 0.0 to train to revert to the old behavior. (#620)
  • Added example usage with Distributions.jl (@slwu89) (#622)
  • Removed the numerical issue @warn (#627)
  • Improved the quality of docstrings (#630)

v1.5.0 (May 14, 2023)

Added

  • Added the ability to use a different model for the forward pass. This is a novel feature that lets you train better policies when the model is non-convex or does not have a well-defined dual. See the Alternative forward models tutorial in which we train convex and non-convex formulations of the optimal power flow problem. (#611)

Other

  • Updated missing changelog entries (#608)
  • Removed global variables (#610)
  • Converted the Options struct to keyword arguments. This struct was a private implementation detail, but the change is breaking if you developed an extension to SDDP that touched these internals. (#612)
  • Fixed some typos (#613)

v1.4.0 (May 8, 2023)

Added

Fixed

  • Fixed parsing of some MSPFormat files (#602) (#604)
  • Fixed printing in header (#605)

v1.3.0 (May 3, 2023)

Added

  • Added experimental support for SDDP.MSPFormat.read_from_file (#593)

Other

  • Updated to StochOptFormat v0.3 (#600)

v1.2.1 (May 1, 2023)

Fixed

  • Fixed log_every_seconds (#597)

v1.2.0 (May 1, 2023)

Added

Other

  • Tweaked how the log is printed (#588)
  • Updated to StochOptFormat v0.2 (#592)

v1.1.4 (April 10, 2023)

Fixed

  • Logs are now flushed every iteration (#584)

Other

  • Added docstrings to various functions (#581)
  • Minor documentation updates (#580)
  • Clarified integrality documentation (#582)
  • Updated the README (#585)
  • Number of numerical issues is now printed to the log (#586)

v1.1.3 (April 2, 2023)

Other

v1.1.2 (March 18, 2023)

Other

v1.1.1 (March 16, 2023)

Other

  • Fixed email in Project.toml
  • Added notebook to documentation tutorials (#571)

v1.1.0 (January 12, 2023)

Added

v1.0.0 (January 3, 2023)

Although we're bumping MAJOR version, this is a non-breaking release. Going forward:

  • New features will bump the MINOR version
  • Bug fixes, maintenance, and documentation updates will bump the PATCH version
  • We will support only the Long Term Support (currently v1.6.7) and the latest patch (currently v1.8.4) releases of Julia. Updates to the LTS version will bump the MINOR version
  • Updates to the compat bounds of package dependencies will bump the PATCH version.

We do not intend any breaking changes to the public API, which would require a new MAJOR release. The public API is everything defined in the documentation. Anything not in the documentation is considered private and may change in any PATCH release.

Added

Other

v0.4.9 (January 3, 2023)

Added

Other

  • Added tutorial on Markov Decision Processes (#556)
  • Added two-stage newsvendor tutorial (#557)
  • Refactored the layout of the documentation (#554) (#555)
  • Updated copyright to 2023 (#558)
  • Fixed errors in the documentation (#561)

v0.4.8 (December 19, 2022)

Added

Fixed

  • Reverted then fixed (#531) because it failed to account for problems with integer variables (#546) (#551)

v0.4.7 (December 17, 2022)

Added

  • Added initial_node support to InSampleMonteCarlo and OutOfSampleMonteCarlo (#535)

Fixed

  • Rethrow InterruptException when solver is interrupted (#534)
  • Fixed numerical recovery when we need dual solutions (#531) (Thanks @bfpc)
  • Fixed re-using the dashboard = true option between solves (#538)
  • Fixed bug when no @stageobjective is set (now defaults to 0.0) (#539)
  • Fixed errors thrown when invalid inputs are provided to add_objective_state (#540)

Other

  • Drop support for Julia versions prior to 1.6 (#533)
  • Updated versions of dependencies (#522) (#533)
  • Switched to HiGHS in the documentation and tests (#533)
  • Added license headers (#519)
  • Fixed link in air conditioning example (#521) (Thanks @conema)
  • Clarified variable naming in deterministic equivalent (#525) (Thanks @lucasprocessi)
  • Added this change log (#536)
  • Cuts are now written to model.cuts.json when numerical instability is discovered. This can aid debugging because it allows to you reload the cuts as of the iteration that caused the numerical issue (#537)

v0.4.6 (March 25, 2022)

Other

  • Updated to JuMP v1.0 (#517)

v0.4.5 (March 9, 2022)

Fixed

  • Fixed issue with set_silent in a subproblem (#510)

Other

v0.4.4 (December 11, 2021)

Added

  • Added BanditDuality (#471)
  • Added benchmark scripts (#475) (#476) (#490)
  • write_cuts_to_file now saves visited states (#468)

Fixed

  • Fixed BoundStalling in a deterministic policy (#470) (#474)
  • Fixed magnitude warning with zero coefficients (#483)

Other

  • Improvements to LagrangianDuality (#481) (#482) (#487)
  • Improvements to StrengthenedConicDuality (#486)
  • Switch to functional form for the tests (#478)
  • Fixed typos (#472) (Thanks @vfdev-5)
  • Update to JuMP v0.22 (#498)

v0.4.3 (August 31, 2021)

Added

  • Added biobjective solver (#462)
  • Added forward_pass_callback (#466)

Other

  • Update tutorials and documentation (#459) (#465)
  • Organize how paper materials are stored (#464)

v0.4.2 (August 24, 2021)

Fixed

  • Fixed a bug in Lagrangian duality (#457)

v0.4.1 (August 23, 2021)

Other

  • Minor changes to our implementation of LagrangianDuality (#454) (#455)

v0.4.0 (August 17, 2021)

Breaking

Other

v0.3.17 (July 6, 2021)

Added

Other

  • Display more model attributes (#438)
  • Documentation improvements (#433) (#437) (#439)

v0.3.16 (June 17, 2021)

Added

Other

  • Update risk measure docstrings (#418)

v0.3.15 (June 1, 2021)

Added

Fixed

Other

  • Add JuliaFormatter (#412)
  • Documentation improvements (#406) (#408)

v0.3.14 (March 30, 2021)

Fixed

  • Fixed O(N^2) behavior in get_same_children (#393)

v0.3.13 (March 27, 2021)

Fixed

  • Fixed bug in print.jl
  • Fixed compat of Reexport (#388)

v0.3.12 (March 22, 2021)

Added

  • Added problem statistics to header (#385) (#386)

Fixed

  • Fixed subtypes in visualization (#384)

v0.3.11 (March 22, 2021)

Fixed

  • Fixed constructor in direct mode (#383)

Other

  • Fix documentation (#379)

v0.3.10 (February 23, 2021)

Fixed

  • Fixed seriescolor in publication plot (#376)

v0.3.9 (February 20, 2021)

Added

  • Add option to simulate with different incoming state (#372)
  • Added warning for cuts with high dynamic range (#373)

Fixed

  • Fixed seriesalpha in publication plot (#375)

v0.3.8 (January 19, 2021)

Other

v0.3.7 (January 8, 2021)

Other

v0.3.6 (December 17, 2020)

Other

  • Fix typos (#358)
  • Collapse navigation bar in docs (#359)
  • Update TagBot.yml (#361)

v0.3.5 (November 18, 2020)

Other

  • Update citations (#348)
  • Switch to GitHub actions (#355)

v0.3.4 (August 25, 2020)

Added

  • Added non-uniform distributionally robust risk measure (#328)
  • Added numerical recovery functions (#330)
  • Added experimental StochOptFormat (#332) (#336) (#337) (#341) (#343) (#344)
  • Added entropic risk measure (#347)

Other

v0.3.3 (June 19, 2020)

Added

  • Added asynchronous support for price and belief states (#325)
  • Added ForwardPass plug-in system (#320)

Fixed

  • Fix check for probabilities in Markovian graph (#322)

v0.3.2 (April 6, 2020)

Added

Other

  • Improve error message in deterministic equivalent (#312)
  • Update to RecipesBase 1.0 (#313)

v0.3.1 (February 26, 2020)

Fixed

  • Fixed filename in integrality_handlers.jl (#304)

v0.3.0 (February 20, 2020)

Breaking

  • Breaking changes to update to JuMP v0.21 (#300).

v0.2.4 (February 7, 2020)

Added

  • Added a counter for the number of total subproblem solves (#301)

Other

  • Update formatter (#298)
  • Added tests (#299)

v0.2.3 (January 24, 2020)

Added

  • Added support for convex risk measures (#294)

Fixed

  • Fixed bug when subproblem is infeasible (#296)
  • Fixed bug in deterministic equivalent (#297)

Other

  • Added example from IJOC paper (#293)

v0.2.2 (January 10, 2020)

Fixed

  • Fixed flakey time limit in tests (#291)

Other

  • Removed MathOptFormat.jl (#289)
  • Update copyright (#290)

v0.2.1 (December 19, 2019)

Added

  • Added support for approximating a Markov lattice (#282) (#285)
  • Add tools for visualizing the value function (#272) (#286)
  • Write .mof.json files on error (#284)

Other

  • Improve documentation (#281) (#283)
  • Update tests for Julia 1.3 (#287)

v0.2.0 (December 16, 2019)

This version added the asynchronous parallel implementation with a few minor breaking changes in how we iterated internally. It didn't break basic user-facing models, only implementations that implemented some of the extension features. It probably could have been a v1.1 release.

Added

  • Added asynchronous parallel implementation (#277)
  • Added roll-out algorithm for cyclic graphs (#279)

Other

  • Improved error messages in PolicyGraph (#271)
  • Added JuliaFormatter (#273) (#276)
  • Fixed compat bounds (#274) (#278)
  • Added documentation for simulating non-standard graphs (#280)

v0.1.0 (October 17, 2019)

A complete rewrite of SDDP.jl based on the policy graph framework. This was essentially a new package. It has minimal code in common with the previous implementation.

Development started on September 28, 2018 in Kokako.jl, and the code was merged into SDDP.jl on March 14, 2019.

The pull request SDDP.jl#180 lists the 29 issues that the rewrite closed.

v0.0.1 (April 18, 2018)

Initial release. Development had been underway since January 22, 2016 in the StochDualDynamicProgram.jl repository. The last development commit there was April 5, 2017. Work then continued in this repository for a year before the first tagged release.

+

Release notes

The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.

v1.10.1 (November 28, 2024)

Fixed

Other

  • Documentation updates (#801)

v1.10.0 (November 19, 2024)

Added

  • Added root_node_risk_measure keyword to train (#804)

Fixed

  • Fixed a bug with cut sharing in a graph with zero-probability arcs (#797)

Other

v1.9.0 (October 17, 2024)

Added

Fixed

  • Fixed the tests to skip threading tests if running in serial (#770)
  • Fixed BanditDuality to handle the case where the standard deviation is NaN (#779)
  • Fixed an error when lagged state variables are encountered in MSPFormat (#786)
  • Fixed publication_plot with replications of different lengths (#788)
  • Fixed CTRL+C interrupting the code at unsafe points (#789)

Other

  • Documentation improvements (#771) (#772)
  • Updated printing because of changes in JuMP (#773)

v1.8.1 (August 5, 2024)

Fixed

  • Fixed various issues with SDDP.Threaded() (#761)
  • Fixed a deprecation warning for sorting a dictionary (#763)

Other

  • Updated copyright notices (#762)
  • Updated .JuliaFormatter.toml (#764)

v1.8.0 (July 24, 2024)

Added

  • Added SDDP.Threaded(), which is an experimental parallel scheme that supports solving problems using multiple threads. Some parts of SDDP.jl may not be thread-safe, and this can cause incorrect results, segfaults, or other errors. Please use with care and report any issues by opening a GitHub issue. (#758)

Other

  • Documentation improvements and fixes (#747) (#759)

v1.7.0 (June 4, 2024)

Added

  • Added sample_backward_noise_terms_with_state for creating backward pass sampling schemes that depend on the current primal state. (#742) (Thanks @arthur-brigatto)

Fixed

  • Fixed error message when publication_plot has non-finite data (#738)

Other

  • Updated the logo constructor (#730)

v1.6.7 (February 1, 2024)

Fixed

  • Fixed non-constant state dimension in the MSPFormat reader (#695)
  • Fixed SimulatorSamplingScheme for deterministic nodes (#710)
  • Fixed line search in BFGS (#711)
  • Fixed handling of NEARLY_FEASIBLE_POINT status (#726)

Other

  • Documentation improvements (#692) (#694) (#706) (#716) (#727)
  • Updated to StochOptFormat v1.0 (#705)
  • Added an experimental OuterApproximation algorithm (#709)
  • Updated .gitignore (#717)
  • Added code for MDP paper (#720) (#721)
  • Added Google analytics (#723)

v1.6.6 (September 29, 2023)

Other

v1.6.5 (September 25, 2023)

Fixed

Other

v1.6.4 (September 23, 2023)

Fixed

Other

  • Documentation updates (#658) (#666) (#671)
  • Switch to GitHub action for deploying docs (#668) (#670)
  • Update to Documenter@1 (#669)

v1.6.3 (September 8, 2023)

Fixed

  • Fixed default stopping rule with iteration_limit or time_limit set (#662)

Other

v1.6.2 (August 24, 2023)

Fixed

  • MSPFormat now detect and exploit stagewise independent lattices (#653)
  • Fixed set_optimizer for models read from file (#654)

Other

  • Fixed typo in pglib_opf.jl (#647)
  • Fixed documentation build and added color (#652)

v1.6.1 (July 20, 2023)

Fixed

  • Fixed bugs in MSPFormat reader (#638) (#639)

Other

  • Clarified OutOfSampleMonteCarlo docstring (#643)

v1.6.0 (July 3, 2023)

Added

Other

v1.5.1 (June 30, 2023)

This release contains a number of minor code changes, but it has a large impact on the content that is printed to screen. In particular, we now log periodically, instead of each iteration, and a "good" stopping rule is used as the default if none are specified. Try using SDDP.train(model) to see the difference.

Other

  • Fixed various typos in the documentation (#617)
  • Fixed printing test after changes in JuMP (#618)
  • Set SimulationStoppingRule as the default stopping rule (#619)
  • Changed the default logging frequency. Pass log_every_seconds = 0.0 to train to revert to the old behavior. (#620)
  • Added example usage with Distributions.jl (@slwu89) (#622)
  • Removed the numerical issue @warn (#627)
  • Improved the quality of docstrings (#630)

v1.5.0 (May 14, 2023)

Added

  • Added the ability to use a different model for the forward pass. This is a novel feature that lets you train better policies when the model is non-convex or does not have a well-defined dual. See the Alternative forward models tutorial in which we train convex and non-convex formulations of the optimal power flow problem. (#611)

Other

  • Updated missing changelog entries (#608)
  • Removed global variables (#610)
  • Converted the Options struct to keyword arguments. This struct was a private implementation detail, but the change is breaking if you developed an extension to SDDP that touched these internals. (#612)
  • Fixed some typos (#613)

v1.4.0 (May 8, 2023)

Added

Fixed

  • Fixed parsing of some MSPFormat files (#602) (#604)
  • Fixed printing in header (#605)

v1.3.0 (May 3, 2023)

Added

  • Added experimental support for SDDP.MSPFormat.read_from_file (#593)

Other

  • Updated to StochOptFormat v0.3 (#600)

v1.2.1 (May 1, 2023)

Fixed

  • Fixed log_every_seconds (#597)

v1.2.0 (May 1, 2023)

Added

Other

  • Tweaked how the log is printed (#588)
  • Updated to StochOptFormat v0.2 (#592)

v1.1.4 (April 10, 2023)

Fixed

  • Logs are now flushed every iteration (#584)

Other

  • Added docstrings to various functions (#581)
  • Minor documentation updates (#580)
  • Clarified integrality documentation (#582)
  • Updated the README (#585)
  • Number of numerical issues is now printed to the log (#586)

v1.1.3 (April 2, 2023)

Other

v1.1.2 (March 18, 2023)

Other

v1.1.1 (March 16, 2023)

Other

  • Fixed email in Project.toml
  • Added notebook to documentation tutorials (#571)

v1.1.0 (January 12, 2023)

Added

v1.0.0 (January 3, 2023)

Although we're bumping the MAJOR version, this is a non-breaking release. Going forward:

  • New features will bump the MINOR version
  • Bug fixes, maintenance, and documentation updates will bump the PATCH version
  • We will support only the Long Term Support (currently v1.6.7) and the latest patch (currently v1.8.4) releases of Julia. Updates to the LTS version will bump the MINOR version
  • Updates to the compat bounds of package dependencies will bump the PATCH version.

We do not intend any breaking changes to the public API, which would require a new MAJOR release. The public API is everything defined in the documentation. Anything not in the documentation is considered private and may change in any PATCH release.

Added

Other

v0.4.9 (January 3, 2023)

Added

Other

  • Added tutorial on Markov Decision Processes (#556)
  • Added two-stage newsvendor tutorial (#557)
  • Refactored the layout of the documentation (#554) (#555)
  • Updated copyright to 2023 (#558)
  • Fixed errors in the documentation (#561)

v0.4.8 (December 19, 2022)

Added

Fixed

  • Reverted then fixed (#531) because it failed to account for problems with integer variables (#546) (#551)

v0.4.7 (December 17, 2022)

Added

  • Added initial_node support to InSampleMonteCarlo and OutOfSampleMonteCarlo (#535)

Fixed

  • Rethrow InterruptException when solver is interrupted (#534)
  • Fixed numerical recovery when we need dual solutions (#531) (Thanks @bfpc)
  • Fixed re-using the dashboard = true option between solves (#538)
  • Fixed bug when no @stageobjective is set (now defaults to 0.0) (#539)
  • Fixed errors thrown when invalid inputs are provided to add_objective_state (#540)

Other

  • Drop support for Julia versions prior to 1.6 (#533)
  • Updated versions of dependencies (#522) (#533)
  • Switched to HiGHS in the documentation and tests (#533)
  • Added license headers (#519)
  • Fixed link in air conditioning example (#521) (Thanks @conema)
  • Clarified variable naming in deterministic equivalent (#525) (Thanks @lucasprocessi)
  • Added this change log (#536)
  • Cuts are now written to model.cuts.json when numerical instability is discovered. This can aid debugging because it allows you to reload the cuts as of the iteration that caused the numerical issue (#537)

v0.4.6 (March 25, 2022)

Other

  • Updated to JuMP v1.0 (#517)

v0.4.5 (March 9, 2022)

Fixed

  • Fixed issue with set_silent in a subproblem (#510)

Other

v0.4.4 (December 11, 2021)

Added

  • Added BanditDuality (#471)
  • Added benchmark scripts (#475) (#476) (#490)
  • write_cuts_to_file now saves visited states (#468)

Fixed

  • Fixed BoundStalling in a deterministic policy (#470) (#474)
  • Fixed magnitude warning with zero coefficients (#483)

Other

  • Improvements to LagrangianDuality (#481) (#482) (#487)
  • Improvements to StrengthenedConicDuality (#486)
  • Switch to functional form for the tests (#478)
  • Fixed typos (#472) (Thanks @vfdev-5)
  • Update to JuMP v0.22 (#498)

v0.4.3 (August 31, 2021)

Added

  • Added biobjective solver (#462)
  • Added forward_pass_callback (#466)

Other

  • Update tutorials and documentation (#459) (#465)
  • Organize how paper materials are stored (#464)

v0.4.2 (August 24, 2021)

Fixed

  • Fixed a bug in Lagrangian duality (#457)

v0.4.1 (August 23, 2021)

Other

  • Minor changes to our implementation of LagrangianDuality (#454) (#455)

v0.4.0 (August 17, 2021)

Breaking

Other

v0.3.17 (July 6, 2021)

Added

Other

  • Display more model attributes (#438)
  • Documentation improvements (#433) (#437) (#439)

v0.3.16 (June 17, 2021)

Added

Other

  • Update risk measure docstrings (#418)

v0.3.15 (June 1, 2021)

Added

Fixed

Other

  • Add JuliaFormatter (#412)
  • Documentation improvements (#406) (#408)

v0.3.14 (March 30, 2021)

Fixed

  • Fixed O(N^2) behavior in get_same_children (#393)

v0.3.13 (March 27, 2021)

Fixed

  • Fixed bug in print.jl
  • Fixed compat of Reexport (#388)

v0.3.12 (March 22, 2021)

Added

  • Added problem statistics to header (#385) (#386)

Fixed

  • Fixed subtypes in visualization (#384)

v0.3.11 (March 22, 2021)

Fixed

  • Fixed constructor in direct mode (#383)

Other

  • Fix documentation (#379)

v0.3.10 (February 23, 2021)

Fixed

  • Fixed seriescolor in publication plot (#376)

v0.3.9 (February 20, 2021)

Added

  • Add option to simulate with different incoming state (#372)
  • Added warning for cuts with high dynamic range (#373)

Fixed

  • Fixed seriesalpha in publication plot (#375)

v0.3.8 (January 19, 2021)

Other

v0.3.7 (January 8, 2021)

Other

v0.3.6 (December 17, 2020)

Other

  • Fix typos (#358)
  • Collapse navigation bar in docs (#359)
  • Update TagBot.yml (#361)

v0.3.5 (November 18, 2020)

Other

  • Update citations (#348)
  • Switch to GitHub actions (#355)

v0.3.4 (August 25, 2020)

Added

  • Added non-uniform distributionally robust risk measure (#328)
  • Added numerical recovery functions (#330)
  • Added experimental StochOptFormat (#332) (#336) (#337) (#341) (#343) (#344)
  • Added entropic risk measure (#347)

Other

v0.3.3 (June 19, 2020)

Added

  • Added asynchronous support for price and belief states (#325)
  • Added ForwardPass plug-in system (#320)

Fixed

  • Fix check for probabilities in Markovian graph (#322)

v0.3.2 (April 6, 2020)

Added

Other

  • Improve error message in deterministic equivalent (#312)
  • Update to RecipesBase 1.0 (#313)

v0.3.1 (February 26, 2020)

Fixed

  • Fixed filename in integrality_handlers.jl (#304)

v0.3.0 (February 20, 2020)

Breaking

  • Breaking changes to update to JuMP v0.21 (#300).

v0.2.4 (February 7, 2020)

Added

  • Added a counter for the number of total subproblem solves (#301)

Other

  • Update formatter (#298)
  • Added tests (#299)

v0.2.3 (January 24, 2020)

Added

  • Added support for convex risk measures (#294)

Fixed

  • Fixed bug when subproblem is infeasible (#296)
  • Fixed bug in deterministic equivalent (#297)

Other

  • Added example from IJOC paper (#293)

v0.2.2 (January 10, 2020)

Fixed

  • Fixed flakey time limit in tests (#291)

Other

  • Removed MathOptFormat.jl (#289)
  • Update copyright (#290)

v0.2.1 (December 19, 2019)

Added

  • Added support for approximating a Markov lattice (#282) (#285)
  • Add tools for visualizing the value function (#272) (#286)
  • Write .mof.json files on error (#284)

Other

  • Improve documentation (#281) (#283)
  • Update tests for Julia 1.3 (#287)

v0.2.0 (December 16, 2019)

This version added the asynchronous parallel implementation with a few minor breaking changes in how we iterated internally. It didn't break basic user-facing models, only implementations that implemented some of the extension features. It probably could have been a v1.1 release.

Added

  • Added asynchronous parallel implementation (#277)
  • Added roll-out algorithm for cyclic graphs (#279)

Other

  • Improved error messages in PolicyGraph (#271)
  • Added JuliaFormatter (#273) (#276)
  • Fixed compat bounds (#274) (#278)
  • Added documentation for simulating non-standard graphs (#280)

v0.1.0 (October 17, 2019)

A complete rewrite of SDDP.jl based on the policy graph framework. This was essentially a new package. It has minimal code in common with the previous implementation.

Development started on September 28, 2018 in Kokako.jl, and the code was merged into SDDP.jl on March 14, 2019.

The pull request SDDP.jl#180 lists the 29 issues that the rewrite closed.

v0.0.1 (April 18, 2018)

Initial release. Development had been underway since January 22, 2016 in the StochDualDynamicProgram.jl repository. The last development commit there was April 5, 2017. Work then continued in this repository for a year before the first tagged release.

diff --git a/dev/tutorial/SDDP.log b/dev/tutorial/SDDP.log index 4e4185ee7..da990ca0d 100644 --- a/dev/tutorial/SDDP.log +++ b/dev/tutorial/SDDP.log @@ -4,7 +4,7 @@ problem nodes : 30 state variables : 5 - scenarios : 7.61719e+11 + scenarios : 9.49219e+11 existing cuts : false options solver : serial mode @@ -23,24 +23,24 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 -1.851068e+01 6.082580e+01 1.253403e+00 162 1 - 57 8.366327e+00 7.920620e+00 2.259916e+00 9234 1 - 106 1.008516e+01 7.898809e+00 3.270930e+00 17172 1 - 150 9.988110e+00 7.884976e+00 4.286971e+00 24300 1 - 189 1.004340e+01 7.884002e+00 5.308165e+00 30618 1 - 227 1.064751e+01 7.883383e+00 6.332887e+00 36774 1 - 263 1.048893e+01 7.882892e+00 7.348292e+00 42606 1 - 296 1.095086e+01 7.882802e+00 8.360027e+00 47952 1 - 330 9.548442e+00 7.882578e+00 9.385061e+00 53460 1 - 472 9.643479e+00 7.881958e+00 1.440997e+01 76464 1 - 591 1.019101e+01 7.881957e+00 1.945661e+01 95742 1 - 604 3.768654e+00 7.881957e+00 2.001620e+01 97848 1 + 1 -3.046519e+01 6.131071e+01 1.256958e+00 162 1 + 63 1.292233e+01 7.905125e+00 2.272572e+00 10206 1 + 112 9.307654e+00 7.896140e+00 3.284606e+00 18144 1 + 156 8.112625e+00 7.894985e+00 4.288534e+00 25272 1 + 199 1.046770e+01 7.894847e+00 5.300903e+00 32238 1 + 234 7.526852e+00 7.893376e+00 6.302604e+00 37908 1 + 269 9.742351e+00 7.893235e+00 7.304276e+00 43578 1 + 303 9.847280e+00 7.893047e+00 8.316021e+00 49086 1 + 336 9.009822e+00 7.892988e+00 9.328115e+00 54432 1 + 478 9.374486e+00 7.892676e+00 1.434819e+01 77436 1 + 599 9.603953e+00 7.892323e+00 1.936455e+01 97038 1 + 613 8.053651e+00 7.892310e+00 2.000001e+01 99306 1 ------------------------------------------------------------------- status : time_limit -total time (s) : 2.001620e+01 -total solves : 97848 -best bound : 7.881957e+00 -simulation ci : 8.994076e+00 ± 
3.177224e-01 +total time (s) : 2.000001e+01 +total solves : 99306 +best bound : 7.892310e+00 +simulation ci : 8.902743e+00 ± 2.734557e-01 numeric issues : 0 ------------------------------------------------------------------- @@ -70,52 +70,52 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 0.000000e+00 7.348402e+02 6.320953e-03 103 1 - 2 3.666771e+02 6.078253e+02 2.297211e-02 406 1 - 3 6.078253e+02 5.716648e+02 2.764297e-02 509 1 - 4 6.015621e+02 5.678539e+02 1.319020e-01 612 1 - 5 5.035377e+02 5.658101e+02 1.366930e-01 715 1 - 6 6.204518e+02 5.651714e+02 1.413820e-01 818 1 - 7 5.854729e+02 5.651338e+02 1.460829e-01 921 1 - 8 6.250216e+02 5.651222e+02 1.508119e-01 1024 1 - 9 4.580476e+02 5.651222e+02 1.556809e-01 1127 1 - 10 6.241950e+02 5.651222e+02 1.604221e-01 1230 1 - 11 5.019674e+02 5.651222e+02 1.652141e-01 1333 1 - 12 6.241950e+02 5.651222e+02 1.701851e-01 1436 1 - 13 6.241950e+02 5.651222e+02 1.750400e-01 1539 1 - 14 5.879335e+02 5.651222e+02 1.798840e-01 1642 1 - 15 5.364740e+02 5.651222e+02 1.848750e-01 1745 1 - 16 5.477448e+02 5.651222e+02 1.897919e-01 1848 1 - 17 5.392017e+02 5.651222e+02 1.946499e-01 1951 1 - 18 6.039872e+02 5.651222e+02 2.001410e-01 2054 1 - 19 6.241950e+02 5.651222e+02 2.050810e-01 2157 1 - 20 6.241950e+02 5.651222e+02 2.099879e-01 2260 1 - 21 6.241950e+02 5.651222e+02 2.297289e-01 2563 1 - 22 6.058459e+02 5.651222e+02 2.347450e-01 2666 1 - 23 6.241950e+02 5.651222e+02 2.397051e-01 2769 1 - 24 4.741885e+02 5.651222e+02 2.448599e-01 2872 1 - 25 6.241950e+02 5.651222e+02 2.499211e-01 2975 1 - 26 5.392017e+02 5.651222e+02 2.548850e-01 3078 1 - 27 6.241950e+02 5.651222e+02 2.597980e-01 3181 1 - 28 4.741885e+02 5.651222e+02 2.648840e-01 3284 1 - 29 4.741885e+02 5.651222e+02 2.697959e-01 3387 1 - 30 4.824023e+02 5.651222e+02 2.747409e-01 3490 1 - 31 4.339231e+02 5.651222e+02 
2.797949e-01 3593 1 - 32 5.879335e+02 5.651222e+02 2.848771e-01 3696 1 - 33 4.017211e+02 5.651222e+02 2.898641e-01 3799 1 - 34 6.241950e+02 5.651222e+02 2.949221e-01 3902 1 - 35 6.241950e+02 5.651222e+02 2.999289e-01 4005 1 - 36 5.631941e+02 5.651222e+02 3.049719e-01 4108 1 - 37 6.241950e+02 5.651222e+02 3.108039e-01 4211 1 - 38 6.241950e+02 5.651222e+02 3.158510e-01 4314 1 - 39 6.241950e+02 5.651222e+02 3.208930e-01 4417 1 - 40 6.241950e+02 5.651222e+02 3.259790e-01 4520 1 + 1 0.000000e+00 7.222377e+02 6.361008e-03 103 1 + 2 4.512787e+02 6.128430e+02 2.346492e-02 406 1 + 3 5.977139e+02 5.819736e+02 2.814698e-02 509 1 + 4 4.017871e+02 5.778848e+02 1.408529e-01 612 1 + 5 6.078771e+02 5.755208e+02 1.456439e-01 715 1 + 6 6.218727e+02 5.751574e+02 1.503499e-01 818 1 + 7 6.269908e+02 5.750415e+02 1.550970e-01 921 1 + 8 6.245749e+02 5.750390e+02 1.598799e-01 1024 1 + 9 5.430265e+02 5.750390e+02 1.647799e-01 1127 1 + 10 6.162024e+02 5.750390e+02 1.697149e-01 1230 1 + 11 5.171242e+02 5.750390e+02 1.745780e-01 1333 1 + 12 6.254119e+02 5.750390e+02 1.794848e-01 1436 1 + 13 6.254119e+02 5.750390e+02 1.844709e-01 1539 1 + 14 6.254119e+02 5.750390e+02 1.893959e-01 1642 1 + 15 5.913270e+02 5.750390e+02 1.944010e-01 1745 1 + 16 5.364626e+02 5.750390e+02 1.994739e-01 1848 1 + 17 6.254119e+02 5.750390e+02 2.043970e-01 1951 1 + 18 6.254119e+02 5.750390e+02 2.093439e-01 2054 1 + 19 4.856748e+02 5.750390e+02 2.143738e-01 2157 1 + 20 6.152163e+02 5.750390e+02 2.194028e-01 2260 1 + 21 6.254119e+02 5.750390e+02 2.399249e-01 2563 1 + 22 4.763439e+02 5.750390e+02 2.450058e-01 2666 1 + 23 6.254119e+02 5.750390e+02 2.499430e-01 2769 1 + 24 5.364626e+02 5.750390e+02 2.548928e-01 2872 1 + 25 5.468930e+02 5.750390e+02 2.599459e-01 2975 1 + 26 6.254119e+02 5.750390e+02 2.648909e-01 3078 1 + 27 5.904198e+02 5.750390e+02 2.699230e-01 3181 1 + 28 6.254119e+02 5.750390e+02 2.749429e-01 3284 1 + 29 6.254119e+02 5.750390e+02 2.801199e-01 3387 1 + 30 6.254119e+02 5.750390e+02 2.852349e-01 3490 1 + 31 
5.212098e+02 5.750390e+02 2.902520e-01 3593 1 + 32 6.084504e+02 5.750390e+02 2.954769e-01 3696 1 + 33 6.233432e+02 5.750390e+02 3.005829e-01 3799 1 + 34 5.461888e+02 5.750390e+02 3.056829e-01 3902 1 + 35 6.254119e+02 5.750390e+02 3.107898e-01 4005 1 + 36 5.468930e+02 5.750390e+02 3.159349e-01 4108 1 + 37 6.254119e+02 5.750390e+02 3.210938e-01 4211 1 + 38 6.006553e+02 5.750390e+02 3.262949e-01 4314 1 + 39 6.254119e+02 5.750390e+02 3.315229e-01 4417 1 + 40 6.254119e+02 5.750390e+02 3.367560e-01 4520 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 3.259790e-01 +total time (s) : 3.367560e-01 total solves : 4520 -best bound : 5.651222e+02 -simulation ci : 5.521404e+02 ± 3.564836e+01 +best bound : 5.750390e+02 +simulation ci : 5.703792e+02 ± 3.367313e+01 numeric issues : 0 ------------------------------------------------------------------- @@ -145,11 +145,11 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 1.079600e+03 3.157700e+02 4.220796e-02 104 1 - 10 6.829100e+02 6.829100e+02 1.370349e-01 1040 1 + 1 1.079600e+03 3.157700e+02 4.417706e-02 104 1 + 10 6.829100e+02 6.829100e+02 1.409280e-01 1040 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 1.370349e-01 +total time (s) : 1.409280e-01 total solves : 1040 best bound : 6.829100e+02 simulation ci : 7.289889e+02 ± 7.726064e+01 @@ -181,16 +181,16 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 0.000000e+00 0.000000e+00 4.334593e-02 208 1 - 47 7.257579e+02 2.476231e+02 1.059568e+00 9776 1 - 87 3.281183e+02 2.639688e+02 2.084431e+00 18096 1 - 100 7.140000e+01 
2.678261e+02 2.428722e+00 20800 1 + 1 5.630100e+02 1.043051e+02 4.839897e-02 208 1 + 45 6.671316e+02 2.504700e+02 1.066016e+00 9360 1 + 82 9.516355e+01 2.659068e+02 2.071876e+00 17056 1 + 100 1.130319e+02 2.699701e+02 2.597528e+00 20800 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 2.428722e+00 +total time (s) : 2.597528e+00 total solves : 20800 -best bound : 2.678261e+02 -simulation ci : 3.064557e+02 ± 4.472909e+01 +best bound : 2.699701e+02 +simulation ci : 2.878926e+02 ± 4.042203e+01 numeric issues : 0 ------------------------------------------------------------------- @@ -219,36 +219,37 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 5.255687e+04 3.677313e+04 2.121270e-01 2291 1 - 4 2.083487e+05 8.746115e+04 1.363061e+00 14988 1 - 6 3.223610e+05 9.276224e+04 3.593576e+00 35586 1 - 11 1.201300e+05 9.320034e+04 4.862322e+00 45793 1 - 13 1.666789e+05 9.326220e+04 6.751442e+00 59527 1 - 16 1.716321e+05 9.331955e+04 8.701844e+00 72848 1 - 22 5.702629e+04 9.335642e+04 1.392841e+01 103442 1 - 26 1.215511e+05 9.336760e+04 1.929000e+01 130286 1 - 30 3.044993e+05 9.337416e+04 2.735747e+01 165450 1 - 34 3.617240e+05 9.337718e+04 3.623241e+01 196870 1 - 37 1.084632e+05 9.337911e+04 4.212221e+01 217055 1 - 43 3.617192e+05 9.338103e+04 5.188009e+01 248481 1 - 48 1.298353e+05 9.338456e+04 5.701967e+01 264096 1 - 52 1.391148e+05 9.338625e+04 6.280596e+01 280748 1 - 55 8.306582e+04 9.338686e+04 6.868203e+01 296981 1 - 56 3.203149e+05 9.338696e+04 7.385514e+01 310712 1 - 62 2.691538e+05 9.338806e+04 8.324105e+01 334858 1 - 65 6.040284e+04 9.338883e+04 8.839991e+01 347555 1 - 66 2.919177e+05 9.338926e+04 9.370320e+01 360246 1 - 71 2.145927e+05 9.339060e+04 1.019890e+02 379397 1 - 73 1.372034e+05 9.339070e+04 1.086500e+02 394379 1 - 77 7.365508e+04 
9.339272e+04 1.142400e+02 406663 1 - 97 8.616771e+04 9.339489e+04 1.455912e+02 470787 1 - 100 3.928315e+04 9.339497e+04 1.527235e+02 484524 1 + 1 2.804100e+02 5.890209e+01 4.986382e-02 211 1 + 5 1.118528e+05 7.769672e+04 1.130307e+00 12079 1 + 7 3.074107e+05 9.149884e+04 2.485533e+00 25813 1 + 11 1.842863e+04 9.271590e+04 3.586766e+00 36225 1 + 15 1.552078e+04 9.312341e+04 4.630039e+00 44557 1 + 17 3.544782e+05 9.317381e+04 7.012162e+00 61619 1 + 19 1.336219e+05 9.322522e+04 8.111396e+00 69113 1 + 22 9.964271e+04 9.325388e+04 9.623285e+00 79314 1 + 30 4.765079e+04 9.335391e+04 1.462770e+01 108874 1 + 41 3.326940e+05 9.336278e+04 2.222214e+01 146763 1 + 43 1.532419e+05 9.336628e+04 2.868801e+01 170481 1 + 48 2.777906e+05 9.337515e+04 3.601744e+01 197536 1 + 52 1.211977e+05 9.337753e+04 4.139290e+01 216476 1 + 54 3.936783e+05 9.337783e+04 4.684129e+01 235202 1 + 60 2.848994e+05 9.338138e+04 5.289324e+01 255396 1 + 67 1.725143e+05 9.338381e+04 5.942574e+01 276009 1 + 70 1.669101e+05 9.338524e+04 6.451532e+01 291202 1 + 73 7.539478e+04 9.338579e+04 7.037460e+01 307643 1 + 80 6.868808e+04 9.338758e+04 7.620151e+01 323264 1 + 84 8.020504e+04 9.338788e+04 8.185919e+01 337836 1 + 88 7.946325e+04 9.338853e+04 8.784756e+01 352824 1 + 91 1.505061e+05 9.338979e+04 9.512746e+01 370513 1 + 96 2.513012e+05 9.339043e+04 1.032188e+02 389456 1 + 98 1.736876e+05 9.339087e+04 1.108766e+02 406726 1 + 100 2.943117e+04 9.339143e+04 1.129484e+02 411308 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 1.527235e+02 -total solves : 484524 -best bound : 9.339497e+04 -simulation ci : 1.095319e+05 ± 1.998967e+04 +total time (s) : 1.129484e+02 +total solves : 411308 +best bound : 9.339143e+04 +simulation ci : 9.239141e+04 ± 1.767246e+04 numeric issues : 0 ------------------------------------------------------------------- @@ -277,14 +278,14 @@ numerical stability report 
------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 2.500000e+04 2.500000e+03 3.380060e-03 12 1 - 10 1.250000e+04 8.333333e+03 1.312304e-02 120 1 + 1 1.250000e+04 2.500000e+03 3.525019e-03 12 1 + 10 7.500000e+03 8.333333e+03 1.363301e-02 120 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 1.312304e-02 +total time (s) : 1.363301e-02 total solves : 120 best bound : 8.333333e+03 -simulation ci : 8.250000e+03 ± 4.324479e+03 +simulation ci : 9.250000e+03 ± 2.428125e+03 numeric issues : 0 ------------------------------------------------------------------- @@ -314,18 +315,19 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 2.730368e+05 4.573582e+04 2.103090e-02 212 1 - 56 1.333763e+05 1.443352e+05 1.031628e+00 15172 1 - 113 1.623866e+05 1.443371e+05 2.033933e+00 29456 1 - 174 1.567026e+05 1.443373e+05 3.041980e+00 42388 1 - 232 1.228079e+05 1.443373e+05 4.057865e+00 55784 1 - 286 1.627237e+05 1.443373e+05 5.051593e+00 67232 1 + 1 3.418789e+05 4.573582e+04 1.870894e-02 212 1 + 56 1.312713e+05 1.443368e+05 1.032509e+00 15172 1 + 112 1.216711e+05 1.443373e+05 2.050640e+00 29244 1 + 171 1.153553e+05 1.443373e+05 3.061592e+00 41752 1 + 224 1.020500e+05 1.443373e+05 4.067351e+00 54088 1 + 280 1.890395e+05 1.443374e+05 5.074809e+00 65960 1 + 286 1.510184e+05 1.443374e+05 5.174048e+00 67232 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 5.051593e+00 +total time (s) : 5.174048e+00 total solves : 67232 -best bound : 1.443373e+05 -simulation ci : 1.479508e+05 ± 3.505903e+03 +best bound : 1.443374e+05 +simulation ci : 1.445408e+05 ± 3.345188e+03 numeric 
issues : 0 ------------------------------------------------------------------- @@ -354,30 +356,28 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 3.247832e+06 4.707504e+04 4.018521e-02 694 1 - 34 2.175489e+05 3.061820e+05 1.046095e+00 14692 1 - 63 5.619092e+05 3.122724e+05 2.067367e+00 26145 1 - 83 8.071607e+05 3.126091e+05 3.070845e+00 36833 1 - 104 4.313111e+05 3.126584e+05 4.127327e+00 46955 1 - 123 2.815007e+05 3.126644e+05 5.128183e+00 55374 1 - 144 1.348958e+06 3.126649e+05 6.350076e+00 65286 1 - 163 1.631869e+05 3.126650e+05 7.369768e+00 72991 1 - 174 4.737552e+05 3.126650e+05 8.414378e+00 80163 1 - 190 7.722816e+05 3.126650e+05 9.479770e+00 86899 1 - 247 4.887447e+05 3.126650e+05 1.456315e+01 113395 1 - 279 1.116618e+06 3.126650e+05 1.971491e+01 132012 1 - 312 6.087447e+05 3.126650e+05 2.504867e+01 145800 1 - 332 6.159868e+05 3.126650e+05 3.049544e+01 157727 1 - 355 1.869292e+06 3.126650e+05 3.630772e+01 168586 1 - 374 1.113461e+06 3.126650e+05 4.220300e+01 177530 1 - 394 9.391868e+05 3.126650e+05 4.828403e+01 185341 1 - 400 1.731237e+05 3.126650e+05 5.038249e+01 187951 1 + 1 1.443632e+05 4.240397e+04 1.039004e-02 148 1 + 30 9.357057e+05 3.038036e+05 1.093769e+00 13932 1 + 61 1.145322e+06 3.122381e+05 2.215047e+00 26794 1 + 82 1.318644e+05 3.126027e+05 3.224195e+00 36979 1 + 105 5.981819e+05 3.126573e+05 4.285995e+00 46242 1 + 135 1.789341e+05 3.126647e+05 5.294752e+00 54924 1 + 160 2.031025e+05 3.126650e+05 6.322740e+00 63055 1 + 175 1.625131e+05 3.126650e+05 7.328955e+00 70336 1 + 188 1.318724e+06 3.126650e+05 8.548523e+00 78224 1 + 202 1.114513e+06 3.126650e+05 9.598627e+00 83866 1 + 259 4.654184e+05 3.126650e+05 1.460564e+01 109270 1 + 307 6.329132e+05 3.126650e+05 1.994067e+01 128491 1 + 338 1.111447e+05 3.126650e+05 2.496963e+01 141479 1 + 370 5.510605e+05 3.126650e+05 3.015878e+01 
154846 1 + 396 3.984289e+05 3.126650e+05 3.536933e+01 166758 1 + 400 2.037763e+05 3.126650e+05 3.610123e+01 168358 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 5.038249e+01 -total solves : 187951 +total time (s) : 3.610123e+01 +total solves : 168358 best bound : 3.126650e+05 -simulation ci : 3.437536e+05 ± 3.240032e+04 +simulation ci : 2.988692e+05 ± 2.628178e+04 numeric issues : 0 ------------------------------------------------------------------- @@ -406,14 +406,14 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 1.562500e+04 1.991887e+03 5.522013e-03 18 1 - 40 2.750000e+04 8.072917e+03 1.332040e-01 1320 1 + 1 3.750000e+04 1.991887e+03 5.151033e-03 18 1 + 40 3.375000e+04 8.072917e+03 1.328862e-01 1320 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 1.332040e-01 +total time (s) : 1.328862e-01 total solves : 1320 best bound : 8.072917e+03 -simulation ci : 8.007212e+03 ± 2.328351e+03 +simulation ci : 9.511572e+03 ± 2.649970e+03 numeric issues : 0 ------------------------------------------------------------------- @@ -444,11 +444,11 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 2.499895e+01 1.562631e+00 1.619101e-02 6 1 - 40 8.333333e+00 8.333333e+00 6.723690e-01 246 1 + 1 2.499895e+01 1.562631e+00 1.630497e-02 6 1 + 40 8.333333e+00 8.333333e+00 6.955659e-01 246 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 6.723690e-01 +total time (s) : 6.955659e-01 total solves : 246 best bound : 8.333333e+00 simulation ci : 8.810723e+00 ± 
8.167195e-01 @@ -481,14 +481,14 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 0.000000e+00 9.000000e+00 2.183914e-03 3 1 - 44 2.400000e+01 6.561000e+00 7.216930e-01 3020 1 + 1 0.000000e+00 1.000000e+01 6.312847e-03 17 1 + 40 0.000000e+00 6.561000e+00 9.120228e-01 3214 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 7.216930e-01 -total solves : 3020 +total time (s) : 9.120228e-01 +total solves : 3214 best bound : 6.561000e+00 -simulation ci : 6.113636e+00 ± 3.054298e+00 +simulation ci : 9.575000e+00 ± 4.641862e+00 numeric issues : 0 ------------------------------------------------------------------- @@ -513,14 +513,16 @@ subproblem structure ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 2.246094e+04 5.000000e+03 2.144408e-02 39 1 - 40 -5.861978e-12 5.092593e+03 1.895840e-01 2160 1 + 1 6.703125e+03 3.160920e+03 2.269411e-02 39 1 + 210 9.281250e+03 5.092593e+03 1.023565e+00 9990 1 + 439 0.000000e+00 5.092593e+03 2.023999e+00 19521 1 + 597 9.765625e+03 5.092593e+03 2.728375e+00 25983 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 1.895840e-01 -total solves : 2160 +total time (s) : 2.728375e+00 +total solves : 25983 best bound : 5.092593e+03 -simulation ci : 5.251786e+03 ± 1.543723e+03 +simulation ci : 5.073010e+03 ± 3.046377e+02 numeric issues : 0 ------------------------------------------------------------------- @@ -545,14 +547,14 @@ subproblem structure ------------------------------------------------------------------- iteration simulation bound time (s) solves pid 
------------------------------------------------------------------- - 1 1.300000e+04 3.529412e+03 2.303314e-02 39 1 - 69 1.006250e+04 5.053782e+03 3.563740e-01 3591 1 + 1 1.100000e+04 3.209583e+03 2.326894e-02 39 1 + 194 0.000000e+00 5.135984e+03 9.376059e-01 9066 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 3.563740e-01 -total solves : 3591 -best bound : 5.053782e+03 -simulation ci : 6.274615e+03 ± 9.638861e+02 +total time (s) : 9.376059e-01 +total solves : 9066 +best bound : 5.135984e+03 +simulation ci : 5.339342e+03 ± 6.014698e+02 numeric issues : 0 ------------------------------------------------------------------- @@ -581,14 +583,14 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 1.875000e+04 3.958333e+03 3.507137e-03 12 1 - 40 1.125000e+04 1.062500e+04 6.445909e-02 642 1 + 1 2.500000e+04 3.958333e+03 3.526926e-03 12 1 + 40 5.000000e+03 1.062500e+04 6.979108e-02 642 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 6.445909e-02 +total time (s) : 6.979108e-02 total solves : 642 best bound : 1.062500e+04 -simulation ci : 1.009907e+04 ± 2.630322e+03 +simulation ci : 1.313702e+04 ± 2.776498e+03 numeric issues : 0 ------------------------------------------------------------------- @@ -618,16 +620,17 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 3.117488e+06 6.644081e+04 4.093120e-01 115 1 - 5 4.172784e+05 3.548923e+05 1.589070e+00 315 1 - 8 1.468074e+06 3.858370e+05 3.510332e+00 632 1 - 10 4.215058e+05 4.113363e+05 4.284235e+00 762 1 + 1 5.258639e+05 6.716719e+04 1.439750e-01 35 1 + 4 
1.359574e+06 3.992498e+05 2.520720e+00 416 1 + 6 2.723873e+05 4.062831e+05 3.753092e+00 586 1 + 8 1.418149e+06 4.338115e+05 5.991363e+00 952 1 + 10 7.243390e+04 4.372813e+05 7.110559e+00 1122 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 4.284235e+00 -total solves : 762 -best bound : 4.113363e+05 -simulation ci : 6.709825e+05 ± 5.910293e+05 +total time (s) : 7.110559e+00 +total solves : 1122 +best bound : 4.372813e+05 +simulation ci : 9.063244e+05 ± 4.927642e+05 numeric issues : 0 ------------------------------------------------------------------- @@ -659,16 +662,17 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 5.425983e+06 6.761151e+04 9.354270e-01 115 1 - 4 1.441443e+06 1.686541e+05 5.156463e+00 424 1 - 7 2.192062e+06 3.537604e+05 1.224686e+01 833 1 - 10 1.170978e+06 3.945641e+05 1.543006e+01 1062 1 + 1 1.053113e+05 1.124813e+05 3.466051e-01 27 1 + 4 1.586974e+06 2.147279e+05 3.144319e+00 228 1 + 5 1.739999e+06 3.610220e+05 8.236536e+00 519 1 + 7 1.828258e+05 3.689930e+05 9.812516e+00 609 1 + 10 6.414154e+04 4.187751e+05 1.500952e+01 958 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 1.543006e+01 -total solves : 1062 -best bound : 3.945641e+05 -simulation ci : 1.244673e+06 ± 1.037144e+06 +total time (s) : 1.500952e+01 +total solves : 958 +best bound : 4.187751e+05 +simulation ci : 8.786231e+05 ± 5.836925e+05 numeric issues : 0 ------------------------------------------------------------------- @@ -698,17 +702,17 @@ numerical stability report ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 3.748481e+06 6.717321e+04 4.102278e-01 
69 1 - 4 1.861689e+06 2.354052e+05 1.924735e+00 228 1 - 6 1.251373e+06 3.968360e+05 4.189864e+00 450 1 - 9 1.887933e+06 3.992425e+05 5.804354e+00 594 1 - 10 4.030031e+05 4.190937e+05 6.450608e+00 654 1 + 1 9.327645e+04 6.695882e+04 2.322772e-01 18 1 + 2 6.655103e+06 1.011807e+05 1.932665e+00 189 1 + 5 5.528351e+05 2.933743e+05 3.062281e+00 306 1 + 7 5.452180e+06 3.906191e+05 4.611054e+00 456 1 + 10 1.593956e+05 4.079643e+05 5.614497e+00 555 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 6.450608e+00 -total solves : 654 -best bound : 4.190937e+05 -simulation ci : 1.225850e+06 ± 7.384072e+05 +total time (s) : 5.614497e+00 +total solves : 555 +best bound : 4.079643e+05 +simulation ci : 1.394250e+06 ± 1.535039e+06 numeric issues : 0 ------------------------------------------------------------------- @@ -732,14 +736,14 @@ subproblem structure ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 4.375000e+04 1.991887e+03 1.403499e-02 18 1 - 20 1.875000e+03 8.072917e+03 5.001402e-02 360 1 + 1 9.375000e+03 1.991887e+03 1.412082e-02 18 1 + 20 5.000000e+03 8.072917e+03 5.055285e-02 360 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 5.001402e-02 +total time (s) : 5.055285e-02 total solves : 360 best bound : 8.072917e+03 -simulation ci : 1.074973e+04 ± 5.181002e+03 +simulation ci : 7.957724e+03 ± 2.914884e+03 numeric issues : 0 ------------------------------------------------------------------- @@ -763,11 +767,11 @@ subproblem structure ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 6.500000e+00 3.000000e+00 3.060102e-03 6 1 - 5 3.500000e+00 3.500000e+00 5.841017e-03 30 1 + 1 
6.500000e+00 3.000000e+00 3.360033e-03 6 1 + 5 3.500000e+00 3.500000e+00 6.263018e-03 30 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 5.841017e-03 +total time (s) : 6.263018e-03 total solves : 30 best bound : 3.500000e+00 simulation ci : 4.100000e+00 ± 1.176000e+00 @@ -794,11 +798,11 @@ subproblem structure ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 6.500000e+00 1.100000e+01 2.971888e-03 6 1 - 5 5.500000e+00 1.100000e+01 5.368948e-03 30 1 + 1 6.500000e+00 1.100000e+01 2.955914e-03 6 1 + 5 5.500000e+00 1.100000e+01 5.394936e-03 30 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 5.368948e-03 +total time (s) : 5.394936e-03 total solves : 30 best bound : 1.100000e+01 simulation ci : 5.700000e+00 ± 3.920000e-01 diff --git a/dev/tutorial/arma/index.html b/dev/tutorial/arma/index.html index 68a115e71..630b314c8 100644 --- a/dev/tutorial/arma/index.html +++ b/dev/tutorial/arma/index.html @@ -44,36 +44,37 @@ end return inflow end
simulator (generic function with 1 method)

When called with no arguments, it produces a vector of inflows:

simulator()
3-element Vector{Float64}:
- 59.6
- 49.6
- 39.6
Warning

The simulator must return a Vector{Float64}, so it is limited to a uni-variate random variable. It is possible to do something similar for multi-variate random variable, but you'll have to manually construct the Markov transition matrix, and solution times scale poorly, even in the two-dimensional case.

The next step is to call SDDP.MarkovianGraph with our simulator. This function will attempt to fit a Markov chain to the stochastic process produced by your simulator. There are two key arguments:

graph = SDDP.MarkovianGraph(simulator; budget = 8, scenarios = 30)
Root
+ 50.1
+ 50.2
+ 40.2
Warning

The simulator must return a Vector{Float64}, so it is limited to a uni-variate random variable. It is possible to do something similar for multi-variate random variable, but you'll have to manually construct the Markov transition matrix, and solution times scale poorly, even in the two-dimensional case.

The next step is to call SDDP.MarkovianGraph with our simulator. This function will attempt to fit a Markov chain to the stochastic process produced by your simulator. There are two key arguments:

graph = SDDP.MarkovianGraph(simulator; budget = 8, scenarios = 30)
Root
  (0, 0.0)
 Nodes
- (1, 45.13904806668787)
- (2, 40.110914565512545)
- (2, 68.56402543553762)
+ (1, 52.735231470201136)
+ (1, 59.6)
+ (2, 46.10406260386451)
+ (2, 59.7)
  (2, 69.2)
- (3, 37.45020877356096)
+ (3, 45.963253979286286)
  (3, 49.7)
- (3, 50.16138708833052)
- (3, 78.57896930035156)
+ (3, 67.50992975887934)
 Arcs
- (0, 0.0) => (1, 45.13904806668787) w.p. 1.0
- (1, 45.13904806668787) => (2, 68.56402543553762) w.p. 0.13333333333333333
- (1, 45.13904806668787) => (2, 40.110914565512545) w.p. 0.8
- (1, 45.13904806668787) => (2, 69.2) w.p. 0.06666666666666667
- (2, 40.110914565512545) => (3, 50.16138708833052) w.p. 0.08333333333333333
- (2, 40.110914565512545) => (3, 37.45020877356096) w.p. 0.6666666666666666
- (2, 40.110914565512545) => (3, 49.7) w.p. 0.25
- (2, 40.110914565512545) => (3, 78.57896930035156) w.p. 0.0
- (2, 68.56402543553762) => (3, 50.16138708833052) w.p. 0.75
- (2, 68.56402543553762) => (3, 37.45020877356096) w.p. 0.0
- (2, 68.56402543553762) => (3, 49.7) w.p. 0.25
- (2, 68.56402543553762) => (3, 78.57896930035156) w.p. 0.0
- (2, 69.2) => (3, 50.16138708833052) w.p. 0.0
- (2, 69.2) => (3, 37.45020877356096) w.p. 0.0
+ (0, 0.0) => (1, 52.735231470201136) w.p. 0.6333333333333333
+ (0, 0.0) => (1, 59.6) w.p. 0.36666666666666664
+ (1, 52.735231470201136) => (2, 59.7) w.p. 0.3684210526315789
+ (1, 52.735231470201136) => (2, 46.10406260386451) w.p. 0.631578947368421
+ (1, 52.735231470201136) => (2, 69.2) w.p. 0.0
+ (1, 59.6) => (2, 59.7) w.p. 0.36363636363636365
+ (1, 59.6) => (2, 46.10406260386451) w.p. 0.45454545454545453
+ (1, 59.6) => (2, 69.2) w.p. 0.18181818181818182
+ (2, 46.10406260386451) => (3, 45.963253979286286) w.p. 0.5294117647058824
+ (2, 46.10406260386451) => (3, 49.7) w.p. 0.17647058823529413
+ (2, 46.10406260386451) => (3, 67.50992975887934) w.p. 0.29411764705882354
+ (2, 59.7) => (3, 45.963253979286286) w.p. 0.18181818181818182
+ (2, 59.7) => (3, 49.7) w.p. 0.18181818181818182
+ (2, 59.7) => (3, 67.50992975887934) w.p. 0.6363636363636364
+ (2, 69.2) => (3, 45.963253979286286) w.p. 0.0
  (2, 69.2) => (3, 49.7) w.p. 0.0
- (2, 69.2) => (3, 78.57896930035156) w.p. 1.0

Here we can see we have created a MarkovianGraph with nodes like (2, 59.7). The first element of each node is the stage, and the second element is the inflow.

Create a SDDP.PolicyGraph using graph as follows:

model = SDDP.PolicyGraph(
+ (2, 69.2) => (3, 67.50992975887934) w.p. 1.0

Here we can see we have created a MarkovianGraph with nodes like (2, 59.7). The first element of each node is the stage, and the second element is the inflow.

Create a SDDP.PolicyGraph using graph as follows:

model = SDDP.PolicyGraph(
     graph;  # <--- New stuff
     sense = :Min,
     lower_bound = 0.0,
@@ -90,7 +91,7 @@
     # The new water balance constraint using the node:
     @constraint(sp, x.out == x.in - g_h - s + inflow)
 end
A policy graph with 8 nodes.
- Node indices: (1, 45.13904806668787), (2, 40.110914565512545), (2, 68.56402543553762), (2, 69.2), (3, 37.45020877356096), (3, 49.7), (3, 50.16138708833052), (3, 78.57896930035156)
+ Node indices: (1, 52.735231470201136), (1, 59.6), (2, 46.10406260386451), (2, 59.7), (2, 69.2), (3, 45.963253979286286), (3, 49.7), (3, 67.50992975887934)
 

When can this trick be used?

The Markov chain approach should be used when:

Vector auto-regressive models

The state-space expansion section assumed that the random variable was uni-variate. However, the approach naturally extends to vector auto-regressive models. For example, if inflow is a 2-dimensional vector, then we can model a vector auto-regressive model to it as follows:

\[inflow_{t} = A \times inflow_{t-1} + b + \varepsilon\]

Here A is a 2-by-2 matrix, and b and $\varepsilon$ are 2-by-1 vectors.

model = SDDP.LinearPolicyGraph(;
     stages = 3,
     sense = :Min,
@@ -130,4 +131,4 @@
     end
 end
A policy graph with 3 nodes.
  Node indices: 1, 2, 3
-
+ diff --git a/dev/tutorial/convex.cuts.json b/dev/tutorial/convex.cuts.json index be54de902..c904bd77b 100644 --- a/dev/tutorial/convex.cuts.json +++ b/dev/tutorial/convex.cuts.json @@ -1 +1 @@ -[{"risk_set_cuts":[],"node":"1","single_cuts":[{"state":{"x":0.3349461006592544},"intercept":136942.1292824911,"coefficients":{"x":-317616.66663406935}},{"state":{"x":0.0},"intercept":321380.8754720096,"coefficients":{"x":-318249.99997748446}},{"state":{"x":0.6698921986092845},"intercept":133290.78847266026,"coefficients":{"x":-318249.9981416703}},{"state":{"x":0.3349461006592544},"intercept":247961.59147394565,"coefficients":{"x":-318249.99996627046}},{"state":{"x":0.0},"intercept":357155.1909028029,"coefficients":{"x":-318250.00002266077}},{"state":{"x":0.3349461006592544},"intercept":251393.8990334667,"coefficients":{"x":-318249.99996905896}},{"state":{"x":0.0},"intercept":358259.1644539555,"coefficients":{"x":-318250.00002568774}},{"state":{"x":0.3349461006592544},"intercept":251748.98306371618,"coefficients":{"x":-318249.999969381}},{"state":{"x":0.0},"intercept":358373.3743174498,"coefficients":{"x":-318250.00002601923}},{"state":{"x":0.6698921986092845},"intercept":145189.12205433843,"coefficients":{"x":-318249.99964910885}},{"state":{"x":0.3349461006592544},"intercept":251788.59311496522,"coefficients":{"x":-318249.9999694168}},{"state":{"x":0.0},"intercept":358386.11456528073,"coefficients":{"x":-318250.0000260564}},{"state":{"x":0.0},"intercept":358386.4120597384,"coefficients":{"x":-318250.0000260573}},{"state":{"x":0.0},"intercept":358386.5077463884,"coefficients":{"x":-318250.0000260576}},{"state":{"x":0.0},"intercept":358386.5385232139,"coefficients":{"x":-318250.00002605765}},{"state":{"x":0.0},"intercept":358386.5484223277,"coefficients":{"x":-318250.0000260577}},{"state":{"x":0.0},"intercept":358386.5516062963,"coefficients":{"x":-318250.0000260577}},{"state":{"x":0.0},"intercept":358386.55263039324,"coefficients":{"x":-318250.0000260577}},{"state":{"x":0.3
349461006592544},"intercept":251789.9564249417,"coefficients":{"x":-318249.99996941804}},{"state":{"x":0.0},"intercept":358386.55306274304,"coefficients":{"x":-318250.0000260577}},{"state":{"x":0.3349461006592544},"intercept":251789.95656400398,"coefficients":{"x":-318249.99996941804}},{"state":{"x":0.0},"intercept":358386.55310747173,"coefficients":{"x":-318250.0000260577}},{"state":{"x":0.0},"intercept":358386.55313391384,"coefficients":{"x":-318250.0000256875}},{"state":{"x":0.6747303412755193},"intercept":143653.62200891238,"coefficients":{"x":-318249.99959476123}},{"state":{"x":0.33978424320526496},"intercept":250250.2177455026,"coefficients":{"x":-318249.9999680017}},{"state":{"x":5.004838142560195},"intercept":17103.901473410177,"coefficients":{"x":-633.333330646907}},{"state":{"x":7.669892057195955},"intercept":32654.041931507167,"coefficients":{"x":-601.6666756692949}},{"state":{"x":10.334945989032521},"intercept":47436.703200800126,"coefficients":{"x":-571.5833483922545}},{"state":{"x":7.334948325054086},"intercept":64565.9045675777,"coefficients":{"x":-543.0041941377664}},{"state":{"x":0.9122800185239994},"intercept":156798.53813351848,"coefficients":{"x":-101901.11805365628}},{"state":{"x":0.9122800185239993},"intercept":157729.19474565153,"coefficients":{"x":-102362.50000908317}},{"state":{"x":0.9122800187628556},"intercept":157735.60601407808,"coefficients":{"x":-102362.50000907027}},{"state":{"x":0.2122801143467389},"intercept":343517.1994889159,"coefficients":{"x":-349714.79163971066}},{"state":{"x":0.912280025626494},"intercept":174420.46771940507,"coefficients":{"x":-112326.35072004194}},{"state":{"x":2.3397932602884883},"intercept":93197.1153998917,"coefficients":{"x":-36691.96234371162}},{"state":{"x":5.0048449432488455},"intercept":82170.82437445674,"coefficients":{"x":-1755.284652332968}},{"state":{"x":4.669896625302293},"intercept":99524.63371786261,"coefficients":{"x":-2139.1734451057787}},{"state":{"x":7.334948307671936},"intercept":110217.0
8054997647,"coefficients":{"x":-2032.2147834995826}},{"state":{"x":9.276037046428645},"intercept":122006.13139570816,"coefficients":{"x":-1930.6040513499606}},{"state":{"x":6.976038096403019},"intercept":141006.62873732453,"coefficients":{"x":-1834.0738728768026}},{"state":{"x":4.676039184995568},"intercept":158690.31340293546,"coefficients":{"x":-1794.913462363961}},{"state":{"x":2.3760403674278985},"intercept":175973.20023844708,"coefficients":{"x":-2151.722615467569}},{"state":{"x":5.0760419743493825},"intercept":182744.3756458228,"coefficients":{"x":-2044.136506036361}},{"state":{"x":7.776041922853973},"intercept":189415.47819387383,"coefficients":{"x":-1941.9296877768465}},{"state":{"x":5.476042148152215},"intercept":205095.3439979205,"coefficients":{"x":-1844.8332623912456}},{"state":{"x":3.176043283918792},"intercept":219702.89644986653,"coefficients":{"x":-2167.530318552344}},{"state":{"x":3.876044639111486},"intercept":228370.85474617465,"coefficients":{"x":-2059.153829325728}},{"state":{"x":4.576045883824845},"intercept":236639.73466951397,"coefficients":{"x":-1956.1961479763947}},{"state":{"x":2.2760470721390464},"intercept":250364.45138307905,"coefficients":{"x":-2202.7954283927484}},{"state":{"x":2.9760487413265793},"intercept":257536.43348017396,"coefficients":{"x":-2345.1037311818172}},{"state":{"x":3.676050136402303},"intercept":264256.48301632307,"coefficients":{"x":-2227.8485746441866}},{"state":{"x":1.37605036170031},"intercept":277453.00559802284,"coefficients":{"x":-2360.970836545438}},{"state":{"x":1.376050361545732},"intercept":285090.0075013862,"coefficients":{"x":-2445.2815833494064}},{"state":{"x":2.976049054838646},"intercept":288326.8590152926,"coefficients":{"x":-2498.678221529761}},{"state":{"x":0.6760504499143289},"intercept":347152.8024748099,"coefficients":{"x":-318407.9144185648}},{"state":{"x":1.3760503617917745},"intercept":316070.8903132867,"coefficients":{"x":-102570.42087611053}},{"state":{"x":1.399994643866852},"intercept":328
267.4302158122,"coefficients":{"x":-34221.88143296341}},{"state":{"x":4.099995368359592},"intercept":292632.5849806075,"coefficients":{"x":-2532.496233461436}},{"state":{"x":4.799995319082485},"intercept":297569.39010720694,"coefficients":{"x":-2553.9142168380195}},{"state":{"x":5.499995268291348},"intercept":302221.67171263706,"coefficients":{"x":-2426.2185431900657}},{"state":{"x":8.199996403059489},"intercept":302071.9582349289,"coefficients":{"x":-2304.907631777796}},{"state":{"x":5.899997127726299},"intercept":313146.94856794586,"coefficients":{"x":-2189.6622772850724}},{"state":{"x":6.599997852394495},"intercept":317135.6970837011,"coefficients":{"x":-2080.1791832686504}},{"state":{"x":9.299998950333483},"intercept":317007.3367126343,"coefficients":{"x":-1976.1702367294315}},{"state":{"x":1.2110766289770614},"intercept":360529.56010977516,"coefficients":{"x":-34056.420889004454}},{"state":{"x":0.0},"intercept":641876.5784654046,"coefficients":{"x":-328084.5332578369}},{"state":{"x":0.5110756583242404},"intercept":499364.1476814432,"coefficients":{"x":-328084.53321034234}},{"state":{"x":1.21107557029348},"intercept":390759.7836939916,"coefficients":{"x":-105476.76883837987}},{"state":{"x":1.2110755702934801},"intercept":390660.1736337749,"coefficients":{"x":-105476.7687539343}},{"state":{"x":1.211075547706911},"intercept":390660.1760203168,"coefficients":{"x":-105476.76873607034}},{"state":{"x":2.6747444913648444},"intercept":335188.23500127485,"coefficients":{"x":-2209.120673683118}},{"state":{"x":2.3397959214856345},"intercept":343315.19114857353,"coefficients":{"x":-35050.531502005404}},{"state":{"x":5.004847334724757},"intercept":334712.8049533929,"coefficients":{"x":-2349.1097454151886}},{"state":{"x":7.669898414120939},"intercept":333187.4558449335,"coefficients":{"x":-2231.654281204381}},{"state":{"x":10.334949324168644},"intercept":331994.8344739869,"coefficients":{"x":-2120.071581934777}},{"state":{"x":9.099999187461124},"intercept":338960.3484174822,"
coefficients":{"x":-2014.068019922966}},{"state":{"x":6.799999333302771},"intercept":347454.1269238311,"coefficients":{"x":-1913.3646397601287}},{"state":{"x":7.499999485318099},"intercept":349653.281210555,"coefficients":{"x":-1817.6964561621235}},{"state":{"x":8.19999959016092},"intercept":351594.1795330839,"coefficients":{"x":-1726.8116660024466}},{"state":{"x":5.899999533184279},"intercept":358266.3799195765,"coefficients":{"x":-1726.9807303967902}},{"state":{"x":6.599999726995124},"intercept":359636.0017985672,"coefficients":{"x":-1640.6317776937512}},{"state":{"x":9.299999917199154},"intercept":357686.08991271054,"coefficients":{"x":-1558.6002122307905}},{"state":{"x":1.5868063905196321},"intercept":390214.4652070011,"coefficients":{"x":-34844.53361102563}},{"state":{"x":1.5868063894936493},"intercept":390534.9627152055,"coefficients":{"x":-34984.31008204813}},{"state":{"x":3.0048452380309962},"intercept":370586.8417311528,"coefficients":{"x":-2076.8901882395594}},{"state":{"x":7.669896825133819},"intercept":363940.54655975185,"coefficients":{"x":-1948.6971213548416}},{"state":{"x":10.334948412649963},"intercept":361732.3243187864,"coefficients":{"x":-1851.2622805418814}},{"state":{"x":1.6022672602381465},"intercept":394988.8174163748,"coefficients":{"x":-34984.30982998743}},{"state":{"x":2.3022671742073593},"intercept":384991.5714587992,"coefficients":{"x":-12614.597865064712}},{"state":{"x":1.9673187347391143},"intercept":389604.0359369499,"coefficients":{"x":-12661.698156147886}},{"state":{"x":1.967318881936273},"intercept":389616.75637088047,"coefficients":{"x":-12661.698170729554}},{"state":{"x":1.6022672449604054},"intercept":396106.99573334295,"coefficients":{"x":-38043.84770535908}},{"state":{"x":2.3022671589296175},"intercept":385730.31555991987,"coefficients":{"x":-13630.551657218253}},{"state":{"x":1.9673187194613748},"intercept":390295.84760942543,"coefficients":{"x":-13630.55179884113}},{"state":{"x":2.6372156440525956},"intercept":383489.71834712
15,"coefficients":{"x":-5899.674590688319}},{"state":{"x":2.3022671742073593},"intercept":387112.8835855641,"coefficients":{"x":-14548.781560300187}},{"state":{"x":1.9673187347391146},"intercept":392056.9633355652,"coefficients":{"x":-14865.448698659997}},{"state":{"x":1.9673188822648664},"intercept":392056.96114263864,"coefficients":{"x":-14865.448679845462}},{"state":{"x":1.267318820764241},"intercept":416849.3210019446,"coefficients":{"x":-106711.66581344437}},{"state":{"x":1.9673187347391126},"intercept":394590.16944050975,"coefficients":{"x":-36610.25754452596}},{"state":{"x":1.9673187347880856},"intercept":395656.71093638777,"coefficients":{"x":-14411.478648783002}},{"state":{"x":1.2673188203565189},"intercept":421201.4628134103,"coefficients":{"x":-39305.662555130715}},{"state":{"x":1.9673187343313916},"intercept":395968.3477454449,"coefficients":{"x":-15265.023537826153}},{"state":{"x":2.302267174207357},"intercept":390890.4310666352,"coefficients":{"x":-14948.35675163683}},{"state":{"x":1.9673187347391123},"intercept":395968.3477398138,"coefficients":{"x":-15265.02355300997}},{"state":{"x":1.9673187347391148},"intercept":395968.34773981373,"coefficients":{"x":-15265.02355300997}},{"state":{"x":1.967318882264866},"intercept":395968.3454878297,"coefficients":{"x":-15265.023553009629}},{"state":{"x":1.2673189682899935},"intercept":421201.4569965016,"coefficients":{"x":-39305.66262878144}},{"state":{"x":1.9673188822648657},"intercept":395968.34549549065,"coefficients":{"x":-15265.02357201565}},{"state":{"x":1.2673188203565189},"intercept":421201.46281117667,"coefficients":{"x":-39305.66262792696}},{"state":{"x":1.9673187343313916},"intercept":395968.3477537068,"coefficients":{"x":-15265.023571881276}},{"state":{"x":2.3022671742563303},"intercept":390890.4310686814,"coefficients":{"x":-14948.356750574405}},{"state":{"x":1.9673187347880854},"intercept":395968.3477467354,"coefficients":{"x":-15265.023571881276}},{"state":{"x":1.2673188208132142},"intercept":421201
.4627932259,"coefficients":{"x":-39305.662627926984}},{"state":{"x":1.9673187347880854},"intercept":395968.34775386524,"coefficients":{"x":-15265.023581689975}},{"state":{"x":1.267318820813214},"intercept":421201.46279320447,"coefficients":{"x":-39305.66262723233}},{"state":{"x":1.9673187347880856},"intercept":395968.34776281344,"coefficients":{"x":-15265.023591251604}},{"state":{"x":1.2673188203565189},"intercept":421201.4628112491,"coefficients":{"x":-39305.662626403755}},{"state":{"x":1.9673187343313916},"intercept":395968.3477608443,"coefficients":{"x":-15265.023581529405}},{"state":{"x":2.30226717425633},"intercept":390890.4310743835,"coefficients":{"x":-14948.356740502528}},{"state":{"x":1.967318734788084},"intercept":395968.34775534295,"coefficients":{"x":-15265.023589321398}},{"state":{"x":1.2673189681764596},"intercept":421201.4570006434,"coefficients":{"x":-39305.66265300135}},{"state":{"x":1.967318882151332},"intercept":395968.3455135904,"coefficients":{"x":-15265.023598952817}},{"state":{"x":0.5673189090871034},"intercept":522814.0710837374,"coefficients":{"x":-330063.45547729474}},{"state":{"x":1.2673188203565173},"intercept":431102.55071398464,"coefficients":{"x":-110033.72901058463}},{"state":{"x":1.96731873433139},"intercept":399103.6921811189,"coefficients":{"x":-37662.24435019926}},{"state":{"x":2.302267174207358},"intercept":392415.57928818295,"coefficients":{"x":-14427.941032061965}},{"state":{"x":1.9673187347391126},"intercept":399103.69216521643,"coefficients":{"x":-37662.24435176261}},{"state":{"x":1.9673188822648662},"intercept":399103.6866091913,"coefficients":{"x":-37662.24425746003}},{"state":{"x":1.2673188199050534},"intercept":431603.39306248876,"coefficients":{"x":-110038.94202365348}},{"state":{"x":1.9673187338799256},"intercept":399262.2922591621,"coefficients":{"x":-37663.89497710635}},{"state":{"x":0.9022673478951462},"intercept":471773.27090053644,"coefficients":{"x":-110038.94229373526}},{"state":{"x":1.6022672598304237},"intercep
t":413591.94342198805,"coefficients":{"x":-40364.51297255164}},{"state":{"x":2.302267173799636},"intercept":392649.78387043474,"coefficients":{"x":-15283.659433615923}},{"state":{"x":1.9673187343313914},"intercept":399262.2922466388,"coefficients":{"x":-37663.89508257583}},{"state":{"x":2.302267214530231},"intercept":392649.7832472052,"coefficients":{"x":-15283.65942888247}},{"state":{"x":1.9673187750619794},"intercept":399262.2907130417,"coefficients":{"x":-37663.895086433564}},{"state":{"x":2.972164240127027},"intercept":384488.0024391501,"coefficients":{"x":-7341.3891489985945}},{"state":{"x":2.637215791578382},"intercept":388141.22183361615,"coefficients":{"x":-14885.00655164501}},{"state":{"x":2.3022673217331384},"intercept":393320.8600447513,"coefficients":{"x":-16056.868524892903}},{"state":{"x":1.967318882264869},"intercept":400121.3626138201,"coefficients":{"x":-38120.43797804411}},{"state":{"x":1.2673189682899961},"intercept":431842.21853917255,"coefficients":{"x":-110183.67935969353}},{"state":{"x":1.967318882264869},"intercept":400196.9958478507,"coefficients":{"x":-38166.27141034075}},{"state":{"x":1.267318968176459},"intercept":431842.21855183446,"coefficients":{"x":-110183.6794933074}},{"state":{"x":1.9673188821513308},"intercept":400196.99585215724,"coefficients":{"x":-38166.27156116972}},{"state":{"x":0.5673189094948293},"intercept":538515.903022638,"coefficients":{"x":-329702.65251775645}},{"state":{"x":1.2673188207642434},"intercept":436814.48161758634,"coefficients":{"x":-110069.42529553745}},{"state":{"x":1.967318734739115},"intercept":401771.54630600975,"coefficients":{"x":-38130.091092069684}},{"state":{"x":1.9673187347391137},"intercept":401771.5463058789,"coefficients":{"x":-38130.09113175757}},{"state":{"x":1.9673188817252705},"intercept":401771.54070140887,"coefficients":{"x":-38130.09107118641}},{"state":{"x":4.669896932701913},"intercept":376624.8955345056,"coefficients":{"x":-3861.0063447209986}},{"state":{"x":7.33494841703894},"interce
pt":370767.98124917416,"coefficients":{"x":-2758.885030961777}},{"state":{"x":6.600001243459023},"intercept":375456.3373002433,"coefficients":{"x":-2620.940867968088}},{"state":{"x":7.3000011388236805},"intercept":376193.04657441954,"coefficients":{"x":-2489.893872550569}},{"state":{"x":8.000001034598373},"intercept":376934.4185928042,"coefficients":{"x":-2365.399217748636}},{"state":{"x":8.700000929650473},"intercept":377678.1453103068,"coefficients":{"x":-2247.1292850905174}},{"state":{"x":11.400000824142676},"intercept":374152.5921911739,"coefficients":{"x":-2134.7728389650374}},{"state":{"x":9.100000720627644},"intercept":381192.54449824966,"coefficients":{"x":-2028.0342227438523}},{"state":{"x":11.80000061476332},"intercept":377976.93214845716,"coefficients":{"x":-1926.6325261672416}},{"state":{"x":12.500000511666968},"intercept":378670.51361489587,"coefficients":{"x":-1830.3009188997623}},{"state":{"x":13.20000040908882},"intercept":379180.0892606239,"coefficients":{"x":-1738.785889244455}},{"state":{"x":13.900000306864309},"intercept":379522.3257105462,"coefficients":{"x":-1651.8466097273467}},{"state":{"x":14.60000020438068},"intercept":379712.6829375427,"coefficients":{"x":-1569.2542931631688}},{"state":{"x":12.300000102743727},"intercept":384237.86800762557,"coefficients":{"x":-1490.791596565729}},{"state":{"x":2.0254477266394244},"intercept":410851.8687579187,"coefficients":{"x":-36277.401679997514}},{"state":{"x":1.3254478126550444},"intercept":440276.3060850578,"coefficients":{"x":-105989.17313785825}},{"state":{"x":2.025447724543697},"intercept":414129.41867830604,"coefficients":{"x":-34985.32282945345}},{"state":{"x":2.025447724543699},"intercept":414198.3248617031,"coefficients":{"x":-35146.571026037745}},{"state":{"x":2.025447724226735},"intercept":414198.3009995523,"coefficients":{"x":-35146.57098114291}},{"state":{"x":2.3603961098695905},"intercept":409510.73269275285,"coefficients":{"x":-12551.8316074991}},{"state":{"x":2.025447724543698},"interc
ept":414738.43929685873,"coefficients":{"x":-35146.57106294856}},{"state":{"x":2.0254477266394244},"intercept":414738.43922235584,"coefficients":{"x":-35146.57120808111}},{"state":{"x":1.3254478126550422},"intercept":442944.27303486085,"coefficients":{"x":-109013.91955597432}},{"state":{"x":2.025447724543696},"intercept":415583.2954971347,"coefficients":{"x":-36104.4078939756}},{"state":{"x":2.025447726639424},"intercept":415583.29542063014,"coefficients":{"x":-36104.407939124736}},{"state":{"x":1.325447812655043},"intercept":442944.2730312205,"coefficients":{"x":-109013.89327370132}},{"state":{"x":2.0254477245436973},"intercept":415583.2955043535,"coefficients":{"x":-36104.40349012345}},{"state":{"x":2.0254478244801244},"intercept":415583.2918962084,"coefficients":{"x":-36104.4034901196}},{"state":{"x":6.6747420176200585},"intercept":393109.91614840797,"coefficients":{"x":-1577.5013888086723}},{"state":{"x":6.33979361406273},"intercept":394265.35873771703,"coefficients":{"x":-1632.417612309206}},{"state":{"x":6.004845210544334},"intercept":395505.5085028688,"coefficients":{"x":-1667.197948211733}},{"state":{"x":5.669896807060497},"intercept":396825.85512628744,"coefficients":{"x":-2111.2791103261943}},{"state":{"x":10.334948403601656},"intercept":389351.82317319035,"coefficients":{"x":-1689.2254085769464}}],"multi_cuts":[]}] \ No newline at end of file 
+[{"risk_set_cuts":[],"node":"1","single_cuts":[{"state":{"x":0.0},"intercept":243326.5932873207,"coefficients":{"x":-317616.6666712048}},{"state":{"x":0.3349461006592544},"intercept":214784.27893914568,"coefficients":{"x":-318249.99987099384}},{"state":{"x":0.0},"intercept":346483.98066657834,"coefficients":{"x":-318250.0000028512}},{"state":{"x":0.3349461006592544},"intercept":247961.59185993217,"coefficients":{"x":-318249.99996627046}},{"state":{"x":0.0},"intercept":357155.19102695247,"coefficients":{"x":-318250.00002266077}},{"state":{"x":2.0048370032484706},"intercept":20705.857493325177,"coefficients":{"x":-1583.3334217233435}},{"state":{"x":4.669890967482002},"intercept":35913.29065408911,"coefficients":{"x":-1636.1111774305496}},{"state":{"x":7.33494488161516},"intercept":50205.41313090136,"coefficients":{"x":-1554.3056315383567}},{"state":{"x":1.268175613984895},"intercept":114949.47713380735,"coefficients":{"x":-102221.36345543271}},{"state":{"x":0.9332272030619956},"intercept":149670.80577402463,"coefficients":{"x":-102362.50001932161}},{"state":{"x":0.933227202612769},"intercept":149672.93057816324,"coefficients":{"x":-102362.50001937989}},{"state":{"x":3.3397936487934445},"intercept":74623.24273843739,"coefficients":{"x":-2075.5301266106544}},{"state":{"x":8.004845236911155},"intercept":82756.2150515296,"coefficients":{"x":-1971.753627990945}},{"state":{"x":7.669896824590542},"intercept":100219.40598231688,"coefficients":{"x":-1873.1659704766685}},{"state":{"x":7.3349484122902},"intercept":116559.68194318816,"coefficients":{"x":-1779.5076863206516}},{"state":{"x":1.1491815902238438},"intercept":170094.82367224054,"coefficients":{"x":-102292.67735797323}},{"state":{"x":1.149181590223844},"intercept":178971.7503036715,"coefficients":{"x":-33976.01439884535}},{"state":{"x":1.1491815899737854},"intercept":182416.44259406766,"coefficients":{"x":-43785.08577465985}},{"state":{"x":1.484130062526308},"intercept":168127.0092481356,"coefficients":{"x":-33976.0144
4790188}},{"state":{"x":1.1491815902238438},"intercept":182587.17028538653,"coefficients":{"x":-43785.085783349234}},{"state":{"x":1.1491815898724624},"intercept":182587.1703007736,"coefficients":{"x":-43785.08578338941}},{"state":{"x":2.1540270116948714},"intercept":154978.0755765416,"coefficients":{"x":-15448.60910559536}},{"state":{"x":1.8190785369205769},"intercept":165520.30896643223,"coefficients":{"x":-19707.33669457065}},{"state":{"x":1.4841300622762488},"intercept":175327.11730175948,"coefficients":{"x":-39583.33761959751}},{"state":{"x":1.1491815899737854},"intercept":188585.4958220573,"coefficients":{"x":-39583.33781802851}},{"state":{"x":1.4841300625263085},"intercept":175327.11729165664,"coefficients":{"x":-39583.33749917974}},{"state":{"x":1.149181590223844},"intercept":188585.49581204227,"coefficients":{"x":-39583.33779853178}},{"state":{"x":1.1491815703212243},"intercept":188585.4966006031,"coefficients":{"x":-39583.33778891499}},{"state":{"x":0.449181678944376},"intercept":309605.63519955456,"coefficients":{"x":-323857.32350638014}},{"state":{"x":1.1491815902238438},"intercept":210089.04550280658,"coefficients":{"x":-109745.47565701211}},{"state":{"x":1.1491815886618142},"intercept":210089.0456737587,"coefficients":{"x":-109745.4756044481}},{"state":{"x":0.0},"intercept":491717.48564439546,"coefficients":{"x":-352369.40125122794}},{"state":{"x":0.08413023945600918},"intercept":473675.57049516944,"coefficients":{"x":-352369.40032746637}},{"state":{"x":0.7841301507358256},"intercept":264669.48456068314,"coefficients":{"x":-118774.3001669984}},{"state":{"x":1.4841300627018887},"intercept":193450.7650075414,"coefficients":{"x":-44802.5183441784}},{"state":{"x":1.1491815903994245},"intercept":220460.72456327028,"coefficients":{"x":-117425.7035158815}},{"state":{"x":0.449181678947928},"intercept":360539.3452273108,"coefficients":{"x":-331804.1308254233}},{"state":{"x":1.1491815902273954},"intercept":225368.02291087975,"coefficients":{"x":-110913.368177138
18}},{"state":{"x":2.669896931731039},"intercept":156895.9810126298,"coefficients":{"x":-19712.857018496263}},{"state":{"x":5.334948472507697},"intercept":135626.3335366288,"coefficients":{"x":-2146.844182631823}},{"state":{"x":8.155026293008879},"intercept":144180.7704806292,"coefficients":{"x":-2039.5019968062848}},{"state":{"x":5.855026408639713},"intercept":162477.96063249136,"coefficients":{"x":-1937.526926951285}},{"state":{"x":3.555026510897618},"intercept":179934.64261829923,"coefficients":{"x":-2196.8835384520276}},{"state":{"x":3.555026510910723},"intercept":192256.16566457867,"coefficients":{"x":-2341.3595949212395}},{"state":{"x":5.455024831962723},"intercept":199572.2711382006,"coefficients":{"x":-2224.2916372075974}},{"state":{"x":6.155024980257584},"intercept":209228.78874930658,"coefficients":{"x":-2113.0770722779353}},{"state":{"x":8.855025123464749},"intercept":214422.85201753268,"coefficients":{"x":-2007.4232278793638}},{"state":{"x":6.555025210468281},"intercept":229126.6719673162,"coefficients":{"x":-1907.05208002689}},{"state":{"x":4.255025351620916},"intercept":242709.60653625417,"coefficients":{"x":-1841.1330306597051}},{"state":{"x":6.955025636414863},"intercept":246535.8652997343,"coefficients":{"x":-1749.0764061185946}},{"state":{"x":4.655025775921051},"intercept":258580.44223524936,"coefficients":{"x":-1741.0817385152336}},{"state":{"x":7.355025942068891},"intercept":261648.1641649529,"coefficients":{"x":-1654.0276714963898}},{"state":{"x":10.055026080197097},"intercept":264592.9574496461,"coefficients":{"x":-1571.3262951554157}},{"state":{"x":7.755026146615816},"intercept":274883.2456262882,"coefficients":{"x":-1492.75999188085}},{"state":{"x":10.455026232903132},"intercept":277222.7072766031,"coefficients":{"x":-1418.1219990607337}},{"state":{"x":8.155026294132734},"intercept":286207.3887990464,"coefficients":{"x":-1347.2159093825917}},{"state":{"x":5.855026409763568},"intercept":294430.8400013744,"coefficients":{"x":-1279.8551426924732
}},{"state":{"x":3.5550265120214717},"intercept":302343.1311682506,"coefficients":{"x":-1443.9083121631993}},{"state":{"x":5.255025723516475},"intercept":304702.2418617136,"coefficients":{"x":-1547.8082375408476}},{"state":{"x":7.955025873850883},"intercept":305531.4940012441,"coefficients":{"x":-1470.417836403087}},{"state":{"x":8.655025969386415},"intercept":309140.15390838817,"coefficients":{"x":-1396.896953711898}},{"state":{"x":11.35502606253733},"intercept":309800.3095191744,"coefficients":{"x":-1327.0521119826922}},{"state":{"x":9.055026128262227},"intercept":316755.3913726764,"coefficients":{"x":-1260.6995147770738}},{"state":{"x":6.755026222525912},"intercept":323070.75889407715,"coefficients":{"x":-1197.6645545706876}},{"state":{"x":7.455026305089065},"intercept":325379.6519099872,"coefficients":{"x":-1137.7813389545372}},{"state":{"x":5.155026442909868},"intercept":330734.26211109466,"coefficients":{"x":-1353.927797368438}},{"state":{"x":2.8550265944039057},"intercept":337420.76587421953,"coefficients":{"x":-2012.0770817228426}},{"state":{"x":3.555026510897616},"intercept":340252.06052970496,"coefficients":{"x":-1911.4732583116916}},{"state":{"x":3.555026511477267},"intercept":344124.44567400985,"coefficients":{"x":-1843.933109632355}},{"state":{"x":2.8550265944039075},"intercept":349065.9581661391,"coefficients":{"x":-2167.2454221871712}},{"state":{"x":3.5550265108976182},"intercept":351265.85655340325,"coefficients":{"x":-2058.8831847080914}},{"state":{"x":3.5550265114772697},"intercept":354759.23189205385,"coefficients":{"x":-1955.9390491175366}},{"state":{"x":2.8550254819528758},"intercept":359388.5081130965,"coefficients":{"x":-2202.71397249902}},{"state":{"x":3.555025398446586},"intercept":361061.0472946449,"coefficients":{"x":-2092.5783084777213}},{"state":{"x":4.25502531496953},"intercept":362684.83580925677,"coefficients":{"x":-1987.9494110270584}},{"state":{"x":6.9550255997635},"intercept":360460.9667621512,"coefficients":{"x":-1888.551954164655
9}},{"state":{"x":7.655025739269688},"intercept":361971.31612351345,"coefficients":{"x":-1794.1243687391975}},{"state":{"x":8.355025836236665},"intercept":363259.7728641249,"coefficients":{"x":-1704.4181597687357}},{"state":{"x":9.055025932201513},"intercept":364344.75027729105,"coefficients":{"x":-1619.1972601503319}},{"state":{"x":9.755026026465199},"intercept":365243.3751569231,"coefficients":{"x":-1538.2374045542701}},{"state":{"x":7.455026119518702},"intercept":370355.5469139549,"coefficients":{"x":-1461.3255461977144}},{"state":{"x":5.155026257339504},"intercept":374873.68767090025,"coefficients":{"x":-1388.259374463504}},{"state":{"x":5.8550264088335435},"intercept":374887.8813388905,"coefficients":{"x":-1318.8464300203373}},{"state":{"x":3.5550265110914485},"intercept":378874.77912685706,"coefficients":{"x":-1468.602773916724}},{"state":{"x":3.0999984506933145},"intercept":380752.20804846927,"coefficients":{"x":-1563.4489817430042}},{"state":{"x":3.799998367182149},"intercept":380915.528317814,"coefficients":{"x":-1623.5177093557206}},{"state":{"x":4.499998590799713},"intercept":381056.2184749148,"coefficients":{"x":-1661.5612110830903}},{"state":{"x":5.199998703037432},"intercept":381184.9396422239,"coefficients":{"x":-1578.483228276576}},{"state":{"x":7.899998853999074},"intercept":378179.3247006333,"coefficients":{"x":-1499.55907742998}},{"state":{"x":8.59999899061622},"intercept":378200.7657921226,"coefficients":{"x":-1424.5811325889435}},{"state":{"x":9.299999084808695},"intercept":378104.90915819723,"coefficients":{"x":-1353.3520838783195}},{"state":{"x":9.999999185413483},"intercept":377903.4309013332,"coefficients":{"x":-1285.6844867491332}},{"state":{"x":10.69999925481104},"intercept":377607.132870405,"coefficients":{"x":-1221.4002687960435}},{"state":{"x":8.399999311363882},"intercept":380706.9914803375,"coefficients":{"x":-1160.3302708998146}},{"state":{"x":9.099999395199148},"intercept":380076.1997848356,"coefficients":{"x":-1102.3137896387698}},
{"state":{"x":6.799999489466939},"intercept":382661.99888293014,"coefficients":{"x":-1103.2807560116744}},{"state":{"x":7.499999566535523},"intercept":381905.36993162974,"coefficients":{"x":-1103.586847282372}},{"state":{"x":8.199999704222199},"intercept":381151.2030147172,"coefficients":{"x":-1085.7150436696033}},{"state":{"x":10.899999751890837},"intercept":378324.99079582107,"coefficients":{"x":-1037.0887250618464}},{"state":{"x":8.599999805202852},"intercept":380735.63234652794,"coefficients":{"x":-1045.318624008028}},{"state":{"x":9.299999899395319},"intercept":380009.2356194167,"coefficients":{"x":-1047.9246134449334}},{"state":{"x":0.3594940304571542},"intercept":537906.1317365929,"coefficients":{"x":-318023.8000572473}},{"state":{"x":0.0},"intercept":697771.5469663555,"coefficients":{"x":-318249.9999787308}},{"state":{"x":0.3594940409310607},"intercept":597892.8958152663,"coefficients":{"x":-318249.9999439591}},{"state":{"x":0.0},"intercept":716938.4003366556,"coefficients":{"x":-318250.00000078976}},{"state":{"x":0.0},"intercept":718417.883411682,"coefficients":{"x":-318250.0000028763}},{"state":{"x":0.6944424240164722},"intercept":497883.6746859181,"coefficients":{"x":-318249.99671778525}},{"state":{"x":0.35949404060097806},"intercept":604631.6384855562,"coefficients":{"x":-318249.9999518632}},{"state":{"x":0.35949403704012745},"intercept":604679.7087527572,"coefficients":{"x":-318249.9999519145}},{"state":{"x":1.0293909325946116},"intercept":462687.19297411933,"coefficients":{"x":-102115.94347407714}},{"state":{"x":0.6944424240164728},"intercept":498669.4142509098,"coefficients":{"x":-318249.998571546}},{"state":{"x":0.35949404060097795},"intercept":608468.4067116441,"coefficients":{"x":-349636.7149470919}},{"state":{"x":0.3594940304571542},"intercept":613055.329369961,"coefficients":{"x":-349636.7149552152}},{"state":{"x":0.0},"intercept":740200.1657838166,"coefficients":{"x":-349636.7153657388}},{"state":{"x":0.0},"intercept":740660.1318941033,"coeffici
ents":{"x":-349636.7153658658}},{"state":{"x":0.35949404060097795},"intercept":615113.4722547997,"coefficients":{"x":-349636.7149554372}},{"state":{"x":0.3594940304571542},"intercept":615159.6001262375,"coefficients":{"x":-349636.71495544235}},{"state":{"x":0.0},"intercept":740866.5181899939,"coefficients":{"x":-349636.71536592365}},{"state":{"x":0.6944424240164722},"intercept":505565.72786928073,"coefficients":{"x":-318249.99857179483}},{"state":{"x":0.35949404060097806},"intercept":615178.8279151513,"coefficients":{"x":-349636.7149557339}},{"state":{"x":0.35949404060097817},"intercept":615180.2925387158,"coefficients":{"x":-349636.71495573403}},{"state":{"x":0.35949404159585396},"intercept":615180.7559883329,"coefficients":{"x":-349636.7149557341}},{"state":{"x":1.3446386424809131},"intercept":433283.63937315694,"coefficients":{"x":-102362.49927543449}},{"state":{"x":1.0096902874174092},"intercept":469041.9148776689,"coefficients":{"x":-112301.62630865777}},{"state":{"x":3.6747418996729104},"intercept":387326.68086261867,"coefficients":{"x":-1950.771258731704}},{"state":{"x":3.339793523341973},"intercept":389926.915418782,"coefficients":{"x":-2201.0775734311856}},{"state":{"x":6.004845093353954},"intercept":386081.2045153962,"coefficients":{"x":-2027.3490515168874}},{"state":{"x":5.6698967290835345},"intercept":388467.93246331497,"coefficients":{"x":-1925.9816000177323}},{"state":{"x":5.3349483629584915},"intercept":390553.9117132977,"coefficients":{"x":-1853.1217111446467}},{"state":{"x":1.6961998710080184},"intercept":422094.4931877247,"coefficients":{"x":-37099.00360978764}},{"state":{"x":1.69620001976265},"intercept":422367.1492936257,"coefficients":{"x":-37145.51493744289}},{"state":{"x":1.3311483460884042},"intercept":440412.78245012613,"coefficients":{"x":-102362.49938977088}},{"state":{"x":2.0311482580551847},"intercept":412293.96774638654,"coefficients":{"x":-33998.12449430643}},{"state":{"x":1.696199864091723},"intercept":423726.8325228316,"coefficients"
:{"x":-33998.124802735685}},{"state":{"x":2.0311482649714803},"intercept":412339.7916504517,"coefficients":{"x":-33998.12450768391}},{"state":{"x":1.696199871008018},"intercept":423727.41631844867,"coefficients":{"x":-33998.124802887}},{"state":{"x":1.6961998710080184},"intercept":423727.4164105144,"coefficients":{"x":-33998.12480288705}},{"state":{"x":1.6962000211249677},"intercept":423727.411307993,"coefficients":{"x":-33998.12480288699}},{"state":{"x":0.9962001093198705},"intercept":478510.82966974843,"coefficients":{"x":-122117.6992430361}},{"state":{"x":1.6962000211961294},"intercept":424934.5156710029,"coefficients":{"x":-40253.93804348352}},{"state":{"x":5.669897009690065},"intercept":391217.75173030375,"coefficients":{"x":-1806.9771490108446}},{"state":{"x":5.334948505667714},"intercept":392951.2586138514,"coefficients":{"x":-1777.7523387270867}},{"state":{"x":3.120506510768858},"intercept":398956.91868690774,"coefficients":{"x":-12279.02691949514}},{"state":{"x":2.785558076650702},"intercept":403791.27940416115,"coefficients":{"x":-5471.692063824315}},{"state":{"x":2.450609664507464},"intercept":408949.0169176871,"coefficients":{"x":-15113.116170283758}},{"state":{"x":2.4506096645074695},"intercept":408949.0169175407,"coefficients":{"x":-15113.116193883016}},{"state":{"x":2.450609664507464},"intercept":408949.016917923,"coefficients":{"x":-15113.11613928409}},{"state":{"x":2.4506096645074695},"intercept":408949.0169179229,"coefficients":{"x":-15113.116139284091}},{"state":{"x":2.4506096645074678},"intercept":408949.0169179229,"coefficients":{"x":-15113.11613928409}},{"state":{"x":2.4506096645074655},"intercept":408949.01691945695,"coefficients":{"x":-15113.116084945266}},{"state":{"x":2.4506096645003574},"intercept":408949.01691803033,"coefficients":{"x":-15113.116139284091}},{"state":{"x":2.785558073391243},"intercept":405535.0784522378,"coefficients":{"x":-7151.85600435222}},{"state":{"x":2.4506096612480057},"intercept":408987.76240466855,"coefficients":{
"x":-15645.167807961627}},{"state":{"x":1.7506097509509577},"intercept":426711.91014303104,"coefficients":{"x":-41885.359047948055}},{"state":{"x":2.450609664507468},"intercept":410244.1691328399,"coefficients":{"x":-16161.784595667094}},{"state":{"x":2.4506096517628975},"intercept":410244.1693387629,"coefficients":{"x":-16161.784586630356}},{"state":{"x":3.1205064980242883},"intercept":403559.7036805146,"coefficients":{"x":-7483.934146734947}},{"state":{"x":2.785558063906133},"intercept":406066.4357492571,"coefficients":{"x":-7483.934376760704}},{"state":{"x":2.450609651762897},"intercept":410310.9494445148,"coefficients":{"x":-16266.942739832335}},{"state":{"x":3.1205065074600866},"intercept":403581.85306045087,"coefficients":{"x":-7517.234168718434}},{"state":{"x":2.785558073341931},"intercept":406099.7389510607,"coefficients":{"x":-7517.234471980841}},{"state":{"x":2.4506096611986945},"intercept":410311.3188767014,"coefficients":{"x":-16277.487515121753}},{"state":{"x":2.0855582551521836},"intercept":416287.517450636,"coefficients":{"x":-16594.154434002478}},{"state":{"x":2.785558169219923},"intercept":406111.86588320526,"coefficients":{"x":-7620.851476311586}},{"state":{"x":2.4506097570766747},"intercept":410311.3173164794,"coefficients":{"x":-16277.487479021776}},{"state":{"x":6.339793873863363},"intercept":392676.6914506476,"coefficients":{"x":-2146.2882493996754}},{"state":{"x":6.004845410143706},"intercept":394964.33792923175,"coefficients":{"x":-2309.315909101239}},{"state":{"x":10.669896947506695},"intercept":386125.59939540154,"coefficients":{"x":-2193.8501390673114}},{"state":{"x":10.334948473732634},"intercept":388621.08648563665,"coefficients":{"x":-2084.157657051128}},{"state":{"x":11.006650416126917},"intercept":388925.5681549263,"coefficients":{"x":-1979.9497963164358}},{"state":{"x":13.706650486567295},"intercept":385392.4617287222,"coefficients":{"x":-1880.9523325599748}},{"state":{"x":16.406650514524003},"intercept":382070.6467023389,"coefficien
ts":{"x":-1786.9047303537477}},{"state":{"x":14.10665056566985},"intercept":387435.62407443835,"coefficients":{"x":-1697.5595117606422}},{"state":{"x":14.806650596663955},"intercept":387301.1772269141,"coefficients":{"x":-1612.6815525574239}},{"state":{"x":12.506650634196479},"intercept":391638.0230626307,"coefficients":{"x":-1532.0474965774756}},{"state":{"x":10.20665097060456},"intercept":395403.2256571818,"coefficients":{"x":-1455.445167085515}},{"state":{"x":10.906651230065135},"intercept":394514.7979342571,"coefficients":{"x":-1406.9303494866663}},{"state":{"x":11.606651303532688},"intercept":393598.2341143392,"coefficients":{"x":-1376.204235434106}},{"state":{"x":9.30665136249376},"intercept":396802.3123026786,"coefficients":{"x":-1431.6288147561604}},{"state":{"x":7.006651690993963},"intercept":400194.26248048793,"coefficients":{"x":-1597.3649703376636}},{"state":{"x":4.70665204646155},"intercept":404429.5163981754,"coefficients":{"x":-2089.165550675018}},{"state":{"x":5.406652488917946},"intercept":403938.4416334665,"coefficients":{"x":-1956.4715282863613}},{"state":{"x":3.106652888454721},"intercept":410085.35258391674,"coefficients":{"x":-6724.086871863283}},{"state":{"x":3.6066517504557676},"intercept":409745.65468073473,"coefficients":{"x":-3698.8434914317622}},{"state":{"x":6.306651800679159},"intercept":403486.6292000351,"coefficients":{"x":-2202.882659169616}},{"state":{"x":7.006652170720468},"intercept":402954.20796990214,"coefficients":{"x":-2092.7384350019715}},{"state":{"x":4.706652526188047},"intercept":408849.33750858915,"coefficients":{"x":-2275.4010395038767}},{"state":{"x":2.406652968644402},"intercept":419244.71311517304,"coefficients":{"x":-14934.240610734476}},{"state":{"x":3.1066528863704965},"intercept":415184.66594647,"coefficients":{"x":-6399.719825835016}},{"state":{"x":3.0066531155344336},"intercept":416041.6082538012,"coefficients":{"x":-6399.719690219639}},{"state":{"x":1.7066530489523035},"intercept":436983.5414034796,"coefficient
s":{"x":-41417.72629887883}},{"state":{"x":2.406652962398816},"intercept":422314.47350159346,"coefficients":{"x":-15862.73475385749}},{"state":{"x":3.1066528801249103},"intercept":416385.7997574876,"coefficients":{"x":-6693.742990449311}},{"state":{"x":4.306651224670807},"intercept":411484.22784821223,"coefficients":{"x":-3790.228909629375}},{"state":{"x":5.006651709431396},"intercept":410156.7979485184,"coefficients":{"x":-2870.782835612371}},{"state":{"x":7.706652130234052},"intercept":403937.37503986433,"coefficients":{"x":-2538.706128139446}},{"state":{"x":5.4066524750167995},"intercept":410618.49595200235,"coefficients":{"x":-2557.847240099085}},{"state":{"x":3.1066528745535757},"intercept":418554.83434034773,"coefficients":{"x":-6783.184237556103}},{"state":{"x":4.70665252667139},"intercept":413241.06989388773,"coefficients":{"x":-2569.969976205149}},{"state":{"x":2.406652969127746},"intercept":425007.9175919115,"coefficients":{"x":-14879.437107573209}},{"state":{"x":3.10665288685384},"intercept":419980.96027451433,"coefficients":{"x":-6475.645553047985}},{"state":{"x":5.406652488917895},"intercept":412250.7800006993,"coefficients":{"x":-2577.6476804328595}},{"state":{"x":3.10665288845467},"intercept":420521.6925596467,"coefficients":{"x":-6478.076715525404}},{"state":{"x":4.006651995131192},"intercept":417119.6531868028,"coefficients":{"x":-3817.646048921004}},{"state":{"x":4.706652045354932},"intercept":415392.21099622734,"coefficients":{"x":-2975.176375905775}},{"state":{"x":5.406652487811328},"intercept":414026.8555342918,"coefficients":{"x":-2834.278394940815}},{"state":{"x":3.1066528873481025},"intercept":421787.49885361805,"coefficients":{"x":-3898.913837487905}},{"state":{"x":3.106652888454719},"intercept":421848.1854963664,"coefficients":{"x":-3898.9138415169987}},{"state":{"x":5.606651021598},"intercept":414188.4667032347,"coefficients":{"x":-2745.043027428624}},{"state":{"x":3.3066514129353157},"intercept":421465.37403314153,"coefficients":{"x":-305
3.920318124864}},{"state":{"x":4.006652476941914},"intercept":419694.19136480906,"coefficients":{"x":-2884.149600195137}},{"state":{"x":4.706652527165653},"intercept":418160.04280414653,"coefficients":{"x":-2776.6281085363744}},{"state":{"x":2.406652969622008},"intercept":428149.3628766297,"coefficients":{"x":-14944.878717370271}},{"state":{"x":3.1066528873481025},"intercept":424082.58494786004,"coefficients":{"x":-6561.8104518604}},{"state":{"x":3.106652888069965},"intercept":424305.531624193,"coefficients":{"x":-3907.1723717774585}},{"state":{"x":4.099993936642912},"intercept":420820.4578179415,"coefficients":{"x":-3066.5368729518023}},{"state":{"x":6.799994929404146},"intercept":413400.4738650521,"coefficients":{"x":-2800.3356029858382}},{"state":{"x":7.499995288682326},"intercept":412183.16675211594,"coefficients":{"x":-2660.3188627221552}},{"state":{"x":5.199995636312938},"intercept":418755.72711521515,"coefficients":{"x":-2634.868651033089}},{"state":{"x":2.8999960459777756},"intercept":426621.152919821,"coefficients":{"x":-6516.919944960298}},{"state":{"x":3.599995973775315},"intercept":423936.91683976835,"coefficients":{"x":-3848.066395206297}},{"state":{"x":6.299996023998704},"intercept":416413.8135085766,"coefficients":{"x":-2618.7501686166443}},{"state":{"x":8.99999639420441},"intercept":410127.71618979564,"coefficients":{"x":-2487.8126899122553}},{"state":{"x":6.699996725214312},"intercept":416263.27891381615,"coefficients":{"x":-2363.422123198834}},{"state":{"x":4.3999970864700515},"intercept":422269.94485251256,"coefficients":{"x":-2916.9712543560754}},{"state":{"x":7.099997559443018},"intercept":415895.51380768494,"coefficients":{"x":-2420.541605914129}},{"state":{"x":7.799997913300575},"intercept":414673.72295966296,"coefficients":{"x":-2299.51456615111}},{"state":{"x":5.499998256868536},"intercept":420349.918986963,"coefficients":{"x":-2406.3591742730828}},{"state":{"x":8.199998652397083},"intercept":414384.33788842347,"coefficients":{"x":-2252.2071
20890494}},{"state":{"x":8.899998991202315},"intercept":413291.4118544336,"coefficients":{"x":-2139.5968024191347}},{"state":{"x":9.599999323081816},"intercept":412288.79211238446,"coefficients":{"x":-2032.6169917468742}},{"state":{"x":7.299999649383809},"intercept":417183.3302281372,"coefficients":{"x":-2049.3376605550866}},{"state":{"x":2.89325943205634},"intercept":428080.8553021631,"coefficients":{"x":-6331.500943275502}},{"state":{"x":2.1932601280157145},"intercept":433842.9120456896,"coefficients":{"x":-14714.57023338262}},{"state":{"x":2.893260045842833},"intercept":428930.33179272653,"coefficients":{"x":-6258.57079755115}},{"state":{"x":5.193259133240781},"intercept":421841.4281746458,"coefficients":{"x":-2247.913887526467}},{"state":{"x":2.893260154573954},"intercept":429205.48216369504,"coefficients":{"x":-6321.453165955178}},{"state":{"x":2.993256355147265},"intercept":428786.1971221138,"coefficients":{"x":-3663.633034617167}},{"state":{"x":5.6932563412111215},"intercept":421208.6504991468,"coefficients":{"x":-2310.7963713368863}},{"state":{"x":3.3932572405980195},"intercept":427797.0505397215,"coefficients":{"x":-3683.54568130759}},{"state":{"x":6.093257260749061},"intercept":420617.0109894186,"coefficients":{"x":-2413.504262948435}},{"state":{"x":6.7932581131327705},"intercept":419203.1226177168,"coefficients":{"x":-2177.509689898363}},{"state":{"x":4.493258915176524},"intercept":424965.59625635215,"coefficients":{"x":-2806.0006891962116}},{"state":{"x":2.1932602368986642},"intercept":435325.8250741329,"coefficients":{"x":-14954.180036262642}},{"state":{"x":2.893260154725778},"intercept":430715.10844239,"coefficients":{"x":-6574.057130511397}},{"state":{"x":2.893260045842841},"intercept":430722.7960150887,"coefficients":{"x":-6574.049711449181}},{"state":{"x":5.193259133392634},"intercept":423571.7148638828,"coefficients":{"x":-2528.111632871106}},{"state":{"x":2.8932601547258066},"intercept":430910.41999903653,"coefficients":{"x":-6486.0028093846795}},
{"state":{"x":2.8932601547258185},"intercept":430958.1207326541,"coefficients":{"x":-3804.4697814841993}},{"state":{"x":2.8932601547258203},"intercept":431084.5946608485,"coefficients":{"x":-3804.469844138428}},{"state":{"x":2.893260154725801},"intercept":431107.1777452502,"coefficients":{"x":-4059.217903772454}},{"state":{"x":2.8932600985803965},"intercept":431107.17797510786,"coefficients":{"x":-4059.217936079757}},{"state":{"x":2.7932602384858987},"intercept":431741.7390993699,"coefficients":{"x":-6740.80757353293}},{"state":{"x":1.4932601690510663},"intercept":455550.1163192305,"coefficients":{"x":-41755.19353874161}},{"state":{"x":2.1932600825250086},"intercept":438875.74016196985,"coefficients":{"x":-15227.795414783785}},{"state":{"x":2.893260000352129},"intercept":432191.7989636293,"coefficients":{"x":-6827.452036437791}},{"state":{"x":3.8999953618935206},"intercept":427641.9120125666,"coefficients":{"x":-3912.594975250263}},{"state":{"x":6.599995347958166},"intercept":420452.5180364468,"coefficients":{"x":-2440.1134325941584}},{"state":{"x":7.299996162453287},"intercept":418996.67815396923,"coefficients":{"x":-2318.107736573307}},{"state":{"x":9.999996736964825},"intercept":413247.8613319773,"coefficients":{"x":-2202.2023656439105}},{"state":{"x":7.699996924055818},"intercept":418534.84830536327,"coefficients":{"x":-2167.430758866599}},{"state":{"x":5.399997732115206},"intercept":423928.92308500106,"coefficients":{"x":-2409.055692217847}},{"state":{"x":8.099998690804675},"intercept":418008.4695458964,"coefficients":{"x":-2135.5738417381926}},{"state":{"x":10.7999994310063},"intercept":412713.6797308772,"coefficients":{"x":-2028.7951769603428}},{"state":{"x":11.49999955807327},"intercept":411775.03423116973,"coefficients":{"x":-1927.355447877377}},{"state":{"x":9.199999685140188},"intercept":416331.1536571871,"coefficients":{"x":-1886.11436996755}},{"state":{"x":11.899999849558643},"intercept":411460.5977653666,"coefficients":{"x":-1791.808722049475}},{"state
":{"x":14.599999945750248},"intercept":406866.56429212843,"coefficients":{"x":-1702.2183570045859}},{"state":{"x":12.299999898089725},"intercept":410860.22058772005,"coefficients":{"x":-1703.711619634177}},{"state":{"x":6.59697128741026},"intercept":421556.5519981763,"coefficients":{"x":-2310.1367060207426}},{"state":{"x":8.004845448633649},"intercept":418860.15283768025,"coefficients":{"x":-2060.3560591008827}},{"state":{"x":7.669896948629478},"intercept":419701.5814073351,"coefficients":{"x":-2017.3227469216235}},{"state":{"x":10.334948463692383},"intercept":414666.7970408577,"coefficients":{"x":-1845.0438988595401}}],"multi_cuts":[]}] \ No newline at end of file diff --git a/dev/tutorial/decision_hazard/index.html b/dev/tutorial/decision_hazard/index.html index fae144fea..75d95b70c 100644 --- a/dev/tutorial/decision_hazard/index.html +++ b/dev/tutorial/decision_hazard/index.html @@ -74,4 +74,4 @@ end end -train_and_compute_cost(decision_hazard_2)
Cost = $410.0

Now we find that the cost of choosing the thermal generation before observing the inflow adds a much more reasonable cost of $10.

Summary

To summarize, the difference between here-and-now and wait-and-see variables is a modeling choice.

To create a here-and-now decision, add it as a state variable to the previous stage

In some cases, you'll need to add an additional "first-stage" problem to enable the model to choose an optimal value for the here-and-now decision variable. You do not need to do this if the first stage is deterministic. You must make sure that the subproblem is feasible for all possible incoming values of the here-and-now decision variable.

+train_and_compute_cost(decision_hazard_2)
Cost = $410.0

Now we find that the cost of choosing the thermal generation before observing the inflow adds a much more reasonable cost of $10.

Summary

To summarize, the difference between here-and-now and wait-and-see variables is a modeling choice.

To create a here-and-now decision, add it as a state variable to the previous stage

In some cases, you'll need to add an additional "first-stage" problem to enable the model to choose an optimal value for the here-and-now decision variable. You do not need to do this if the first stage is deterministic. You must make sure that the subproblem is feasible for all possible incoming values of the here-and-now decision variable.

diff --git a/dev/tutorial/example_milk_producer/08bbc6cb.svg b/dev/tutorial/example_milk_producer/08bbc6cb.svg deleted file mode 100644 index 319090923..000000000 --- a/dev/tutorial/example_milk_producer/08bbc6cb.svg +++ /dev/null @@ -1,625 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/dev/tutorial/example_milk_producer/293d6bc8.svg b/dev/tutorial/example_milk_producer/293d6bc8.svg new file mode 100644 index 000000000..43924160c --- /dev/null +++ b/dev/tutorial/example_milk_producer/293d6bc8.svg @@ -0,0 +1,625 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/dev/tutorial/example_milk_producer/51c96f86.svg b/dev/tutorial/example_milk_producer/51c96f86.svg deleted file mode 100644 index e6f384c20..000000000 --- a/dev/tutorial/example_milk_producer/51c96f86.svg +++ /dev/null @@ -1,544 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/dev/tutorial/example_milk_producer/77e8c057.svg b/dev/tutorial/example_milk_producer/77e8c057.svg new file mode 100644 index 000000000..850e54043 --- /dev/null +++ b/dev/tutorial/example_milk_producer/77e8c057.svg @@ -0,0 +1,544 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/dev/tutorial/example_milk_producer/055226c1.svg b/dev/tutorial/example_milk_producer/ee182ae1.svg similarity index 59% rename from dev/tutorial/example_milk_producer/055226c1.svg rename to dev/tutorial/example_milk_producer/ee182ae1.svg index 8ba8c1b87..708b96e9e 100644 --- 
a/dev/tutorial/example_milk_producer/055226c1.svg +++ b/dev/tutorial/example_milk_producer/ee182ae1.svg @@ -1,148 +1,144 @@ - + - + - + - + - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/dev/tutorial/example_milk_producer/index.html b/dev/tutorial/example_milk_producer/index.html index b4b72d018..eb89b2163 100644 --- a/dev/tutorial/example_milk_producer/index.html +++ b/dev/tutorial/example_milk_producer/index.html @@ -18,18 +18,18 @@ end simulator()
12-element Vector{Float64}:
- 4.757210658651313
- 4.669100304511168
- 4.823047802305962
- 5.289499998722235
- 4.982468796691012
- 5.372628183501787
- 5.568575752322855
- 5.422572473001746
- 5.101475368753056
- 5.689607528228311
- 5.339847956553798
- 5.31830846172714

It may be helpful to visualize a number of simulations of the price process:

plot = Plots.plot(
+ 4.0126886398543276
+ 4.529340748809659
+ 5.081647664254853
+ 5.023072248514692
+ 4.671721504440123
+ 4.589353782355633
+ 4.6055874902164655
+ 5.309484235351905
+ 5.506382853598203
+ 5.907959530785486
+ 5.450315312297045
+ 6.058594017244071

It may be helpful to visualize a number of simulations of the price process:

plot = Plots.plot(
     [simulator() for _ in 1:500];
     color = "gray",
     opacity = 0.2,
@@ -38,7 +38,7 @@
     ylabel = "Price [\$/kg]",
     xlims = (1, 12),
     ylims = (3, 9),
-)
Example block output

The prices gradually revert to the mean of $6/kg, and there is high volatility.

We can't incorporate this price process directly into SDDP.jl, but we can fit a SDDP.MarkovianGraph directly from the simulator:

graph = SDDP.MarkovianGraph(simulator; budget = 30, scenarios = 10_000);

Here budget is the number of nodes in the policy graph, and scenarios is the number of simulations to use when estimating the transition probabilities.

The graph contains too many nodes to be show, but we can plot it:

for ((t, price), edges) in graph.nodes
+)
Example block output

The prices gradually revert to the mean of $6/kg, and there is high volatility.

We can't incorporate this price process directly into SDDP.jl, but we can fit a SDDP.MarkovianGraph directly from the simulator:

graph = SDDP.MarkovianGraph(simulator; budget = 30, scenarios = 10_000);

Here budget is the number of nodes in the policy graph, and scenarios is the number of simulations to use when estimating the transition probabilities.

The graph contains too many nodes to be show, but we can plot it:

for ((t, price), edges) in graph.nodes
     for ((t′, price′), probability) in edges
         Plots.plot!(
             plot,
@@ -50,7 +50,7 @@
     end
 end
 
-plot
Example block output

That looks okay. Try changing budget and scenarios to see how different Markovian policy graphs can be created.

Model

Now that we have a Markovian graph, we can build the model. See if you can work out how we arrived at this formulation by reading the background description. Do all the variables and constraints make sense?

model = SDDP.PolicyGraph(
+plot
Example block output

That looks okay. Try changing budget and scenarios to see how different Markovian policy graphs can be created.

Model

Now that we have a Markovian graph, we can build the model. See if you can work out how we arrived at this formulation by reading the background description. Do all the variables and constraints make sense?

model = SDDP.PolicyGraph(
     graph;
     sense = :Max,
     upper_bound = 1e2,
@@ -111,7 +111,7 @@
     end
     return
 end
A policy graph with 30 nodes.
- Node indices: (1, 4.585670379795294), ..., (12, 7.7983653020986425)
+ Node indices: (1, 4.578919383129089), ..., (12, 7.711508952722569)
 

Training a policy

Now we have a model, we train a policy. The SDDP.SimulatorSamplingScheme is used in the forward pass. It generates an out-of-sample sequence of prices using simulator and traverses the closest sequence of nodes in the policy graph. When calling SDDP.parameterize for each subproblem, it uses the new out-of-sample price instead of the price associated with the Markov node.

SDDP.train(
     model;
     time_limit = 20,
@@ -123,7 +123,7 @@
 problem
   nodes           : 30
   state variables : 5
-  scenarios       : 7.61719e+11
+  scenarios       : 9.49219e+11
   existing cuts   : false
 options
   solver          : serial mode
@@ -142,31 +142,31 @@
 -------------------------------------------------------------------
  iteration    simulation      bound        time (s)     solves  pid
 -------------------------------------------------------------------
-         1  -1.851068e+01  6.082580e+01  1.253403e+00       162   1
-        57   8.366327e+00  7.920620e+00  2.259916e+00      9234   1
-       106   1.008516e+01  7.898809e+00  3.270930e+00     17172   1
-       150   9.988110e+00  7.884976e+00  4.286971e+00     24300   1
-       189   1.004340e+01  7.884002e+00  5.308165e+00     30618   1
-       227   1.064751e+01  7.883383e+00  6.332887e+00     36774   1
-       263   1.048893e+01  7.882892e+00  7.348292e+00     42606   1
-       296   1.095086e+01  7.882802e+00  8.360027e+00     47952   1
-       330   9.548442e+00  7.882578e+00  9.385061e+00     53460   1
-       472   9.643479e+00  7.881958e+00  1.440997e+01     76464   1
-       591   1.019101e+01  7.881957e+00  1.945661e+01     95742   1
-       604   3.768654e+00  7.881957e+00  2.001620e+01     97848   1
+         1  -3.046519e+01  6.131071e+01  1.256958e+00       162   1
+        63   1.292233e+01  7.905125e+00  2.272572e+00     10206   1
+       112   9.307654e+00  7.896140e+00  3.284606e+00     18144   1
+       156   8.112625e+00  7.894985e+00  4.288534e+00     25272   1
+       199   1.046770e+01  7.894847e+00  5.300903e+00     32238   1
+       234   7.526852e+00  7.893376e+00  6.302604e+00     37908   1
+       269   9.742351e+00  7.893235e+00  7.304276e+00     43578   1
+       303   9.847280e+00  7.893047e+00  8.316021e+00     49086   1
+       336   9.009822e+00  7.892988e+00  9.328115e+00     54432   1
+       478   9.374486e+00  7.892676e+00  1.434819e+01     77436   1
+       599   9.603953e+00  7.892323e+00  1.936455e+01     97038   1
+       613   8.053651e+00  7.892310e+00  2.000001e+01     99306   1
 -------------------------------------------------------------------
 status         : time_limit
-total time (s) : 2.001620e+01
-total solves   : 97848
-best bound     :  7.881957e+00
-simulation ci  :  8.994076e+00 ± 3.177224e-01
+total time (s) : 2.000001e+01
+total solves   : 99306
+best bound     :  7.892310e+00
+simulation ci  :  8.902743e+00 ± 2.734557e-01
 numeric issues : 0
 -------------------------------------------------------------------
Warning

We're intentionally terminating the training early so that the documentation doesn't take too long to build. If you run this example locally, increase the time limit.

Simulating the policy

When simulating the policy, we can also use the SDDP.SimulatorSamplingScheme.

simulations = SDDP.simulate(
     model,
     200,
     Symbol[:x_stock, :u_forward_sell, :u_spot_sell, :u_spot_buy];
     sampling_scheme = SDDP.SimulatorSamplingScheme(simulator),
-);

To show how the sampling scheme uses the new out-of-sample price instead of the price associated with the Markov node, compare the index of the Markov state visited in stage 12 of the first simulation:

simulations[1][12][:node_index]
(12, 5.425844537172958)

to the realization of the noise (price, ω) passed to SDDP.parameterize:

simulations[1][12][:noise_term]
(5.037307210234058, 0.15)

Visualizing the policy

Finally, we can plot the policy to gain insight (although note that we terminated the training early, so we should run the re-train the policy for more iterations before making too many judgements).

plot = Plots.plot(
+);

To show how the sampling scheme uses the new out-of-sample price instead of the price associated with the Markov node, compare the index of the Markov state visited in stage 12 of the first simulation:

simulations[1][12][:node_index]
(12, 6.153625734406808)

to the realization of the noise (price, ω) passed to SDDP.parameterize:

simulations[1][12][:noise_term]
(6.468135863688155, 0.125)

Visualizing the policy

Finally, we can plot the policy to gain insight (although note that we terminated the training early, so we should run the re-train the policy for more iterations before making too many judgements).

plot = Plots.plot(
     SDDP.publication_plot(simulations; title = "x_stock.out") do data
         return data[:x_stock].out
     end,
@@ -180,4 +180,4 @@
         return data[:u_spot_sell]
     end;
     layout = (2, 2),
-)
Example block output

Next steps

+)Example block output

Next steps

diff --git a/dev/tutorial/example_newsvendor/120873e5.svg b/dev/tutorial/example_newsvendor/120873e5.svg new file mode 100644 index 000000000..1f2f83c0d --- /dev/null +++ b/dev/tutorial/example_newsvendor/120873e5.svg @@ -0,0 +1,37 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/dev/tutorial/example_newsvendor/31380ebe.svg b/dev/tutorial/example_newsvendor/31380ebe.svg deleted file mode 100644 index c836d4760..000000000 --- a/dev/tutorial/example_newsvendor/31380ebe.svg +++ /dev/null @@ -1,98 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/dev/tutorial/example_newsvendor/75368895.svg b/dev/tutorial/example_newsvendor/75368895.svg deleted file mode 100644 index 5d4aa73ea..000000000 --- a/dev/tutorial/example_newsvendor/75368895.svg +++ /dev/null @@ -1,37 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/dev/tutorial/example_newsvendor/e6131d3b.svg b/dev/tutorial/example_newsvendor/e6131d3b.svg new file mode 100644 index 000000000..60b5f297f --- /dev/null +++ b/dev/tutorial/example_newsvendor/e6131d3b.svg @@ -0,0 +1,100 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/dev/tutorial/example_newsvendor/index.html b/dev/tutorial/example_newsvendor/index.html index 01a3857ea..a498410b3 100644 --- a/dev/tutorial/example_newsvendor/index.html +++ b/dev/tutorial/example_newsvendor/index.html @@ -15,7 +15,7 @@ d = sort!(rand(D, N)); Ω = 1:N P = fill(1 / N, N); -StatsPlots.histogram(d; bins = 20, label = "", xlabel = "Demand")Example block output

Kelley's cutting plane algorithm

Kelley's cutting plane algorithm is an iterative method for maximizing concave functions. Given a concave function $f(x)$, Kelley's constructs an outer-approximation of the function at the minimum by a set of first-order Taylor series approximations (called cuts) constructed at a set of points $k = 1,\ldots,K$:

\[\begin{aligned} +StatsPlots.histogram(d; bins = 20, label = "", xlabel = "Demand")Example block output

Kelley's cutting plane algorithm

Kelley's cutting plane algorithm is an iterative method for maximizing concave functions. Given a concave function $f(x)$, Kelley's constructs an outer-approximation of the function at the minimum by a set of first-order Taylor series approximations (called cuts) constructed at a set of points $k = 1,\ldots,K$:

\[\begin{aligned} f^K = \max\limits_{\theta \in \mathbb{R}, x \in \mathbb{R}^N} \;\; & \theta\\ & \theta \le f(x_k) + \nabla f(x_k)^\top (x - x_k),\quad k=1,\ldots,K\\ & \theta \le M, @@ -168,50 +168,50 @@ println(" Added cut: $c") end

Solving iteration k = 1
   xᵏ = -0.0
-  V̅ = 1224.7336737228934
+  V̅ = 1203.729486273285
   V̲ = 0.0
   Added cut: -4.99999999999999 x_out + θ ≤ 0
 Solving iteration k = 2
-  xᵏ = 244.94673474457915
-  V̅ = 734.8402042337351
-  V̲ = 518.9147951178284
-  Added cut: 0.10000000000000007 x_out + θ ≤ 1033.3029380814448
+  xᵏ = 240.74589725465745
+  V̅ = 722.23769176397
+  V̲ = 536.2667025084404
+  Added cut: 0.10000000000000007 x_out + θ ≤ 1041.833086743221
 Solving iteration k = 3
-  xᵏ = 202.60841923165626
-  V̅ = 607.8252576949667
-  V̲ = 563.9161555633034
-  Added cut: -2.4499999999999993 x_out + θ ≤ 472.74236690905957
+  xᵏ = 204.28099740063195
+  V̅ = 612.8429922018938
+  V̲ = 573.8593373043365
+  Added cut: -2.551999999999999 x_out + θ ≤ 461.0962267391901
 Solving iteration k = 4
-  xᵏ = 219.82767496956293
-  V̅ = 571.6648206453626
-  V̲ = 559.1396876662975
-  Added cut: -0.9710000000000003 x_out + θ ≤ 785.3423652099781
+  xᵏ = 218.98071644194238
+  V̅ = 581.9735822151423
+  V̲ = 570.2628701411641
+  Added cut: -0.9710000000000003 x_out + θ ≤ 795.5940273599225
 Solving iteration k = 5
-  xᵏ = 211.3590252203643
-  V̅ = 567.8539282582234
-  V̲ = 564.6111245500526
-  Added cut: -1.7360000000000009 x_out + θ ≤ 620.409907208227
+  xᵏ = 211.57356142993842
+  V̅ = 577.884832648516
+  V̲ = 574.6085985572508
+  Added cut: -1.787000000000001 x_out + θ ≤ 619.6737671418272
 Solving iteration k = 6
-  xᵏ = 206.8172833321678
-  V̅ = 565.8101444085349
-  V̲ = 565.0697989534765
-  Added cut: -2.042000000000001 x_out + θ ≤ 556.3834730535247
+  xᵏ = 207.29090248710784
+  V̅ = 575.5208049120736
+  V̲ = 574.9117561627465
+  Added cut: -2.1440000000000006 x_out + θ ≤ 545.0618662046049
 Solving iteration k = 7
-  xᵏ = 209.23671292386314
-  V̅ = 565.1714149963273
-  V̲ = 565.0342998509071
-  Added cut: -1.8890000000000011 x_out + θ ≤ 588.2595749854542
+  xᵏ = 208.99692139277957
+  V̅ = 575.1574228851653
+  V̲ = 575.0342159563636
+  Added cut: -1.9910000000000012 x_out + θ ≤ 576.9151882488992
 Solving iteration k = 8
-  xᵏ = 208.34053550280586
-  V̅ = 565.1337755446427
-  V̲ = 565.1197230807632
-  Added cut: -1.9910000000000012 x_out + θ ≤ 566.9947879002907
+  xᵏ = 208.19164734833078
+  V̅ = 575.0414634227648
+  V̲ = 575.0272354736022
+  Added cut: -2.042000000000001 x_out + θ ≤ 566.2831862849725
 Solving iteration k = 9
-  xᵏ = 208.0649969954008
-  V̅ = 565.1222029273317
-  V̲ = 565.1222029273335
+  xᵏ = 208.47062674365705
+  V̅ = 575.0389526082065
+  V̲ = 575.0389526082085
 Terminating with near-optimal solution

To get the first-stage solution, we do:

optimize!(model)
-xᵏ = value(x_out)
208.0649969954008

To compute a second-stage solution, we do:

solve_second_stage(xᵏ, 170.0)
(V = 846.19350030046, λ = -0.1, x = 38.064996995400804, u = 170.0)

Policy Graph

Now let's see how we can formulate and train a policy for the two-stage newsvendor problem using SDDP.jl. Under the hood, SDDP.jl implements the exact algorithm that we just wrote by hand.

model = SDDP.LinearPolicyGraph(;
+xᵏ = value(x_out)
208.47062674365705

To compute a second-stage solution, we do:

solve_second_stage(xᵏ, 170.0)
(V = 846.1529373256343, λ = -0.1, x = 38.47062674365705, u = 170.0)

Policy Graph

Now let's see how we can formulate and train a policy for the two-stage newsvendor problem using SDDP.jl. Under the hood, SDDP.jl implements the exact algorithm that we just wrote by hand.

model = SDDP.LinearPolicyGraph(;
     stages = 2,
     sense = :Max,
     upper_bound = 5 * maximum(d),  # The `M` in θ <= M
@@ -261,87 +261,87 @@
 -------------------------------------------------------------------
  iteration    simulation      bound        time (s)     solves  pid
 -------------------------------------------------------------------
-         1   0.000000e+00  7.348402e+02  6.320953e-03       103   1
-         2   3.666771e+02  6.078253e+02  2.297211e-02       406   1
-         3   6.078253e+02  5.716648e+02  2.764297e-02       509   1
-         4   6.015621e+02  5.678539e+02  1.319020e-01       612   1
-         5   5.035377e+02  5.658101e+02  1.366930e-01       715   1
-         6   6.204518e+02  5.651714e+02  1.413820e-01       818   1
-         7   5.854729e+02  5.651338e+02  1.460829e-01       921   1
-         8   6.250216e+02  5.651222e+02  1.508119e-01      1024   1
-         9   4.580476e+02  5.651222e+02  1.556809e-01      1127   1
-        10   6.241950e+02  5.651222e+02  1.604221e-01      1230   1
-        11   5.019674e+02  5.651222e+02  1.652141e-01      1333   1
-        12   6.241950e+02  5.651222e+02  1.701851e-01      1436   1
-        13   6.241950e+02  5.651222e+02  1.750400e-01      1539   1
-        14   5.879335e+02  5.651222e+02  1.798840e-01      1642   1
-        15   5.364740e+02  5.651222e+02  1.848750e-01      1745   1
-        16   5.477448e+02  5.651222e+02  1.897919e-01      1848   1
-        17   5.392017e+02  5.651222e+02  1.946499e-01      1951   1
-        18   6.039872e+02  5.651222e+02  2.001410e-01      2054   1
-        19   6.241950e+02  5.651222e+02  2.050810e-01      2157   1
-        20   6.241950e+02  5.651222e+02  2.099879e-01      2260   1
-        21   6.241950e+02  5.651222e+02  2.297289e-01      2563   1
-        22   6.058459e+02  5.651222e+02  2.347450e-01      2666   1
-        23   6.241950e+02  5.651222e+02  2.397051e-01      2769   1
-        24   4.741885e+02  5.651222e+02  2.448599e-01      2872   1
-        25   6.241950e+02  5.651222e+02  2.499211e-01      2975   1
-        26   5.392017e+02  5.651222e+02  2.548850e-01      3078   1
-        27   6.241950e+02  5.651222e+02  2.597980e-01      3181   1
-        28   4.741885e+02  5.651222e+02  2.648840e-01      3284   1
-        29   4.741885e+02  5.651222e+02  2.697959e-01      3387   1
-        30   4.824023e+02  5.651222e+02  2.747409e-01      3490   1
-        31   4.339231e+02  5.651222e+02  2.797949e-01      3593   1
-        32   5.879335e+02  5.651222e+02  2.848771e-01      3696   1
-        33   4.017211e+02  5.651222e+02  2.898641e-01      3799   1
-        34   6.241950e+02  5.651222e+02  2.949221e-01      3902   1
-        35   6.241950e+02  5.651222e+02  2.999289e-01      4005   1
-        36   5.631941e+02  5.651222e+02  3.049719e-01      4108   1
-        37   6.241950e+02  5.651222e+02  3.108039e-01      4211   1
-        38   6.241950e+02  5.651222e+02  3.158510e-01      4314   1
-        39   6.241950e+02  5.651222e+02  3.208930e-01      4417   1
-        40   6.241950e+02  5.651222e+02  3.259790e-01      4520   1
+         1   0.000000e+00  7.222377e+02  6.361008e-03       103   1
+         2   4.512787e+02  6.128430e+02  2.346492e-02       406   1
+         3   5.977139e+02  5.819736e+02  2.814698e-02       509   1
+         4   4.017871e+02  5.778848e+02  1.408529e-01       612   1
+         5   6.078771e+02  5.755208e+02  1.456439e-01       715   1
+         6   6.218727e+02  5.751574e+02  1.503499e-01       818   1
+         7   6.269908e+02  5.750415e+02  1.550970e-01       921   1
+         8   6.245749e+02  5.750390e+02  1.598799e-01      1024   1
+         9   5.430265e+02  5.750390e+02  1.647799e-01      1127   1
+        10   6.162024e+02  5.750390e+02  1.697149e-01      1230   1
+        11   5.171242e+02  5.750390e+02  1.745780e-01      1333   1
+        12   6.254119e+02  5.750390e+02  1.794848e-01      1436   1
+        13   6.254119e+02  5.750390e+02  1.844709e-01      1539   1
+        14   6.254119e+02  5.750390e+02  1.893959e-01      1642   1
+        15   5.913270e+02  5.750390e+02  1.944010e-01      1745   1
+        16   5.364626e+02  5.750390e+02  1.994739e-01      1848   1
+        17   6.254119e+02  5.750390e+02  2.043970e-01      1951   1
+        18   6.254119e+02  5.750390e+02  2.093439e-01      2054   1
+        19   4.856748e+02  5.750390e+02  2.143738e-01      2157   1
+        20   6.152163e+02  5.750390e+02  2.194028e-01      2260   1
+        21   6.254119e+02  5.750390e+02  2.399249e-01      2563   1
+        22   4.763439e+02  5.750390e+02  2.450058e-01      2666   1
+        23   6.254119e+02  5.750390e+02  2.499430e-01      2769   1
+        24   5.364626e+02  5.750390e+02  2.548928e-01      2872   1
+        25   5.468930e+02  5.750390e+02  2.599459e-01      2975   1
+        26   6.254119e+02  5.750390e+02  2.648909e-01      3078   1
+        27   5.904198e+02  5.750390e+02  2.699230e-01      3181   1
+        28   6.254119e+02  5.750390e+02  2.749429e-01      3284   1
+        29   6.254119e+02  5.750390e+02  2.801199e-01      3387   1
+        30   6.254119e+02  5.750390e+02  2.852349e-01      3490   1
+        31   5.212098e+02  5.750390e+02  2.902520e-01      3593   1
+        32   6.084504e+02  5.750390e+02  2.954769e-01      3696   1
+        33   6.233432e+02  5.750390e+02  3.005829e-01      3799   1
+        34   5.461888e+02  5.750390e+02  3.056829e-01      3902   1
+        35   6.254119e+02  5.750390e+02  3.107898e-01      4005   1
+        36   5.468930e+02  5.750390e+02  3.159349e-01      4108   1
+        37   6.254119e+02  5.750390e+02  3.210938e-01      4211   1
+        38   6.006553e+02  5.750390e+02  3.262949e-01      4314   1
+        39   6.254119e+02  5.750390e+02  3.315229e-01      4417   1
+        40   6.254119e+02  5.750390e+02  3.367560e-01      4520   1
 -------------------------------------------------------------------
 status         : simulation_stopping
-total time (s) : 3.259790e-01
+total time (s) : 3.367560e-01
 total solves   : 4520
-best bound     :  5.651222e+02
-simulation ci  :  5.521404e+02 ± 3.564836e+01
+best bound     :  5.750390e+02
+simulation ci  :  5.703792e+02 ± 3.367313e+01
 numeric issues : 0
--------------------------------------------------------------------

One way to query the optimal policy is with SDDP.DecisionRule:

first_stage_rule = SDDP.DecisionRule(model; node = 1)
A decision rule for node 1
solution_1 = SDDP.evaluate(first_stage_rule; incoming_state = Dict(:x => 0.0))
(stage_objective = -416.12999399075915, outgoing_state = Dict(:x => 208.06499699537957), controls = Dict{Any, Any}())

Here's the second stage:

second_stage_rule = SDDP.DecisionRule(model; node = 2)
+-------------------------------------------------------------------

One way to query the optimal policy is with SDDP.DecisionRule:

first_stage_rule = SDDP.DecisionRule(model; node = 1)
A decision rule for node 1
solution_1 = SDDP.evaluate(first_stage_rule; incoming_state = Dict(:x => 0.0))
(stage_objective = -416.94125348716875, outgoing_state = Dict(:x => 208.47062674358438), controls = Dict{Any, Any}())

Here's the second stage:

second_stage_rule = SDDP.DecisionRule(model; node = 2)
 solution = SDDP.evaluate(
     second_stage_rule;
     incoming_state = Dict(:x => solution_1.outgoing_state[:x]),
     noise = 170.0,  # A value of d[ω], can be out-of-sample.
     controls_to_record = [:u_sell],
-)
(stage_objective = 846.193500300462, outgoing_state = Dict(:x => 38.06499699537957), controls = Dict(:u_sell => 170.0))

Simulation

Querying the decision rules is tedious. It's often more useful to simulate the policy:

simulations = SDDP.simulate(
+)
(stage_objective = 846.1529373256416, outgoing_state = Dict(:x => 38.470626743584376), controls = Dict(:u_sell => 170.0))

Simulation

Querying the decision rules is tedious. It's often more useful to simulate the policy:

simulations = SDDP.simulate(
     model,
     10,  #= number of replications =#
     [:x, :u_sell, :u_make];  #= variables to record =#
     skip_undefined_variables = true,
 );

simulations is a vector with 10 elements

length(simulations)
10

and each element is a vector with two elements (one for each stage)

length(simulations[1])
2

The first stage contains:

simulations[1][1]
Dict{Symbol, Any} with 9 entries:
-  :u_make          => 208.065
-  :bellman_term    => 981.252
+  :u_make          => 208.471
+  :bellman_term    => 991.98
   :noise_term      => nothing
   :node_index      => 1
-  :stage_objective => -416.13
+  :stage_objective => -416.941
   :objective_state => nothing
   :u_sell          => NaN
   :belief          => Dict(1=>1.0)
-  :x               => State{Float64}(0.0, 208.065)

The second stage contains:

simulations[1][2]
Dict{Symbol, Any} with 9 entries:
+  :x               => State{Float64}(0.0, 208.471)

The second stage contains:

simulations[1][2]
Dict{Symbol, Any} with 9 entries:
   :u_make          => NaN
   :bellman_term    => 0.0
-  :noise_term      => 190.865
+  :noise_term      => 206.471
   :node_index      => 2
-  :stage_objective => 952.604
+  :stage_objective => 1032.16
   :objective_state => nothing
-  :u_sell          => 190.865
+  :u_sell          => 206.471
   :belief          => Dict(2=>1.0)
-  :x               => State{Float64}(208.065, 17.2002)

We can compute aggregated statistics across the simulations:

objectives = map(simulations) do simulation
+  :x               => State{Float64}(208.471, 1.99914)

We can compute aggregated statistics across the simulations:

objectives = map(simulations) do simulation
     return sum(data[:stage_objective] for data in simulation)
 end
 μ, t = SDDP.confidence_interval(objectives)
-println("Simulation ci : $μ ± $t")
Simulation ci : 542.255113055428 ± 45.095438727318005

Risk aversion revisited

SDDP.jl contains a number of risk measures. One example is:

0.5 * SDDP.Expectation() + 0.5 * SDDP.WorstCase()
A convex combination of 0.5 * SDDP.Expectation() + 0.5 * SDDP.WorstCase()

You can construct a risk-averse policy by passing a risk measure to the risk_measure keyword argument of SDDP.train.

We can explore how the optimal decision changes with risk by creating a function:

function solve_newsvendor(risk_measure::SDDP.AbstractRiskMeasure)
+println("Simulation ci : $μ ± $t")
Simulation ci : 551.346084151388 ± 59.46207217081107

Risk aversion revisited

SDDP.jl contains a number of risk measures. One example is:

0.5 * SDDP.Expectation() + 0.5 * SDDP.WorstCase()
A convex combination of 0.5 * SDDP.Expectation() + 0.5 * SDDP.WorstCase()

You can construct a risk-averse policy by passing a risk measure to the risk_measure keyword argument of SDDP.train.

We can explore how the optimal decision changes with risk by creating a function:

function solve_newsvendor(risk_measure::SDDP.AbstractRiskMeasure)
     model = SDDP.LinearPolicyGraph(;
         stages = 2,
         sense = :Max,
@@ -367,7 +367,7 @@
     first_stage_rule = SDDP.DecisionRule(model; node = 1)
     solution = SDDP.evaluate(first_stage_rule; incoming_state = Dict(:x => 0.0))
     return solution.outgoing_state[:x]
-end
solve_newsvendor (generic function with 1 method)

Now we can see how many units a decision maker would order using CVaR:

solve_newsvendor(SDDP.CVaR(0.4))
187.2377407854791

as well as a decision-maker who cares only about the worst-case outcome:

solve_newsvendor(SDDP.WorstCase())
158.0733948180827

In general, the decision-maker will be somewhere between the two extremes. The SDDP.Entropic risk measure is a risk measure that has a single parameter that lets us explore the space of policies between the two extremes. When the parameter is small, the measure acts like SDDP.Expectation, and when it is large, it acts like SDDP.WorstCase.

Here is what we get if we solve our problem multiple times for different values of the risk aversion parameter $\gamma$:

Γ = [10^i for i in -4:0.5:1]
+end
solve_newsvendor (generic function with 1 method)

Now we can see how many units a decision maker would order using CVaR:

solve_newsvendor(SDDP.CVaR(0.4))
191.07353293854905

as well as a decision-maker who cares only about the worst-case outcome:

solve_newsvendor(SDDP.WorstCase())
157.9347845819746

In general, the decision-maker will be somewhere between the two extremes. The SDDP.Entropic risk measure is a risk measure that has a single parameter that lets us explore the space of policies between the two extremes. When the parameter is small, the measure acts like SDDP.Expectation, and when it is large, it acts like SDDP.WorstCase.

Here is what we get if we solve our problem multiple times for different values of the risk aversion parameter $\gamma$:

Γ = [10^i for i in -4:0.5:1]
 buy = [solve_newsvendor(SDDP.Entropic(γ)) for γ in Γ]
 Plots.plot(
     Γ,
@@ -376,4 +376,4 @@
     xlabel = "Risk aversion parameter γ",
     ylabel = "Number of pies to make",
     legend = false,
-)
Example block output

Things to try

There are a number of things you can try next:

+)Example block output

Things to try

There are a number of things you can try next:

diff --git a/dev/tutorial/example_reservoir/0e006628.svg b/dev/tutorial/example_reservoir/0e006628.svg new file mode 100644 index 000000000..7156f2a1c --- /dev/null +++ b/dev/tutorial/example_reservoir/0e006628.svg @@ -0,0 +1,86 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/dev/tutorial/example_reservoir/c70d804a.svg b/dev/tutorial/example_reservoir/3ebc5c2b.svg similarity index 84% rename from dev/tutorial/example_reservoir/c70d804a.svg rename to dev/tutorial/example_reservoir/3ebc5c2b.svg index e7558b29e..f71a831bd 100644 --- a/dev/tutorial/example_reservoir/c70d804a.svg +++ b/dev/tutorial/example_reservoir/3ebc5c2b.svg @@ -1,52 +1,52 @@ - + - + - + - + - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/dev/tutorial/example_reservoir/b2cec97b.svg b/dev/tutorial/example_reservoir/6352eda2.svg similarity index 85% rename from dev/tutorial/example_reservoir/b2cec97b.svg rename to dev/tutorial/example_reservoir/6352eda2.svg index fb5cc11e1..ab9867eb7 100644 --- a/dev/tutorial/example_reservoir/b2cec97b.svg +++ b/dev/tutorial/example_reservoir/6352eda2.svg @@ -1,46 +1,46 @@ - + - + - + - + - + - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/dev/tutorial/example_reservoir/f6caca7e.svg b/dev/tutorial/example_reservoir/99239794.svg similarity index 84% rename from dev/tutorial/example_reservoir/f6caca7e.svg rename to dev/tutorial/example_reservoir/99239794.svg index b8e9739fb..b7529d856 100644 --- a/dev/tutorial/example_reservoir/f6caca7e.svg +++ b/dev/tutorial/example_reservoir/99239794.svg @@ -1,52 +1,52 @@ - + - + - + - + - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff 
--git a/dev/tutorial/example_reservoir/1e774439.svg b/dev/tutorial/example_reservoir/b237d57b.svg similarity index 85% rename from dev/tutorial/example_reservoir/1e774439.svg rename to dev/tutorial/example_reservoir/b237d57b.svg index 234d7c16f..34aca92c9 100644 --- a/dev/tutorial/example_reservoir/1e774439.svg +++ b/dev/tutorial/example_reservoir/b237d57b.svg @@ -1,46 +1,46 @@ - + - + - + - + - + - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/dev/tutorial/example_reservoir/be9e547c.svg b/dev/tutorial/example_reservoir/b70cc688.svg similarity index 84% rename from dev/tutorial/example_reservoir/be9e547c.svg rename to dev/tutorial/example_reservoir/b70cc688.svg index 050de1516..8d82e9eb0 100644 --- a/dev/tutorial/example_reservoir/be9e547c.svg +++ b/dev/tutorial/example_reservoir/b70cc688.svg @@ -1,109 +1,109 @@ - + - + - + - + - + - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/dev/tutorial/example_reservoir/6735fb31.svg b/dev/tutorial/example_reservoir/ccc1a9fc.svg similarity index 72% rename from dev/tutorial/example_reservoir/6735fb31.svg rename to dev/tutorial/example_reservoir/ccc1a9fc.svg index 3d40cd748..77c111714 100644 --- a/dev/tutorial/example_reservoir/6735fb31.svg +++ b/dev/tutorial/example_reservoir/ccc1a9fc.svg @@ -1,148 +1,148 @@ - + - + - + - + - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/dev/tutorial/example_reservoir/e5433df1.svg b/dev/tutorial/example_reservoir/e5433df1.svg deleted file mode 100644 index cc96b67bb..000000000 --- a/dev/tutorial/example_reservoir/e5433df1.svg +++ /dev/null @@ -1,86 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/dev/tutorial/example_reservoir/index.html b/dev/tutorial/example_reservoir/index.html index fad9e2a6e..fb6ba9e52 100644 --- a/dev/tutorial/example_reservoir/index.html +++ b/dev/tutorial/example_reservoir/index.html @@ -9,7 +9,7 @@ import DataFrames import HiGHS import Plots

Data

First, we need some data for the problem. For this tutorial, we'll write CSV files to a temporary directory from Julia. If you have an existing file, you could change the filename to point to that instead.

dir = mktempdir()
-filename = joinpath(dir, "example_reservoir.csv")
"/tmp/jl_v2lKk9/example_reservoir.csv"

Here is the data

csv_data = """
+filename = joinpath(dir, "example_reservoir.csv")
"/tmp/jl_34ylIT/example_reservoir.csv"

Here is the data

csv_data = """
 week,inflow,demand,cost
 1,3,7,10.2\n2,2,7.1,10.4\n3,3,7.2,10.6\n4,2,7.3,10.9\n5,3,7.4,11.2\n
 6,2,7.6,11.5\n7,3,7.8,11.9\n8,2,8.1,12.3\n9,3,8.3,12.7\n10,2,8.6,13.1\n
@@ -29,7 +29,7 @@
     Plots.plot(data[!, :cost]; ylabel = "Cost", xlabel = "Week");
     layout = (3, 1),
     legend = false,
-)
Example block output

The number of weeks will be useful later:

T = size(data, 1)
52

Deterministic JuMP model

To start, we construct a deterministic model in pure JuMP.

Create a JuMP model, using HiGHS as the optimizer:

model = Model(HiGHS.Optimizer)
+)
Example block output

The number of weeks will be useful later:

T = size(data, 1)
52

Deterministic JuMP model

To start, we construct a deterministic model in pure JuMP.

Create a JuMP model, using HiGHS as the optimizer:

model = Model(HiGHS.Optimizer)
 set_silent(model)

x_storage[t]: the amount of water in the reservoir at the start of stage t:

reservoir_max = 320.0
 @variable(model, 0 <= x_storage[1:T+1] <= reservoir_max)
53-element Vector{VariableRef}:
  x_storage[1]
@@ -197,13 +197,13 @@
   Dual objective value : 6.82910e+02
 
 * Work counters
-  Solve time (sec)   : 8.00610e-04
+  Solve time (sec)   : 8.95500e-04
   Simplex iterations : 53
   Barrier iterations : 0
   Node count         : -1
 

The total cost is:

objective_value(model)
682.9099999999999

Here's a plot of demand and generation:

Plots.plot(data[!, :demand]; label = "Demand", xlabel = "Week")
 Plots.plot!(value.(u_thermal); label = "Thermal")
-Plots.plot!(value.(u_flow); label = "Hydro")
Example block output

And here's the storage over time:

Plots.plot(value.(x_storage); label = "Storage", xlabel = "Week")
Example block output

Deterministic SDDP model

For the next step, we show how to decompose our JuMP model into SDDP.jl. It should obtain the same solution.

model = SDDP.LinearPolicyGraph(;
+Plots.plot!(value.(u_flow); label = "Hydro")
Example block output

And here's the storage over time:

Plots.plot(value.(x_storage); label = "Storage", xlabel = "Week")
Example block output

Deterministic SDDP model

For the next step, we show how to decompose our JuMP model into SDDP.jl. It should obtain the same solution.

model = SDDP.LinearPolicyGraph(;
     stages = T,
     sense = :Min,
     lower_bound = 0.0,
@@ -252,11 +252,11 @@
 -------------------------------------------------------------------
  iteration    simulation      bound        time (s)     solves  pid
 -------------------------------------------------------------------
-         1   1.079600e+03  3.157700e+02  4.220796e-02       104   1
-        10   6.829100e+02  6.829100e+02  1.370349e-01      1040   1
+         1   1.079600e+03  3.157700e+02  4.417706e-02       104   1
+        10   6.829100e+02  6.829100e+02  1.409280e-01      1040   1
 -------------------------------------------------------------------
 status         : iteration_limit
-total time (s) : 1.370349e-01
+total time (s) : 1.409280e-01
 total solves   : 1040
 best bound     :  6.829100e+02
 simulation ci  :  7.289889e+02 ± 7.726064e+01
@@ -279,9 +279,9 @@
 
 Plots.plot(data[!, :demand]; label = "Demand", xlabel = "Week")
 Plots.plot!(r_sim; label = "Thermal")
-Plots.plot!(u_sim; label = "Hydro")
Example block output

Perfect. That's the same as we got before.

Now let's look at x_storage. This is a little more complicated, because we need to grab the outgoing value of the state variable in each stage:

x_sim = [sim[:x_storage].out for sim in simulations[1]]
+Plots.plot!(u_sim; label = "Hydro")
Example block output

Perfect. That's the same as we got before.

Now let's look at x_storage. This is a little more complicated, because we need to grab the outgoing value of the state variable in each stage:

x_sim = [sim[:x_storage].out for sim in simulations[1]]
 
-Plots.plot(x_sim; label = "Storage", xlabel = "Week")
Example block output

Stochastic SDDP model

Now we add some randomness to our model. In each stage, we assume that the inflow could be: 2 units lower, with 30% probability; the same as before, with 40% probability; or 5 units higher, with 30% probability.

model = SDDP.LinearPolicyGraph(;
+Plots.plot(x_sim; label = "Storage", xlabel = "Week")
Example block output

Stochastic SDDP model

Now we add some randomness to our model. In each stage, we assume that the inflow could be: 2 units lower, with 30% probability; the same as before, with 40% probability; or 5 units higher, with 30% probability.

model = SDDP.LinearPolicyGraph(;
     stages = T,
     sense = :Min,
     lower_bound = 0.0,
@@ -335,23 +335,23 @@
 -------------------------------------------------------------------
  iteration    simulation      bound        time (s)     solves  pid
 -------------------------------------------------------------------
-         1   0.000000e+00  0.000000e+00  4.334593e-02       208   1
-        47   7.257579e+02  2.476231e+02  1.059568e+00      9776   1
-        87   3.281183e+02  2.639688e+02  2.084431e+00     18096   1
-       100   7.140000e+01  2.678261e+02  2.428722e+00     20800   1
+         1   5.630100e+02  1.043051e+02  4.839897e-02       208   1
+        45   6.671316e+02  2.504700e+02  1.066016e+00      9360   1
+        82   9.516355e+01  2.659068e+02  2.071876e+00     17056   1
+       100   1.130319e+02  2.699701e+02  2.597528e+00     20800   1
 -------------------------------------------------------------------
 status         : iteration_limit
-total time (s) : 2.428722e+00
+total time (s) : 2.597528e+00
 total solves   : 20800
-best bound     :  2.678261e+02
-simulation ci  :  3.064557e+02 ± 4.472909e+01
+best bound     :  2.699701e+02
+simulation ci  :  2.878926e+02 ± 4.042203e+01
 numeric issues : 0
 -------------------------------------------------------------------

Now simulate the policy. This time we do 100 replications because the policy is now stochastic instead of deterministic:

simulations =
     SDDP.simulate(model, 100, [:x_storage, :u_flow, :u_thermal, :ω_inflow]);

And let's plot the use of thermal generation in each replication:

plot = Plots.plot(data[!, :demand]; label = "Demand", xlabel = "Week")
 for simulation in simulations
     Plots.plot!(plot, [sim[:u_thermal] for sim in simulation]; label = "")
 end
-plot
Example block output

Viewing an interpreting static plots like this is difficult, particularly as the number of simulations grows. SDDP.jl includes an interactive SpaghettiPlot that makes things easier:

plot = SDDP.SpaghettiPlot(simulations)
+plot
Example block output

Viewing an interpreting static plots like this is difficult, particularly as the number of simulations grows. SDDP.jl includes an interactive SpaghettiPlot that makes things easier:

plot = SDDP.SpaghettiPlot(simulations)
 SDDP.add_spaghetti(plot; title = "Storage") do sim
     return sim[:x_storage].out
 end
@@ -427,42 +427,43 @@
 -------------------------------------------------------------------
  iteration    simulation      bound        time (s)     solves  pid
 -------------------------------------------------------------------
-         1   5.255687e+04  3.677313e+04  2.121270e-01      2291   1
-         4   2.083487e+05  8.746115e+04  1.363061e+00     14988   1
-         6   3.223610e+05  9.276224e+04  3.593576e+00     35586   1
-        11   1.201300e+05  9.320034e+04  4.862322e+00     45793   1
-        13   1.666789e+05  9.326220e+04  6.751442e+00     59527   1
-        16   1.716321e+05  9.331955e+04  8.701844e+00     72848   1
-        22   5.702629e+04  9.335642e+04  1.392841e+01    103442   1
-        26   1.215511e+05  9.336760e+04  1.929000e+01    130286   1
-        30   3.044993e+05  9.337416e+04  2.735747e+01    165450   1
-        34   3.617240e+05  9.337718e+04  3.623241e+01    196870   1
-        37   1.084632e+05  9.337911e+04  4.212221e+01    217055   1
-        43   3.617192e+05  9.338103e+04  5.188009e+01    248481   1
-        48   1.298353e+05  9.338456e+04  5.701967e+01    264096   1
-        52   1.391148e+05  9.338625e+04  6.280596e+01    280748   1
-        55   8.306582e+04  9.338686e+04  6.868203e+01    296981   1
-        56   3.203149e+05  9.338696e+04  7.385514e+01    310712   1
-        62   2.691538e+05  9.338806e+04  8.324105e+01    334858   1
-        65   6.040284e+04  9.338883e+04  8.839991e+01    347555   1
-        66   2.919177e+05  9.338926e+04  9.370320e+01    360246   1
-        71   2.145927e+05  9.339060e+04  1.019890e+02    379397   1
-        73   1.372034e+05  9.339070e+04  1.086500e+02    394379   1
-        77   7.365508e+04  9.339272e+04  1.142400e+02    406663   1
-        97   8.616771e+04  9.339489e+04  1.455912e+02    470787   1
-       100   3.928315e+04  9.339497e+04  1.527235e+02    484524   1
+         1   2.804100e+02  5.890209e+01  4.986382e-02       211   1
+         5   1.118528e+05  7.769672e+04  1.130307e+00     12079   1
+         7   3.074107e+05  9.149884e+04  2.485533e+00     25813   1
+        11   1.842863e+04  9.271590e+04  3.586766e+00     36225   1
+        15   1.552078e+04  9.312341e+04  4.630039e+00     44557   1
+        17   3.544782e+05  9.317381e+04  7.012162e+00     61619   1
+        19   1.336219e+05  9.322522e+04  8.111396e+00     69113   1
+        22   9.964271e+04  9.325388e+04  9.623285e+00     79314   1
+        30   4.765079e+04  9.335391e+04  1.462770e+01    108874   1
+        41   3.326940e+05  9.336278e+04  2.222214e+01    146763   1
+        43   1.532419e+05  9.336628e+04  2.868801e+01    170481   1
+        48   2.777906e+05  9.337515e+04  3.601744e+01    197536   1
+        52   1.211977e+05  9.337753e+04  4.139290e+01    216476   1
+        54   3.936783e+05  9.337783e+04  4.684129e+01    235202   1
+        60   2.848994e+05  9.338138e+04  5.289324e+01    255396   1
+        67   1.725143e+05  9.338381e+04  5.942574e+01    276009   1
+        70   1.669101e+05  9.338524e+04  6.451532e+01    291202   1
+        73   7.539478e+04  9.338579e+04  7.037460e+01    307643   1
+        80   6.868808e+04  9.338758e+04  7.620151e+01    323264   1
+        84   8.020504e+04  9.338788e+04  8.185919e+01    337836   1
+        88   7.946325e+04  9.338853e+04  8.784756e+01    352824   1
+        91   1.505061e+05  9.338979e+04  9.512746e+01    370513   1
+        96   2.513012e+05  9.339043e+04  1.032188e+02    389456   1
+        98   1.736876e+05  9.339087e+04  1.108766e+02    406726   1
+       100   2.943117e+04  9.339143e+04  1.129484e+02    411308   1
 -------------------------------------------------------------------
 status         : iteration_limit
-total time (s) : 1.527235e+02
-total solves   : 484524
-best bound     :  9.339497e+04
-simulation ci  :  1.095319e+05 ± 1.998967e+04
+total time (s) : 1.129484e+02
+total solves   : 411308
+best bound     :  9.339143e+04
+simulation ci  :  9.239141e+04 ± 1.767246e+04
 numeric issues : 0
 -------------------------------------------------------------------

When we simulate now, each trajectory will be a different length, because each cycle has a 95% probability of continuing and a 5% probability of stopping.

simulations = SDDP.simulate(model, 3);
 length.(simulations)
3-element Vector{Int64}:
-  780
- 1300
-  780

We can simulate a fixed number of cycles by passing a sampling_scheme:

simulations = SDDP.simulate(
+ 1248
+ 2756
+ 1508

We can simulate a fixed number of cycles by passing a sampling_scheme:

simulations = SDDP.simulate(
     model,
     100,
     [:x_storage, :u_flow];
@@ -499,4 +500,4 @@
         return sim[:u_flow]
     end;
     layout = (2, 1),
-)
Example block output

Next steps

Our model is very basic. There are many aspects that we could improve:

  • Can you add a second reservoir to make a river chain?

  • Can you modify the problem and data to use proper units, including a conversion between the volume of water flowing through the turbine and the electrical power output?

+)Example block output

Next steps

Our model is very basic. There are many aspects that we could improve:

  • Can you add a second reservoir to make a river chain?

  • Can you modify the problem and data to use proper units, including a conversion between the volume of water flowing through the turbine and the electrical power output?

diff --git a/dev/tutorial/first_steps/index.html b/dev/tutorial/first_steps/index.html index 51d5ad23f..9aaee0877 100644 --- a/dev/tutorial/first_steps/index.html +++ b/dev/tutorial/first_steps/index.html @@ -228,14 +228,14 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 2.500000e+04 2.500000e+03 3.380060e-03 12 1 - 10 1.250000e+04 8.333333e+03 1.312304e-02 120 1 + 1 1.250000e+04 2.500000e+03 3.525019e-03 12 1 + 10 7.500000e+03 8.333333e+03 1.363301e-02 120 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 1.312304e-02 +total time (s) : 1.363301e-02 total solves : 120 best bound : 8.333333e+03 -simulation ci : 8.250000e+03 ± 4.324479e+03 +simulation ci : 9.250000e+03 ± 2.428125e+03 numeric issues : 0 -------------------------------------------------------------------

There's a lot going on in this printout! Let's break it down.

The first section, "problem," gives some problem statistics. In this example there are 3 nodes, 1 state variable, and 27 scenarios ($3^3$). We haven't solved this problem before so there are no existing cuts.

The "options" section lists some options we are using to solve the problem. For more information on the numerical stability report, read the Numerical stability report section.

The "subproblem structure" section also needs explaining. This looks at all of the nodes in the policy graph and reports the minimum and maximum number of variables and each constraint type in the corresponding subproblem. In this case each subproblem has 7 variables and various numbers of different constraint types. Note that the exact numbers may not correspond to the formulation as you wrote it, because SDDP.jl adds some extra variables for the cost-to-go function.

Then comes the iteration log, which is the main part of the printout. It has the following columns:

  • iteration: the SDDP iteration
  • simulation: the cost of the single forward pass simulation for that iteration. This value is stochastic and is not guaranteed to improve over time. However, it's useful to check that the units are reasonable, and that it is not deterministic if you intended for the problem to be stochastic, etc.
  • bound: this is a lower bound (upper if maximizing) for the value of the optimal policy. This bound should be monotonically improving (increasing if minimizing, decreasing if maximizing), but in some cases it can temporarily worsen due to cut selection, especially in the early iterations of the algorithm.
  • time (s): the total number of seconds spent solving so far
  • solves: the total number of subproblem solves to date. This can be very large!
  • pid: the ID of the processor used to solve that iteration. This should be 1 unless you are using parallel computation.

In addition, if the first character of a line is , then SDDP.jl experienced numerical issues during the solve, but successfully recovered.

The printout finishes with some summary statistics:

  • status: why did the solver stop?
  • total time (s), best bound, and total solves are the values from the last iteration of the solve.
  • simulation ci: a confidence interval that estimates the quality of the policy from the Simulation column.
  • numeric issues: the number of iterations that experienced numerical issues.
Warning

The simulation ci result can be misleading if you run a small number of iterations, or if the initial simulations are very bad. On a more technical note, it is an in-sample simulation, which may not reflect the true performance of the policy. See Obtaining bounds for more details.

Obtaining the decision rule

After training a policy, we can create a decision rule using SDDP.DecisionRule:

rule = SDDP.DecisionRule(model; node = 1)
A decision rule for node 1

Then, to evaluate the decision rule, we use SDDP.evaluate:

solution = SDDP.evaluate(
     rule;
@@ -254,10 +254,10 @@
 replication = 1
 stage = 2
 simulations[replication][stage]
Dict{Symbol, Any} with 10 entries:
-  :volume             => State{Float64}(200.0, 100.0)
+  :volume             => State{Float64}(200.0, 150.0)
   :hydro_spill        => 0.0
-  :bellman_term       => 2500.0
-  :noise_term         => 50.0
+  :bellman_term       => 0.0
+  :noise_term         => 100.0
   :node_index         => 2
   :stage_objective    => 0.0
   :objective_state    => nothing
@@ -267,18 +267,18 @@
     return node[:volume].out
 end
3-element Vector{Float64}:
  200.0
- 100.0
-  50.0

Another is :thermal_generation.

thermal_generation = map(simulations[1]) do node
+ 150.0
+  -0.0

Another is :thermal_generation.

thermal_generation = map(simulations[1]) do node
     return node[:thermal_generation]
 end
3-element Vector{Float64}:
- 150.0
-   0.0
-   0.0

Obtaining bounds

Because the optimal policy is stochastic, one common approach to quantify the quality of the policy is to construct a confidence interval for the expected cost by summing the stage objectives along each simulation.

objectives = map(simulations) do simulation
+ 50.0
+  0.0
+  0.0

Obtaining bounds

Because the optimal policy is stochastic, one common approach to quantify the quality of the policy is to construct a confidence interval for the expected cost by summing the stage objectives along each simulation.

objectives = map(simulations) do simulation
     return sum(stage[:stage_objective] for stage in simulation)
 end
 
 μ, ci = SDDP.confidence_interval(objectives)
-println("Confidence interval: ", μ, " ± ", ci)
Confidence interval: 8400.0 ± 930.5440137984672

This confidence interval is an estimate for an upper bound of the policy's quality. We can calculate the lower bound using SDDP.calculate_bound.

println("Lower bound: ", SDDP.calculate_bound(model))
Lower bound: 8333.333333333332
Tip

The upper- and lower-bounds are reversed if maximizing, i.e., SDDP.calculate_bound. returns an upper bound.

Custom recorders

In addition to simulating the primal values of variables, we can also pass custom recorder functions. Each of these functions takes one argument, the JuMP subproblem corresponding to each node. This function gets called after we have solved each node as we traverse the policy graph in the simulation.

For example, the dual of the demand constraint (which we named demand_constraint) corresponds to the price we should charge for electricity, since it represents the cost of each additional unit of demand. To calculate this, we can go:

simulations = SDDP.simulate(
+println("Confidence interval: ", μ, " ± ", ci)
Confidence interval: 8550.0 ± 1000.0385649129394

This confidence interval is an estimate for an upper bound of the policy's quality. We can calculate the lower bound using SDDP.calculate_bound.

println("Lower bound: ", SDDP.calculate_bound(model))
Lower bound: 8333.333333333332
Tip

The upper- and lower-bounds are reversed if maximizing, i.e., SDDP.calculate_bound. returns an upper bound.

Custom recorders

In addition to simulating the primal values of variables, we can also pass custom recorder functions. Each of these functions takes one argument, the JuMP subproblem corresponding to each node. This function gets called after we have solved each node as we traverse the policy graph in the simulation.

For example, the dual of the demand constraint (which we named demand_constraint) corresponds to the price we should charge for electricity, since it represents the cost of each additional unit of demand. To calculate this, we can go:

simulations = SDDP.simulate(
     model,
     1;  ## Perform a single simulation
     custom_recorders = Dict{Symbol,Function}(
@@ -291,4 +291,4 @@
 end
3-element Vector{Float64}:
   50.0
   50.0
- 150.0

Extracting the marginal water values

Finally, we can use SDDP.ValueFunction and SDDP.evaluate to obtain and evaluate the value function at different points in the state-space.

Note

By "value function" we mean $\mathbb{E}_{j \in i^+, \varphi \in \Omega_j}[V_j(x^\prime, \varphi)]$, not the function $V_i(x, \omega)$.

First, we construct a value function from the first subproblem:

V = SDDP.ValueFunction(model; node = 1)
A value function for node 1

Then we can evaluate V at a point:

cost, price = SDDP.evaluate(V, Dict("volume" => 10))
(21499.999999999996, Dict(:volume => -99.99999999999999))

This returns the cost-to-go (cost), and the gradient of the cost-to-go function with respect to each state variable. Note that since we are minimizing, the price has a negative sign: each additional unit of water leads to a decrease in the expected long-run cost.

+ 150.0

Extracting the marginal water values

Finally, we can use SDDP.ValueFunction and SDDP.evaluate to obtain and evaluate the value function at different points in the state-space.

Note

By "value function" we mean $\mathbb{E}_{j \in i^+, \varphi \in \Omega_j}[V_j(x^\prime, \varphi)]$, not the function $V_i(x, \omega)$.

First, we construct a value function from the first subproblem:

V = SDDP.ValueFunction(model; node = 1)
A value function for node 1

Then we can evaluate V at a point:

cost, price = SDDP.evaluate(V, Dict("volume" => 10))
(21499.999999999996, Dict(:volume => -99.99999999999999))

This returns the cost-to-go (cost), and the gradient of the cost-to-go function with respect to each state variable. Note that since we are minimizing, the price has a negative sign: each additional unit of water leads to a decrease in the expected long-run cost.

diff --git a/dev/tutorial/inventory/1851bc25.svg b/dev/tutorial/inventory/273b59b4.svg similarity index 84% rename from dev/tutorial/inventory/1851bc25.svg rename to dev/tutorial/inventory/273b59b4.svg index 1ef25e2a1..d2328ddce 100644 --- a/dev/tutorial/inventory/1851bc25.svg +++ b/dev/tutorial/inventory/273b59b4.svg @@ -1,57 +1,57 @@ - + - + - + - + - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/dev/tutorial/inventory/98af5933.svg b/dev/tutorial/inventory/741393b0.svg similarity index 84% rename from dev/tutorial/inventory/98af5933.svg rename to dev/tutorial/inventory/741393b0.svg index eede4065a..37ea7817d 100644 --- a/dev/tutorial/inventory/98af5933.svg +++ b/dev/tutorial/inventory/741393b0.svg @@ -1,51 +1,51 @@ - + - + - + - + - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/dev/tutorial/inventory/index.html b/dev/tutorial/inventory/index.html index 882e1f195..a04180ae6 100644 --- a/dev/tutorial/inventory/index.html +++ b/dev/tutorial/inventory/index.html @@ -72,23 +72,24 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 2.730368e+05 4.573582e+04 2.103090e-02 212 1 - 56 1.333763e+05 1.443352e+05 1.031628e+00 15172 1 - 113 1.623866e+05 1.443371e+05 2.033933e+00 29456 1 - 174 1.567026e+05 1.443373e+05 3.041980e+00 42388 1 - 232 1.228079e+05 1.443373e+05 4.057865e+00 55784 1 - 286 1.627237e+05 1.443373e+05 5.051593e+00 67232 1 + 1 3.418789e+05 4.573582e+04 1.870894e-02 212 1 + 56 1.312713e+05 1.443368e+05 1.032509e+00 15172 1 + 112 1.216711e+05 1.443373e+05 2.050640e+00 29244 1 + 171 1.153553e+05 1.443373e+05 3.061592e+00 41752 1 + 224 1.020500e+05 1.443373e+05 4.067351e+00 54088 1 + 280 1.890395e+05 1.443374e+05 
5.074809e+00 65960 1 + 286 1.510184e+05 1.443374e+05 5.174048e+00 67232 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 5.051593e+00 +total time (s) : 5.174048e+00 total solves : 67232 -best bound : 1.443373e+05 -simulation ci : 1.479508e+05 ± 3.505903e+03 +best bound : 1.443374e+05 +simulation ci : 1.445408e+05 ± 3.345188e+03 numeric issues : 0 ------------------------------------------------------------------- -Confidence interval: 142289.58 ± 3920.19 -Lower bound: 144337.34

Plot the optimal inventory levels:

plt = SDDP.publication_plot(
+Confidence interval: 138958.63 ± 3723.42
+Lower bound: 144337.44

Plot the optimal inventory levels:

plt = SDDP.publication_plot(
     simulations;
     title = "x_inventory.out + u_buy.out",
     xlabel = "Stage",
@@ -96,7 +97,7 @@
     ylims = (0, 1_000),
 ) do data
     return data[:x_inventory].out + data[:u_buy].out
-end
Example block output

In the early stages, we indeed recover an order-up-to policy. However, there are end-of-horizon effects as the agent tries to optimize their decision making knowing that they have 10 realizations of demand.

Infinite horizon

We can remove the end-of-horizonn effects by considering an infinite horizon model. We assume a discount factor $\alpha=0.95$:

α = 0.95
+end
Example block output

In the early stages, we indeed recover an order-up-to policy. However, there are end-of-horizon effects as the agent tries to optimize their decision making knowing that they have 10 realizations of demand.

Infinite horizon

We can remove the end-of-horizonn effects by considering an infinite horizon model. We assume a discount factor $\alpha=0.95$:

α = 0.95
 graph = SDDP.LinearGraph(2)
 SDDP.add_edge(graph, 2 => 2, α)
 graph
Root
@@ -166,30 +167,28 @@
 -------------------------------------------------------------------
  iteration    simulation      bound        time (s)     solves  pid
 -------------------------------------------------------------------
-         1   3.247832e+06  4.707504e+04  4.018521e-02       694   1
-        34   2.175489e+05  3.061820e+05  1.046095e+00     14692   1
-        63   5.619092e+05  3.122724e+05  2.067367e+00     26145   1
-        83   8.071607e+05  3.126091e+05  3.070845e+00     36833   1
-       104   4.313111e+05  3.126584e+05  4.127327e+00     46955   1
-       123   2.815007e+05  3.126644e+05  5.128183e+00     55374   1
-       144   1.348958e+06  3.126649e+05  6.350076e+00     65286   1
-       163   1.631869e+05  3.126650e+05  7.369768e+00     72991   1
-       174   4.737552e+05  3.126650e+05  8.414378e+00     80163   1
-       190   7.722816e+05  3.126650e+05  9.479770e+00     86899   1
-       247   4.887447e+05  3.126650e+05  1.456315e+01    113395   1
-       279   1.116618e+06  3.126650e+05  1.971491e+01    132012   1
-       312   6.087447e+05  3.126650e+05  2.504867e+01    145800   1
-       332   6.159868e+05  3.126650e+05  3.049544e+01    157727   1
-       355   1.869292e+06  3.126650e+05  3.630772e+01    168586   1
-       374   1.113461e+06  3.126650e+05  4.220300e+01    177530   1
-       394   9.391868e+05  3.126650e+05  4.828403e+01    185341   1
-       400   1.731237e+05  3.126650e+05  5.038249e+01    187951   1
+         1   1.443632e+05  4.240397e+04  1.039004e-02       148   1
+        30   9.357057e+05  3.038036e+05  1.093769e+00     13932   1
+        61   1.145322e+06  3.122381e+05  2.215047e+00     26794   1
+        82   1.318644e+05  3.126027e+05  3.224195e+00     36979   1
+       105   5.981819e+05  3.126573e+05  4.285995e+00     46242   1
+       135   1.789341e+05  3.126647e+05  5.294752e+00     54924   1
+       160   2.031025e+05  3.126650e+05  6.322740e+00     63055   1
+       175   1.625131e+05  3.126650e+05  7.328955e+00     70336   1
+       188   1.318724e+06  3.126650e+05  8.548523e+00     78224   1
+       202   1.114513e+06  3.126650e+05  9.598627e+00     83866   1
+       259   4.654184e+05  3.126650e+05  1.460564e+01    109270   1
+       307   6.329132e+05  3.126650e+05  1.994067e+01    128491   1
+       338   1.111447e+05  3.126650e+05  2.496963e+01    141479   1
+       370   5.510605e+05  3.126650e+05  3.015878e+01    154846   1
+       396   3.984289e+05  3.126650e+05  3.536933e+01    166758   1
+       400   2.037763e+05  3.126650e+05  3.610123e+01    168358   1
 -------------------------------------------------------------------
 status         : iteration_limit
-total time (s) : 5.038249e+01
-total solves   : 187951
+total time (s) : 3.610123e+01
+total solves   : 168358
 best bound     :  3.126650e+05
-simulation ci  :  3.437536e+05 ± 3.240032e+04
+simulation ci  :  2.988692e+05 ± 2.628178e+04
 numeric issues : 0
 -------------------------------------------------------------------

Plot the optimal inventory levels:

plt = SDDP.publication_plot(
     simulations;
@@ -200,4 +199,4 @@
 ) do data
     return data[:x_inventory].out + data[:u_buy].out
 end
-Plots.hline!(plt, [662]; label = "Analytic solution")
Example block output

We again recover an order-up-to policy. The analytic solution is to order-up-to 662 units. We do not precisely recover this solution because we used a sample average approximation of 20 elements. If we increased the number of samples, our solution would approach the analytic solution.

+Plots.hline!(plt, [662]; label = "Analytic solution")Example block output

We again recover an order-up-to policy. The analytic solution is to order-up-to 662 units. We do not precisely recover this solution because we used a sample average approximation of 20 elements. If we increased the number of samples, our solution would approach the analytic solution.

diff --git a/dev/tutorial/markov_uncertainty/index.html b/dev/tutorial/markov_uncertainty/index.html index 005fd18d5..cd4a1b37a 100644 --- a/dev/tutorial/markov_uncertainty/index.html +++ b/dev/tutorial/markov_uncertainty/index.html @@ -85,14 +85,14 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 1.562500e+04 1.991887e+03 5.522013e-03 18 1 - 40 2.750000e+04 8.072917e+03 1.332040e-01 1320 1 + 1 3.750000e+04 1.991887e+03 5.151033e-03 18 1 + 40 3.375000e+04 8.072917e+03 1.328862e-01 1320 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 1.332040e-01 +total time (s) : 1.328862e-01 total solves : 1320 best bound : 8.072917e+03 -simulation ci : 8.007212e+03 ± 2.328351e+03 +simulation ci : 9.511572e+03 ± 2.649970e+03 numeric issues : 0 -------------------------------------------------------------------

Instead of performing a Monte Carlo simulation like the previous tutorials, we may want to simulate one particular sequence of noise realizations. This historical simulation can also be conducted by passing a SDDP.Historical sampling scheme to the sampling_scheme keyword of the SDDP.simulate function.

We can confirm that the historical sequence of nodes was visited by querying the :node_index key of the simulation results.

simulations = SDDP.simulate(
     model;
@@ -106,4 +106,4 @@
 [stage[:node_index] for stage in simulations[1]]
3-element Vector{Tuple{Int64, Int64}}:
  (1, 1)
  (2, 2)
- (3, 1)
+ (3, 1) diff --git a/dev/tutorial/mdps/index.html b/dev/tutorial/mdps/index.html index 0a0361fff..ff2dfaa03 100644 --- a/dev/tutorial/mdps/index.html +++ b/dev/tutorial/mdps/index.html @@ -61,11 +61,11 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 2.499895e+01 1.562631e+00 1.619101e-02 6 1 - 40 8.333333e+00 8.333333e+00 6.723690e-01 246 1 + 1 2.499895e+01 1.562631e+00 1.630497e-02 6 1 + 40 8.333333e+00 8.333333e+00 6.955659e-01 246 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 6.723690e-01 +total time (s) : 6.955659e-01 total solves : 246 best bound : 8.333333e+00 simulation ci : 8.810723e+00 ± 8.167195e-01 @@ -154,14 +154,14 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 0.000000e+00 9.000000e+00 2.183914e-03 3 1 - 44 2.400000e+01 6.561000e+00 7.216930e-01 3020 1 + 1 0.000000e+00 1.000000e+01 6.312847e-03 17 1 + 40 0.000000e+00 6.561000e+00 9.120228e-01 3214 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 7.216930e-01 -total solves : 3020 +total time (s) : 9.120228e-01 +total solves : 3214 best bound : 6.561000e+00 -simulation ci : 6.113636e+00 ± 3.054298e+00 +simulation ci : 9.575000e+00 ± 4.641862e+00 numeric issues : 0 -------------------------------------------------------------------

Simulating a cyclic policy graph requires an explicit sampling_scheme that does not terminate early based on the cycle probability:

simulations = SDDP.simulate(
     model,
@@ -179,4 +179,4 @@
 
 print(join([join(path[i, :], ' ') for i in 1:size(path, 1)], '\n'))
1 2 3 ⋅
 ⋅ ▩ 4 †
-† ⋅ 5 *
Tip

This formulation will likely struggle as the number of cells in the maze increases. Can you think of an equivalent formulation that uses fewer state variables?

+† ⋅ 5 *
Tip

This formulation will likely struggle as the number of cells in the maze increases. Can you think of an equivalent formulation that uses fewer state variables?

diff --git a/dev/tutorial/objective_states/index.html b/dev/tutorial/objective_states/index.html index 13a6162a7..76f0ac8f8 100644 --- a/dev/tutorial/objective_states/index.html +++ b/dev/tutorial/objective_states/index.html @@ -79,24 +79,26 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 2.246094e+04 5.000000e+03 2.144408e-02 39 1 - 40 -5.861978e-12 5.092593e+03 1.895840e-01 2160 1 + 1 6.703125e+03 3.160920e+03 2.269411e-02 39 1 + 210 9.281250e+03 5.092593e+03 1.023565e+00 9990 1 + 439 0.000000e+00 5.092593e+03 2.023999e+00 19521 1 + 597 9.765625e+03 5.092593e+03 2.728375e+00 25983 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 1.895840e-01 -total solves : 2160 +total time (s) : 2.728375e+00 +total solves : 25983 best bound : 5.092593e+03 -simulation ci : 5.251786e+03 ± 1.543723e+03 +simulation ci : 5.073010e+03 ± 3.046377e+02 numeric issues : 0 ------------------------------------------------------------------- Finished training and simulating.

To demonstrate how the objective states are updated, consider the sequence of noise observations:

[stage[:noise_term] for stage in simulations[1]]
3-element Vector{@NamedTuple{fuel::Float64, inflow::Float64}}:
  (fuel = 0.9, inflow = 0.0)
- (fuel = 1.1, inflow = 100.0)
- (fuel = 0.9, inflow = 50.0)

This, the fuel cost in the first stage should be 0.75 * 50 = 37.5. The fuel cost in the second stage should be 1.1 * 37.5 = 41.25. The fuel cost in the third stage should be 0.75 * 41.25 = 30.9375.

To confirm this, the values of the objective state in a simulation can be queried using the :objective_state key.

[stage[:objective_state] for stage in simulations[1]]
3-element Vector{Float64}:
+ (fuel = 1.25, inflow = 100.0)
+ (fuel = 1.1, inflow = 100.0)

This, the fuel cost in the first stage should be 0.75 * 50 = 37.5. The fuel cost in the second stage should be 1.1 * 37.5 = 41.25. The fuel cost in the third stage should be 0.75 * 41.25 = 30.9375.

To confirm this, the values of the objective state in a simulation can be queried using the :objective_state key.

[stage[:objective_state] for stage in simulations[1]]
3-element Vector{Float64}:
  45.0
- 49.50000000000001
- 44.550000000000004

Multi-dimensional objective states

You can construct multi-dimensional price processes using NTuples. Just replace every scalar value associated with the objective state by a tuple. For example, initial_value = 1.0 becomes initial_value = (1.0, 2.0).

Here is an example:

model = SDDP.LinearPolicyGraph(;
+ 56.25
+ 61.87500000000001

Multi-dimensional objective states

You can construct multi-dimensional price processes using NTuples. Just replace every scalar value associated with the objective state by a tuple. For example, initial_value = 1.0 becomes initial_value = (1.0, 2.0).

Here is an example:

model = SDDP.LinearPolicyGraph(;
     stages = 3,
     sense = :Min,
     lower_bound = 0.0,
@@ -170,18 +172,18 @@
 -------------------------------------------------------------------
  iteration    simulation      bound        time (s)     solves  pid
 -------------------------------------------------------------------
-         1   1.300000e+04  3.529412e+03  2.303314e-02        39   1
-        69   1.006250e+04  5.053782e+03  3.563740e-01      3591   1
+         1   1.100000e+04  3.209583e+03  2.326894e-02        39   1
+       194   0.000000e+00  5.135984e+03  9.376059e-01      9066   1
 -------------------------------------------------------------------
 status         : simulation_stopping
-total time (s) : 3.563740e-01
-total solves   : 3591
-best bound     :  5.053782e+03
-simulation ci  :  6.274615e+03 ± 9.638861e+02
+total time (s) : 9.376059e-01
+total solves   : 9066
+best bound     :  5.135984e+03
+simulation ci  :  5.339342e+03 ± 6.014698e+02
 numeric issues : 0
 -------------------------------------------------------------------
 
 Finished training and simulating.

This time, since our objective state is two-dimensional, the objective states are tuples with two elements:

[stage[:objective_state] for stage in simulations[1]]
3-element Vector{Tuple{Float64, Float64}}:
- (40.0, 50.0)
- (45.0, 40.0)
- (42.5, 45.0)

Warnings

There are number of things to be aware of when using objective states.

  • The key assumption is that price is independent of the states and actions in the model.

    That means that the price cannot appear in any @constraints. Nor can you use any @variables in the update function.

  • Choosing an appropriate Lipschitz constant is difficult.

    The points discussed in Choosing an initial bound are relevant. The Lipschitz constant should not be chosen as large as possible (since this will help with convergence and the numerical issues discussed above), but if chosen to small, it may cut of the feasible region and lead to a sub-optimal solution.

  • You need to ensure that the cost-to-go function is concave with respect to the objective state before the update.

    If the update function is linear, this is always the case. In some situations, the update function can be nonlinear (e.g., multiplicative as we have above). In general, placing constraints on the price (e.g., clamp(price, 0, 1)) will destroy concavity. Caveat emptor. It's up to you if this is a problem. If it isn't you'll get a good heuristic with no guarantee of global optimality.

+ (45.0, 50.0) + (47.5, 45.0) + (58.75, 47.5)

Warnings

There are number of things to be aware of when using objective states.

  • The key assumption is that price is independent of the states and actions in the model.

    That means that the price cannot appear in any @constraints. Nor can you use any @variables in the update function.

  • Choosing an appropriate Lipschitz constant is difficult.

    The points discussed in Choosing an initial bound are relevant. The Lipschitz constant should not be chosen as large as possible (since this will help with convergence and the numerical issues discussed above), but if chosen to small, it may cut of the feasible region and lead to a sub-optimal solution.

  • You need to ensure that the cost-to-go function is concave with respect to the objective state before the update.

    If the update function is linear, this is always the case. In some situations, the update function can be nonlinear (e.g., multiplicative as we have above). In general, placing constraints on the price (e.g., clamp(price, 0, 1)) will destroy concavity. Caveat emptor. It's up to you if this is a problem. If it isn't you'll get a good heuristic with no guarantee of global optimality.

diff --git a/dev/tutorial/objective_uncertainty/index.html b/dev/tutorial/objective_uncertainty/index.html index 0b240cfa9..5f9d78fdb 100644 --- a/dev/tutorial/objective_uncertainty/index.html +++ b/dev/tutorial/objective_uncertainty/index.html @@ -82,16 +82,16 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 1.875000e+04 3.958333e+03 3.507137e-03 12 1 - 40 1.125000e+04 1.062500e+04 6.445909e-02 642 1 + 1 2.500000e+04 3.958333e+03 3.526926e-03 12 1 + 40 5.000000e+03 1.062500e+04 6.979108e-02 642 1 ------------------------------------------------------------------- status : simulation_stopping -total time (s) : 6.445909e-02 +total time (s) : 6.979108e-02 total solves : 642 best bound : 1.062500e+04 -simulation ci : 1.009907e+04 ± 2.630322e+03 +simulation ci : 1.313702e+04 ± 2.776498e+03 numeric issues : 0 ------------------------------------------------------------------- -Confidence interval: 10371.25 ± 739.05 -Lower bound: 10625.0 +Confidence interval: 11135.0 ± 755.16 +Lower bound: 10625.0 diff --git a/dev/tutorial/pglib_opf/index.html b/dev/tutorial/pglib_opf/index.html index 7bbda5837..a313b19cb 100644 --- a/dev/tutorial/pglib_opf/index.html +++ b/dev/tutorial/pglib_opf/index.html @@ -61,24 +61,25 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 3.117488e+06 6.644081e+04 4.093120e-01 115 1 - 5 4.172784e+05 3.548923e+05 1.589070e+00 315 1 - 8 1.468074e+06 3.858370e+05 3.510332e+00 632 1 - 10 4.215058e+05 4.113363e+05 4.284235e+00 762 1 + 1 5.258639e+05 6.716719e+04 1.439750e-01 35 1 + 4 1.359574e+06 3.992498e+05 2.520720e+00 416 1 + 6 2.723873e+05 4.062831e+05 3.753092e+00 586 1 + 8 1.418149e+06 4.338115e+05 5.991363e+00 952 1 + 10 7.243390e+04 4.372813e+05 7.110559e+00 1122 1 
------------------------------------------------------------------- status : iteration_limit -total time (s) : 4.284235e+00 -total solves : 762 -best bound : 4.113363e+05 -simulation ci : 6.709825e+05 ± 5.910293e+05 +total time (s) : 7.110559e+00 +total solves : 1122 +best bound : 4.372813e+05 +simulation ci : 9.063244e+05 ± 4.927642e+05 numeric issues : 0 -------------------------------------------------------------------

To more accurately simulate the dynamics of the problem, a common approach is to write the cuts representing the policy to a file, and then read them into a non-convex model:

SDDP.write_cuts_to_file(convex, "convex.cuts.json")
 non_convex = build_model(PowerModels.ACPPowerModel)
 SDDP.read_cuts_from_file(non_convex, "convex.cuts.json")

Now we can simulate non_convex to evaluate the policy.

result = SDDP.simulate(non_convex, 1)
1-element Vector{Vector{Dict{Symbol, Any}}}:
- [Dict(:bellman_term => 397818.47031456896, :noise_term => 0, :node_index => 1, :stage_objective => 17583.232077542885, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 402660.97158015985, :noise_term => 0, :node_index => 1, :stage_objective => 22343.188045965955, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 411579.84125773114, :noise_term => 0, :node_index => 1, :stage_objective => 27420.553493253003, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 402660.97158095386, :noise_term => 2, :node_index => 1, :stage_objective => 25694.76518752882, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 411579.84126227174, :noise_term => 0, :node_index => 1, :stage_objective => 27420.553493253006, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 399354.60033408424, :noise_term => 5, :node_index => 1, :stage_objective => 21433.37520168067, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 400881.86912506283, :noise_term => 2, :node_index => 1, :stage_objective => 21433.375201681036, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 402409.1379160405, :noise_term => 2, :node_index => 1, :stage_objective => 21433.37520168182, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 398858.28681281704, :noise_term => 5, :node_index => 1, :stage_objective => 20255.52248890431, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 398313.0035766029, :noise_term => 5, :node_index => 1, :stage_objective => 17583.232079675614, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 399704.033169528, :noise_term => 2, :node_index => 1, :stage_objective => 21433.375201680836, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 398858.2867642921, :noise_term => 5, :node_index => 1, 
:stage_objective => 17732.52817957814, :objective_state => nothing, :belief => Dict(1 => 1.0))]

A problem with reading and writing the cuts to file is that the cuts have been generated from trial points of the convex model. Therefore, the policy may be arbitrarily bad at points visited by the non-convex model.

Training a non-convex model

We can also build and train a non-convex formulation of the optimal power flow problem.

The problem with the non-convex model is that because it is non-convex, SDDP.jl may find a sub-optimal policy. Therefore, it may over-estimate the true cost of operation.

non_convex = build_model(PowerModels.ACPPowerModel)
+ [Dict(:bellman_term => 418323.9745907233, :noise_term => 2, :node_index => 1, :stage_objective => 19605.677062319053, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 419783.27782484266, :noise_term => 2, :node_index => 1, :stage_objective => 21433.37409470581, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 425462.8942970197, :noise_term => 0, :node_index => 1, :stage_objective => 21433.375466556274, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 427134.0134909716, :noise_term => 2, :node_index => 1, :stage_objective => 21433.37546655634, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 427996.00322167244, :noise_term => 2, :node_index => 1, :stage_objective => 22472.458323844683, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 430787.58478655294, :noise_term => 0, :node_index => 1, :stage_objective => 27420.553477127232, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 427996.00322415534, :noise_term => 2, :node_index => 1, :stage_objective => 25694.765225825693, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 422896.5929926941, :noise_term => 5, :node_index => 1, :stage_objective => 21433.37409470927, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 418323.97499019356, :noise_term => 5, :node_index => 1, :stage_objective => 21413.659627281315, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 423817.92372080294, :noise_term => 0, :node_index => 1, :stage_objective => 21433.374094720275, :objective_state => nothing, :belief => Dict(1 => 1.0))  …  Dict(:bellman_term => 418323.97499019344, :noise_term => 5, :node_index => 1, :stage_objective => 21413.659625704462, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 417701.5999305695, :noise_term => 5, :node_index => 
1, :stage_objective => 17609.312375444722, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 423137.43522056914, :noise_term => 0, :node_index => 1, :stage_objective => 21433.37409470997, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 418544.77022264735, :noise_term => 5, :node_index => 1, :stage_objective => 21433.374094696803, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 424044.61758503545, :noise_term => 0, :node_index => 1, :stage_objective => 21433.3754665555, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 419446.0525659325, :noise_term => 5, :node_index => 1, :stage_objective => 21433.374094705276, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 425076.72086493287, :noise_term => 0, :node_index => 1, :stage_objective => 21433.37546655626, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 420347.3349092156, :noise_term => 5, :node_index => 1, :stage_objective => 21433.374094706483, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 421806.6381520476, :noise_term => 2, :node_index => 1, :stage_objective => 21433.37409470773, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 423265.9413948768, :noise_term => 2, :node_index => 1, :stage_objective => 21433.374094710503, :objective_state => nothing, :belief => Dict(1 => 1.0))]

A problem with reading and writing the cuts to file is that the cuts have been generated from trial points of the convex model. Therefore, the policy may be arbitrarily bad at points visited by the non-convex model.

Training a non-convex model

We can also build and train a non-convex formulation of the optimal power flow problem.

The problem with the non-convex model is that because it is non-convex, SDDP.jl may find a sub-optimal policy. Therefore, it may over-estimate the true cost of operation.

non_convex = build_model(PowerModels.ACPPowerModel)
 SDDP.train(non_convex; iteration_limit = 10)
 result = SDDP.simulate(non_convex, 1)
1-element Vector{Vector{Dict{Symbol, Any}}}:
- [Dict(:bellman_term => 374564.07356375566, :noise_term => 5, :node_index => 1, :stage_objective => 17559.278882526913, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 376990.65007136966, :noise_term => 2, :node_index => 1, :stage_objective => 17560.6583350399, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 379553.91358410136, :noise_term => 2, :node_index => 1, :stage_objective => 17561.932427333126, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 382265.6152179062, :noise_term => 2, :node_index => 1, :stage_objective => 17564.248169608098, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 383114.63346774597, :noise_term => 2, :node_index => 1, :stage_objective => 21298.541496329763, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 455594.0820490739, :noise_term => 0, :node_index => 1, :stage_objective => 27420.55350575845, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 697068.3058776564, :noise_term => 0, :node_index => 1, :stage_objective => 27420.553516855085, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 700287.2111100241, :noise_term => 0, :node_index => 1, :stage_objective => 728861.327780984, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 391608.66850450746, :noise_term => 2, :node_index => 1, :stage_objective => 27420.553505758322, :objective_state => nothing, :belief => Dict(1 => 1.0))]

Combining convex and non-convex models

To summarize, training with the convex model constructs cuts at points that may never be visited by the non-convex model, and training with the non-convex model may construct arbitrarily poor cuts because a key assumption of SDDP is convexity.

As a compromise, we can train a policy using a combination of the convex and non-convex models; we'll use the non-convex model to generate trial points on the forward pass, and we'll use the convex model to build cuts on the backward pass.

convex = build_model(PowerModels.DCPPowerModel)
A policy graph with 1 nodes.
+ [Dict(:bellman_term => 401904.413218967, :noise_term => 0, :node_index => 1, :stage_objective => 21433.3746583674, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 407198.2748244479, :noise_term => 0, :node_index => 1, :stage_objective => 21804.883853902094, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 407198.27482644125, :noise_term => 2, :node_index => 1, :stage_objective => 23580.697452972552, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 409696.6815200375, :noise_term => 0, :node_index => 1, :stage_objective => 27420.553333940356, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 412195.0882282316, :noise_term => 0, :node_index => 1, :stage_objective => 27420.5533339405, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 407661.30879763776, :noise_term => 2, :node_index => 1, :stage_objective => 27420.5533339403, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 402827.8218545093, :noise_term => 5, :node_index => 1, :stage_objective => 21433.37465836756, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 398189.04030965315, :noise_term => 5, :node_index => 1, :stage_objective => 21433.374658366258, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 399662.996160627, :noise_term => 2, :node_index => 1, :stage_objective => 21433.374658367, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 405212.11027548654, :noise_term => 0, :node_index => 1, :stage_objective => 21433.37465836819, :objective_state => nothing, :belief => Dict(1 => 1.0))  …  Dict(:bellman_term => 407198.27482644125, :noise_term => 2, :node_index => 1, :stage_objective => 23580.69745713663, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 409696.6815200375, :noise_term => 0, :node_index => 1, 
:stage_objective => 27420.553333940356, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 407198.2748288509, :noise_term => 2, :node_index => 1, :stage_objective => 25694.765418652463, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 407198.2748264413, :noise_term => 2, :node_index => 1, :stage_objective => 23580.69745713663, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 407198.2748264413, :noise_term => 2, :node_index => 1, :stage_objective => 23580.697454861143, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 409696.6815200375, :noise_term => 0, :node_index => 1, :stage_objective => 27420.553333940356, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 407198.27482885093, :noise_term => 2, :node_index => 1, :stage_objective => 25694.765418652365, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 407198.2748264412, :noise_term => 2, :node_index => 1, :stage_objective => 23580.697457136812, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 407198.27482644137, :noise_term => 2, :node_index => 1, :stage_objective => 23580.69745486098, :objective_state => nothing, :belief => Dict(1 => 1.0)), Dict(:bellman_term => 407198.27482644113, :noise_term => 2, :node_index => 1, :stage_objective => 23580.697454861303, :objective_state => nothing, :belief => Dict(1 => 1.0))]

Combining convex and non-convex models

To summarize, training with the convex model constructs cuts at points that may never be visited by the non-convex model, and training with the non-convex model may construct arbitrarily poor cuts because a key assumption of SDDP is convexity.

As a compromise, we can train a policy using a combination of the convex and non-convex models; we'll use the non-convex model to generate trial points on the forward pass, and we'll use the convex model to build cuts on the backward pass.

convex = build_model(PowerModels.DCPPowerModel)
A policy graph with 1 nodes.
  Node indices: 1
 
non_convex = build_model(PowerModels.ACPPowerModel)
A policy graph with 1 nodes.
  Node indices: 1
@@ -113,16 +114,16 @@
 -------------------------------------------------------------------
  iteration    simulation      bound        time (s)     solves  pid
 -------------------------------------------------------------------
-         1   3.748481e+06  6.717321e+04  4.102278e-01        69   1
-         4   1.861689e+06  2.354052e+05  1.924735e+00       228   1
-         6   1.251373e+06  3.968360e+05  4.189864e+00       450   1
-         9   1.887933e+06  3.992425e+05  5.804354e+00       594   1
-        10   4.030031e+05  4.190937e+05  6.450608e+00       654   1
+         1   9.327645e+04  6.695882e+04  2.322772e-01        18   1
+         2   6.655103e+06  1.011807e+05  1.932665e+00       189   1
+         5   5.528351e+05  2.933743e+05  3.062281e+00       306   1
+         7   5.452180e+06  3.906191e+05  4.611054e+00       456   1
+        10   1.593956e+05  4.079643e+05  5.614497e+00       555   1
 -------------------------------------------------------------------
 status         : iteration_limit
-total time (s) : 6.450608e+00
-total solves   : 654
-best bound     :  4.190937e+05
-simulation ci  :  1.225850e+06 ± 7.384072e+05
+total time (s) : 5.614497e+00
+total solves   : 555
+best bound     :  4.079643e+05
+simulation ci  :  1.394250e+06 ± 1.535039e+06
 numeric issues : 0
--------------------------------------------------------------------

In practice, if we were to simulate non_convex now, we should obtain a better policy than either of the two previous approaches.

+-------------------------------------------------------------------

In practice, if we were to simulate non_convex now, we should obtain a better policy than either of the two previous approaches.

diff --git a/dev/tutorial/plotting/69a2680c.svg b/dev/tutorial/plotting/07c31bb8.svg similarity index 84% rename from dev/tutorial/plotting/69a2680c.svg rename to dev/tutorial/plotting/07c31bb8.svg index c43bce02f..20132e92d 100644 --- a/dev/tutorial/plotting/69a2680c.svg +++ b/dev/tutorial/plotting/07c31bb8.svg @@ -1,84 +1,84 @@ - + - + - + - + - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/dev/tutorial/plotting/index.html b/dev/tutorial/plotting/index.html index 79f384697..eb31eac57 100644 --- a/dev/tutorial/plotting/index.html +++ b/dev/tutorial/plotting/index.html @@ -76,14 +76,14 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 4.375000e+04 1.991887e+03 1.403499e-02 18 1 - 20 1.875000e+03 8.072917e+03 5.001402e-02 360 1 + 1 9.375000e+03 1.991887e+03 1.412082e-02 18 1 + 20 5.000000e+03 8.072917e+03 5.055285e-02 360 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 5.001402e-02 +total time (s) : 5.055285e-02 total solves : 360 best bound : 8.072917e+03 -simulation ci : 1.074973e+04 ± 5.181002e+03 +simulation ci : 7.957724e+03 ± 2.914884e+03 numeric issues : 0 ------------------------------------------------------------------- @@ -106,4 +106,4 @@ xlabel = "Stage", ylims = (0, 200), layout = (1, 2), -)Example block output

You can save this plot as a PDF using the Plots.jl function savefig:

Plots.savefig("my_picture.pdf")

Plotting the value function

You can obtain an object representing the value function of a node using SDDP.ValueFunction.

V = SDDP.ValueFunction(model[(1, 1)])
A value function for node (1, 1)

The value function can be evaluated using SDDP.evaluate.

SDDP.evaluate(V; volume = 1)
(23019.270833333332, Dict(:volume => -157.8125))

evaluate returns the height of the value function, and a subgradient with respect to the convex state variables.

You can also plot the value function using SDDP.plot

SDDP.plot(V, volume = 0:200, filename = "value_function.html")

This should open a webpage that looks like this one.

Convergence dashboard

If the text-based logging isn't to your liking, you can open a visualization of the training by passing dashboard = true to SDDP.train.

SDDP.train(model; dashboard = true)

By default, dashboard = false because there is an initial overhead associated with opening and preparing the plot.

Warning

The dashboard is experimental. There are known bugs associated with it, e.g., SDDP.jl#226.

+)Example block output

You can save this plot as a PDF using the Plots.jl function savefig:

Plots.savefig("my_picture.pdf")

Plotting the value function

You can obtain an object representing the value function of a node using SDDP.ValueFunction.

V = SDDP.ValueFunction(model[(1, 1)])
A value function for node (1, 1)

The value function can be evaluated using SDDP.evaluate.

SDDP.evaluate(V; volume = 1)
(23019.270833333332, Dict(:volume => -157.8125))

evaluate returns the height of the value function, and a subgradient with respect to the convex state variables.

You can also plot the value function using SDDP.plot

SDDP.plot(V, volume = 0:200, filename = "value_function.html")

This should open a webpage that looks like this one.

Convergence dashboard

If the text-based logging isn't to your liking, you can open a visualization of the training by passing dashboard = true to SDDP.train.

SDDP.train(model; dashboard = true)

By default, dashboard = false because there is an initial overhead associated with opening and preparing the plot.

Warning

The dashboard is experimental. There are known bugs associated with it, e.g., SDDP.jl#226.

diff --git a/dev/tutorial/spaghetti_plot.html b/dev/tutorial/spaghetti_plot.html index e2af5e13c..4ca3339c9 100644 --- a/dev/tutorial/spaghetti_plot.html +++ b/dev/tutorial/spaghetti_plot.html @@ -230,7 +230,7 @@
diff --git a/dev/tutorial/warnings/index.html b/dev/tutorial/warnings/index.html index b0d6c957d..1e55c25ec 100644 --- a/dev/tutorial/warnings/index.html +++ b/dev/tutorial/warnings/index.html @@ -89,11 +89,11 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 6.500000e+00 3.000000e+00 3.060102e-03 6 1 - 5 3.500000e+00 3.500000e+00 5.841017e-03 30 1 + 1 6.500000e+00 3.000000e+00 3.360033e-03 6 1 + 5 3.500000e+00 3.500000e+00 6.263018e-03 30 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 5.841017e-03 +total time (s) : 6.263018e-03 total solves : 30 best bound : 3.500000e+00 simulation ci : 4.100000e+00 ± 1.176000e+00 @@ -134,13 +134,13 @@ ------------------------------------------------------------------- iteration simulation bound time (s) solves pid ------------------------------------------------------------------- - 1 6.500000e+00 1.100000e+01 2.971888e-03 6 1 - 5 5.500000e+00 1.100000e+01 5.368948e-03 30 1 + 1 6.500000e+00 1.100000e+01 2.955914e-03 6 1 + 5 5.500000e+00 1.100000e+01 5.394936e-03 30 1 ------------------------------------------------------------------- status : iteration_limit -total time (s) : 5.368948e-03 +total time (s) : 5.394936e-03 total solves : 30 best bound : 1.100000e+01 simulation ci : 5.700000e+00 ± 3.920000e-01 numeric issues : 0 --------------------------------------------------------------------

How do we tell which is more appropriate? There are a few clues that you should look out for.

  • The bound converges to a value above (if minimizing) the simulated cost of the policy. In this case, the problem is deterministic, so it is easy to tell. But you can also check by performing a Monte Carlo simulation like we did in An introduction to SDDP.jl.

  • The bound converges to different values when we change the bound. This is another clear give-away. The bound provided by the user is only used in the initial iterations. It should not change the value of the converged policy. Thus, if you don't know an appropriate value for the bound, choose an initial value, and then increase (or decrease) the value of the bound to confirm that the value of the policy doesn't change.

  • The bound converges to a value close to the bound provided by the user. This varies between models, but notice that 11.0 is quite close to 10.0 compared with 3.5 and 0.0.

+-------------------------------------------------------------------

How do we tell which is more appropriate? There are a few clues that you should look out for.

  • The bound converges to a value above (if minimizing) the simulated cost of the policy. In this case, the problem is deterministic, so it is easy to tell. But you can also check by performing a Monte Carlo simulation like we did in An introduction to SDDP.jl.

  • The bound converges to different values when we change the bound. This is another clear give-away. The bound provided by the user is only used in the initial iterations. It should not change the value of the converged policy. Thus, if you don't know an appropriate value for the bound, choose an initial value, and then increase (or decrease) the value of the bound to confirm that the value of the policy doesn't change.

  • The bound converges to a value close to the bound provided by the user. This varies between models, but notice that 11.0 is quite close to 10.0 compared with 3.5 and 0.0.