Fix SimulatorSamplingScheme for deterministic nodes and update docs (#…
odow authored Nov 13, 2023
1 parent 295bd2c commit a33cd24
Showing 3 changed files with 76 additions and 5 deletions.
7 changes: 6 additions & 1 deletion docs/src/tutorial/example_milk_producer.jl
@@ -161,8 +161,13 @@ model = SDDP.PolicyGraph(
x_stock.in + ω_production + u_spot_buy - x_forward[1].in - u_spot_sell
)
## The random variables. `price` comes from the Markov node
+##
+## !!! warning
+##     Each element of Ω MUST be a tuple with 1 or 2 values, where the first
+##     value is `price` and the second value is the random variable for the
+##     current node. If the node is deterministic, use `Ω = [(price,)]`.
Ω = [(price, p) for p in Ω_production]
-SDDP.parameterize(sp, Ω) do ω::Tuple{Float64,Float64}
+SDDP.parameterize(sp, Ω) do ω
## Fix the ω_production variable
fix(ω_production, ω[2])
@stageobjective(
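Why the annotation had to go: with a deterministic node, the sampling scheme
now produces one-value tuples, and an anonymous function annotated
`ω::Tuple{Float64,Float64}` will not accept them. A toy sketch of the
mechanics, using illustrative values:

# `f` mimics the old annotated do-block; `g` mimics the new unannotated one.
f = (ω::Tuple{Float64,Float64}) -> ω[2]
g = ω -> length(ω) == 1 ? nothing : ω[2]
f((1.5, 0.3))  # returns 0.3
g((1.5,))      # returns nothing: the deterministic case is handled
# f((1.5,))    # would throw a MethodError: no method matching a 1-tuple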
10 changes: 6 additions & 4 deletions src/plugins/sampling_schemes.jl
@@ -481,8 +481,9 @@ which returns a `Vector{Float64}` when called with no arguments like
This sampling scheme must be used with a Markovian graph constructed from the
same `simulator`.
-The sample space for [`SDDP.parameterize`](@ref) must be a tuple in which the
-first element is the Markov state.
+The sample space for [`SDDP.parameterize`](@ref) must be a tuple with 1 or 2
+values: the first is the Markov state and the second is the random variable
+for the current node. If the node is deterministic, use `Ω = [(markov_state,)]`.
This sampling scheme generates a new scenario by calling `simulator()`, and then
picking the sequence of nodes in the Markovian graph that is closest to the new
@@ -508,7 +509,7 @@ julia> model = SDDP.PolicyGraph(
@variable(sp, x >= 0, SDDP.State, initial_value = 1)
@variable(sp, u >= 0)
@constraint(sp, x.out == x.in - u)
-# Elements of Ω must be a tuple in which `markov_state` is the first
+# Elements of Ω MUST be a tuple in which `markov_state` is the first
# element.
Ω = [(markov_state, (u = u_max,)) for u_max in (0.0, 0.5)]
SDDP.parameterize(sp, Ω) do (markov_state, ω)
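The docstring example above covers only the two-value case. A minimal sketch
of the deterministic variant, assuming the same `simulator` signature; the
price process, budget, and scenario counts are illustrative:

using SDDP

simulator() = 50.0 .+ cumsum(rand([-10.0, 0.1, 9.6], 3))

model = SDDP.PolicyGraph(
    SDDP.MarkovianGraph(simulator; budget = 8, scenarios = 30);
    lower_bound = 0.0,
    direct_mode = false,
) do sp, node
    t, markov_state = node
    @variable(sp, x >= 0, SDDP.State, initial_value = 1)
    # Deterministic node: one-value tuples, so there is no ω[2] to fix.
    SDDP.parameterize(sp, [(markov_state,)]) do ω
        SDDP.@stageobjective(sp, markov_state * x.out)
    end
end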
@@ -559,7 +560,8 @@ function sample_scenario(
noise_terms = get_noise_terms(InSampleMonteCarlo(), node, node_index)
noise = sample_noise(noise_terms)
@assert noise[1] == node_index[2]
-push!(scenario_path, (node_index, (value, noise[2])))
+ω = length(noise) == 1 ? (value,) : (value, noise[2])
+push!(scenario_path, (node_index, ω))
end
return scenario_path, false
end
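The change to `sample_scenario` in isolation: `noise` is an element of the
node's sample space, so its length says whether the node carries its own
random variable. A quick check with illustrative values:

value = 50.1  # the simulator's value for this stage
for noise in ((50.1,), (50.1, 2))
    ω = length(noise) == 1 ? (value,) : (value, noise[2])
    @show ω   # (50.1,) for the deterministic node, (50.1, 2) otherwise
end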
64 changes: 64 additions & 0 deletions test/plugins/sampling_schemes.jl
@@ -318,6 +318,70 @@ function test_OutOfSampleMonteCarlo_initial_node()
end
end

function test_SimulatorSamplingScheme()
function simulator()
inflow = zeros(3)
current = 50.0
Ω = [-10.0, 0.1, 9.6]
for t in 1:3
current += rand(Ω)
inflow[t] = current
end
return inflow
end
graph = SDDP.MarkovianGraph(simulator; budget = 8, scenarios = 30)
model = SDDP.PolicyGraph(
graph,
lower_bound = 0.0,
direct_mode = false,
) do sp, node
t, price = node
@variable(sp, 0 <= x <= 1, SDDP.State, initial_value = 0)
SDDP.parameterize(sp, [(price,)]) do ω
return SDDP.@stageobjective(sp, price * x.out)
end
end
sampler = SDDP.SimulatorSamplingScheme(simulator)
scenario, _ = SDDP.sample_scenario(model, sampler)
@test length(scenario) == 3
@test haskey(graph.nodes, scenario[1][1])
@test scenario[1][2] in ((40.0,), (50.1,), (59.6,))
return
end

function test_SimulatorSamplingScheme_with_noise()
function simulator()
inflow = zeros(3)
current = 50.0
Ω = [-10.0, 0.1, 9.6]
for t in 1:3
current += rand(Ω)
inflow[t] = current
end
return inflow
end
graph = SDDP.MarkovianGraph(simulator; budget = 8, scenarios = 30)
model = SDDP.PolicyGraph(
graph,
lower_bound = 0.0,
direct_mode = false,
) do sp, node
t, price = node
@variable(sp, 0 <= x <= 1, SDDP.State, initial_value = 0)
SDDP.parameterize(sp, [(price, i) for i in 1:2]) do ω
return SDDP.@stageobjective(sp, price * x.out + ω[2])
end
end
sampler = SDDP.SimulatorSamplingScheme(simulator)
scenario, _ = SDDP.sample_scenario(model, sampler)
@test length(scenario) == 3
@test haskey(graph.nodes, scenario[1][1])
@test scenario[1][2] isa Tuple{Float64,Int}
@test scenario[1][2][1] in (40.0, 50.1, 59.6)
@test scenario[1][2][2] in 1:2
return
end

end # module

TestSamplingSchemes.runtests()
