-
Notifications
You must be signed in to change notification settings - Fork 62
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Historical Simulation #707
Comments
How did you define the
The historical simulation needs to be a vector of Your nodes like like they are Markovian Let me know if parts of the documentation are not clear and I can improve. |
I used a Markov chain for the nodes because of nonlinearity in constraint. When I pass nothing as the random variable I get this error
#Function to generate price scenerios (Monte Carlo Simulation)
function simulator()
μ, α = 0, 9.05372307007077
num_samples = 10000
samples = μ .+ α * randn(num_samples)
noise = samples
ε = collect(Float64, noise)
VRE = data.VRE
Load = data.Load
C1 = data.C1
C2 = data.C2
C3 = data.C3
C4 = data.C4
S1 = data.S1
S2 = data.S2
S3 = data.S3
S4 = data.S4
price = zeros(8760) # Initialize array to store scenarios
for t in 1:8760
# Access specific row of data for this iteration
reg = (-10.5073* S1[t]) + (7.1459 * C1[t]) + (2.9938 * S2[t]) + (3.6828 * C2[t]) + (0.6557* S3[t]) + (1.9710 * C3[t]) + (0.9151 * S4[t]) + (-1.4882 * C4[t]) + (0.0000705481036188987 * Load[t]) + (-0.000673131618513161 * VRE[t]) + 25.6773336136185 + rand(ε)
price[t] = reg # Store the generated price in the scenario array
end
return price
end
# @btime simulator()
# Generate price scenarios
num_scenarios = 2
scenarios = [simulator() for _ in 1:num_scenarios]
#Create animation plot
anim = Animation()
p = plot(1:8760, scenarios[1], label="Scenario 1", xlim=(1, 8760), ylim=(minimum(scenarios)..., maximum(scenarios)...))
for i in 2:num_scenarios
plot!(p, 1:8760, scenarios[i], label="Scenario $i")
frame(anim, p)
end
gif(anim, "/projects/reak4480/Documents/Plots/SDDP_animation.gif", fps = 5)
#Static plot for price
p_plot = Plots.plot(
[simulator() for _ in 1:num_scenarios];
legend = false,
xlabel = "hour",
ylabel = "Price [\$/MW]",
)
Plots.savefig("/projects/reak4480/Documents/Plots/static_price_plot.png")
#Create Markovian Graph
graph = SDDP.MarkovianGraph(simulator; budget = 8760, scenarios = num_scenarios)
#Model
Ptaker = SDDP.PolicyGraph(
graph,
sense = :Max,
upper_bound = 900000000,
optimizer = HiGHS.Optimizer,
) do sp, node
t, price = node
E_min = 0
p = 1000 #MW
h = 100 #hours
E_max = h*p #MWh
PF = 0
eff_c = 0.725
eff_dc = 0.507
cost_op = 0
rate_loss = 0
ini_energy = 100000
#state variable
@variable(
sp,
0 <= e_stored <= E_max,
SDDP.State,
initial_value = ini_energy,
)
@variables(
sp, begin
e_pur
e_sold
e_discharge >= 0
e_charge >= 0
E_min <= e_aval <= E_max
e_loss
rev
cost
cost_pur
z_charge, Bin
z_dischar, Bin
end
)
@constraints(
sp, begin
e_charge <= (z_charge * p)
e_discharge <= (z_dischar * p)
z_charge + z_dischar <= 1
e_charge == e_pur*eff_c
e_discharge == (e_sold/eff_dc)
e_aval == (e_stored.out-e_loss)
end
)
#Transiton Matrix and Constraints
@constraints(
sp, begin
e_loss == (e_stored.out*rate_loss)
rev == price * e_sold
cost == (cost_pur+(cost_op*e_aval))
cost_pur == (e_pur * price)
e_stored.out == e_stored.in + e_charge - e_discharge
end
)
@stageobjective(sp, (rev-cost))
end
#Train Model
SDDP.train(
Ptaker;
iteration_limit = 30,
)
# Price data in a vector named 'Price' from DataFrame
Price = data[!, :Price]
# Create a vector of stage-price pairs
stage_price_pairs = [((t, Price[t]), nothing) for t in eachindex(Price)]
typeof(stage_price_pairs)
out_of_sample = SDDP.Historical(stage_price_pairs)
typeof(out_of_sample)
# Use the SDDP.simulate function with the defined sampling scheme to simulate the policy
simulations = SDDP.simulate(Ptaker, sampling_scheme = out_of_sample,) I've read the documentation on Historic simulation, but I'm still not getting how to apply it to a Markov chain.| Thanks for your help |
Your issue is that You cannot then choose a price that does not correspond to a node. Instead of I can't run your code because I don't know what using SDDP, HiGHS
function simulator()
μ, α = 0, 9.05372307007077
num_samples = 10_000
# TODO: this creates a new random vector every time you call simulator?
ε = μ .+ α * randn(num_samples)
price = zeros(8760)
for t in 1:8760
price[t] =
-10.5073 * data.S1[t] + 7.1459 * data.C1[t] +
2.9938 * data.S2[t] + 3.6828 * data.C2[t] +
0.6557 * data.S3[t] + 1.9710 * data.C3[t] +
0.9151 * data.S4[t] + -1.4882 * data.C4[t] +
0.0000705481036188987 * data.Load[t] +
-0.000673131618513161 * data.VRE[t] +
25.6773336136185 + rand(ε)
end
return price
end
graph = SDDP.MarkovianGraph(simulator; budget = 8760, scenarios = 2)
model = SDDP.PolicyGraph(
graph;
sense = :Max,
# TODO: this upper bound is too large. Consider reformulating the problem to
# make it smaller.
upper_bound = 900_000_000,
# TODO: consider using Gurobi instead. It is much more reliable.
optimizer = HiGHS.Optimizer,
) do sp, node
t, price = node
E_min = 0
p = 1_000 # MW
h = 100 # hours
E_max = h * p # MWh
PF = 0
eff_c = 0.725
eff_dc = 0.507
cost_op = 0
rate_loss = 0
ini_energy = 100_000
@variables(sp, begin
0 <= e_stored <= E_max, SDDP.State, (initial_value = ini_energy)
0 <= e_discharge <= p
0 <= e_charge <= p
E_min <= e_aval <= E_max
z_charge, Bin
z_discharge, Bin
end)
@expressions(sp, begin
e_pur, e_charge / eff_c
e_sold, e_discharge / eff_dc
end)
@constraints(sp, begin
e_charge <= p * z_charge
e_discharge <= p * z_discharge
z_charge + z_discharge <= 1
e_stored.out == e_stored.in + e_charge - e_discharge
e_aval == (1 - rate_loss) * e_stored.out
end)
SDDP.parameterize(sp, [price]) do ω
@stageobjective(sp, ω * e_sold - (ω * e_pur + cost_op * e_aval))
end
end
SDDP.train(model; iteration_limit = 30)
nodes = collect(keys(graph.nodes))
function closest_node(nodes, t, p)
_, i = findmin(t == t_ ? Inf : (p_ - p_)^2 for (t_, p_) in nodes)
return nodes[i]
end
stage_price_pairs = [
(closest_node(nodes, t, data[t, :Price]), data[t, :Price]) for
t in size(data, 1),
]
simulations = SDDP.simulate(
model,
sampling_scheme = SDDP.Historical(stage_price_pairs),
) If you want to simulate using the |
Thank you for adjusting my code. #TODO 1 - Yes, but I adjusted the function to this function simulator()
μ, α = 0, 9.05372307007077
price = zeros(8760)
for t in 1:8760
ε = μ + α * randn() # Generate a new random number for each iteration
price[t] =
-10.5073 * data.S1[t] + 7.1459 * data.C1[t] +
2.9938 * data.S2[t] + 3.6828 * data.C2[t] +
0.6557 * data.S3[t] + 1.9710 * data.C3[t] +
0.9151 * data.S4[t] + -1.4882 * data.C4[t] +
0.0000705481036188987 * data.Load[t] +
-0.000673131618513161 * data.VRE[t] +
25.6773336136185 + ε
end
return price
end 2 - The upper bound was random, I intend to estimate the bound using a deterministic JuMP model 3 - Gurobi works for smaller problems, but I run into license issues because it is single-use probably because the Gurobi licence token is called for every subproblem. I tried using using JuMP, Gurobi
const GRB_ENV = Gurobi.Env()
model_1 = Model(() -> Gurobi.Optimizer(GRB_ENV)) but I run into instance errors. I've been searching for this "graph.nodes". Thanks. The function closest node returns the same node for all calls in the stage_price_pair. Is that normal? I get a bound error when I try to train and simulate using SDDP.SimulatorSamplingScheme(simulator). Same simulator used to construct the markov chain. Thank you for taking the time to help me with this!!! |
What error? That should work.
I made a typo, it should be
Try SDDP.parameterize(sp, [(price,)]) do (ω,)
@stageobjective(sp, ω * e_sold - (ω * e_pur + cost_op * e_aval))
end |
Hello Odow, Thanks for your feedback, I still face issues running historical sampling scheme. When I implemented your correction, I got this error
Does this mean I have to define a root node separately? When I try to perform the simulator sampling scheme simulation, I get a bound error,
Is it okay If I email you the code and data file for troubleshooting? |
d'oh. Another typo. I obviously wasn't concentrating... See if you can spot the problem here: _, i = findmin(t == t_ ? Inf : (p_ - p_)^2 for (t_, p_) in nodes) The syntax
Please try to keep everything public. Sometimes the act of trying to distill a simpler example to post on GitHub can reveal the problem. I think it's also helpful for the (very few) other people reading this to see our debugging process (and my mistakes). |
This fixed the key error problem, _, i = findmin((t == t_ ? (p - p_)^2 : Inf) for (t_, p_) in nodes) and I get a Vector{Tuple{Tuple{Int64, Float64}, Float64}} for the stage price pairs The problem is after simulation, I have a 0 stage objective for all stages. I tried modifying the parameterize function with no improvement SDDP.parameterize(sp, ([price]),) do ω
@constraints(sp, begin
rev == ω * e_sold
cost == (cost_pur+(cost_op*e_aval))
cost_pur == (e_pur * ω)
end)
@stageobjective(sp, rev - cost)
end
end |
You cannot add constraints in Can you provide the training log? What is the output of |
Hello Odow, After reverting back to below, it worked! )
SDDP.parameterize(sp, [(price,)]) do (ω,)
@stageobjective(sp, ω * e_sold - (ω * e_pur + cost_op * e_aval))
end
end And the output of the stage_price_pairs used in SDDP.Historical(stage_price_pairs) is something like
I'm curious, are expressions allowed? when I tried to use SDDP.parameterize(sp, [(price,)]) do (ω,)
@stageobjective(sp, ω * e_sold - (ω * e_pur + cost_op * e_aval))
@expressions(sp, begin
cost, ω * e_pur + cost_op * e_aval
rev, ω * e_sold
end)
end I got this error even though I haven't defined it anywhere else. iteration simulation bound time (s) solves pidERROR: An object of name cost is already attached to this model. If this
The next problem I need help with is using the stimulator to train and simulate. simulations = SDDP.simulate(
Ptaker, 1;
sampling_scheme = SDDP.SimulatorSamplingScheme(simulator),
); I get the Bounds Error
Thanks for your patience and help |
The stuff in this function gets called every time we want to parameterize the model by a different realization of the random variable. So it does get called multiple times. You could use the anonymous expression syntax: cost = @expression(sp, ω * e_pur + cost_op * e_aval)
rev = @expression(sp, ω * e_sold)
@stageobjective(sp, rev - cost)
I think you might need to use: SDDP.parameterize(sp, [(price, nothing)]) do (ω, _)
@stageobjective(sp, ω * e_sold - (ω * e_pur + cost_op * e_aval))
end This is a bug that I should fix. |
Thanks Odow! The fixes worked! |
no problem 😄 |
Hello, I'm curious If I can use custom recorders to evaluate the anonymous expression in the parameterize function SDDP.parameterize(sp, [price]) do ω
cost_pur = @expression(sp, ω * e_pur)
w_cost = @expression(sp, cost_pur + (cost_op * e_aval))
w_rev = @expression(sp, ω * e_sold)
@stageobjective(sp, w_rev - w_cost)
return
end |
I'm trying to simulate a sequence of random variable,
but I get this error
ERROR: MethodError: no method matching sample_scenario(::SDDP.PolicyGraph{Tuple{Int64, Float64}}, ::SDDP.Historical{Int64, Float64}) Closest candidates are: sample_scenario(::SDDP.PolicyGraph{T}, ::Union{SDDP.InSampleMonteCarlo, SDDP.OutOfSampleMonteCarlo{T}}) where T at ~/.julia/packages/SDDP/ZJfQL/src/plugins/sampling_schemes.jl:266 sample_scenario(::SDDP.PolicyGraph{T}, ::SDDP.Historical{T, NoiseTerm}; kwargs...) where {T, NoiseTerm} at ~/.julia/packages/SDDP/ZJfQL/src/plugins/sampling_schemes.jl:417 sample_scenario(::SDDP.PolicyGraph{T}, ::SDDP.PSRSamplingScheme{A}; kwargs...) where {T, A} at ~/.julia/packages/SDDP/ZJfQL/src/plugins/sampling_schemes.jl:459 ... Stacktrace: [1] _simulate(model::SDDP.PolicyGraph{Tuple{Int64, Float64}}, variables::Vector{Symbol}; sampling_scheme::SDDP.Historical{Int64, Float64}, custom_recorders::Dict{Symbol, Function}, duality_handler::Nothing, skip_undefined_variables::Bool, incoming_state::Dict{Symbol, Float64}) @ SDDP ~/.julia/packages/SDDP/ZJfQL/src/algorithm.jl:1158 [2] (::SDDP.var"#247#248"{Base.Pairs{Symbol, Any, NTuple{5, Symbol}, NamedTuple{(:sampling_scheme, :custom_recorders, :duality_handler, :skip_undefined_variables, :incoming_state), Tuple{SDDP.Historical{Int64, Float64}, Dict{Symbol, Function}, Nothing, Bool, Dict{Symbol, Float64}}}}, SDDP.PolicyGraph{Tuple{Int64, Float64}}, Vector{Symbol}})(i::Int64) @ SDDP ~/.julia/packages/SDDP/ZJfQL/src/plugins/parallel_schemes.jl:63 [3] iterate @ ./generator.jl:47 [inlined] [4] _collect @ ./array.jl:807 [inlined] [5] collect_similar @ ./array.jl:716 [inlined] [6] map @ ./abstractarray.jl:2933 [inlined] [7] #_simulate#246 @ ~/.julia/packages/SDDP/ZJfQL/src/plugins/parallel_schemes.jl:62 [inlined] [8] #simulate#109 @ ~/.julia/packages/SDDP/ZJfQL/src/algorithm.jl:1360 [inlined] [9] top-level scope @ REPL[82]:1
Am I doing something wrong?
The text was updated successfully, but these errors were encountered: