Add initial value to batch_csr solve function
Create an implementation of the solve function which allows an initial value to be passed to the iterative solver in the batch_csr class. Closes #392

See merge request gysela-developpers/gyselalibxx!786

AbdelhadiKara committed Dec 13, 2024
1 parent ac84aff commit 725001a
Showing 3 changed files with 69 additions and 44 deletions.
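Taken together, the three diffs below thread an initial guess through the solver stack: MatrixBatchCsr gains a two-argument solve overload, and PolarSplineFEMPoissonLikeSolver keeps the previous solution in member storage and passes it as the warm start. A minimal sketch of what the new overload changes for a caller, assuming an already assembled MatrixBatchCsr instance mat; the names mat, n_batch, n_unknowns and the fill of b are illustrative, not taken from the diff:

#include <Kokkos_Core.hpp>

// Sketch only: mat stands for a set-up MatrixBatchCsr, dimensions are illustrative.
Kokkos::View<double**, Kokkos::LayoutRight> x("x", n_batch, n_unknowns);
Kokkos::View<double**, Kokkos::LayoutRight> b("b", n_batch, n_unknowns);
Kokkos::deep_copy(x, 0.0); // cold start: zero initial guess
// ... fill b with the batched right-hand sides ...
mat.solve(b);    // existing overload: guess taken from a copy of b, solution written back into b
mat.solve(x, b); // new overload: iterate from x, store the solution in x, leave b untouched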
5 changes: 2 additions & 3 deletions src/geometryRTheta/time_solver/bsl_predcorr.hpp
@@ -174,7 +174,7 @@ class BslPredCorrRTheta : public ITimeSolverRTheta
host_t<DVectorFieldMemRTheta<X, Y>>,
Kokkos::DefaultHostExecutionSpace>
time_stepper(grid);

host_t<DFieldMemRTheta> electrical_potential(grid);
start_time = std::chrono::system_clock::now();
for (int iter(0); iter < steps; ++iter) {
time_stepper
@@ -184,8 +184,7 @@ class BslPredCorrRTheta : public ITimeSolverRTheta
define_advection_field,
advect_allfdistribu);

host_t<DFieldMemRTheta> electrical_potential(grid);
host_t<Spline2DMem> allfdistribu_coef(get_spline_idx_range(m_builder));

m_builder(get_field(allfdistribu_coef), get_const_field(allfdistribu));
PoissonLikeRHSFunction const
charge_density_coord(get_const_field(allfdistribu_coef), m_spline_evaluator);
71 changes: 45 additions & 26 deletions src/pde_solvers/polarpoissonlikesolver.hpp
@@ -235,6 +235,9 @@ class PolarSplineFEMPoissonLikeSolver
m_polar_spline_evaluator;
std::unique_ptr<MatrixBatchCsr<Kokkos::DefaultExecutionSpace, MatrixBatchCsrSolver::CG>>
m_gko_matrix;
mutable host_t<SplinePolar> m_phi_spline_coef;
Kokkos::View<double**, Kokkos::LayoutRight> m_x_init;

const int m_batch_idx {0}; // TODO: Remove when batching is supported

public:
@@ -305,8 +308,20 @@ class PolarSplineFEMPoissonLikeSolver
, m_int_volume(
IdxRangeQuadratureRTheta(m_idxrange_quadrature_r, m_idxrange_quadrature_theta))
, m_polar_spline_evaluator(ddc::NullExtrapolationRule())
, m_phi_spline_coef(
PolarBSplinesRTheta::template singular_idx_range<PolarBSplinesRTheta>(),
IdxRangeBSRTheta(
m_idxrange_bsplines_r,
ddc::discrete_space<BSplinesTheta>().full_domain()))
, m_x_init(
"x_init",
1,
ddc::discrete_space<PolarBSplinesRTheta>().nbasis()
- ddc::discrete_space<BSplinesTheta>().nbasis())
{
static_assert(has_2d_jacobian_v<Mapping, CoordRTheta>);
//initialize x_init
Kokkos::deep_copy(m_x_init, 0);
// Get break points
IdxRange<KnotsR> idxrange_r_edges = ddc::discrete_space<BSplinesR>().break_point_domain();
IdxRange<KnotsTheta> idxrange_theta_edges
@@ -445,13 +460,13 @@ class PolarSplineFEMPoissonLikeSolver
const int n_matrix_elements = n_elements_singular + n_elements_overlap + n_elements_stencil;

//CSR data storage
Kokkos::View<double**, Kokkos::LayoutRight, Kokkos::DefaultHostExecutionSpace>
Kokkos::View<double**, Kokkos::LayoutRight, Kokkos::HostSpace>
values_csr_host("values_csr", batch_size, n_matrix_elements);
Kokkos::View<int*, Kokkos::LayoutRight, Kokkos::DefaultHostExecutionSpace>
Kokkos::View<int*, Kokkos::LayoutRight, Kokkos::HostSpace>
col_idx_csr_host("idx_csr", n_matrix_elements);
Kokkos::View<int*, Kokkos::LayoutRight, Kokkos::DefaultExecutionSpace>
nnz_per_row_csr("nnz_per_row_csr", matrix_size + 1);
Kokkos::View<int*, Kokkos::LayoutRight, Kokkos::DefaultHostExecutionSpace>
Kokkos::View<int*, Kokkos::LayoutRight, Kokkos::HostSpace>
nnz_per_row_csr_host("nnz_per_row_csr", matrix_size + 1);

init_nnz_per_line(nnz_per_row_csr);
@@ -690,8 +705,8 @@ class PolarSplineFEMPoissonLikeSolver
* The rhs @f$ \rho@f$ of the Poisson-like equation.
* The type is templated but we can use the PoissonLikeRHSFunction
* class.
* @param[out] spline
* The spline representation of the solution @f$\phi@f$.
* @param[inout] spline
* The spline representation of the solution @f$\phi@f$, also used as initial data for the iterative solver.
*/
template <class RHSFunction>
void operator()(RHSFunction const& rhs, host_t<SplinePolar>& spline) const
@@ -705,14 +720,21 @@ class PolarSplineFEMPoissonLikeSolver
const int b_size = ddc::discrete_space<PolarBSplinesRTheta>().nbasis()
- ddc::discrete_space<BSplinesTheta>().nbasis();
const int batch_size = 1;
Kokkos::View<double**, Kokkos::LayoutRight, Kokkos::DefaultHostExecutionSpace>
// Create b for rhs
Kokkos::View<double**, Kokkos::LayoutRight, Kokkos::HostSpace>
b_host("b_host", batch_size, b_size);

//Create an initial guess
Kokkos::View<double**, Kokkos::LayoutRight, Kokkos::HostSpace>
x_init_host("x_init_host", batch_size, b_size);
// Fill b
ddc::for_each(
PolarBSplinesRTheta::template singular_idx_range<PolarBSplinesRTheta>(),
[&](IdxBSPolar const idx) {
b_host(0, idx.uid()) = ddc::transform_reduce(
const int bspl_idx = idx
- PolarBSplinesRTheta::template singular_idx_range<
PolarBSplinesRTheta>()
.front();
b_host(0, bspl_idx) = ddc::transform_reduce(
m_idxrange_quadrature_singular,
0.0,
ddc::reducer::sum<double>(),
@@ -778,19 +800,20 @@ class PolarSplineFEMPoissonLikeSolver
return rhs(coord) * rb * pb * m_int_volume(idx_r, idx_theta);
});
});
b_host(0, idx.uid()) = element;
const std::size_t singular_index
= idx - ddc::discrete_space<PolarBSplinesRTheta>().full_domain().front();
b_host(0, singular_index) = element;
});

Kokkos::View<double**, Kokkos::LayoutRight, Kokkos::DefaultExecutionSpace>
b("b", batch_size, b_size);
Kokkos::View<double**, Kokkos::LayoutRight> b("b", batch_size, b_size);
Kokkos::deep_copy(b, b_host);
Kokkos::Profiling::popRegion();

Kokkos::deep_copy(m_x_init, x_init_host);
// Solve the matrix equation
Kokkos::Profiling::pushRegion("PolarPoissonSolve");
m_gko_matrix->solve(b);

Kokkos::deep_copy(b_host, b);
m_gko_matrix->solve(m_x_init, b);
Kokkos::deep_copy(x_init_host, m_x_init);
//-----------------
IdxRangeBSRTheta dirichlet_boundary_idx_range(
m_idxrange_bsplines_r.take_last(IdxStep<BSplinesR> {1}),
@@ -806,11 +829,11 @@ class PolarSplineFEMPoissonLikeSolver
- PolarBSplinesRTheta::template singular_idx_range<
PolarBSplinesRTheta>()
.front();
spline.singular_spline_coef(idx) = b_host(0, bspl_idx);
spline.singular_spline_coef(idx) = x_init_host(0, bspl_idx);
});
ddc::for_each(m_idxrange_fem_non_singular, [&](IdxBSPolar const idx) {
const IdxBSRTheta idx_2d(PolarBSplinesRTheta::get_2d_index(idx));
spline.spline_coef(idx_2d) = b_host(0, idx.uid());
spline.spline_coef(idx_2d) = x_init_host(0, idx.uid());
});
ddc::for_each(dirichlet_boundary_idx_range, [&](IdxBSRTheta const idx) {
spline.spline_coef(idx) = 0.0;
@@ -842,8 +865,8 @@ class PolarSplineFEMPoissonLikeSolver
* The rhs @f$ \rho@f$ of the Poisson-like equation.
* The type is templated but we can use the PoissonLikeRHSFunction
* class.
* @param[out] phi
* The values of the solution @f$\phi@f$ on the given coords_eval.
* @param[inout] phi
* The values of the solution @f$\phi@f$ on the given coords_eval, also used as initial data for the iterative solver.
*/
template <class RHSFunction>
void operator()(RHSFunction const& rhs, host_t<DFieldRTheta> phi) const
@@ -852,18 +875,15 @@ class PolarSplineFEMPoissonLikeSolver
std::is_invocable_r_v<double, RHSFunction, CoordRTheta>,
"RHSFunction must have an operator() which takes a coordinate and returns a "
"double");
IdxRangeBSTheta idxrange_polar(ddc::discrete_space<BSplinesTheta>().full_domain());
host_t<SplinePolar>
spline(PolarBSplinesRTheta::template singular_idx_range<PolarBSplinesRTheta>(),
IdxRangeBSRTheta(m_idxrange_bsplines_r, idxrange_polar));

(*this)(rhs, spline);

(*this)(rhs, m_phi_spline_coef);
host_t<CoordFieldMemRTheta> coords_eval_alloc(get_idx_range(phi));
host_t<CoordFieldRTheta> coords_eval(get_field(coords_eval_alloc));
ddc::for_each(get_idx_range(phi), [&](IdxRTheta idx) {
coords_eval(idx) = ddc::coordinate(idx);
});
m_polar_spline_evaluator(phi, get_const_field(coords_eval), spline);
m_polar_spline_evaluator(phi, get_const_field(coords_eval), m_phi_spline_coef);
}

private:
@@ -1205,8 +1225,7 @@ class PolarSplineFEMPoissonLikeSolver
*
* @param[out] nnz A 1D Kokkos view of length matrix_size+1 which stores the count of the non-zeros in each row of the matrix.
*/
void init_nnz_per_line(
Kokkos::View<int*, Kokkos::LayoutRight, Kokkos::DefaultExecutionSpace> nnz) const
void init_nnz_per_line(Kokkos::View<int*, Kokkos::LayoutRight> nnz) const
{
Kokkos::Profiling::pushRegion("PolarPoissonInitNnz");
size_t const mat_size = nnz.extent(0) - 1;
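The changes to this solver all follow one warm-start pattern: keep the previous solution in member storage (m_x_init for the FEM coefficients, m_phi_spline_coef for the spline handed to the evaluator), zero it once in the constructor, give it to the iterative solver as the starting point, and copy the converged result back so the next call starts from it. A stripped-down sketch of the pattern, with illustrative names that are not part of the library:

#include <Kokkos_Core.hpp>

// Illustrative warm-start cache, not the gyselalibxx API.
class WarmStartedSolver
{
    Kokkos::View<double**, Kokkos::LayoutRight> m_x_init; // survives between calls

public:
    WarmStartedSolver(int n_batch, int n_unknowns) : m_x_init("x_init", n_batch, n_unknowns)
    {
        Kokkos::deep_copy(m_x_init, 0.0); // the very first solve starts from zero
    }

    template <class BatchMatrix, class RHSView>
    void solve(BatchMatrix& matrix, RHSView const& b)
    {
        // Iterate from the previous solution; m_x_init is overwritten with the new
        // one and so automatically becomes the initial guess of the next call.
        matrix.solve(m_x_init, b);
    }
};

In the commit itself m_phi_spline_coef is declared mutable so the const operator() overloads can reuse it, while m_x_init needs no such qualifier: a Kokkos::View can be written through Kokkos::deep_copy even from a const member function, because constness applies to the view handle rather than to the data it references.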
37 changes: 22 additions & 15 deletions vendor/sll/include/sll/matrix_batch_csr.hpp
@@ -252,7 +252,19 @@ class MatrixBatchCsr : public MatrixBatch<ExecSpace>
{
BatchedRHS x_view("x_view", batch_size(), size());
Kokkos::deep_copy(x_view, b);
solve(x_view, b);
Kokkos::deep_copy(b, x_view);
}

/**
* @brief Solve the batched linear problem Ax=b.
*
* @param[in, out] x A 2D Kokkos::View storing the batched initial guesses for the problems (useful for iterative solvers), which also receives the corresponding solutions.
*
* @param[in] b A 2D Kokkos::View storing the batched right-hand side of the problems.
*/
void solve(BatchedRHS const x, BatchedRHS const b) const
{
if constexpr (
Solver == MatrixBatchCsrSolver::CG || Solver == MatrixBatchCsrSolver::BICGSTAB) {
for (size_t i = 0; i < batch_size(); i++) {
@@ -265,27 +277,25 @@ class MatrixBatchCsr : public MatrixBatch<ExecSpace>
m_solver[i]->add_logger(logger);
m_solver[i]
->apply(to_gko_multivector(gko_exec, b)->create_const_view_for_item(i),
to_gko_multivector(gko_exec, x_view)->create_view_for_item(i));
to_gko_multivector(gko_exec, x)->create_view_for_item(i));
m_solver[i]->remove_logger(logger);

// Check convergence
if (!logger->has_converged()) {
throw std::runtime_error("Ginkgo did not converge in MatrixBatchCsr");
}

// save logger data
if (m_with_logger) {
std::fstream log_file("csr_log.txt", std::ios::out | std::ios::app);
save_logger(
log_file,
i,
m_batch_matrix_csr->create_const_view_for_item(i),
Kokkos::subview(x_view, i, Kokkos::ALL),
Kokkos::subview(x, i, Kokkos::ALL),
Kokkos::subview(b, i, Kokkos::ALL),
logger,
m_tol);
log_file.close();
}
// Check convergence
if (!logger->has_converged()) {
throw std::runtime_error("Ginkgo did not converge in MatrixBatchCsr");
}
}
} else {
std::shared_ptr const gko_exec = m_solver->get_executor();
@@ -295,21 +305,18 @@ class MatrixBatchCsr : public MatrixBatch<ExecSpace>

// Solve & log
m_solver->add_logger(logger);
m_solver->apply(to_gko_multivector(gko_exec, b), to_gko_multivector(gko_exec, x_view));
m_solver->apply(to_gko_multivector(gko_exec, b), to_gko_multivector(gko_exec, x));
m_solver->remove_logger(logger);

// Check convergency
check_conv(batch_size(), m_tol, gko_exec, logger);

// Save logger data
if (m_with_logger) {
std::fstream log_file("csr_log.txt", std::ios::out | std::ios::app);
save_logger(log_file, m_batch_matrix_csr, x_view, b, logger, m_tol);
save_logger(log_file, m_batch_matrix_csr, x, b, logger, m_tol);
log_file.close();
}
// Check convergence
check_conv(batch_size(), m_tol, gko_exec, logger);
}

Kokkos::deep_copy(b, x_view);
}

/**
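With both overloads in place, the one-argument solve(b) stays a convenience wrapper (copy b into a scratch view, solve, copy back), while a caller that advances a solution in time can keep a single view alive and reuse it across solves. A hedged usage sketch of that loop; mat, n_batch, n_unknowns, n_steps and update_rhs are placeholders, not part of the class:

#include <Kokkos_Core.hpp>

// Placeholder time loop reusing the previous solution as the next initial guess.
Kokkos::View<double**, Kokkos::LayoutRight> x("x", n_batch, n_unknowns);
Kokkos::View<double**, Kokkos::LayoutRight> b("b", n_batch, n_unknowns);
Kokkos::deep_copy(x, 0.0);
for (int step = 0; step < n_steps; ++step) {
    update_rhs(b, step); // placeholder: refresh the right-hand side for this step
    mat.solve(x, b);     // CG/BiCGSTAB iterates from the previous step's solution
}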
