Block-Structured AMR Software Framework
 
AMReX_HypreSolver.H
#ifndef AMREX_HYPRE_SOLVER_H_
#define AMREX_HYPRE_SOLVER_H_

#include <AMReX_Geometry.H>
#include <AMReX_iMultiFab.H>
#include <AMReX_HypreIJIface.H>
#include <AMReX_BLProfiler.H>

#include "HYPRE.h"
#include "_hypre_utilities.h"

#include <string>

namespace amrex
{

/**
 * \brief Solve Ax = b using HYPRE's generic IJ matrix format, where A is a
 * general sparse matrix. The template parameter MSS is the maximum stencil
 * size, i.e., the maximum number of nonzero columns in any row of A.
 */
template <int MSS>
class HypreSolver
{
public:

    template <class Marker, class Filler>
    HypreSolver (Vector<IndexType> const& a_index_type,
                 IntVect const& a_nghost,
                 Geometry const& a_geom,
                 BoxArray const& a_grids,
                 DistributionMapping const& a_dmap, // NOLINT(modernize-pass-by-value)
                 Marker && a_marker,
                 Filler && a_filler,
                 int a_verbose = 0,
                 std::string a_options_namespace = "hypre");

    template <class MF, std::enable_if_t<IsFabArray<MF>::value &&
                                         std::is_same_v<typename MF::value_type,
                                                        HYPRE_Real>, int> = 0>
    void solve (Vector<MF *> const& a_soln,
                Vector<MF const*> const& a_rhs,
                HYPRE_Real rel_tol, HYPRE_Real abs_tol, int max_iter);

    int getNumIters () const { return m_hypre_ij->getNumIters(); }

    HYPRE_Real getFinalResidualNorm () const {
        return m_hypre_ij->getFinalResidualNorm();
    }

    HYPRE_IJMatrix getA () const { return m_hypre_ij->A(); }
    HYPRE_IJVector getb () const { return m_hypre_ij->b(); }
    HYPRE_IJVector getx () const { return m_hypre_ij->x(); }

// public: // for cuda

    template <class Marker>
#ifdef AMREX_USE_CUDA
    std::enable_if_t<IsCallable<Marker,int,int,int,int,int>::value>
#else
    std::enable_if_t<IsCallableR<bool,Marker,int,int,int,int,int>::value>
#endif
    fill_local_id (Marker const& marker);

    template <typename AI>
    void fill_global_id ();

    template <class Filler,
              std::enable_if_t<IsCallable<Filler,int,int,int,int,int,
                                          Array4<HYPRE_Int const> const*,
                                          HYPRE_Int&, HYPRE_Int*,
                                          HYPRE_Real*>::value,int> FOO = 0>
    void fill_matrix (Filler const& filler);

    template <class MF, std::enable_if_t<IsFabArray<MF>::value &&
                                         std::is_same_v<typename MF::value_type,
                                                        HYPRE_Real>, int> = 0>
    void load_vectors (Vector<MF *> const& a_soln,
                       Vector<MF const*> const& a_rhs);

    template <class MF, std::enable_if_t<IsFabArray<MF>::value &&
                                         std::is_same_v<typename MF::value_type,
                                                        HYPRE_Real>, int> = 0>
    void get_solution (Vector<MF*> const& a_soln);

private:

    int                 m_nvars;
    Vector<IndexType>   m_index_type;
    IntVect             m_nghost;
    Geometry            m_geom;
    Vector<BoxArray>    m_grids;
    DistributionMapping m_dmap;

    int         m_verbose;
    std::string m_options_namespace;

    MPI_Comm m_comm = MPI_COMM_NULL;

    Vector<std::unique_ptr<iMultiFab>>       m_owner_mask;
    Vector<iMultiFab>                        m_local_id;
    Vector<FabArray<BaseFab<HYPRE_Int>>>     m_global_id;
    LayoutData<Gpu::DeviceVector<HYPRE_Int>> m_global_id_vec;

#ifdef AMREX_USE_GPU
    LayoutData<Gpu::DeviceVector<int>> m_cell_offset;
#endif

    Vector<LayoutData<HYPRE_Int>> m_nrows_grid;
    Vector<LayoutData<HYPRE_Int>> m_id_offset;
    LayoutData<HYPRE_Int>         m_nrows;
    HYPRE_Int m_nrows_proc;

    std::unique_ptr<HypreIJIface> m_hypre_ij;

    // Non-owning references to Hypre matrix, rhs, and solution data
    HYPRE_IJMatrix m_A = nullptr;
    HYPRE_IJVector m_b = nullptr;
    HYPRE_IJVector m_x = nullptr;
};
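
// Example usage (a minimal sketch, not part of the original header; `geom`,
// `grids`, `dmap`, `soln`, and `rhs` are assumed to exist, and the marker and
// filler lambdas are hypothetical illustrations of the callables expected by
// the constructor):
//
//     // One cell-centered variable with at most 5 nonzero entries per row
//     // (e.g., a 2D 5-point stencil), so MSS = 5.
//     HypreSolver<5> solver({IndexType::TheCellType()}, IntVect(0),
//                           geom, grids, dmap,
//         // Marker: does (boxno,i,j,k,ivar) correspond to a matrix row?
//         [=] AMREX_GPU_DEVICE (int, int, int, int, int) -> bool
//             { return true; },
//         // Filler: set the ncols nonzero entries of this row, using the
//         // global column ids provided by gid.
//         [=] AMREX_GPU_DEVICE (int boxno, int i, int j, int k, int ivar,
//                               Array4<HYPRE_Int const> const* gid,
//                               HYPRE_Int& ncols, HYPRE_Int* cols,
//                               HYPRE_Real* mat)
//             {
//                 ncols   = 1;                  // diagonal entry only
//                 cols[0] = gid[ivar](i,j,k);   // global row/column id
//                 mat[0]  = HYPRE_Real(1.0);    // identity row
//             });
//     solver.solve({&soln}, {&rhs}, 1.e-6, 0.0, 100);  // MF = MultiFab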

template <int MSS>
template <class Marker, class Filler>
HypreSolver<MSS>::HypreSolver (Vector<IndexType> const& a_index_type,
                               IntVect const& a_nghost,
                               Geometry const& a_geom,
                               BoxArray const& a_grids,
                               DistributionMapping const& a_dmap, // NOLINT(modernize-pass-by-value)
                               Marker && a_marker,
                               Filler && a_filler,
                               int a_verbose,
                               std::string a_options_namespace)
    : m_nvars (int(a_index_type.size())),
      m_index_type (a_index_type),
      m_nghost (a_nghost),
      m_geom (a_geom),
      m_dmap (a_dmap),
      m_verbose (a_verbose),
      m_options_namespace(std::move(a_options_namespace))
{
    BL_PROFILE("HypreSolver()");

#ifdef AMREX_USE_MPI
    m_comm = ParallelContext::CommunicatorSub(); // NOLINT(cppcoreguidelines-prefer-member-initializer)
#endif

    m_grids.resize(m_nvars);
    m_local_id.resize(m_nvars);
    m_global_id.resize(m_nvars);
    m_nrows_grid.resize(m_nvars);
    m_id_offset.resize(m_nvars);
    Long nrows_max = 0;
    for (int ivar = 0; ivar < m_nvars; ++ivar) {
        m_grids     [ivar] = amrex::convert(a_grids,m_index_type[ivar]);
        m_local_id  [ivar].define(m_grids[ivar], m_dmap, 1, 0);
        m_global_id [ivar].define(m_grids[ivar], m_dmap, 1, m_nghost);
        m_nrows_grid[ivar].define(m_grids[0], m_dmap);
        m_id_offset [ivar].define(m_grids[0], m_dmap);
        nrows_max += m_grids[ivar].numPts();
    }
    m_global_id_vec.define(m_grids[0], m_dmap);
    m_nrows        .define(m_grids[0], m_dmap);
    AMREX_ALWAYS_ASSERT_WITH_MESSAGE(nrows_max < static_cast<Long>(std::numeric_limits<HYPRE_Int>::max()-1),
                                     "Need to configure Hypre with --enable-bigint");

    m_owner_mask.resize(m_nvars);
    for (int ivar = 0; ivar < m_nvars; ++ivar) {
        m_owner_mask[ivar] = amrex::OwnerMask(m_local_id[ivar], m_geom.periodicity());
    }

#ifdef AMREX_USE_GPU
    m_cell_offset.define(m_grids[0], m_dmap);
#endif

    fill_local_id(std::forward<Marker>(a_marker));

    // At this point, m_local_id stores the ids local to each box.
    // m_nrows_grid stores the number of unique points in each box.
    // m_nrows_proc is the number of rows for all variables on this MPI
    // process. If a point is invalid, its id is invalid (i.e., a very
    // negative number). Note that the data type of m_local_id is int,
    // not HYPRE_Int, for performance on GPUs.

    const int nprocs = ParallelContext::NProcsSub();
    const int myproc = ParallelContext::MyProcSub();

    Vector<HYPRE_Int> nrows_allprocs(nprocs);
#ifdef AMREX_USE_MPI
    if (nrows_allprocs.size() > 1) {
        MPI_Allgather(&m_nrows_proc, sizeof(HYPRE_Int), MPI_CHAR,
                      nrows_allprocs.data(), sizeof(HYPRE_Int), MPI_CHAR, m_comm);
    } else
#endif
    {
        nrows_allprocs[0] = m_nrows_proc;
    }

    HYPRE_Int proc_begin = 0;
    for (int i = 0; i < myproc; ++i) {
        proc_begin += nrows_allprocs[i];
    }

    HYPRE_Int proc_end = proc_begin;
    for (MFIter mfi(m_nrows_grid[0]); mfi.isValid(); ++mfi) {
        for (int ivar = 0; ivar < m_nvars; ++ivar) {
            m_id_offset[ivar][mfi] = proc_end;
            proc_end += m_nrows_grid[ivar][mfi];
        }
    }
    AMREX_ASSERT(proc_end == proc_begin + m_nrows_proc);

    // To generate global ids for Hypre, we need to remove duplicates on
    // nodes shared by multiple Boxes with OverrideSync. So we need to use
    // a type that supports atomicAdd. HYPRE_Int is either int or long
    // long. The latter (i.e., long long) does not have native atomicAdd
    // support in CUDA/HIP, whereas unsigned long long does.
    using AtomicInt = std::conditional_t<sizeof(HYPRE_Int) == 4,
                                         HYPRE_Int, unsigned long long>;
    fill_global_id<AtomicInt>();

    // Create and initialize A, b & x
    HYPRE_Int ilower = proc_begin;
    HYPRE_Int iupper = proc_end-1;
    m_hypre_ij = std::make_unique<HypreIJIface>(m_comm, ilower, iupper, m_verbose);
    m_hypre_ij->parse_inputs(m_options_namespace);

    // Obtain non-owning references to the matrix, rhs, and solution data
    m_A = m_hypre_ij->A();
    m_b = m_hypre_ij->b();
    m_x = m_hypre_ij->x();

    fill_matrix(std::forward<Filler>(a_filler));
}
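
// Row-numbering sketch (illustrative numbers, not from the original source):
// suppose 2 MPI ranks own 100 and 80 rows, so rank 1 computes
// proc_begin = 100. If rank 1 holds two boxes whose row counts
// (m_nrows_grid, summed over variables) are 30 and 50, the offset loop above
// assigns m_id_offset = 100 and 130; box-local id n in the second box then
// maps to global Hypre row 130 + n, and proc_end = 180
// = proc_begin + m_nrows_proc.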

template <int MSS>
template <class Marker>
#ifdef AMREX_USE_CUDA
    std::enable_if_t<IsCallable<Marker,int,int,int,int,int>::value>
#else
    std::enable_if_t<IsCallableR<bool,Marker,int,int,int,int,int>::value>
#endif
HypreSolver<MSS>::fill_local_id (Marker const& marker)
{
    BL_PROFILE("HypreSolver::fill_local_id()");

#ifdef AMREX_USE_GPU

    for (MFIter mfi(m_local_id[0]); mfi.isValid(); ++mfi) {
        int boxno = mfi.LocalIndex();
        Long npts_tot = 0;
        for (int ivar = 0; ivar < m_nvars; ++ivar) {
            Box const& bx = amrex::convert(mfi.validbox(),m_index_type[ivar]);
            npts_tot += bx.numPts();
        }
        m_cell_offset[mfi].resize(npts_tot);
        npts_tot = 0;
        int* p_cell_offset = m_cell_offset[mfi].data();
        for (int ivar = 0; ivar < m_nvars; ++ivar) {
            Box const& bx = amrex::convert(mfi.validbox(),m_index_type[ivar]);
            auto const& lid = m_local_id[ivar].array(mfi);
            auto const& owner = m_owner_mask[ivar]->const_array(mfi);
            AMREX_ASSERT(bx.numPts() < static_cast<Long>(std::numeric_limits<int>::max()));
            const auto npts = static_cast<int>(bx.numPts());
            int npts_box = amrex::Scan::PrefixSum<int>(npts,
                [=] AMREX_GPU_DEVICE (int offset) noexcept -> int
                {
                    const Dim3 cell = bx.atOffset(offset).dim3();
                    int id = (owner ( cell.x,cell.y,cell.z ) &&
                              marker(boxno,cell.x,cell.y,cell.z,ivar)) ? 1 : 0;
                    lid(cell.x,cell.y,cell.z) = id;
                    return id;
                },
                [=] AMREX_GPU_DEVICE (int offset, int ps) noexcept
                {
                    const Dim3 cell = bx.atOffset(offset).dim3();
                    if (lid(cell.x,cell.y,cell.z)) {
                        lid(cell.x,cell.y,cell.z) = ps;
                        p_cell_offset[ps] = offset;
                    } else {
                        lid(cell.x,cell.y,cell.z) = std::numeric_limits<int>::lowest();
                    }
                },
                Scan::Type::exclusive, Scan::retSum);
            m_nrows_grid[ivar][mfi] = npts_box;
            npts_tot += npts_box;
            p_cell_offset += npts_box;
        }
        m_cell_offset[mfi].resize(npts_tot);
    }

#else

#ifdef AMREX_USE_OMP
#pragma omp parallel
#endif
    for (MFIter mfi(m_local_id[0]); mfi.isValid(); ++mfi) {
        int boxno = mfi.LocalIndex();
        for (int ivar = 0; ivar < m_nvars; ++ivar) {
            Box const& bx = amrex::convert(mfi.validbox(),m_index_type[ivar]);
            auto const& lid = m_local_id[ivar].array(mfi);
            auto const& owner = m_owner_mask[ivar]->const_array(mfi);
            int id = 0;
            const auto lo = amrex::lbound(bx);
            const auto hi = amrex::ubound(bx);
            for (int k = lo.z; k <= hi.z; ++k) {
            for (int j = lo.y; j <= hi.y; ++j) {
            for (int i = lo.x; i <= hi.x; ++i) {
                if (owner(i,j,k) && marker(boxno,i,j,k,ivar)) {
                    lid(i,j,k) = id++;
                } else {
                    lid(i,j,k) = std::numeric_limits<int>::lowest();
                }
            }}}
            m_nrows_grid[ivar][mfi] = id;
        }
    }
#endif

    m_nrows_proc = 0;
    for (MFIter mfi(m_nrows); mfi.isValid(); ++mfi) {
        int nrows = 0;
        for (int ivar = 0; ivar < m_nvars; ++ivar) {
            nrows += m_nrows_grid[ivar][mfi];
        }
        m_nrows[mfi] = nrows;
        m_nrows_proc += nrows;
    }
}
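
// Worked example: in a box with 4 cells where the owner mask and marker
// accept cells 0, 1, and 3 but reject cell 2, the ids assigned above are
// {0, 1, lowest, 2}, and m_nrows_grid for that box is 3. Rejected or
// non-owned cells keep std::numeric_limits<int>::lowest(), so they never
// pass the `lid(i,j,k) >= 0` tests used later.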

template <int MSS>
template <typename AI>
void
HypreSolver<MSS>::fill_global_id ()
{
    BL_PROFILE("HypreSolver::fill_global_id()");

    Vector<FabArray<BaseFab<AI>>> global_id_raii;
    Vector<FabArray<BaseFab<AI>>*> p_global_id;

    if constexpr (std::is_same_v<HYPRE_Int,AI>) {
        for (int ivar = 0; ivar < m_nvars; ++ivar) {
            p_global_id.push_back(&(m_global_id[ivar]));
        }
    } else {
        for (int ivar = 0; ivar < m_nvars; ++ivar) {
            global_id_raii.emplace_back(m_global_id[ivar].boxArray(),
                                        m_global_id[ivar].DistributionMap(),
                                        1, m_global_id[ivar].nGrowVect());
            p_global_id.push_back(&(global_id_raii[ivar]));
        }
    }

#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
    for (MFIter mfi(m_global_id[0]); mfi.isValid(); ++mfi) {
        auto& rows_vec = m_global_id_vec[mfi];
        rows_vec.resize(m_nrows[mfi]);

        HYPRE_Int nrows = 0;
        for (int ivar = 0; ivar < m_nvars; ++ivar) {
            HYPRE_Int const os = m_id_offset[ivar][mfi];
            Box bx = mfi.validbox();
            bx.convert(m_index_type[ivar]).grow(m_nghost);
            Array4<AI> const& gid = p_global_id[ivar]->array(mfi);
            auto const& lid = m_local_id[ivar].const_array(mfi);
            HYPRE_Int* rows = rows_vec.data() + nrows;
            nrows += m_nrows_grid[ivar][mfi];
            amrex::ParallelFor(bx,[=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept
            {
                if (lid.contains(i,j,k) && lid(i,j,k) >= 0) {
                    const auto id = lid(i,j,k) + os;
                    rows[lid(i,j,k)] = id;
                    gid(i,j,k) = static_cast<AI>(id);
                } else {
                    gid(i,j,k) = static_cast<AI>
                        (std::numeric_limits<HYPRE_Int>::max());
                }
            });
        }
    }

    for (int ivar = 0; ivar < m_nvars; ++ivar) {
        amrex::OverrideSync(*p_global_id[ivar], *m_owner_mask[ivar],
                            m_geom.periodicity());
        p_global_id[ivar]->FillBoundary(m_geom.periodicity());

        if constexpr (!std::is_same<HYPRE_Int, AI>()) {
            auto const& dst = m_global_id[ivar].arrays();
            auto const& src = p_global_id[ivar]->const_arrays();
            amrex::ParallelFor(m_global_id[ivar], m_global_id[ivar].nGrowVect(),
                               [=] AMREX_GPU_DEVICE (int b, int i, int j, int k)
            {
                dst[b](i,j,k) = static_cast<HYPRE_Int>(src[b](i,j,k));
            });
        }
    }
}

#ifdef AMREX_USE_GPU
namespace detail {
template <typename T>
void pack_matrix_gpu (Gpu::DeviceVector<HYPRE_Int>& cols_tmp,
                      Gpu::DeviceVector<HYPRE_Real>& mat_tmp,
                      Gpu::DeviceVector<HYPRE_Int>& cols,
                      Gpu::DeviceVector<HYPRE_Real>& mat)
{
    auto* p_cols_tmp = cols_tmp.data();
    auto* p_mat_tmp = mat_tmp.data();
    auto const* p_cols = cols.data();
    auto const* p_mat = mat.data();
    const auto N = Long(cols.size());
    Scan::PrefixSum<T>(N,
        [=] AMREX_GPU_DEVICE (Long i) -> T
        {
            return static_cast<T>(p_cols[i] >= 0);
        },
        [=] AMREX_GPU_DEVICE (Long i, T s)
        {
            if (p_cols[i] >= 0) {
                p_cols_tmp[s] = p_cols[i];
                p_mat_tmp[s] = p_mat[i];
            }
        },
        Scan::Type::exclusive, Scan::noRetSum);
    std::swap(cols_tmp, cols);
    std::swap(mat_tmp, mat);
}
}
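
// Compaction sketch (illustrative): with cols = {3, -1, 7, -1, 2}, where the
// -1 slots are unused entries of the fixed MSS-wide rows, the exclusive
// prefix sum over (cols[i] >= 0) gives destinations {0, 1, 1, 2, 2}, so the
// packed result is cols = {3, 7, 2} with the matrix values moved in lockstep.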
#endif

template <int MSS>
template <class Filler,
          std::enable_if_t<IsCallable<Filler,int,int,int,int,int,
                                      Array4<HYPRE_Int const> const*,
                                      HYPRE_Int&, HYPRE_Int*,
                                      HYPRE_Real*>::value,int> FOO>
void
HypreSolver<MSS>::fill_matrix (Filler const& filler)
{
    BL_PROFILE("HypreSolver::fill_matrix()");

    Gpu::DeviceVector<HYPRE_Int>  ncols_vec;
    Gpu::DeviceVector<HYPRE_Int>  cols_vec;
    Gpu::DeviceVector<HYPRE_Real> mat_vec;

    MFItInfo mfitinfo;
    mfitinfo.UseDefaultStream().DisableDeviceSync();
    for (MFIter mfi(m_local_id[0],mfitinfo); mfi.isValid(); ++mfi)
    {
        int boxno = mfi.LocalIndex();
        const HYPRE_Int nrows = m_nrows[mfi];
        if (nrows > 0)
        {
            ncols_vec.clear();
            ncols_vec.resize(nrows);
            HYPRE_Int* ncols = ncols_vec.data();

            cols_vec.clear();
            cols_vec.resize(Long(nrows)*MSS, -1);
            HYPRE_Int* cols = cols_vec.data();

            mat_vec.clear();
            mat_vec.resize(Long(nrows)*MSS);
            HYPRE_Real* mat = mat_vec.data();

            Vector<Array4<HYPRE_Int const>> gid_v(m_nvars);
            for (int ivar = 0; ivar < m_nvars; ++ivar) {
                gid_v[ivar] = m_global_id[ivar].const_array(mfi);
            }

#ifdef AMREX_USE_GPU
            Gpu::Buffer<Array4<HYPRE_Int const>> gid_buf
                (gid_v.data(), gid_v.size());
            auto const* pgid = gid_buf.data();
            auto const* p_cell_offset = m_cell_offset[mfi].data();
            Long ntot = 0;
            for (int ivar = 0; ivar < m_nvars; ++ivar) {
                const HYPRE_Int nrows_var = m_nrows_grid[ivar][mfi];
                if (nrows_var > 0) {
                    Box const& bx = amrex::convert(mfi.validbox(),m_index_type[ivar]);
                    ntot += Reduce::Sum<Long>(nrows_var,
                        [=] AMREX_GPU_DEVICE (HYPRE_Int offset)
                        {
                            const Dim3 cell = bx.atOffset(p_cell_offset[offset]).dim3();
                            filler(boxno, cell.x, cell.y, cell.z, ivar, pgid,
                                   ncols[offset], cols+Long(offset)*MSS,
                                   mat+Long(offset)*MSS);
                            return ncols[offset];
                        });
                    p_cell_offset += nrows_var;
                    ncols += nrows_var;
                    cols += Long(nrows_var)*MSS;
                    mat += Long(nrows_var)*MSS;
                }
            }
            Gpu::DeviceVector<HYPRE_Int> cols_tmp(ntot);
            Gpu::DeviceVector<HYPRE_Real> mat_tmp(ntot);
            if (ntot >= Long(std::numeric_limits<int>::max())) {
                detail::pack_matrix_gpu<Long>(cols_tmp, mat_tmp, cols_vec, mat_vec);
            } else {
                detail::pack_matrix_gpu<int>(cols_tmp, mat_tmp, cols_vec, mat_vec);
            }
#else
            auto* pgid = gid_v.data();
            for (int ivar = 0; ivar < m_nvars; ++ivar) {
                if (m_nrows_grid[ivar][mfi] > 0) {
                    auto const& lid = m_local_id[ivar].const_array(mfi);
                    amrex::Loop(amrex::convert(mfi.validbox(),m_index_type[ivar]),
                                [=,&ncols,&cols,&mat] (int i, int j, int k)
                    {
                        if (lid(i,j,k) >= 0) {
                            filler(boxno, i, j, k, ivar, pgid, *ncols, cols, mat);
                            cols += (*ncols);
                            mat += (*ncols);
                            ++ncols;
                        }
                    });
                }
            }
#endif

            const auto& rows_vec = m_global_id_vec[mfi];
            HYPRE_Int const* rows = rows_vec.data();

            Gpu::streamSynchronize();
            HYPRE_IJMatrixSetValues(m_A, nrows, ncols_vec.data(), rows,
                                    cols_vec.data(), mat_vec.data());
            Gpu::hypreSynchronize();
        }
    }
    HYPRE_IJMatrixAssemble(m_A);
}
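
// Storage-layout note (illustrative): with MSS = 5 and nrows = 2, cols_vec
// and mat_vec each hold 2*5 slots, and a row with 3 nonzeros leaves its last
// two slots at -1. On GPU the -1 slots are removed by detail::pack_matrix_gpu
// before HYPRE_IJMatrixSetValues; on CPU the filler writes each row
// contiguously, so no packing is needed.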

template <int MSS>
template <class MF, std::enable_if_t<IsFabArray<MF>::value &&
                                     std::is_same_v<typename MF::value_type,
                                                    HYPRE_Real>, int> FOO>
void
HypreSolver<MSS>::solve (Vector<MF *> const& a_soln,
                         Vector<MF const*> const& a_rhs,
                         HYPRE_Real rel_tol, HYPRE_Real abs_tol, int max_iter)
{
    BL_PROFILE("HypreSolver::solve()");

    AMREX_ASSERT(a_soln.size() == m_nvars && a_rhs.size() == m_nvars);

    HYPRE_IJVectorInitialize(m_b);
    HYPRE_IJVectorInitialize(m_x);

    load_vectors(a_soln, a_rhs);

    HYPRE_IJVectorAssemble(m_x);
    HYPRE_IJVectorAssemble(m_b);

    m_hypre_ij->solve(rel_tol, abs_tol, max_iter);

    get_solution(a_soln);
}

template <int MSS>
template <class MF, std::enable_if_t<IsFabArray<MF>::value &&
                                     std::is_same_v<typename MF::value_type,
                                                    HYPRE_Real>, int> FOO>
void
HypreSolver<MSS>::load_vectors (Vector<MF *> const& a_soln,
                                Vector<MF const*> const& a_rhs)
{
    BL_PROFILE("HypreSolver::load_vectors()");

    MFItInfo mfitinfo;
    mfitinfo.UseDefaultStream().DisableDeviceSync();

    Gpu::DeviceVector<HYPRE_Real> xvec;
    Gpu::DeviceVector<HYPRE_Real> bvec;
    for (MFIter mfi(*a_soln[0],mfitinfo); mfi.isValid(); ++mfi)
    {
        const HYPRE_Int nrows = m_nrows[mfi];
        if (nrows > 0)
        {
            xvec.clear();
            xvec.resize(nrows);
            bvec.clear();
            bvec.resize(nrows);
            auto* xp = xvec.data();
            auto* bp = bvec.data();

            HYPRE_Int const* rows = m_global_id_vec[mfi].data();

            HYPRE_Int offset = 0;
            for (int ivar = 0; ivar < m_nvars; ++ivar) {
                if (m_nrows_grid[ivar][mfi] > 0) {
                    auto const& xfab = a_soln[ivar]->const_array(mfi);
                    auto const& bfab = a_rhs [ivar]->const_array(mfi);
                    auto const& lid = m_local_id[ivar].const_array(mfi);
                    HYPRE_Real* x = xp + offset;
                    HYPRE_Real* b = bp + offset;
                    Box box = amrex::convert(mfi.validbox(),m_index_type[ivar]);
                    amrex::ParallelFor(box,[=] AMREX_GPU_DEVICE (int i, int j, int k)
                    {
                        if (lid(i,j,k) >= 0) {
                            x[lid(i,j,k)] = xfab(i,j,k);
                            b[lid(i,j,k)] = bfab(i,j,k);
                        }
                    });
                    offset += m_nrows_grid[ivar][mfi];
                }
            }

            Gpu::streamSynchronize();
            HYPRE_IJVectorSetValues(m_x, nrows, rows, xp);
            HYPRE_IJVectorSetValues(m_b, nrows, rows, bp);
            Gpu::hypreSynchronize();
        }
    }
}

template <int MSS>
template <class MF, std::enable_if_t<IsFabArray<MF>::value &&
                                     std::is_same_v<typename MF::value_type,
                                                    HYPRE_Real>, int> FOO>
void
HypreSolver<MSS>::get_solution (Vector<MF*> const& a_soln)
{
    BL_PROFILE("HypreSolver::get_solution()");

    MFItInfo mfitinfo;
    mfitinfo.UseDefaultStream().DisableDeviceSync();

    Gpu::DeviceVector<HYPRE_Real> xvec;
    for (MFIter mfi(*a_soln[0],mfitinfo); mfi.isValid(); ++mfi)
    {
        const HYPRE_Int nrows = m_nrows[mfi];
        if (nrows > 0)
        {
            xvec.clear();
            xvec.resize(nrows);
            auto* xp = xvec.data();

            HYPRE_Int const* rows = m_global_id_vec[mfi].data();

            HYPRE_IJVectorGetValues(m_x, nrows, rows, xp);
            Gpu::hypreSynchronize();

            HYPRE_Int offset = 0;
            for (int ivar = 0; ivar < m_nvars; ++ivar) {
                if (m_nrows_grid[ivar][mfi] > 0) {
                    auto const& xfab = a_soln[ivar]->array(mfi);
                    auto const& lid = m_local_id[ivar].const_array(mfi);
                    HYPRE_Real* x = xp + offset;
                    Box box = amrex::convert(mfi.validbox(),m_index_type[ivar]);
                    amrex::ParallelFor(box,[=] AMREX_GPU_DEVICE (int i, int j, int k)
                    {
                        if (lid(i,j,k) >= 0) {
                            xfab(i,j,k) = x[lid(i,j,k)];
                        }
                    });
                    offset += m_nrows_grid[ivar][mfi];
                }
            }
            Gpu::streamSynchronize();
        }
    }

    for (int ivar = 0; ivar < m_nvars; ++ivar) {
        amrex::OverrideSync(*a_soln[ivar], *m_owner_mask[ivar],
                            m_geom.periodicity());
    }
}

}

#endif