AMReX_MLMG.H
1#ifndef AMREX_ML_MG_H_
2#define AMREX_ML_MG_H_
3#include <AMReX_Config.H>
4
5#include <AMReX_MLLinOp.H>
6#include <AMReX_MLCGSolver.H>
7
8namespace amrex {
9
10template <typename MF>
11class MLMGT
12{
13public:
14
15 class error
16 : public std::runtime_error
17 {
18 public :
19 using std::runtime_error::runtime_error;
20 };
21
22 template <typename T> friend class MLCGSolverT;
23 template <typename M> friend class GMRESMLMGT;
24
25 using MFType = MF;
26 using FAB = typename MLLinOpT<MF>::FAB;
27 using RT = typename MLLinOpT<MF>::RT;
28
29 using BCMode = typename MLLinOpT<MF>::BCMode;
30 using Location = typename MLLinOpT<MF>::Location;
31
33 enum class CFStrategy : int {none,ghostnodes};
34
35 MLMGT (MLLinOpT<MF>& a_lp);
36 ~MLMGT ();
37
38 MLMGT (MLMGT<MF> const&) = delete;
39 MLMGT (MLMGT<MF> &&) = delete;
40 MLMGT<MF>& operator= (MLMGT<MF> const&) = delete;
41 MLMGT<MF>& operator= (MLMGT<MF> &&) = delete;
42
43 // Optional argument checkpoint_file is for debugging only.
44 template <typename AMF>
45 RT solve (const Vector<AMF*>& a_sol, const Vector<AMF const*>& a_rhs,
46 RT a_tol_rel, RT a_tol_abs, const char* checkpoint_file = nullptr);
47
48 template <typename AMF>
49 RT solve (std::initializer_list<AMF*> a_sol,
50 std::initializer_list<AMF const*> a_rhs,
51 RT a_tol_rel, RT a_tol_abs, const char* checkpoint_file = nullptr);
52
53 RT precond (Vector<MF*> const& a_sol, Vector<MF const*> const& a_rhs,
54 RT a_tol_rel, RT a_tol_abs);
55
56 template <typename AMF>
57 void getGradSolution (const Vector<Array<AMF*,AMREX_SPACEDIM> >& a_grad_sol,
58 Location a_loc = Location::FaceCenter);
59
60 template <typename AMF>
61 void getGradSolution (std::initializer_list<Array<AMF*,AMREX_SPACEDIM>> a_grad_sol,
62 Location a_loc = Location::FaceCenter);
63
67 template <typename AMF>
68 void getFluxes (const Vector<Array<AMF*,AMREX_SPACEDIM> >& a_flux,
69 Location a_loc = Location::FaceCenter);
70
71 template <typename AMF>
72 void getFluxes (std::initializer_list<Array<AMF*,AMREX_SPACEDIM>> a_flux,
73 Location a_loc = Location::FaceCenter);
74
75 template <typename AMF>
76 void getFluxes (const Vector<Array<AMF*,AMREX_SPACEDIM> >& a_flux,
77 const Vector<AMF*> & a_sol,
78 Location a_loc = Location::FaceCenter);
79
80 template <typename AMF>
81 void getFluxes (std::initializer_list<Array<AMF*,AMREX_SPACEDIM>> a_flux,
82 std::initializer_list<AMF*> a_sol,
83 Location a_loc = Location::FaceCenter);
84
85 template <typename AMF>
86 void getFluxes (const Vector<AMF*> & a_flux,
87 Location a_loc = Location::CellCenter);
88
89 template <typename AMF>
90 void getFluxes (std::initializer_list<AMF*> a_flux,
91 Location a_loc = Location::CellCenter);
92
93 template <typename AMF>
94 void getFluxes (const Vector<AMF*> & a_flux,
95 const Vector<AMF*> & a_sol,
96 Location a_loc = Location::CellCenter);
97
98 template <typename AMF>
99 void getFluxes (std::initializer_list<AMF*> a_flux,
100 std::initializer_list<AMF*> a_sol,
101 Location a_loc = Location::CellCenter);
102
103 void compResidual (const Vector<MF*>& a_res, const Vector<MF*>& a_sol,
104 const Vector<MF const*>& a_rhs);
105
106#ifdef AMREX_USE_EB
107 // Flux into the EB wall
108 void getEBFluxes (const Vector<MF*>& a_eb_flux);
109 void getEBFluxes (const Vector<MF*>& a_eb_flux, const Vector<MF*> & a_sol);
110#endif
111
117 void apply (const Vector<MF*>& out, const Vector<MF*>& in);
118
120 void applyPrecond (const Vector<MF*>& out, const Vector<MF*>& in);
121
122 [[nodiscard]] int getVerbose () const { return verbose; }
123 [[nodiscard]] int getBottomVerbose () const { return bottom_verbose; }
124
125 void incPrintIdentation ();
126 void decPrintIdentation ();
127
128 void setThrowException (bool t) noexcept { throw_exception = t; }
129 void setVerbose (int v) noexcept { verbose = v; }
130 void setMaxIter (int n) noexcept { max_iters = n; }
131 void setMaxFmgIter (int n) noexcept { max_fmg_iters = n; }
132 void setFixedIter (int nit) noexcept { do_fixed_number_of_iters = nit; }
133 void setPrecondIter (int nit) noexcept { max_precond_iters = nit; }
134
135 void setPreSmooth (int n) noexcept { nu1 = n; }
136 void setPostSmooth (int n) noexcept { nu2 = n; }
137 void setFinalSmooth (int n) noexcept { nuf = n; }
138 void setBottomSmooth (int n) noexcept { nub = n; }
139
140 void setBottomSolver (BottomSolver s) noexcept { bottom_solver = s; }
141 [[nodiscard]] BottomSolver getBottomSolver () const noexcept { return bottom_solver; }
142 void setCFStrategy (CFStrategy a_cf_strategy) noexcept {cf_strategy = a_cf_strategy;}
143 void setBottomVerbose (int v) noexcept { bottom_verbose = v; }
144 void setBottomMaxIter (int n) noexcept { bottom_maxiter = n; }
145 void setBottomTolerance (RT t) noexcept { bottom_reltol = t; }
146 void setBottomToleranceAbs (RT t) noexcept { bottom_abstol = t;}
147 [[nodiscard]] RT getBottomToleranceAbs () const noexcept{ return bottom_abstol; }
148
149 void setAlwaysUseBNorm (int flag) noexcept { always_use_bnorm = flag; }
150
151 void setFinalFillBC (int flag) noexcept { final_fill_bc = flag; }
152
153 [[nodiscard]] int numAMRLevels () const noexcept { return namrlevs; }
154
155 void setNSolve (int flag) noexcept { do_nsolve = flag; }
156 void setNSolveGridSize (int s) noexcept { nsolve_grid_size = s; }
157
158 void setNoGpuSync (bool do_not_sync) noexcept { do_no_sync_gpu = do_not_sync; }
159
160#if defined(AMREX_USE_HYPRE) && (AMREX_SPACEDIM > 1)
161 void setHypreInterface (Hypre::Interface f) noexcept {
162 // must use ij interface for EB
163#ifndef AMREX_USE_EB
164 hypre_interface = f;
165#else
166        amrex::ignore_unused(f);
167#endif
168 }
169
171 void setHypreOptionsNamespace(const std::string& prefix) noexcept
172 {
173 hypre_options_namespace = prefix;
174 }
175
176 void setHypreOldDefault (bool l) noexcept {hypre_old_default = l;}
177 void setHypreRelaxType (int n) noexcept {hypre_relax_type = n;}
178 void setHypreRelaxOrder (int n) noexcept {hypre_relax_order = n;}
179 void setHypreNumSweeps (int n) noexcept {hypre_num_sweeps = n;}
180 void setHypreStrongThreshold (Real t) noexcept {hypre_strong_threshold = t;}
181#endif
182
183 void prepareForFluxes (Vector<MF const*> const& a_sol);
184
185 template <typename AMF>
186 void prepareForSolve (Vector<AMF*> const& a_sol, Vector<AMF const*> const& a_rhs);
187
188 void prepareForNSolve ();
189
190 void prepareLinOp ();
191
192 void preparePrecond ();
193
194 void oneIter (int iter);
195
196 void miniCycle (int amrlev);
197
198 void mgVcycle (int amrlev, int mglev);
199 void mgFcycle ();
200
201 void bottomSolve ();
202 void NSolve (MLMGT<MF>& a_solver, MF& a_sol, MF& a_rhs);
203 void actualBottomSolve ();
204
205 void computeMLResidual (int amrlevmax);
206 void computeResidual (int alev);
207 void computeResWithCrseSolFineCor (int calev, int falev);
208 void computeResWithCrseCorFineCor (int falev);
209 void interpCorrection (int alev);
210 void interpCorrection (int alev, int mglev);
211 void addInterpCorrection (int alev, int mglev);
212
213 void computeResOfCorrection (int amrlev, int mglev);
214
215 RT ResNormInf (int alev, bool local = false);
216 RT MLResNormInf (int alevmax, bool local = false);
217 RT MLRhsNormInf (bool local = false);
218
219 void makeSolvable ();
220 void makeSolvable (int amrlev, int mglev, MF& mf);
221
222#if defined(AMREX_USE_HYPRE) && (AMREX_SPACEDIM > 1)
223 template <class TMF=MF,std::enable_if_t<std::is_same_v<TMF,MultiFab>,int> = 0>
224 void bottomSolveWithHypre (MF& x, const MF& b);
225#endif
226
227#if defined(AMREX_USE_PETSC) && (AMREX_SPACEDIM > 1)
228 template <class TMF=MF,std::enable_if_t<std::is_same_v<TMF,MultiFab>,int> = 0>
229 void bottomSolveWithPETSc (MF& x, const MF& b);
230#endif
231
232 int bottomSolveWithCG (MF& x, const MF& b, typename MLCGSolverT<MF>::Type type);
233
234 [[nodiscard]] RT getInitRHS () const noexcept { return m_rhsnorm0; }
235 // Initial composite residual
236 [[nodiscard]] RT getInitResidual () const noexcept { return m_init_resnorm0; }
237 // Final composite residual
238 [[nodiscard]] RT getFinalResidual () const noexcept { return m_final_resnorm0; }
239 // Residuals on the *finest* AMR level after each iteration
240 [[nodiscard]] Vector<RT> const& getResidualHistory () const noexcept { return m_iter_fine_resnorm0; }
241 [[nodiscard]] int getNumIters () const noexcept { return m_iter_fine_resnorm0.size(); }
242 [[nodiscard]] Vector<int> const& getNumCGIters () const noexcept { return m_niters_cg; }
243
245
246private:
247
248 bool precond_mode = false;
249 bool throw_exception = false;
250 int verbose = 1;
251
252 int max_iters = 200;
253 int do_fixed_number_of_iters = 0;
254 int max_precond_iters = 1;
255
256 int nu1 = 2;
257 int nu2 = 2;
258 int nuf = 8;
259 int nub = 0;
260
261 int max_fmg_iters = 0;
262
263 BottomSolver bottom_solver = BottomSolver::Default;
264 CFStrategy cf_strategy = CFStrategy::none;
265 int bottom_verbose = 0;
266 int bottom_maxiter = 200;
267 RT bottom_reltol = std::is_same<RT,double>() ? RT(1.e-4) : RT(1.e-3);
268 RT bottom_abstol = RT(-1.0);
269
270 int always_use_bnorm = 0;
271
272 int final_fill_bc = 0;
273
274 MLLinOpT<MF>& linop;
275 int ncomp;
276 int namrlevs;
277 int finest_amr_lev;
278
279 bool linop_prepared = false;
280 Long solve_called = 0;
281
283 int do_nsolve = false;
284 int nsolve_grid_size = 16;
285 std::unique_ptr<MLLinOpT<MF>> ns_linop;
286 std::unique_ptr<MLMGT<MF>> ns_mlmg;
287 std::unique_ptr<MF> ns_sol;
288 std::unique_ptr<MF> ns_rhs;
289
290 std::string print_ident;
291
292 bool do_no_sync_gpu = false;
293
295#if defined(AMREX_USE_HYPRE) && (AMREX_SPACEDIM > 1)
296 // Hypre::Interface hypre_interface = Hypre::Interface::structed;
297 // Hypre::Interface hypre_interface = Hypre::Interface::semi_structed;
298 Hypre::Interface hypre_interface = Hypre::Interface::ij;
299
300 std::unique_ptr<Hypre> hypre_solver;
301 std::unique_ptr<MLMGBndryT<MF>> hypre_bndry;
302 std::unique_ptr<HypreNodeLap> hypre_node_solver;
303
304 std::string hypre_options_namespace = "hypre";
305 bool hypre_old_default = true; // Falgout coarsening with modified classical interpolation
306 int hypre_relax_type = 6; // G-S/Jacobi hybrid relaxation
307 int hypre_relax_order = 1; // uses C/F relaxation
308 int hypre_num_sweeps = 2; // Sweeps on each level
309 Real hypre_strong_threshold = 0.25; // Hypre default is 0.25
310#endif
311
313#if defined(AMREX_USE_PETSC) && (AMREX_SPACEDIM > 1)
314 std::unique_ptr<PETScABecLap> petsc_solver;
315 std::unique_ptr<MLMGBndryT<MF>> petsc_bndry;
316#endif
317
325
327
337
340
341 RT m_rhsnorm0 = RT(-1.0);
342 RT m_init_resnorm0 = RT(-1.0);
343 RT m_final_resnorm0 = RT(-1.0);
344 Vector<int> m_niters_cg;
345 Vector<RT> m_iter_fine_resnorm0; // Residual for each iteration at the finest level
346
347 void checkPoint (const Vector<MultiFab*>& a_sol,
348 const Vector<MultiFab const*>& a_rhs,
349 RT a_tol_rel, RT a_tol_abs, const char* a_file_name) const;
350
351};
352
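// Usage sketch (not from this header; the operator type and the names geom,
// grids, dmap, lobc, hibc, phi and rhs are hypothetical). MLMGT is normally
// used through the MLMG alias together with a concrete linear operator such
// as MLABecLaplacian:
//
//     MLABecLaplacian mlabec({geom}, {grids}, {dmap});
//     mlabec.setDomainBC(lobc, hibc);      // physical domain BCs
//     mlabec.setLevelBC(0, &phi);          // boundary data on AMR level 0
//     mlabec.setScalars(ascalar, bscalar);
//     // ... set A and B coefficients via setACoeffs/setBCoeffs ...
//
//     MLMG mlmg(mlabec);
//     mlmg.setVerbose(1);
//     mlmg.setMaxIter(100);
//     Real err = mlmg.solve({&phi}, {&rhs}, 1.e-10, 0.0); // rel. and abs. tolerances
//
// After the solve, getGradSolution() and getFluxes() declared above can be
// used to recover gradients and fluxes of the solution.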
353template <typename MF>
354MLMGT<MF>::MLMGT (MLLinOpT<MF>& a_lp)
355    : linop(a_lp), ncomp(a_lp.getNComp()), namrlevs(a_lp.NAMRLevels()),
356 finest_amr_lev(a_lp.NAMRLevels()-1)
357{}
358
359template <typename MF> MLMGT<MF>::~MLMGT () = default;
360
361template <typename MF>
362template <typename AMF>
363auto
364MLMGT<MF>::solve (std::initializer_list<AMF*> a_sol,
365 std::initializer_list<AMF const*> a_rhs,
366 RT a_tol_rel, RT a_tol_abs, const char* checkpoint_file) -> RT
367{
368 return solve(Vector<AMF*>(std::move(a_sol)),
369 Vector<AMF const*>(std::move(a_rhs)),
370 a_tol_rel, a_tol_abs, checkpoint_file);
371}
372
373template <typename MF>
374template <typename AMF>
375auto
376MLMGT<MF>::solve (const Vector<AMF*>& a_sol, const Vector<AMF const*>& a_rhs,
377                  RT a_tol_rel, RT a_tol_abs, const char* checkpoint_file) -> RT
378{
379 BL_PROFILE("MLMG::solve()");
380
381 bool prev_in_single_stream_region = false;
382 bool prev_in_nosync_region = false;
383
384 if (do_no_sync_gpu) {
385 prev_in_single_stream_region = Gpu::setSingleStreamRegion(true);
386 prev_in_nosync_region = Gpu::setNoSyncRegion(true);
387 }
388
389 if constexpr (std::is_same<AMF,MultiFab>()) {
390 if (checkpoint_file != nullptr) {
391 checkPoint(a_sol, a_rhs, a_tol_rel, a_tol_abs, checkpoint_file);
392 }
393 }
394
395 if (bottom_solver == BottomSolver::Default) {
396 bottom_solver = linop.getDefaultBottomSolver();
397 }
398
399#if (defined(AMREX_USE_HYPRE) || defined(AMREX_USE_PETSC)) && (AMREX_SPACEDIM > 1)
400 if constexpr (IsFabArray_v<AMF>) {
401 if (bottom_solver == BottomSolver::hypre || bottom_solver == BottomSolver::petsc) {
402 int mo = linop.getMaxOrder();
403 if (a_sol[0]->hasEBFabFactory()) {
404 linop.setMaxOrder(2);
405 } else {
406 linop.setMaxOrder(std::min(3,mo)); // maxorder = 4 not supported
407 }
408 }
409 }
410#endif
411
412 bool is_nsolve = linop.m_parent;
413
414 auto solve_start_time = amrex::second();
415
416 RT& composite_norminf = m_final_resnorm0;
417
418 m_niters_cg.clear();
419 m_iter_fine_resnorm0.clear();
420
421 prepareForSolve(a_sol, a_rhs);
422
423 computeMLResidual(finest_amr_lev);
424
425 bool local = true;
426 RT resnorm0 = MLResNormInf(finest_amr_lev, local);
427 RT rhsnorm0 = MLRhsNormInf(local);
428 if (!is_nsolve) {
429 ParallelAllReduce::Max<RT>({resnorm0, rhsnorm0}, ParallelContext::CommunicatorSub());
430
431 if (verbose >= 1)
432 {
433 amrex::Print() << print_ident << "MLMG: Initial rhs = " << rhsnorm0 << "\n"
434 << print_ident << "MLMG: Initial residual (resid0) = " << resnorm0 << "\n";
435 }
436 }
437
438 m_init_resnorm0 = resnorm0;
439 m_rhsnorm0 = rhsnorm0;
440
441 RT max_norm;
442 std::string norm_name;
443 if (always_use_bnorm || rhsnorm0 >= resnorm0) {
444 norm_name = "bnorm";
445 max_norm = rhsnorm0;
446 } else {
447 norm_name = "resid0";
448 max_norm = resnorm0;
449 }
450 const RT res_target = std::max(a_tol_abs, std::max(a_tol_rel,RT(1.e-16))*max_norm);
451
452 if (!is_nsolve && resnorm0 <= res_target) {
453 composite_norminf = resnorm0;
454 if (verbose >= 1) {
455 amrex::Print() << print_ident << "MLMG: No iterations needed\n";
456 }
457 } else {
458 auto iter_start_time = amrex::second();
459 bool converged = false;
460
461 const int niters = do_fixed_number_of_iters ? do_fixed_number_of_iters : max_iters;
462 for (int iter = 0; iter < niters; ++iter)
463 {
464 oneIter(iter);
465
466 converged = false;
467
468 // Test convergence on the fine amr level
469 computeResidual(finest_amr_lev);
470
471 if (is_nsolve) { continue; }
472
473 RT fine_norminf = ResNormInf(finest_amr_lev);
474 m_iter_fine_resnorm0.push_back(fine_norminf);
475 composite_norminf = fine_norminf;
476 if (verbose >= 2) {
477 amrex::Print() << print_ident << "MLMG: Iteration " << std::setw(3) << iter+1 << " Fine resid/"
478 << norm_name << " = " << fine_norminf/max_norm << "\n";
479 }
480 bool fine_converged = (fine_norminf <= res_target);
481
482 if (namrlevs == 1 && fine_converged) {
483 converged = true;
484 } else if (fine_converged) {
485 // finest level is converged, but we still need to test the coarse levels
486 computeMLResidual(finest_amr_lev-1);
487 RT crse_norminf = MLResNormInf(finest_amr_lev-1);
488 if (verbose >= 2) {
489 amrex::Print() << print_ident << "MLMG: Iteration " << std::setw(3) << iter+1
490 << " Crse resid/" << norm_name << " = "
491 << crse_norminf/max_norm << "\n";
492 }
493 converged = (crse_norminf <= res_target);
494 composite_norminf = std::max(fine_norminf, crse_norminf);
495 } else {
496 converged = false;
497 }
498
499 if (converged) {
500 if (verbose >= 1) {
501 amrex::Print() << print_ident << "MLMG: Final Iter. " << iter+1
502 << " resid, resid/" << norm_name << " = "
503 << composite_norminf << ", "
504 << composite_norminf/max_norm << "\n";
505 }
506 break;
507 } else {
508 if (composite_norminf > RT(1.e20)*max_norm)
509 {
510 if (verbose > 0) {
511 amrex::Print() << print_ident << "MLMG: Failing to converge after " << iter+1 << " iterations."
512 << " resid, resid/" << norm_name << " = "
513 << composite_norminf << ", "
514 << composite_norminf/max_norm << "\n";
515 }
516
517 if ( throw_exception ) {
518 throw error("MLMG blew up.");
519 } else {
520 amrex::Abort("MLMG failing so lets stop here");
521 }
522 }
523 }
524 }
525
526 if (!converged && do_fixed_number_of_iters == 0) {
527 if (verbose > 0) {
528 amrex::Print() << print_ident << "MLMG: Failed to converge after " << max_iters << " iterations."
529 << " resid, resid/" << norm_name << " = "
530 << composite_norminf << ", "
531 << composite_norminf/max_norm << "\n";
532 }
533
534 if ( throw_exception ) {
535 throw error("MLMG failed to converge.");
536 } else {
537 amrex::Abort("MLMG failed.");
538 }
539 }
540 timer[iter_time] = amrex::second() - iter_start_time;
541 }
542
543 linop.postSolve(GetVecOfPtrs(sol));
544
545 IntVect ng_back = final_fill_bc ? IntVect(1) : IntVect(0);
546 if (linop.hasHiddenDimension()) {
547 ng_back[linop.hiddenDirection()] = 0;
548 }
549 for (int alev = 0; alev < namrlevs; ++alev)
550 {
551 if (!sol_is_alias[alev]) {
552 LocalCopy(*a_sol[alev], sol[alev], 0, 0, ncomp, ng_back);
553 }
554 }
555
556 timer[solve_time] = amrex::second() - solve_start_time;
557 if (verbose >= 1) {
558        ParallelReduce::Max<double>(timer.data(), timer.size(), 0,
559                                    ParallelContext::CommunicatorSub());
560        if (ParallelContext::MyProcSub() == 0)
561        {
562 amrex::AllPrint() << print_ident << "MLMG: Timers: Solve = " << timer[solve_time]
563 << " Iter = " << timer[iter_time]
564 << " Bottom = " << timer[bottom_time] << "\n";
565 }
566 }
567
568 ++solve_called;
569
570 if (do_no_sync_gpu) {
571 (void)Gpu::setSingleStreamRegion(prev_in_single_stream_region);
572 (void)Gpu::setNoSyncRegion(prev_in_nosync_region);
573 }
574
575 return composite_norminf;
576}
577
578template <typename MF>
579auto
580MLMGT<MF>::precond (Vector<MF*> const& a_sol, Vector<MF const*> const& a_rhs,
581                    RT a_tol_rel, RT a_tol_abs) -> RT
582{
583 precond_mode = true;
584 std::swap(max_precond_iters, do_fixed_number_of_iters);
585 linop.beginPrecondBC();
586
587 auto r = solve(a_sol, a_rhs, a_tol_rel, a_tol_abs);
588
589 linop.endPrecondBC();
590 std::swap(max_precond_iters, do_fixed_number_of_iters);
591 precond_mode = false;
592
593 return r;
594}
595
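// Sketch (an assumption, not from the original source): precond() is intended
// to be driven by an outer Krylov method (e.g. via the GMRESMLMGT friend class
// declared above), typically with a small fixed number of MG iterations and
// negative tolerances so that no convergence test cuts the cycle short. With
// hypothetical correction z and residual r:
//
//     mlmg.setPrecondIter(1);                           // one MG iteration per call
//     mlmg.precond({&z}, {&r}, Real(-1.0), Real(-1.0)); // z ~= A^{-1} r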
596template <typename MF>
597void
598MLMGT<MF>::prepareForFluxes (Vector<MF const*> const& a_sol)
599{
600 for (int alev = finest_amr_lev; alev >= 0; --alev) {
601 const MF* crse_bcdata = (alev > 0) ? a_sol[alev-1] : nullptr;
602 linop.prepareForFluxes(alev, crse_bcdata);
603 }
604}
605
606template <typename MF>
607template <typename AMF>
608void
609MLMGT<MF>::getGradSolution (const Vector<Array<AMF*,AMREX_SPACEDIM> >& a_grad_sol, Location a_loc)
610{
611 BL_PROFILE("MLMG::getGradSolution()");
612 for (int alev = 0; alev <= finest_amr_lev; ++alev) {
613 if constexpr (std::is_same<AMF,MF>()) {
614 linop.compGrad(alev, a_grad_sol[alev], sol[alev], a_loc);
615        } else {
616            Array<MF,AMREX_SPACEDIM> grad_sol;
617            for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
618 auto const& amf = *(a_grad_sol[alev][idim]);
619 grad_sol[idim].define(boxArray(amf), DistributionMap(amf), ncomp, 0);
620 }
621 linop.compGrad(alev, GetArrOfPtrs(grad_sol), sol[alev], a_loc);
622 for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
623 LocalCopy(*a_grad_sol[alev][idim], grad_sol[idim], 0, 0, ncomp, IntVect(0));
624 }
625 }
626 }
627}
628
629template <typename MF>
630template <typename AMF>
631void
632MLMGT<MF>::getGradSolution (std::initializer_list<Array<AMF*,AMREX_SPACEDIM>> a_grad_sol, Location a_loc)
633{
634 getGradSolution(Vector<Array<AMF*,AMREX_SPACEDIM>>(std::move(a_grad_sol)), a_loc);
635}
636
637template <typename MF>
638template <typename AMF>
639void
640MLMGT<MF>::getFluxes (const Vector<Array<AMF*,AMREX_SPACEDIM> >& a_flux,
641                      Location a_loc)
642{
643 if (!linop.isCellCentered()) {
644 amrex::Abort("Calling wrong getFluxes for nodal solver");
645 }
646
647 AMREX_ASSERT(sol.size() == a_flux.size());
648
649 if constexpr (std::is_same<AMF,MF>()) {
650 getFluxes(a_flux, GetVecOfPtrs(sol), a_loc);
651 } else {
652 Vector<Array<MF,AMREX_SPACEDIM>> fluxes(namrlevs);
653 for (int ilev = 0; ilev < namrlevs; ++ilev) {
654 for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
655 auto const& amf = *(a_flux[ilev][idim]);
656 fluxes[ilev][idim].define(boxArray(amf), DistributionMap(amf), ncomp, 0);
657 }
658 }
659 getFluxes(GetVecOfArrOfPtrs(fluxes), GetVecOfPtrs(sol), a_loc);
660 for (int ilev = 0; ilev < namrlevs; ++ilev) {
661 for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
662 LocalCopy(*a_flux[ilev][idim], fluxes[ilev][idim], 0, 0, ncomp, IntVect(0));
663 }
664 }
665 }
666}
667
668template <typename MF>
669template <typename AMF>
670void
671MLMGT<MF>::getFluxes (std::initializer_list<Array<AMF*,AMREX_SPACEDIM>> a_flux,
672                      Location a_loc)
673{
674 getFluxes(Vector<Array<AMF*,AMREX_SPACEDIM>>(std::move(a_flux)), a_loc);
675}
676
677template <typename MF>
678template <typename AMF>
679void
680MLMGT<MF>::getFluxes (const Vector<Array<AMF*,AMREX_SPACEDIM> >& a_flux,
681                      const Vector<AMF*>& a_sol, Location a_loc)
682{
683 BL_PROFILE("MLMG::getFluxes()");
684
685 if (!linop.isCellCentered()) {
686 amrex::Abort("Calling wrong getFluxes for nodal solver");
687 }
688
689 if constexpr (std::is_same<AMF,MF>()) {
690 linop.getFluxes(a_flux, a_sol, a_loc);
691 } else {
692 Vector<Array<MF,AMREX_SPACEDIM>> fluxes(namrlevs);
693 for (int ilev = 0; ilev < namrlevs; ++ilev) {
694 for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
695 auto const& amf = *(a_flux[ilev][idim]);
696 fluxes[ilev][idim].define(boxArray(amf), DistributionMap(amf), ncomp, 0);
697 }
698 LocalCopy(sol[ilev], *a_sol[ilev], 0, 0, ncomp, nGrowVect(sol[ilev]));
699 }
700 linop.getFluxes(GetVecOfArrOfPtrs(fluxes), GetVecOfPtrs(sol), a_loc);
701 for (int ilev = 0; ilev < namrlevs; ++ilev) {
702 for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
703 LocalCopy(*a_flux[ilev][idim], fluxes[ilev][idim], 0, 0, ncomp, IntVect(0));
704 }
705 }
706 }
707}
708
709template <typename MF>
710template <typename AMF>
711void
712MLMGT<MF>::getFluxes (std::initializer_list<Array<AMF*,AMREX_SPACEDIM>> a_flux,
713                      std::initializer_list<AMF*> a_sol, Location a_loc)
714{
715 getFluxes(Vector<Array<AMF*,AMREX_SPACEDIM>>(std::move(a_flux)),
716 Vector<AMF*>(std::move(a_sol)), a_loc);
717}
718
719template <typename MF>
720template <typename AMF>
721void
722MLMGT<MF>::getFluxes (const Vector<AMF*>& a_flux, Location a_loc)
723{
724 AMREX_ASSERT(sol.size() == a_flux.size());
725 if constexpr (std::is_same<AMF,MF>()) {
726 getFluxes(a_flux, GetVecOfPtrs(sol), a_loc);
727 } else {
728 Vector<MF> fluxes(namrlevs);
729 for (int ilev = 0; ilev < namrlevs; ++ilev) {
730 auto const& amf = *a_flux[ilev];
731 fluxes[ilev].define(boxArray(amf), DistributionMap(amf), ncomp, 0);
732 }
733 getFluxes(GetVecOfPtrs(fluxes), GetVecOfPtrs(sol), a_loc);
734 for (int ilev = 0; ilev < namrlevs; ++ilev) {
735 LocalCopy(*a_flux[ilev], fluxes[ilev], 0, 0, ncomp, IntVect(0));
736 }
737 }
738}
739
740template <typename MF>
741template <typename AMF>
742void
743MLMGT<MF>::getFluxes (std::initializer_list<AMF*> a_flux, Location a_loc)
744{
745 getFluxes(Vector<AMF*>(std::move(a_flux)), a_loc);
746}
747
748template <typename MF>
749template <typename AMF>
750void
751MLMGT<MF>::getFluxes (const Vector<AMF*>& a_flux,
752                      const Vector<AMF*>& a_sol, Location /*a_loc*/)
753{
754 AMREX_ASSERT(nComp(*a_flux[0]) >= AMREX_SPACEDIM);
755
756 if constexpr (! std::is_same<AMF,MF>()) {
757 for (int alev = 0; alev < namrlevs; ++alev) {
758 LocalCopy(sol[alev], *a_sol[alev], 0, 0, ncomp, nGrowVect(sol[alev]));
759 }
760 }
761
762 if (linop.isCellCentered())
763 {
764 Vector<Array<MF,AMREX_SPACEDIM> > ffluxes(namrlevs);
765 for (int alev = 0; alev < namrlevs; ++alev) {
766 for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
767 const int mglev = 0;
768 int nghost = 0;
769 if (cf_strategy == CFStrategy::ghostnodes) { nghost = linop.getNGrow(alev); }
770                ffluxes[alev][idim].define(amrex::convert(linop.m_grids[alev][mglev],
771                                                          IntVect::TheDimensionVector(idim)),
772                                           linop.m_dmap[alev][mglev], ncomp, nghost, MFInfo(),
773 *linop.m_factory[alev][mglev]);
774 }
775 }
776 if constexpr (std::is_same<AMF,MF>()) {
777 getFluxes(amrex::GetVecOfArrOfPtrs(ffluxes), a_sol, Location::FaceCenter);
778 } else {
779 getFluxes(amrex::GetVecOfArrOfPtrs(ffluxes), GetVecOfPtrs(sol), Location::FaceCenter);
780 }
781 for (int alev = 0; alev < namrlevs; ++alev) {
782#ifdef AMREX_USE_EB
783 EB_average_face_to_cellcenter(*a_flux[alev], 0, amrex::GetArrOfConstPtrs(ffluxes[alev]));
784#else
785 average_face_to_cellcenter(*a_flux[alev], 0, amrex::GetArrOfConstPtrs(ffluxes[alev]));
786#endif
787 }
788
789 } else {
790 if constexpr (std::is_same<AMF,MF>()) {
791 linop.getFluxes(a_flux, a_sol);
792 } else {
793 Vector<MF> fluxes(namrlevs);
794 for (int ilev = 0; ilev < namrlevs; ++ilev) {
795 auto const& amf = *a_flux[ilev];
796 fluxes[ilev].define(boxArray(amf), DistributionMap(amf), ncomp, 0);
797 }
798 linop.getFluxes(GetVecOfPtrs(fluxes), GetVecOfPtrs(sol));
799 for (int ilev = 0; ilev < namrlevs; ++ilev) {
800 LocalCopy(*a_flux[ilev], fluxes[ilev], 0, 0, ncomp, IntVect(0));
801 }
802 }
803 }
804}
805
806template <typename MF>
807template <typename AMF>
808void
809MLMGT<MF>::getFluxes (std::initializer_list<AMF*> a_flux,
810 std::initializer_list<AMF*> a_sol, Location a_loc)
811{
812 getFluxes(Vector<AMF*>(std::move(a_flux)),
813 Vector<AMF*>(std::move(a_sol)), a_loc);
814}
815
816#ifdef AMREX_USE_EB
817template <typename MF>
818void
819MLMGT<MF>::getEBFluxes (const Vector<MF*>& a_eb_flux)
820{
821 if (!linop.isCellCentered()) {
822 amrex::Abort("getEBFluxes is for cell-centered only");
823 }
824
825 AMREX_ASSERT(sol.size() == a_eb_flux.size());
826 getEBFluxes(a_eb_flux, GetVecOfPtrs(sol));
827}
828
829template <typename MF>
830void
831MLMGT<MF>::getEBFluxes (const Vector<MF*>& a_eb_flux, const Vector<MF*>& a_sol)
832{
833 BL_PROFILE("MLMG::getEBFluxes()");
834
835 if (!linop.isCellCentered()) {
836 amrex::Abort("getEBFluxes is for cell-centered only");
837 }
838
839 linop.getEBFluxes(a_eb_flux, a_sol);
840}
841#endif
842
843template <typename MF>
844void
845MLMGT<MF>::compResidual (const Vector<MF*>& a_res, const Vector<MF*>& a_sol,
846                         const Vector<MF const*>& a_rhs)
847{
848 BL_PROFILE("MLMG::compResidual()");
849
850 IntVect ng_sol(1);
851 if (linop.hasHiddenDimension()) { ng_sol[linop.hiddenDirection()] = 0; }
852
853 sol.resize(namrlevs);
854 sol_is_alias.resize(namrlevs,true);
855 for (int alev = 0; alev < namrlevs; ++alev)
856 {
857 if (cf_strategy == CFStrategy::ghostnodes || nGrowVect(*a_sol[alev]) == ng_sol)
858 {
859 sol[alev] = linop.makeAlias(*a_sol[alev]);
860 sol_is_alias[alev] = true;
861 }
862 else
863 {
864 if (sol_is_alias[alev])
865 {
866 sol[alev] = linop.make(alev, 0, ng_sol);
867 }
868 LocalCopy(sol[alev], *a_sol[alev], 0, 0, ncomp, IntVect(0));
869 }
870 }
871
872 prepareLinOp();
873
874 const auto& amrrr = linop.AMRRefRatio();
875
876 for (int alev = finest_amr_lev; alev >= 0; --alev) {
877 const MF* crse_bcdata = (alev > 0) ? &(sol[alev-1]) : nullptr;
878 const MF* prhs = a_rhs[alev];
879#if (AMREX_SPACEDIM != 3)
880 int nghost = (cf_strategy == CFStrategy::ghostnodes) ? linop.getNGrow(alev) : 0;
881 MF rhstmp(boxArray(*prhs), DistributionMap(*prhs), ncomp, nghost,
882 MFInfo(), *linop.Factory(alev));
883 LocalCopy(rhstmp, *prhs, 0, 0, ncomp, IntVect(nghost));
884 linop.applyMetricTerm(alev, 0, rhstmp);
885 linop.unimposeNeumannBC(alev, rhstmp);
886 linop.applyInhomogNeumannTerm(alev, rhstmp);
887 prhs = &rhstmp;
888#endif
889 linop.solutionResidual(alev, *a_res[alev], sol[alev], *prhs, crse_bcdata);
890 if (alev < finest_amr_lev) {
891 linop.reflux(alev, *a_res[alev], sol[alev], *prhs,
892 *a_res[alev+1], sol[alev+1], *a_rhs[alev+1]);
893 if (linop.isCellCentered()) {
894#ifdef AMREX_USE_EB
895 EB_average_down(*a_res[alev+1], *a_res[alev], 0, ncomp, amrrr[alev]);
896#else
897 average_down(*a_res[alev+1], *a_res[alev], 0, ncomp, amrrr[alev]);
898#endif
899 }
900 }
901 }
902
903
904#if (AMREX_SPACEDIM != 3)
905 for (int alev = 0; alev <= finest_amr_lev; ++alev) {
906 linop.unapplyMetricTerm(alev, 0, *a_res[alev]);
907 }
908#endif
909}
910
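// Compute out = L(in) on all AMR levels (including reflux corrections at
// coarse/fine interfaces): the loop below forms the residual rh - L(in),
// where rh carries only the inhomogeneous Neumann contribution, and the final
// Scale by -1 turns it into L(in) - rh (i.e. L(in) when rh is zero).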
911template <typename MF>
912void
913MLMGT<MF>::apply (const Vector<MF*>& out, const Vector<MF*>& a_in)
914{
915 BL_PROFILE("MLMG::apply()");
916
917 Vector<MF*> in(namrlevs);
918 Vector<MF> in_raii(namrlevs);
919 Vector<MF> rh(namrlevs);
920 int nghost = 0;
921 IntVect ng_sol(1);
922 if (linop.hasHiddenDimension()) { ng_sol[linop.hiddenDirection()] = 0; }
923
924 for (int alev = 0; alev < namrlevs; ++alev)
925 {
926 if (cf_strategy == CFStrategy::ghostnodes)
927 {
928 nghost = linop.getNGrow(alev);
929 in[alev] = a_in[alev];
930 }
931 else if (nGrowVect(*a_in[alev]) == ng_sol)
932 {
933 in[alev] = a_in[alev];
934 }
935 else
936 {
937 IntVect ng = ng_sol;
938 if (cf_strategy == CFStrategy::ghostnodes) { ng = IntVect(nghost); }
939 in_raii[alev] = linop.make(alev, 0, ng);
940 LocalCopy(in_raii[alev], *a_in[alev], 0, 0, ncomp, IntVect(nghost));
941 in[alev] = &(in_raii[alev]);
942 }
943 rh[alev] = linop.make(alev, 0, IntVect(nghost));
944 setVal(rh[alev], RT(0.0));
945 }
946
947 prepareLinOp();
948
949 for (int alev = 0; alev < namrlevs; ++alev) {
950 linop.applyInhomogNeumannTerm(alev, rh[alev]);
951 }
952
953 const auto& amrrr = linop.AMRRefRatio();
954
955 for (int alev = finest_amr_lev; alev >= 0; --alev) {
956 const MF* crse_bcdata = (alev > 0) ? in[alev-1] : nullptr;
957 linop.solutionResidual(alev, *out[alev], *in[alev], rh[alev], crse_bcdata);
958 if (alev < finest_amr_lev) {
959 linop.reflux(alev, *out[alev], *in[alev], rh[alev],
960 *out[alev+1], *in[alev+1], rh[alev+1]);
961 if (linop.isCellCentered()) {
962 if constexpr (IsMultiFabLike_v<MF>) {
963#ifdef AMREX_USE_EB
964 EB_average_down(*out[alev+1], *out[alev], 0, nComp(*out[alev]), amrrr[alev]);
965#else
966 average_down(*out[alev+1], *out[alev], 0, nComp(*out[alev]), amrrr[alev]);
967#endif
968 } else {
969 amrex::Abort("MLMG: TODO average_down for non-MultiFab");
970 }
971 }
972 }
973 }
974
975#if (AMREX_SPACEDIM != 3)
976 for (int alev = 0; alev <= finest_amr_lev; ++alev) {
977 linop.unapplyMetricTerm(alev, 0, *out[alev]);
978 }
979#endif
980
981 for (int alev = 0; alev <= finest_amr_lev; ++alev) {
982 if (cf_strategy == CFStrategy::ghostnodes) { nghost = linop.getNGrow(alev); }
983 Scale(*out[alev], RT(-1), 0, nComp(*out[alev]), nghost);
984 }
985}
986
987template <typename MF>
988void
989MLMGT<MF>::applyPrecond (const Vector<MF*>& out, const Vector<MF*>& in)
990{
991 precond_mode = true;
992 linop.beginPrecondBC();
993 apply(out, in);
994 linop.endPrecondBC();
995 precond_mode = false;
996}
997
998template <typename MF>
999template <typename AMF>
1000void
1001MLMGT<MF>::prepareForSolve (Vector<AMF*> const& a_sol, Vector<AMF const*> const& a_rhs)
1002{
1003 BL_PROFILE("MLMG::prepareForSolve()");
1004
1005 AMREX_ASSERT(namrlevs <= a_sol.size());
1006 AMREX_ASSERT(namrlevs <= a_rhs.size());
1007
1008 timer.assign(ntimers, 0.0);
1009
1010 IntVect ng_rhs(0);
1011 IntVect ng_sol(1);
1012 if (linop.hasHiddenDimension()) { ng_sol[linop.hiddenDirection()] = 0; }
1013
1014 if (!linop_prepared) {
1015 linop.prepareForSolve();
1016 linop_prepared = true;
1017 } else if (linop.needsUpdate()) {
1018 linop.update();
1019
1020#if defined(AMREX_USE_HYPRE) && (AMREX_SPACEDIM > 1)
1021 hypre_solver.reset();
1022 hypre_bndry.reset();
1023 hypre_node_solver.reset();
1024#endif
1025
1026#ifdef AMREX_USE_PETSC
1027 petsc_solver.reset();
1028 petsc_bndry.reset();
1029#endif
1030 }
1031
1032 sol.resize(namrlevs);
1033 sol_is_alias.resize(namrlevs,false);
1034 for (int alev = 0; alev < namrlevs; ++alev)
1035 {
1036 if (cf_strategy == CFStrategy::ghostnodes)
1037 {
1038 if constexpr (std::is_same<AMF,MF>()) {
1039 sol[alev] = linop.makeAlias(*a_sol[alev]);
1040 sol_is_alias[alev] = true;
1041 } else {
1042 amrex::Abort("Type conversion not supported for CFStrategy::ghostnodes");
1043 }
1044 }
1045 else
1046 {
1047 if (nGrowVect(*a_sol[alev]) == ng_sol) {
1048 if constexpr (std::is_same<AMF,MF>()) {
1049 sol[alev] = linop.makeAlias(*a_sol[alev]);
1050 sol_is_alias[alev] = true;
1051 }
1052 }
1053 if (!sol_is_alias[alev]) {
1054 if (!solve_called) {
1055 sol[alev] = linop.make(alev, 0, ng_sol);
1056 }
1057 LocalCopy(sol[alev], *a_sol[alev], 0, 0, ncomp, IntVect(0));
1058 setBndry(sol[alev], RT(0.0), 0, ncomp);
1059 }
1060 }
1061 }
1062
1063 rhs.resize(namrlevs);
1064 for (int alev = 0; alev < namrlevs; ++alev)
1065 {
1066 if (cf_strategy == CFStrategy::ghostnodes) { ng_rhs = IntVect(linop.getNGrow(alev)); }
1067 if (!solve_called) {
1068 rhs[alev] = linop.make(alev, 0, ng_rhs);
1069 }
1070 LocalCopy(rhs[alev], *a_rhs[alev], 0, 0, ncomp, ng_rhs);
1071 linop.applyMetricTerm(alev, 0, rhs[alev]);
1072 linop.unimposeNeumannBC(alev, rhs[alev]);
1073 linop.applyInhomogNeumannTerm(alev, rhs[alev]);
1074 linop.applyOverset(alev, rhs[alev]);
1075 if ( ! precond_mode) {
1076            bool r = linop.scaleRHS(alev, &(rhs[alev]));
1077            amrex::ignore_unused(r);
1078        }
1079
1080#ifdef AMREX_USE_EB
1081 const auto *factory = dynamic_cast<EBFArrayBoxFactory const*>(linop.Factory(alev));
1082 if (factory && !factory->isAllRegular()) {
1083 if constexpr (std::is_same<MF,MultiFab>()) {
1084 EB_set_covered(rhs[alev], 0, ncomp, 0, RT(0.0));
1085 EB_set_covered(sol[alev], 0, ncomp, 0, RT(0.0));
1086 } else {
1087 amrex::Abort("TODO: MLMG with EB only works with MultiFab");
1088 }
1089 }
1090#endif
1091 }
1092
1093 for (int falev = finest_amr_lev; falev > 0; --falev)
1094 {
1095 linop.averageDownSolutionRHS(falev-1, sol[falev-1], rhs[falev-1], sol[falev], rhs[falev]);
1096 }
1097
1098 // enforce solvability if appropriate
1099 if (linop.isSingular(0) && linop.getEnforceSingularSolvable())
1100 {
1101 makeSolvable();
1102 }
1103
1104 IntVect ng = linop.getNGrowVectRestriction();
1105 if (cf_strategy == CFStrategy::ghostnodes) { ng = ng_rhs; }
1106 if (!solve_called) {
1107 linop.make(res, ng);
1108 linop.make(rescor, ng);
1109 }
1110 for (int alev = 0; alev <= finest_amr_lev; ++alev)
1111 {
1112 const int nmglevs = linop.NMGLevels(alev);
1113 for (int mglev = 0; mglev < nmglevs; ++mglev)
1114 {
1115 setVal(res [alev][mglev], RT(0.0));
1116 setVal(rescor[alev][mglev], RT(0.0));
1117 }
1118 }
1119
1120 if (cf_strategy != CFStrategy::ghostnodes) { ng = ng_sol; }
1121 cor.resize(namrlevs);
1122 for (int alev = 0; alev <= finest_amr_lev; ++alev)
1123 {
1124 const int nmglevs = linop.NMGLevels(alev);
1125 cor[alev].resize(nmglevs);
1126 for (int mglev = 0; mglev < nmglevs; ++mglev)
1127 {
1128 if (!solve_called) {
1129 IntVect _ng = ng;
1130 if (cf_strategy == CFStrategy::ghostnodes) { _ng=IntVect(linop.getNGrow(alev,mglev)); }
1131 cor[alev][mglev] = linop.make(alev, mglev, _ng);
1132 }
1133 setVal(cor[alev][mglev], RT(0.0));
1134 }
1135 }
1136
1137 cor_hold.resize(std::max(namrlevs-1,1));
1138 {
1139 const int alev = 0;
1140 const int nmglevs = linop.NMGLevels(alev);
1141 cor_hold[alev].resize(nmglevs);
1142 for (int mglev = 0; mglev < nmglevs-1; ++mglev)
1143 {
1144 if (!solve_called) {
1145 IntVect _ng = ng;
1146 if (cf_strategy == CFStrategy::ghostnodes) { _ng=IntVect(linop.getNGrow(alev,mglev)); }
1147 cor_hold[alev][mglev] = linop.make(alev, mglev, _ng);
1148 }
1149 setVal(cor_hold[alev][mglev], RT(0.0));
1150 }
1151 }
1152 for (int alev = 1; alev < finest_amr_lev; ++alev)
1153 {
1154 cor_hold[alev].resize(1);
1155 if (!solve_called) {
1156 IntVect _ng = ng;
1157 if (cf_strategy == CFStrategy::ghostnodes) { _ng=IntVect(linop.getNGrow(alev)); }
1158 cor_hold[alev][0] = linop.make(alev, 0, _ng);
1159 }
1160 setVal(cor_hold[alev][0], RT(0.0));
1161 }
1162
1163 if (linop.m_parent // no embedded N-Solve
1164 || !linop.supportNSolve())
1165 {
1166 do_nsolve = false;
1167 }
1168
1169 if (do_nsolve && ns_linop == nullptr)
1170 {
1171 prepareForNSolve();
1172 }
1173
1174 if (verbose >= 2) {
1175 amrex::Print() << print_ident << "MLMG: # of AMR levels: " << namrlevs << "\n"
1176 << print_ident << " # of MG levels on the coarsest AMR level: " << linop.NMGLevels(0)
1177 << "\n";
1178 if (ns_linop) {
1179 amrex::Print() << print_ident << " # of MG levels in N-Solve: " << ns_linop->NMGLevels(0) << "\n"
1180 << print_ident << " # of grids in N-Solve: " << ns_linop->m_grids[0][0].size() << "\n";
1181 }
1182 }
1183}
1184
1185template <typename MF>
1186void
1187MLMGT<MF>::prepareLinOp ()
1188{
1189 if (!linop_prepared) {
1190 linop.prepareForSolve();
1191 linop_prepared = true;
1192 } else if (linop.needsUpdate()) {
1193 linop.update();
1194 }
1195}
1196
1197template <typename MF>
1198void
1199MLMGT<MF>::preparePrecond ()
1200{
1201 prepareLinOp();
1202 linop.preparePrecond();
1203}
1204
1205template <typename MF>
1206void
1207MLMGT<MF>::prepareForNSolve ()
1208{
1209 if constexpr (IsMultiFabLike_v<MF>) {
1210 ns_linop = linop.makeNLinOp(nsolve_grid_size);
1211
1212 int nghost = 0;
1213 if (cf_strategy == CFStrategy::ghostnodes) { nghost = linop.getNGrow(); }
1214
1215 const BoxArray& ba = (*ns_linop).m_grids[0][0];
1216 const DistributionMapping& dm =(*ns_linop).m_dmap[0][0];
1217
1218 int ng = 1;
1219 if (cf_strategy == CFStrategy::ghostnodes) { ng = nghost; }
1220 ns_sol = std::make_unique<MF>(ba, dm, ncomp, ng, MFInfo(), *(ns_linop->Factory(0,0)));
1221 ng = 0;
1222 if (cf_strategy == CFStrategy::ghostnodes) { ng = nghost; }
1223 ns_rhs = std::make_unique<MF>(ba, dm, ncomp, ng, MFInfo(), *(ns_linop->Factory(0,0)));
1224 setVal(*ns_sol, RT(0.0));
1225 setVal(*ns_rhs, RT(0.0));
1226
1227 ns_linop->setLevelBC(0, ns_sol.get());
1228
1229 ns_mlmg = std::make_unique<MLMGT<MF>>(*ns_linop);
1230 ns_mlmg->setVerbose(0);
1231 ns_mlmg->setFixedIter(1);
1232 ns_mlmg->setMaxFmgIter(20);
1233 ns_mlmg->setBottomSolver(BottomSolver::smoother);
1234 }
1235}
1236
1237// in : Residual (res) on the finest AMR level
1238// out : sol on all AMR levels
1239template <typename MF>
1240void MLMGT<MF>::oneIter (int iter)
1241{
1242 BL_PROFILE("MLMG::oneIter()");
1243
1244 for (int alev = finest_amr_lev; alev > 0; --alev)
1245 {
1246 miniCycle(alev);
1247
1248 IntVect nghost(0);
1249 if (cf_strategy == CFStrategy::ghostnodes) { nghost = IntVect(linop.getNGrow(alev)); }
1250 LocalAdd(sol[alev], cor[alev][0], 0, 0, ncomp, nghost);
1251
1252 // compute residual for the coarse AMR level
1253 computeResWithCrseSolFineCor(alev-1,alev);
1254
1255 if (alev != finest_amr_lev) {
1256 std::swap(cor_hold[alev][0], cor[alev][0]); // save it for the up cycle
1257 }
1258 }
1259
1260 // coarsest amr level
1261 {
1262 // enforce solvability if appropriate
1263 if (linop.isSingular(0) && linop.getEnforceSingularSolvable())
1264 {
1265 makeSolvable(0,0,res[0][0]);
1266 }
1267
1268 if (iter < max_fmg_iters) {
1269 mgFcycle();
1270 } else {
1271 mgVcycle(0, 0);
1272 }
1273
1274 IntVect nghost(0);
1275 if (cf_strategy == CFStrategy::ghostnodes) { nghost = IntVect(linop.getNGrow(0)); }
1276 LocalAdd(sol[0], cor[0][0], 0, 0, ncomp, nghost);
1277 }
1278
1279 for (int alev = 1; alev <= finest_amr_lev; ++alev)
1280 {
1281 // (Fine AMR correction) = I(Coarse AMR correction)
1282 interpCorrection(alev);
1283
1284 IntVect nghost(0);
1285 if (cf_strategy == CFStrategy::ghostnodes) { nghost = IntVect(linop.getNGrow(alev)); }
1286 LocalAdd(sol[alev], cor[alev][0], 0, 0, ncomp, nghost);
1287
1288 if (alev != finest_amr_lev) {
1289 LocalAdd(cor_hold[alev][0], cor[alev][0], 0, 0, ncomp, nghost);
1290 }
1291
1292 // Update fine AMR level correction
1293 computeResWithCrseCorFineCor(alev);
1294
1295 miniCycle(alev);
1296
1297 LocalAdd(sol[alev], cor[alev][0], 0, 0, ncomp, nghost);
1298
1299 if (alev != finest_amr_lev) {
1300 LocalAdd(cor[alev][0], cor_hold[alev][0], 0, 0, ncomp, nghost);
1301 }
1302 }
1303
1304 linop.averageDownAndSync(sol);
1305}
1306
1307template <typename MF>
1308void
1309MLMGT<MF>::miniCycle (int amrlev)
1310{
1311 BL_PROFILE("MLMG::miniCycle()");
1312 const int mglev = 0;
1313 mgVcycle(amrlev, mglev);
1314}
1315
1316// in : Residual (res)
1317// out : Correction (cor) from bottom to this function's local top
1318template <typename MF>
1319void
1320MLMGT<MF>::mgVcycle (int amrlev, int mglev_top)
1321{
1322 BL_PROFILE("MLMG::mgVcycle()");
1323
1324 const int mglev_bottom = linop.NMGLevels(amrlev) - 1;
1325
1326 for (int mglev = mglev_top; mglev < mglev_bottom; ++mglev)
1327 {
1328 BL_PROFILE_VAR("MLMG::mgVcycle_down::"+std::to_string(mglev), blp_mgv_down_lev);
1329
1330 if (verbose >= 4)
1331 {
1332 RT norm = norminf(res[amrlev][mglev],0,ncomp,IntVect(0));
1333 amrex::Print() << print_ident << "AT LEVEL " << amrlev << " " << mglev
1334 << " DN: Norm before smooth " << norm << "\n";
1335 }
1336
1337 setVal(cor[amrlev][mglev], RT(0.0));
1338 bool skip_fillboundary = true;
1339 linop.smooth(amrlev, mglev, cor[amrlev][mglev], res[amrlev][mglev], skip_fillboundary, nu1);
1340
1341 // rescor = res - L(cor)
1342 computeResOfCorrection(amrlev, mglev);
1343
1344 if (verbose >= 4)
1345 {
1346 RT norm = norminf(rescor[amrlev][mglev],0,ncomp,IntVect(0));
1347 amrex::Print() << print_ident << "AT LEVEL " << amrlev << " " << mglev
1348 << " DN: Norm after smooth " << norm << "\n";
1349 }
1350
1351 // res_crse = R(rescor_fine); this provides res/b to the level below
1352 linop.restriction(amrlev, mglev+1, res[amrlev][mglev+1], rescor[amrlev][mglev]);
1353 }
1354
1355 BL_PROFILE_VAR("MLMG::mgVcycle_bottom", blp_bottom);
1356 if (amrlev == 0)
1357 {
1358 if (verbose >= 4)
1359 {
1360 RT norm = norminf(res[amrlev][mglev_bottom],0,ncomp,IntVect(0));
1361 amrex::Print() << print_ident << "AT LEVEL " << amrlev << " " << mglev_bottom
1362 << " DN: Norm before bottom " << norm << "\n";
1363 }
1364 bottomSolve();
1365 if (verbose >= 4)
1366 {
1367 computeResOfCorrection(amrlev, mglev_bottom);
1368 RT norm = norminf(rescor[amrlev][mglev_bottom],0,ncomp,IntVect(0));
1369 amrex::Print() << print_ident << "AT LEVEL " << amrlev << " " << mglev_bottom
1370 << " UP: Norm after bottom " << norm << "\n";
1371 }
1372 }
1373 else
1374 {
1375 if (verbose >= 4)
1376 {
1377 RT norm = norminf(res[amrlev][mglev_bottom],0,ncomp,IntVect(0));
1378 amrex::Print() << print_ident << "AT LEVEL " << amrlev << " " << mglev_bottom
1379 << " Norm before smooth " << norm << "\n";
1380 }
1381 setVal(cor[amrlev][mglev_bottom], RT(0.0));
1382 bool skip_fillboundary = true;
1383 linop.smooth(amrlev, mglev_bottom, cor[amrlev][mglev_bottom],
1384 res[amrlev][mglev_bottom], skip_fillboundary, nu1);
1385 if (verbose >= 4)
1386 {
1387 computeResOfCorrection(amrlev, mglev_bottom);
1388 RT norm = norminf(rescor[amrlev][mglev_bottom],0,ncomp,IntVect(0));
1389 amrex::Print() << print_ident << "AT LEVEL " << amrlev << " " << mglev_bottom
1390 << " Norm after smooth " << norm << "\n";
1391 }
1392 }
1393 BL_PROFILE_VAR_STOP(blp_bottom);
1394
1395 for (int mglev = mglev_bottom-1; mglev >= mglev_top; --mglev)
1396 {
1397 BL_PROFILE_VAR("MLMG::mgVcycle_up::"+std::to_string(mglev), blp_mgv_up_lev);
1398 // cor_fine += I(cor_crse)
1399 addInterpCorrection(amrlev, mglev);
1400 if (verbose >= 4)
1401 {
1402 computeResOfCorrection(amrlev, mglev);
1403 RT norm = norminf(rescor[amrlev][mglev],0,ncomp,IntVect(0));
1404 amrex::Print() << print_ident << "AT LEVEL " << amrlev << " " << mglev
1405 << " UP: Norm before smooth " << norm << "\n";
1406 }
1407 linop.smooth(amrlev, mglev, cor[amrlev][mglev], res[amrlev][mglev], false, nu2);
1408
1409 if (cf_strategy == CFStrategy::ghostnodes) { computeResOfCorrection(amrlev, mglev); }
1410
1411 if (verbose >= 4)
1412 {
1413 computeResOfCorrection(amrlev, mglev);
1414 RT norm = norminf(rescor[amrlev][mglev],0,ncomp,IntVect(0));
1415 amrex::Print() << print_ident << "AT LEVEL " << amrlev << " " << mglev
1416 << " UP: Norm after smooth " << norm << "\n";
1417 }
1418 }
1419}
1420
1421// FMG cycle on the coarsest AMR level.
1422// in: Residual on the top MG level (i.e., 0)
1423// out: Correction (cor) on all MG levels
1424template <typename MF>
1425void
1426MLMGT<MF>::mgFcycle ()
1427{
1428 BL_PROFILE("MLMG::mgFcycle()");
1429
1430#ifdef AMREX_USE_EB
1431 AMREX_ASSERT(linop.isCellCentered());
1432#endif
1433
1434 const int amrlev = 0;
1435 const int mg_bottom_lev = linop.NMGLevels(amrlev) - 1;
1436 IntVect nghost(0);
1437 if (cf_strategy == CFStrategy::ghostnodes) { nghost = IntVect(linop.getNGrow(amrlev)); }
1438
1439 for (int mglev = 1; mglev <= mg_bottom_lev; ++mglev)
1440 {
1441 linop.avgDownResMG(mglev, res[amrlev][mglev], res[amrlev][mglev-1]);
1442 }
1443
1444 bottomSolve();
1445
1446 for (int mglev = mg_bottom_lev-1; mglev >= 0; --mglev)
1447 {
1448 // cor_fine = I(cor_crse)
1449 interpCorrection(amrlev, mglev);
1450
1451 // rescor = res - L(cor)
1452 computeResOfCorrection(amrlev, mglev);
1453 // res = rescor; this provides b to the vcycle below
1454 LocalCopy(res[amrlev][mglev], rescor[amrlev][mglev], 0, 0, ncomp, nghost);
1455
1456 // save cor; do v-cycle; add the saved to cor
1457 std::swap(cor[amrlev][mglev], cor_hold[amrlev][mglev]);
1458 mgVcycle(amrlev, mglev);
1459 LocalAdd(cor[amrlev][mglev], cor_hold[amrlev][mglev], 0, 0, ncomp, nghost);
1460 }
1461}
1462
1463// At the true bottom of the coarsest AMR level.
1464// in : Residual (res) as b
1465// out : Correction (cor) as x
1466template <typename MF>
1467void
1468MLMGT<MF>::bottomSolve ()
1469{
1470 if (do_nsolve)
1471 {
1472 NSolve(*ns_mlmg, *ns_sol, *ns_rhs);
1473 }
1474 else
1475 {
1476 actualBottomSolve();
1477 }
1478}
1479
1480template <typename MF>
1481void
1482MLMGT<MF>::NSolve (MLMGT<MF>& a_solver, MF& a_sol, MF& a_rhs)
1483{
1484 BL_PROFILE("MLMG::NSolve()");
1485
1486 setVal(a_sol, RT(0.0));
1487
1488 MF const& res_bottom = res[0].back();
1489    if (BoxArray::SameRefs(boxArray(a_rhs),boxArray(res_bottom)) &&
1490        DistributionMapping::SameRefs(DistributionMap(a_rhs),DistributionMap(res_bottom)))
1491    {
1492 LocalCopy(a_rhs, res_bottom, 0, 0, ncomp, IntVect(0));
1493 } else {
1494 setVal(a_rhs, RT(0.0));
1495 ParallelCopy(a_rhs, res_bottom, 0, 0, ncomp);
1496 }
1497
1498 a_solver.solve(Vector<MF*>{&a_sol}, Vector<MF const*>{&a_rhs},
1499 RT(-1.0), RT(-1.0));
1500
1501 linop.copyNSolveSolution(cor[0].back(), a_sol);
1502}
1503
1504template <typename MF>
1505void
1506MLMGT<MF>::actualBottomSolve ()
1507{
1508 BL_PROFILE("MLMG::actualBottomSolve()");
1509
1510 if (!linop.isBottomActive()) { return; }
1511
1512 auto bottom_start_time = amrex::second();
1513
1514 ParallelContext::push(linop.BottomCommunicator());
1515
1516 const int amrlev = 0;
1517 const int mglev = linop.NMGLevels(amrlev) - 1;
1518 auto& x = cor[amrlev][mglev];
1519 auto& b = res[amrlev][mglev];
1520
1521 setVal(x, RT(0.0));
1522
1523 if (bottom_solver == BottomSolver::smoother)
1524 {
1525 bool skip_fillboundary = true;
1526 linop.smooth(amrlev, mglev, x, b, skip_fillboundary, nuf);
1527 }
1528 else
1529 {
1530 MF* bottom_b = &b;
1531 MF raii_b;
1532 if (linop.isBottomSingular() && linop.getEnforceSingularSolvable())
1533 {
1534 const IntVect ng = nGrowVect(b);
1535 raii_b = linop.make(amrlev, mglev, ng);
1536 LocalCopy(raii_b, b, 0, 0, ncomp, ng);
1537 bottom_b = &raii_b;
1538
1539 makeSolvable(amrlev,mglev,*bottom_b);
1540 }
1541
1542 if (bottom_solver == BottomSolver::hypre)
1543 {
1544#if defined(AMREX_USE_HYPRE) && (AMREX_SPACEDIM > 1)
1545 if constexpr (std::is_same<MF,MultiFab>()) {
1546 bottomSolveWithHypre(x, *bottom_b);
1547 } else
1548#endif
1549 {
1550 amrex::Abort("Using Hypre as bottom solver not supported in this case");
1551 }
1552 }
1553 else if (bottom_solver == BottomSolver::petsc)
1554 {
1555#if defined(AMREX_USE_PETSC) && (AMREX_SPACEDIM > 1)
1556 if constexpr (std::is_same<MF,MultiFab>()) {
1557 bottomSolveWithPETSc(x, *bottom_b);
1558 } else
1559#endif
1560 {
1561 amrex::Abort("Using PETSc as bottom solver not supported in this case");
1562 }
1563 }
1564 else
1565 {
1566 typename MLCGSolverT<MF>::Type cg_type;
1567 if (bottom_solver == BottomSolver::cg ||
1568 bottom_solver == BottomSolver::cgbicg) {
1569 cg_type = MLCGSolverT<MF>::Type::CG;
1570            } else {
1571                cg_type = MLCGSolverT<MF>::Type::BiCGStab;
1572            }
1573
1574 int ret = bottomSolveWithCG(x, *bottom_b, cg_type);
1575
1576 if (ret != 0 && (bottom_solver == BottomSolver::cgbicg ||
1577 bottom_solver == BottomSolver::bicgcg))
1578 {
1579 if (bottom_solver == BottomSolver::cgbicg) {
1580 cg_type = MLCGSolverT<MF>::Type::BiCGStab; // switch to bicg
1581 } else {
1582 cg_type = MLCGSolverT<MF>::Type::CG; // switch to cg
1583 }
1584 setVal(cor[amrlev][mglev], RT(0.0));
1585 ret = bottomSolveWithCG(x, *bottom_b, cg_type);
1586 if (ret == 0) { // switch permanently
1587 if (cg_type == MLCGSolverT<MF>::Type::CG) {
1588 bottom_solver = BottomSolver::cg;
1589 } else {
1590 bottom_solver = BottomSolver::bicgstab;
1591 }
1592 }
1593 }
1594
1595 // If the bottom solve failed then set the correction to zero
1596 if (ret != 0 && ret != 9) {
1597 setVal(cor[amrlev][mglev], RT(0.0));
1598 }
1599 const int n = (ret==0) ? nub : nuf;
1600 linop.smooth(amrlev, mglev, x, b, false, n);
1601 }
1602 }
1603
1604    ParallelContext::pop();
1605
1606 if (! timer.empty()) {
1607 timer[bottom_time] += amrex::second() - bottom_start_time;
1608 }
1609}
1610
1611template <typename MF>
1612int
1613MLMGT<MF>::bottomSolveWithCG (MF& x, const MF& b, typename MLCGSolverT<MF>::Type type)
1614{
1615 MLCGSolverT<MF> cg_solver(linop);
1616 cg_solver.setSolver(type);
1617 cg_solver.setVerbose(bottom_verbose);
1618 cg_solver.setPrintIdentation(print_ident);
1619 cg_solver.setMaxIter(bottom_maxiter);
1620 cg_solver.setInitSolnZeroed(true);
1621 if (cf_strategy == CFStrategy::ghostnodes) { cg_solver.setNGhost(linop.getNGrow()); }
1622
1623 int ret = cg_solver.solve(x, b, bottom_reltol, bottom_abstol);
1624 if (ret != 0 && verbose > 1) {
1625 amrex::Print() << print_ident << "MLMG: Bottom solve failed.\n";
1626 }
1627 m_niters_cg.push_back(cg_solver.getNumIters());
1628 return ret;
1629}
1630
1631// Compute multi-level Residual (res) up to amrlevmax.
1632template <typename MF>
1633void
1634MLMGT<MF>::computeMLResidual (int amrlevmax)
1635{
1636 BL_PROFILE("MLMG::computeMLResidual()");
1637
1638 const int mglev = 0;
1639 for (int alev = amrlevmax; alev >= 0; --alev) {
1640 const MF* crse_bcdata = (alev > 0) ? &(sol[alev-1]) : nullptr;
1641 linop.solutionResidual(alev, res[alev][mglev], sol[alev], rhs[alev], crse_bcdata);
1642 if (alev < finest_amr_lev) {
1643 linop.reflux(alev, res[alev][mglev], sol[alev], rhs[alev],
1644 res[alev+1][mglev], sol[alev+1], rhs[alev+1]);
1645 }
1646 }
1647}
1648
1649// Compute single AMR level residual without masking.
1650template <typename MF>
1651void
1652MLMGT<MF>::computeResidual (int alev)
1653{
1654 BL_PROFILE("MLMG::computeResidual()");
1655 const MF* crse_bcdata = (alev > 0) ? &(sol[alev-1]) : nullptr;
1656 linop.solutionResidual(alev, res[alev][0], sol[alev], rhs[alev], crse_bcdata);
1657}
1658
1659// Compute coarse AMR level composite residual with coarse solution and fine correction
1660template <typename MF>
1661void
1662MLMGT<MF>::computeResWithCrseSolFineCor (int calev, int falev)
1663{
1664 BL_PROFILE("MLMG::computeResWithCrseSolFineCor()");
1665
1666 IntVect nghost(0);
1667 if (cf_strategy == CFStrategy::ghostnodes) {
1668 nghost = IntVect(std::min(linop.getNGrow(falev),linop.getNGrow(calev)));
1669 }
1670
1671 MF& crse_sol = sol[calev];
1672 const MF& crse_rhs = rhs[calev];
1673 MF& crse_res = res[calev][0];
1674
1675 MF& fine_sol = sol[falev];
1676 const MF& fine_rhs = rhs[falev];
1677 MF& fine_cor = cor[falev][0];
1678 MF& fine_res = res[falev][0];
1679 MF& fine_rescor = rescor[falev][0];
1680
1681 const MF* crse_bcdata = (calev > 0) ? &(sol[calev-1]) : nullptr;
1682 linop.solutionResidual(calev, crse_res, crse_sol, crse_rhs, crse_bcdata);
1683
1684 linop.correctionResidual(falev, 0, fine_rescor, fine_cor, fine_res, BCMode::Homogeneous);
1685 LocalCopy(fine_res, fine_rescor, 0, 0, ncomp, nghost);
1686
1687 linop.reflux(calev, crse_res, crse_sol, crse_rhs, fine_res, fine_sol, fine_rhs);
1688
1689 linop.avgDownResAmr(calev, crse_res, fine_res);
1690}
1691
1692// Compute fine AMR level residual fine_res = fine_res - L(fine_cor) with coarse providing BC.
1693template <typename MF>
1694void
1695MLMGT<MF>::computeResWithCrseCorFineCor (int falev)
1696{
1697 BL_PROFILE("MLMG::computeResWithCrseCorFineCor()");
1698
1699 IntVect nghost(0);
1700 if (cf_strategy == CFStrategy::ghostnodes) {
1701 nghost = IntVect(linop.getNGrow(falev));
1702 }
1703
1704 const MF& crse_cor = cor[falev-1][0];
1705
1706 MF& fine_cor = cor [falev][0];
1707 MF& fine_res = res [falev][0];
1708 MF& fine_rescor = rescor[falev][0];
1709
1710 // fine_rescor = fine_res - L(fine_cor)
1711 linop.correctionResidual(falev, 0, fine_rescor, fine_cor, fine_res,
1712 BCMode::Inhomogeneous, &crse_cor);
1713 LocalCopy(fine_res, fine_rescor, 0, 0, ncomp, nghost);
1714}
1715
1716// Interpolate correction from coarse to fine AMR level.
1717template <typename MF>
1718void
1719MLMGT<MF>::interpCorrection (int alev)
1720{
1721 BL_PROFILE("MLMG::interpCorrection_1");
1722
1723 IntVect nghost(0);
1724 if (cf_strategy == CFStrategy::ghostnodes) {
1725 nghost = IntVect(linop.getNGrow(alev));
1726 }
1727
1728 MF const& crse_cor = cor[alev-1][0];
1729 MF & fine_cor = cor[alev ][0];
1730
1731 const Geometry& crse_geom = linop.Geom(alev-1,0);
1732
1733 int ng_src = 0;
1734 int ng_dst = linop.isCellCentered() ? 1 : 0;
1735 if (cf_strategy == CFStrategy::ghostnodes)
1736 {
1737 ng_src = linop.getNGrow(alev-1);
1738 ng_dst = linop.getNGrow(alev-1);
1739 }
1740
1741 MF cfine = linop.makeCoarseAmr(alev, IntVect(ng_dst));
1742 setVal(cfine, RT(0.0));
1743 ParallelCopy(cfine, crse_cor, 0, 0, ncomp, IntVect(ng_src), IntVect(ng_dst),
1744 crse_geom.periodicity());
1745
1746 linop.interpolationAmr(alev, fine_cor, cfine, nghost); // NOLINT(readability-suspicious-call-argument)
1747}
1748
1749// Interpolate correction between MG levels
1750// inout: Correction (cor) on coarse MG lev. (out due to FillBoundary)
1751// out : Correction (cor) on fine MG lev.
1752template <typename MF>
1753void
1754MLMGT<MF>::interpCorrection (int alev, int mglev)
1755{
1756 BL_PROFILE("MLMG::interpCorrection_2");
1757
1758 MF& crse_cor = cor[alev][mglev+1];
1759 MF& fine_cor = cor[alev][mglev ];
1760 linop.interpAssign(alev, mglev, fine_cor, crse_cor);
1761}
1762
1763// (Fine MG level correction) += I(Coarse MG level correction)
1764template <typename MF>
1765void
1766MLMGT<MF>::addInterpCorrection (int alev, int mglev)
1767{
1768 BL_PROFILE("MLMG::addInterpCorrection()");
1769
1770 const MF& crse_cor = cor[alev][mglev+1];
1771 MF& fine_cor = cor[alev][mglev ];
1772
1773 MF cfine;
1774 const MF* cmf;
1775
1776 if (linop.isMFIterSafe(alev, mglev, mglev+1))
1777 {
1778 cmf = &crse_cor;
1779 }
1780 else
1781 {
1782 cfine = linop.makeCoarseMG(alev, mglev, IntVect(0));
1783 ParallelCopy(cfine, crse_cor, 0, 0, ncomp);
1784 cmf = &cfine;
1785 }
1786
1787 linop.interpolation(alev, mglev, fine_cor, *cmf);
1788}
1789
1790// Compute rescor = res - L(cor)
1791// in : res
1792// inout: cor (out due to FillBoundary in linop.correctionResidual)
1793// out : rescor
1794template <typename MF>
1795void
1796MLMGT<MF>::computeResOfCorrection (int amrlev, int mglev)
1797{
1798 BL_PROFILE("MLMG:computeResOfCorrection()");
1799 MF & x = cor[amrlev][mglev];
1800 const MF& b = res[amrlev][mglev];
1801 MF & r = rescor[amrlev][mglev];
1802 linop.correctionResidual(amrlev, mglev, r, x, b, BCMode::Homogeneous);
1803}
1804
1805// Compute single-level masked inf-norm of Residual (res).
1806template <typename MF>
1807auto
1808MLMGT<MF>::ResNormInf (int alev, bool local) -> RT
1809{
1810 BL_PROFILE("MLMG::ResNormInf()");
1811 return linop.normInf(alev, res[alev][0], local);
1812}
1813
1814// Computes multi-level masked inf-norm of Residual (res).
1815template <typename MF>
1816auto
1817MLMGT<MF>::MLResNormInf (int alevmax, bool local) -> RT
1818{
1819 BL_PROFILE("MLMG::MLResNormInf()");
1820 RT r = RT(0.0);
1821 for (int alev = 0; alev <= alevmax; ++alev)
1822 {
1823 r = std::max(r, ResNormInf(alev,true));
1824    }
1825    if (!local) { ParallelAllReduce::Max(r, ParallelContext::CommunicatorSub()); }
1826    return r;
1827}
1828
1829// Compute multi-level masked inf-norm of RHS (rhs).
1830template <typename MF>
1831auto
1832MLMGT<MF>::MLRhsNormInf (bool local) -> RT
1833{
1834 BL_PROFILE("MLMG::MLRhsNormInf()");
1835 RT r = RT(0.0);
1836 for (int alev = 0; alev <= finest_amr_lev; ++alev) {
1837 auto t = linop.normInf(alev, rhs[alev], true);
1838 r = std::max(r, t);
1839    }
1840    if (!local) { ParallelAllReduce::Max(r, ParallelContext::CommunicatorSub()); }
1841    return r;
1842}
1843
1844template <typename MF>
1845void
1846MLMGT<MF>::makeSolvable ()
1847{
1848 auto const& offset = linop.getSolvabilityOffset(0, 0, rhs[0]);
1849 if (verbose >= 4) {
1850 for (int c = 0; c < ncomp; ++c) {
1851 amrex::Print() << print_ident << "MLMG: Subtracting " << offset[c] << " from rhs component "
1852 << c << "\n";
1853 }
1854 }
1855 for (int alev = 0; alev < namrlevs; ++alev) {
1856 linop.fixSolvabilityByOffset(alev, 0, rhs[alev], offset);
1857 }
1858}
1859
1860template <typename MF>
1861void
1862MLMGT<MF>::makeSolvable (int amrlev, int mglev, MF& mf)
1863{
1864 auto const& offset = linop.getSolvabilityOffset(amrlev, mglev, mf);
1865 if (verbose >= 4) {
1866 for (int c = 0; c < ncomp; ++c) {
1867 amrex::Print() << print_ident << "MLMG: Subtracting " << offset[c]
1868 << " from mf component c = " << c
1869 << " on level (" << amrlev << ", " << mglev << ")\n";
1870 }
1871 }
1872 linop.fixSolvabilityByOffset(amrlev, mglev, mf, offset);
1873}
1874
1875#if defined(AMREX_USE_HYPRE) && (AMREX_SPACEDIM > 1)
1876template <typename MF>
1877template <class TMF,std::enable_if_t<std::is_same_v<TMF,MultiFab>,int>>
1878void
1879MLMGT<MF>::bottomSolveWithHypre (MF& x, const MF& b)
1880{
1881 const int amrlev = 0;
1882 const int mglev = linop.NMGLevels(amrlev) - 1;
1883
1884 AMREX_ALWAYS_ASSERT_WITH_MESSAGE(ncomp == 1, "bottomSolveWithHypre doesn't work with ncomp > 1");
1885
1886 if (linop.isCellCentered())
1887 {
1888 if (hypre_solver == nullptr) // We should reuse the setup
1889 {
1890 hypre_solver = linop.makeHypre(hypre_interface);
1891
1892 hypre_solver->setVerbose(bottom_verbose);
1893 if (hypre_interface == amrex::Hypre::Interface::ij) {
1894 hypre_solver->setHypreOptionsNamespace(hypre_options_namespace);
1895 } else {
1896 hypre_solver->setHypreOldDefault(hypre_old_default);
1897 hypre_solver->setHypreRelaxType(hypre_relax_type);
1898 hypre_solver->setHypreRelaxOrder(hypre_relax_order);
1899 hypre_solver->setHypreNumSweeps(hypre_num_sweeps);
1900 hypre_solver->setHypreStrongThreshold(hypre_strong_threshold);
1901 }
1902
1903 const BoxArray& ba = linop.m_grids[amrlev].back();
1904 const DistributionMapping& dm = linop.m_dmap[amrlev].back();
1905 const Geometry& geom = linop.m_geom[amrlev].back();
1906
1907 hypre_bndry = std::make_unique<MLMGBndryT<MF>>(ba, dm, ncomp, geom);
1908 hypre_bndry->setHomogValues();
1909 const Real* dx = linop.m_geom[0][0].CellSize();
1910 IntVect crse_ratio = linop.m_coarse_data_crse_ratio.allGT(0) ? linop.m_coarse_data_crse_ratio : IntVect(1);
1911 RealVect bclocation(AMREX_D_DECL(0.5*dx[0]*crse_ratio[0],
1912 0.5*dx[1]*crse_ratio[1],
1913 0.5*dx[2]*crse_ratio[2]));
1914 hypre_bndry->setLOBndryConds(linop.m_lobc, linop.m_hibc, IntVect(-1), bclocation,
1915 linop.m_coarse_fine_bc_type);
1916 }
1917
1918 // IJ interface understands absolute tolerance API of hypre
1919 amrex::Real hypre_abstol =
1920 (hypre_interface == amrex::Hypre::Interface::ij)
1921 ? bottom_abstol : Real(-1.0);
1922 hypre_solver->solve(
1923 x, b, bottom_reltol, hypre_abstol, bottom_maxiter, *hypre_bndry,
1924 linop.getMaxOrder());
1925 }
1926 else
1927 {
1928 if (hypre_node_solver == nullptr)
1929 {
1930 hypre_node_solver =
1931 linop.makeHypreNodeLap(bottom_verbose, hypre_options_namespace);
1932 }
1933 hypre_node_solver->solve(x, b, bottom_reltol, bottom_abstol, bottom_maxiter);
1934 }
1935
1936 // For singular problems there may be a large constant added to all values of the solution
1937 // For precision reasons we enforce that the average of the correction from hypre is 0
1938 if (linop.isSingular(amrlev) && linop.getEnforceSingularSolvable())
1939 {
1940 makeSolvable(amrlev, mglev, x);
1941 }
1942}
1943#endif
1944
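bottomSolveWithHypre is only reached when hypre has been selected for the coarsest multigrid level. A minimal usage sketch, assuming an AMReX build with AMREX_USE_HYPRE, `using namespace amrex`, an already-configured cell-centered operator lp, and solution/right-hand-side Vector<MultiFab>s named sol and rhs; the tolerances are illustrative:

MLMG mlmg(lp);
mlmg.setVerbose(1);
mlmg.setBottomVerbose(1);
mlmg.setBottomSolver(BottomSolver::hypre);     // route the coarsest-level solve to hypre
mlmg.setBottomTolerance(1.e-4);                // relative tolerance for the bottom solve
mlmg.setHypreInterface(Hypre::Interface::ij);  // the IJ interface also honors an absolute tolerance
Real err = mlmg.solve(GetVecOfPtrs(sol), GetVecOfConstPtrs(rhs), 1.e-11, 0.0);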
1945#if defined(AMREX_USE_PETSC) && (AMREX_SPACEDIM > 1)
1946template <typename MF>
1947template <class TMF,std::enable_if_t<std::is_same_v<TMF,MultiFab>,int>>
1948void
1949MLMGT<MF>::bottomSolveWithPETSc (MF& x, const MF& b)
1950{
1951 AMREX_ALWAYS_ASSERT_WITH_MESSAGE(ncomp == 1, "bottomSolveWithPETSc doesn't work with ncomp > 1");
1952
1953 if(petsc_solver == nullptr)
1954 {
1955 petsc_solver = linop.makePETSc();
1956 petsc_solver->setVerbose(bottom_verbose);
1957
1958 const BoxArray& ba = linop.m_grids[0].back();
1959 const DistributionMapping& dm = linop.m_dmap[0].back();
1960 const Geometry& geom = linop.m_geom[0].back();
1961
1962 petsc_bndry = std::make_unique<MLMGBndryT<MF>>(ba, dm, ncomp, geom);
1963 petsc_bndry->setHomogValues();
1964 const Real* dx = linop.m_geom[0][0].CellSize();
1965 auto crse_ratio = linop.m_coarse_data_crse_ratio.allGT(0) ? linop.m_coarse_data_crse_ratio : IntVect(1);
1966 RealVect bclocation(AMREX_D_DECL(0.5*dx[0]*crse_ratio[0],
1967 0.5*dx[1]*crse_ratio[1],
1968 0.5*dx[2]*crse_ratio[2]));
1969 petsc_bndry->setLOBndryConds(linop.m_lobc, linop.m_hibc, IntVect(-1), bclocation,
1970 linop.m_coarse_fine_bc_type);
1971 }
1972 petsc_solver->solve(x, b, bottom_reltol, Real(-1.), bottom_maxiter, *petsc_bndry,
1973 linop.getMaxOrder());
1974}
1975#endif
1976
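The PETSc path is selected the same way in a build with AMREX_USE_PETSC; note that, as the call above shows, it is driven by the relative tolerance and the iteration cap only (the absolute tolerance is passed as -1). A hedged two-line variation of the previous sketch:

mlmg.setBottomSolver(BottomSolver::petsc);   // requires AMREX_USE_PETSC
mlmg.setBottomMaxIter(200);                  // cap the PETSc iterations on the coarsest level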
1977template <typename MF>
1978void
1979MLMGT<MF>::checkPoint (const Vector<MultiFab*>& a_sol,
1980 const Vector<MultiFab const*>& a_rhs,
1981 RT a_tol_rel, RT a_tol_abs, const char* a_file_name) const
1982{
1983 std::string file_name(a_file_name);
1984 UtilCreateCleanDirectory(file_name, false);
1985
1986 if (ParallelContext::IOProcessorSub())
1987 {
1988 std::string HeaderFileName(std::string(a_file_name)+"/Header");
1989 std::ofstream HeaderFile;
1990 HeaderFile.open(HeaderFileName.c_str(), std::ofstream::out |
1991 std::ofstream::trunc |
1992 std::ofstream::binary);
1993 if( ! HeaderFile.good()) {
1994 FileOpenFailed(HeaderFileName);
1995 }
1996
1997 HeaderFile.precision(17);
1998
1999 HeaderFile << linop.name() << "\n"
2000 << "a_tol_rel = " << a_tol_rel << "\n"
2001 << "a_tol_abs = " << a_tol_abs << "\n"
2002 << "verbose = " << verbose << "\n"
2003 << "max_iters = " << max_iters << "\n"
2004 << "nu1 = " << nu1 << "\n"
2005 << "nu2 = " << nu2 << "\n"
2006 << "nuf = " << nuf << "\n"
2007 << "nub = " << nub << "\n"
2008 << "max_fmg_iters = " << max_fmg_iters << "\n"
2009 << "bottom_solver = " << static_cast<int>(bottom_solver) << "\n"
2010 << "bottom_verbose = " << bottom_verbose << "\n"
2011 << "bottom_maxiter = " << bottom_maxiter << "\n"
2012 << "bottom_reltol = " << bottom_reltol << "\n"
2013 << "always_use_bnorm = " << always_use_bnorm << "\n"
2014 << "namrlevs = " << namrlevs << "\n"
2015 << "finest_amr_lev = " << finest_amr_lev << "\n"
2016 << "linop_prepared = " << linop_prepared << "\n"
2017 << "solve_called = " << solve_called << "\n";
2018
2019 for (int ilev = 0; ilev <= finest_amr_lev; ++ilev) {
2020 UtilCreateCleanDirectory(file_name+"/Level_"+std::to_string(ilev), false);
2021 }
2022 }
2023
2024 ParallelContext::BarrierSub();
2025
2026 for (int ilev = 0; ilev <= finest_amr_lev; ++ilev) {
2027 VisMF::Write(*a_sol[ilev], file_name+"/Level_"+std::to_string(ilev)+"/sol");
2028 VisMF::Write(*a_rhs[ilev], file_name+"/Level_"+std::to_string(ilev)+"/rhs");
2029 }
2030
2031 linop.checkPoint(file_name+"/linop");
2032}
2033
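checkPoint is a debugging aid: it writes a Header listing the solver parameters, the per-level sol and rhs MultiFabs, and the operator's own checkpoint data. A minimal sketch of triggering it, assuming the optional checkpoint-file argument of solve() is routed to this function and reusing the mlmg/sol/rhs names from the earlier sketches; the directory name is arbitrary:

Real err = mlmg.solve(GetVecOfPtrs(sol), GetVecOfConstPtrs(rhs),
                      1.e-11, 0.0, "mlmg_debug_chkpt");
// Expected layout, per the code above:
//   mlmg_debug_chkpt/Header
//   mlmg_debug_chkpt/Level_<i>/sol  and  mlmg_debug_chkpt/Level_<i>/rhs
//   mlmg_debug_chkpt/linop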
2034template <typename MF>
2035void
2036MLMGT<MF>::incPrintIdentation ()
2037{
2038 print_ident.resize(print_ident.size()+4, ' ');
2039}
2040
2041template <typename MF>
2042void
2043MLMGT<MF>::decPrintIdentation ()
2044{
2045 if (print_ident.size() > 4) {
2046 print_ident.resize(print_ident.size()-4, ' ');
2047 } else {
2048 print_ident.clear();
2049 }
2050}
2051
2052extern template class MLMGT<MultiFab>;
2053
2054using MLMG = MLMGT<MultiFab>;
2055
2056}
2057
2058#endif
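For reference, an end-to-end usage sketch of the MLMG alias instantiated above. It assumes `using namespace amrex`, an AMR hierarchy described by Vector<Geometry> geom, Vector<BoxArray> grids and Vector<DistributionMapping> dmap, data in Vector<MultiFab> phi and rhs, and uses MLPoisson purely as an example operator; none of these names come from this header.

MLPoisson mlpoisson(geom, grids, dmap);
mlpoisson.setDomainBC({AMREX_D_DECL(LinOpBCType::Dirichlet,
                                    LinOpBCType::Dirichlet,
                                    LinOpBCType::Dirichlet)},
                      {AMREX_D_DECL(LinOpBCType::Dirichlet,
                                    LinOpBCType::Dirichlet,
                                    LinOpBCType::Dirichlet)});
for (int ilev = 0; ilev < int(geom.size()); ++ilev) {
    mlpoisson.setLevelBC(ilev, &phi[ilev]);   // phi also supplies the boundary values
}

MLMG mlmg(mlpoisson);
mlmg.setMaxIter(100);
mlmg.setVerbose(1);
Real err = mlmg.solve(GetVecOfPtrs(phi), GetVecOfConstPtrs(rhs), 1.e-11, 0.0);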