Block-Structured AMR Software Framework
 
AMReX_MLMG.H
1#ifndef AMREX_ML_MG_H_
2#define AMREX_ML_MG_H_
3#include <AMReX_Config.H>
4#include <AMReX_Enum.H>
5
6#include <AMReX_MLLinOp.H>
7#include <AMReX_MLCGSolver.H>
8
9namespace amrex {
10
11// Norm used to evaluate the target convergence criteria
13
14template <typename MF>
15class MLMGT
16{
17public:
18
19 class error
20 : public std::runtime_error
21 {
22 public :
23 using std::runtime_error::runtime_error;
24 };
25
26 template <typename T> friend class MLCGSolverT;
27 template <typename M> friend class GMRESMLMGT;
28
29 using MFType = MF;
30 using FAB = typename MLLinOpT<MF>::FAB;
31 using RT = typename MLLinOpT<MF>::RT;
32
33 using BCMode = typename MLLinOpT<MF>::BCMode;
35
37 enum class CFStrategy : int {none,ghostnodes};
38
39 MLMGT (MLLinOpT<MF>& a_lp);
40 ~MLMGT ();
41
42 MLMGT (MLMGT<MF> const&) = delete;
43 MLMGT (MLMGT<MF> &&) = delete;
44 MLMGT<MF>& operator= (MLMGT<MF> const&) = delete;
45 MLMGT<MF>& operator= (MLMGT<MF> &&) = delete;
46
47 // Optional argument checkpoint_file is for debugging only.
48 template <typename AMF>
49 RT solve (const Vector<AMF*>& a_sol, const Vector<AMF const*>& a_rhs,
50 RT a_tol_rel, RT a_tol_abs, const char* checkpoint_file = nullptr);
51
52 template <typename AMF>
53 RT solve (std::initializer_list<AMF*> a_sol,
54 std::initializer_list<AMF const*> a_rhs,
55 RT a_tol_rel, RT a_tol_abs, const char* checkpoint_file = nullptr);
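// Editorial usage sketch (not part of the original header): a typical
// single-level solve. The operator type (MLABecLaplacian), the MultiFabs
// phi and rhs, the Geometry/BoxArray/DistributionMapping objects, and the
// tolerances are illustrative placeholders; `using namespace amrex;` is assumed.
//
//     MLABecLaplacian mlabec({geom}, {grids}, {dmap});
//     // ... set domain BCs, level BCs, and coefficients on mlabec ...
//     MLMG mlmg(mlabec);
//     mlmg.setVerbose(1);
//     Real resnorm = mlmg.solve({&phi}, {&rhs}, Real(1.e-10), Real(0.0));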
56
57 RT precond (Vector<MF*> const& a_sol, Vector<MF const*> const& a_rhs,
58 RT a_tol_rel, RT a_tol_abs);
59
60 template <typename AMF>
61 void getGradSolution (const Vector<Array<AMF*,AMREX_SPACEDIM> >& a_grad_sol,
62 Location a_loc = Location::FaceCenter);
63
64 template <typename AMF>
65 void getGradSolution (std::initializer_list<Array<AMF*,AMREX_SPACEDIM>> a_grad_sol,
66 Location a_loc = Location::FaceCenter);
67
71 template <typename AMF>
72 void getFluxes (const Vector<Array<AMF*,AMREX_SPACEDIM> >& a_flux,
73 Location a_loc = Location::FaceCenter);
74
75 template <typename AMF>
76 void getFluxes (std::initializer_list<Array<AMF*,AMREX_SPACEDIM>> a_flux,
77 Location a_loc = Location::FaceCenter);
78
79 template <typename AMF>
80 void getFluxes (const Vector<Array<AMF*,AMREX_SPACEDIM> >& a_flux,
81 const Vector<AMF*> & a_sol,
82 Location a_loc = Location::FaceCenter);
83
84 template <typename AMF>
85 void getFluxes (std::initializer_list<Array<AMF*,AMREX_SPACEDIM>> a_flux,
86 std::initializer_list<AMF*> a_sol,
87 Location a_loc = Location::FaceCenter);
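// Editorial note: the getGradSolution()/getFluxes() overloads are meant to be
// called after solve(); they reuse the solution copy held internally in `sol`.
// Illustrative sketch with caller-owned face-based MultiFabs fx, fy, fz
// (names are placeholders):
//
//     mlmg.solve({&phi}, {&rhs}, Real(1.e-10), Real(0.0));
//     mlmg.getFluxes({{AMREX_D_DECL(&fx, &fy, &fz)}});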
88
89 template <typename AMF>
90 void getFluxes (const Vector<AMF*> & a_flux,
91 Location a_loc = Location::CellCenter);
92
93 template <typename AMF>
94 void getFluxes (std::initializer_list<AMF*> a_flux,
95 Location a_loc = Location::CellCenter);
96
97 template <typename AMF>
98 void getFluxes (const Vector<AMF*> & a_flux,
99 const Vector<AMF*> & a_sol,
100 Location a_loc = Location::CellCenter);
101
102 template <typename AMF>
103 void getFluxes (std::initializer_list<AMF*> a_flux,
104 std::initializer_list<AMF*> a_sol,
105 Location a_loc = Location::CellCenter);
106
107 void compResidual (const Vector<MF*>& a_res, const Vector<MF*>& a_sol,
108 const Vector<MF const*>& a_rhs);
109
110#ifdef AMREX_USE_EB
111 // Flux into the EB wall
112 void getEBFluxes (const Vector<MF*>& a_eb_flux);
113 void getEBFluxes (const Vector<MF*>& a_eb_flux, const Vector<MF*> & a_sol);
114#endif
115
121 void apply (const Vector<MF*>& out, const Vector<MF*>& in);
122
124 void applyPrecond (const Vector<MF*>& out, const Vector<MF*>& in);
125
126 [[nodiscard]] int getVerbose () const { return verbose; }
127 [[nodiscard]] int getBottomVerbose () const { return bottom_verbose; }
128
129 void incPrintIdentation ();
130 void decPrintIdentation ();
131
132 void setThrowException (bool t) noexcept { throw_exception = t; }
133 void setVerbose (int v) noexcept { verbose = v; }
134 void setMaxIter (int n) noexcept { max_iters = n; }
135 void setMaxFmgIter (int n) noexcept { max_fmg_iters = n; }
136 void setFixedIter (int nit) noexcept { do_fixed_number_of_iters = nit; }
137 void setPrecondIter (int nit) noexcept { max_precond_iters = nit; }
138
139 void setPreSmooth (int n) noexcept { nu1 = n; }
140 void setPostSmooth (int n) noexcept { nu2 = n; }
141 void setFinalSmooth (int n) noexcept { nuf = n; }
142 void setBottomSmooth (int n) noexcept { nub = n; }
143
144 void setBottomSolver (BottomSolver s) noexcept { bottom_solver = s; }
145 [[nodiscard]] BottomSolver getBottomSolver () const noexcept { return bottom_solver; }
146 void setCFStrategy (CFStrategy a_cf_strategy) noexcept {cf_strategy = a_cf_strategy;}
147 void setBottomVerbose (int v) noexcept { bottom_verbose = v; }
148 void setBottomMaxIter (int n) noexcept { bottom_maxiter = n; }
149 void setBottomTolerance (RT t) noexcept { bottom_reltol = t; }
150 void setBottomToleranceAbs (RT t) noexcept { bottom_abstol = t;}
151 [[nodiscard]] RT getBottomToleranceAbs () const noexcept{ return bottom_abstol; }
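// Editorial configuration sketch (values are illustrative, not defaults):
//
//     mlmg.setMaxIter(100);
//     mlmg.setMaxFmgIter(0);                        // V-cycles only
//     mlmg.setBottomSolver(BottomSolver::bicgstab);
//     mlmg.setBottomMaxIter(200);
//     mlmg.setBottomTolerance(Real(1.e-4));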
152
153 [[deprecated("Use MLMG::setConvergenceNormType() instead.")]]
154 void setAlwaysUseBNorm (int flag) noexcept;
155
157
158 void setFinalFillBC (int flag) noexcept { final_fill_bc = flag; }
159
160 [[nodiscard]] int numAMRLevels () const noexcept { return namrlevs; }
161
162 void setNSolve (int flag) noexcept { do_nsolve = flag; }
163 void setNSolveGridSize (int s) noexcept { nsolve_grid_size = s; }
164
165 void setNoGpuSync (bool do_not_sync) noexcept { do_no_sync_gpu = do_not_sync; }
166
167#if defined(AMREX_USE_HYPRE) && (AMREX_SPACEDIM > 1)
168 void setHypreInterface (Hypre::Interface f) noexcept {
169 // must use ij interface for EB
170#ifndef AMREX_USE_EB
171 hypre_interface = f;
172#else
174#endif
175 }
176
178 void setHypreOptionsNamespace(const std::string& prefix) noexcept
179 {
180 hypre_options_namespace = prefix;
181 }
182
183 void setHypreOldDefault (bool l) noexcept {hypre_old_default = l;}
184 void setHypreRelaxType (int n) noexcept {hypre_relax_type = n;}
185 void setHypreRelaxOrder (int n) noexcept {hypre_relax_order = n;}
186 void setHypreNumSweeps (int n) noexcept {hypre_num_sweeps = n;}
187 void setHypreStrongThreshold (Real t) noexcept {hypre_strong_threshold = t;}
188#endif
189
190 void prepareForFluxes (Vector<MF const*> const& a_sol);
191
192 template <typename AMF>
193 void prepareForSolve (Vector<AMF*> const& a_sol, Vector<AMF const*> const& a_rhs);
194
195 void prepareForNSolve ();
196
197 void prepareLinOp ();
198
199 void preparePrecond ();
200
201 void oneIter (int iter);
202
203 void miniCycle (int amrlev);
204
205 void mgVcycle (int amrlev, int mglev);
206 void mgFcycle ();
207
208 void bottomSolve ();
209 void NSolve (MLMGT<MF>& a_solver, MF& a_sol, MF& a_rhs);
210 void actualBottomSolve ();
211
212 void computeMLResidual (int amrlevmax);
213 void computeResidual (int alev);
214 void computeResWithCrseSolFineCor (int calev, int falev);
215 void computeResWithCrseCorFineCor (int falev);
216 void interpCorrection (int alev);
217 void interpCorrection (int alev, int mglev);
218 void addInterpCorrection (int alev, int mglev);
219
220 void computeResOfCorrection (int amrlev, int mglev);
221
222 RT ResNormInf (int alev, bool local = false);
223 RT MLResNormInf (int alevmax, bool local = false);
224 RT MLRhsNormInf (bool local = false);
225
226 void makeSolvable ();
227 void makeSolvable (int amrlev, int mglev, MF& mf);
228
229#if defined(AMREX_USE_HYPRE) && (AMREX_SPACEDIM > 1)
230 template <class TMF=MF,std::enable_if_t<std::is_same_v<TMF,MultiFab>,int> = 0>
231 void bottomSolveWithHypre (MF& x, const MF& b);
232#endif
233
234#if defined(AMREX_USE_PETSC) && (AMREX_SPACEDIM > 1)
235 template <class TMF=MF,std::enable_if_t<std::is_same_v<TMF,MultiFab>,int> = 0>
236 void bottomSolveWithPETSc (MF& x, const MF& b);
237#endif
238
239 int bottomSolveWithCG (MF& x, const MF& b, typename MLCGSolverT<MF>::Type type);
240
241 [[nodiscard]] RT getInitRHS () const noexcept { return m_rhsnorm0; }
242 // Initial composite residual
243 [[nodiscard]] RT getInitResidual () const noexcept { return m_init_resnorm0; }
244 // Final composite residual
245 [[nodiscard]] RT getFinalResidual () const noexcept { return m_final_resnorm0; }
246 // Residuals on the *finest* AMR level after each iteration
247 [[nodiscard]] Vector<RT> const& getResidualHistory () const noexcept { return m_iter_fine_resnorm0; }
248 [[nodiscard]] int getNumIters () const noexcept { return m_iter_fine_resnorm0.size(); }
249 [[nodiscard]] Vector<int> const& getNumCGIters () const noexcept { return m_niters_cg; }
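// Editorial sketch: querying convergence information after solve().
//
//     Real r0 = mlmg.getInitResidual();
//     Real rn = mlmg.getFinalResidual();
//     amrex::Print() << "MLMG: " << mlmg.getNumIters() << " iterations, "
//                    << "residual " << r0 << " -> " << rn << "\n";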
250
252
253private:
254
255 bool precond_mode = false;
256 bool throw_exception = false;
257 int verbose = 1;
258
259 int max_iters = 200;
262
263 int nu1 = 2;
264 int nu2 = 2;
265 int nuf = 8;
266 int nub = 0;
267
269
273 int bottom_maxiter = 200;
274 RT bottom_reltol = std::is_same<RT,double>() ? RT(1.e-4) : RT(1.e-3);
276
278
280
281 MLLinOpT<MF>& linop;
282 int ncomp;
283 int namrlevs;
284 int finest_amr_lev;
285
286 bool linop_prepared = false;
287 Long solve_called = 0;
288
290 int do_nsolve = false;
292 std::unique_ptr<MLLinOpT<MF>> ns_linop;
293 std::unique_ptr<MLMGT<MF>> ns_mlmg;
294 std::unique_ptr<MF> ns_sol;
295 std::unique_ptr<MF> ns_rhs;
296
297 std::string print_ident;
298
299 bool do_no_sync_gpu = false;
300
302#if defined(AMREX_USE_HYPRE) && (AMREX_SPACEDIM > 1)
303 // Hypre::Interface hypre_interface = Hypre::Interface::structed;
304 // Hypre::Interface hypre_interface = Hypre::Interface::semi_structed;
305 Hypre::Interface hypre_interface = Hypre::Interface::ij;
306
307 std::unique_ptr<Hypre> hypre_solver;
308 std::unique_ptr<MLMGBndryT<MF>> hypre_bndry;
309 std::unique_ptr<HypreNodeLap> hypre_node_solver;
310
311 std::string hypre_options_namespace = "hypre";
312 bool hypre_old_default = true; // Falgout coarsening with modified classical interpolation
313 int hypre_relax_type = 6; // G-S/Jacobi hybrid relaxation
314 int hypre_relax_order = 1; // uses C/F relaxation
315 int hypre_num_sweeps = 2; // Sweeps on each level
316 Real hypre_strong_threshold = 0.25; // Hypre default is 0.25
317#endif
318
320#if defined(AMREX_USE_PETSC) && (AMREX_SPACEDIM > 1)
321 std::unique_ptr<PETScABecLap> petsc_solver;
322 std::unique_ptr<MLMGBndryT<MF>> petsc_bndry;
323#endif
324
332
334
344
347
348 RT m_rhsnorm0 = RT(-1.0);
352 Vector<RT> m_iter_fine_resnorm0; // Residual for each iteration at the finest level
353
354 void checkPoint (const Vector<MultiFab*>& a_sol,
355 const Vector<MultiFab const*>& a_rhs,
356 RT a_tol_rel, RT a_tol_abs, const char* a_file_name) const;
357
358};
359
360template <typename MF>
361MLMGT<MF>::MLMGT (MLLinOpT<MF>& a_lp)
362 : linop(a_lp), ncomp(a_lp.getNComp()), namrlevs(a_lp.NAMRLevels()),
363 finest_amr_lev(a_lp.NAMRLevels()-1)
364{}
365
366template <typename MF> MLMGT<MF>::~MLMGT () = default;
367
368template <typename MF>
369void
370MLMGT<MF>::setAlwaysUseBNorm (int flag) noexcept
371{
372 if (flag) {
373 norm_type = MLMGNormType::bnorm;
374 } else {
375 norm_type = MLMGNormType::greater;
376 }
377}
378
379template <typename MF>
380template <typename AMF>
381auto
382MLMGT<MF>::solve (std::initializer_list<AMF*> a_sol,
383 std::initializer_list<AMF const*> a_rhs,
384 RT a_tol_rel, RT a_tol_abs, const char* checkpoint_file) -> RT
385{
386 return solve(Vector<AMF*>(std::move(a_sol)),
387 Vector<AMF const*>(std::move(a_rhs)),
388 a_tol_rel, a_tol_abs, checkpoint_file);
389}
390
391template <typename MF>
392template <typename AMF>
393auto
394MLMGT<MF>::solve (const Vector<AMF*>& a_sol, const Vector<AMF const*>& a_rhs,
395                  RT a_tol_rel, RT a_tol_abs, const char* checkpoint_file) -> RT
396{
397 BL_PROFILE("MLMG::solve()");
398
399 bool prev_in_single_stream_region = false;
400 bool prev_in_nosync_region = false;
401
402 if (do_no_sync_gpu) {
403 prev_in_single_stream_region = Gpu::setSingleStreamRegion(true);
404 prev_in_nosync_region = Gpu::setNoSyncRegion(true);
405 }
406
407 if constexpr (std::is_same<AMF,MultiFab>()) {
408 if (checkpoint_file != nullptr) {
409 checkPoint(a_sol, a_rhs, a_tol_rel, a_tol_abs, checkpoint_file);
410 }
411 }
412
413 if (bottom_solver == BottomSolver::Default) {
414 bottom_solver = linop.getDefaultBottomSolver();
415 }
416
417#if (defined(AMREX_USE_HYPRE) || defined(AMREX_USE_PETSC)) && (AMREX_SPACEDIM > 1)
418 if constexpr (IsFabArray_v<AMF>) {
419 if (bottom_solver == BottomSolver::hypre || bottom_solver == BottomSolver::petsc) {
420 int mo = linop.getMaxOrder();
421 if (a_sol[0]->hasEBFabFactory()) {
422 linop.setMaxOrder(2);
423 } else {
424 linop.setMaxOrder(std::min(3,mo)); // maxorder = 4 not supported
425 }
426 }
427 }
428#endif
429
430 bool is_nsolve = linop.m_parent;
431
432 auto solve_start_time = amrex::second();
433
434 RT& composite_norminf = m_final_resnorm0;
435
436 m_niters_cg.clear();
437 m_iter_fine_resnorm0.clear();
438
439 prepareForSolve(a_sol, a_rhs);
440
441 computeMLResidual(finest_amr_lev);
442
443 bool local = true;
444 RT resnorm0 = MLResNormInf(finest_amr_lev, local);
445 RT rhsnorm0 = MLRhsNormInf(local);
446 if (!is_nsolve) {
447 ParallelAllReduce::Max<RT>({resnorm0, rhsnorm0}, ParallelContext::CommunicatorSub());
448
449 if (verbose >= 1)
450 {
451 amrex::Print() << print_ident << "MLMG: Initial rhs = " << rhsnorm0 << "\n"
452 << print_ident << "MLMG: Initial residual (resid0) = " << resnorm0 << "\n";
453 }
454 }
455
456 m_init_resnorm0 = resnorm0;
457 m_rhsnorm0 = rhsnorm0;
458
459 RT max_norm = resnorm0;
460 std::string norm_name = "resid0";
461 switch (norm_type) {
463 if (rhsnorm0 >= resnorm0) {
464 norm_name = "bnorm";
465 max_norm = rhsnorm0;
466 } else {
467 norm_name = "resid0";
468 max_norm = resnorm0;
469 }
470 break;
472 norm_name = "bnorm";
473 max_norm = rhsnorm0;
474 break;
476 norm_name = "resid0";
477 max_norm = resnorm0;
478 break;
479 }
480
481 const RT res_target = std::max(a_tol_abs, std::max(a_tol_rel,RT(1.e-16))*max_norm);
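// Editorial note: the stopping criterion above combines both tolerances,
//     res_target = max(a_tol_abs, max(a_tol_rel, 1e-16) * max_norm),
// where max_norm is resid0, bnorm, or the larger of the two depending on
// norm_type. The 1e-16 floor keeps a zero relative tolerance from demanding
// more than double precision can deliver.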
482
483 if (!is_nsolve && resnorm0 <= res_target) {
484 composite_norminf = resnorm0;
485 if (verbose >= 1) {
486 amrex::Print() << print_ident << "MLMG: No iterations needed\n";
487 }
488 } else {
489 auto iter_start_time = amrex::second();
490 bool converged = false;
491
492 const int niters = do_fixed_number_of_iters ? do_fixed_number_of_iters : max_iters;
493 for (int iter = 0; iter < niters; ++iter)
494 {
495 oneIter(iter);
496
497 converged = false;
498
499 // Test convergence on the fine amr level
500 computeResidual(finest_amr_lev);
501
502 if (is_nsolve) { continue; }
503
504 RT fine_norminf = ResNormInf(finest_amr_lev);
505 m_iter_fine_resnorm0.push_back(fine_norminf);
506 composite_norminf = fine_norminf;
507 if (verbose >= 2) {
508 amrex::Print() << print_ident << "MLMG: Iteration " << std::setw(3) << iter+1 << " Fine resid/"
509 << norm_name << " = " << fine_norminf/max_norm << "\n";
510 }
511 bool fine_converged = (fine_norminf <= res_target);
512
513 if (namrlevs == 1 && fine_converged) {
514 converged = true;
515 } else if (fine_converged) {
516 // finest level is converged, but we still need to test the coarse levels
517 computeMLResidual(finest_amr_lev-1);
518 RT crse_norminf = MLResNormInf(finest_amr_lev-1);
519 if (verbose >= 2) {
520 amrex::Print() << print_ident << "MLMG: Iteration " << std::setw(3) << iter+1
521 << " Crse resid/" << norm_name << " = "
522 << crse_norminf/max_norm << "\n";
523 }
524 converged = (crse_norminf <= res_target);
525 composite_norminf = std::max(fine_norminf, crse_norminf);
526 } else {
527 converged = false;
528 }
529
530 if (converged) {
531 if (verbose >= 1) {
532 amrex::Print() << print_ident << "MLMG: Final Iter. " << iter+1
533 << " resid, resid/" << norm_name << " = "
534 << composite_norminf << ", "
535 << composite_norminf/max_norm << "\n";
536 }
537 break;
538 } else {
539 if (composite_norminf > RT(1.e20)*max_norm)
540 {
541 if (verbose > 0) {
542 amrex::Print() << print_ident << "MLMG: Failing to converge after " << iter+1 << " iterations."
543 << " resid, resid/" << norm_name << " = "
544 << composite_norminf << ", "
545 << composite_norminf/max_norm << "\n";
546 }
547
548 if ( throw_exception ) {
549 throw error("MLMG blew up.");
550 } else {
551 amrex::Abort("MLMG failing so lets stop here");
552 }
553 }
554 }
555 }
556
557 if (!converged && do_fixed_number_of_iters == 0) {
558 if (verbose > 0) {
559 amrex::Print() << print_ident << "MLMG: Failed to converge after " << max_iters << " iterations."
560 << " resid, resid/" << norm_name << " = "
561 << composite_norminf << ", "
562 << composite_norminf/max_norm << "\n";
563 }
564
565 if ( throw_exception ) {
566 throw error("MLMG failed to converge.");
567 } else {
568 amrex::Abort("MLMG failed.");
569 }
570 }
571 timer[iter_time] = amrex::second() - iter_start_time;
572 }
573
574 linop.postSolve(GetVecOfPtrs(sol));
575
576 IntVect ng_back = final_fill_bc ? IntVect(1) : IntVect(0);
577 if (linop.hasHiddenDimension()) {
578 ng_back[linop.hiddenDirection()] = 0;
579 }
580 for (int alev = 0; alev < namrlevs; ++alev)
581 {
582 if (!sol_is_alias[alev]) {
583 LocalCopy(*a_sol[alev], sol[alev], 0, 0, ncomp, ng_back);
584 }
585 }
586
587 timer[solve_time] = amrex::second() - solve_start_time;
588 if (verbose >= 1) {
589 ParallelReduce::Max<double>(timer.data(), timer.size(), 0,
590                                    ParallelContext::CommunicatorSub());
591        if (ParallelContext::MyProcSub() == 0)
592        {
593 amrex::AllPrint() << print_ident << "MLMG: Timers: Solve = " << timer[solve_time]
594 << " Iter = " << timer[iter_time]
595 << " Bottom = " << timer[bottom_time] << "\n";
596 }
597 }
598
599 ++solve_called;
600
601 if (do_no_sync_gpu) {
602 (void)Gpu::setSingleStreamRegion(prev_in_single_stream_region);
603 (void)Gpu::setNoSyncRegion(prev_in_nosync_region);
604 }
605
606 return composite_norminf;
607}
608
609template <typename MF>
610auto
611MLMGT<MF>::precond (Vector<MF*> const& a_sol, Vector<MF const*> const& a_rhs,
612                    RT a_tol_rel, RT a_tol_abs) -> RT
613{
614 precond_mode = true;
615 std::swap(max_precond_iters, do_fixed_number_of_iters);
616 linop.beginPrecondBC();
617
618 auto r = solve(a_sol, a_rhs, a_tol_rel, a_tol_abs);
619
620 linop.endPrecondBC();
621 std::swap(max_precond_iters, do_fixed_number_of_iters);
622 precond_mode = false;
623
624 return r;
625}
626
627template <typename MF>
628void
629MLMGT<MF>::prepareForFluxes (Vector<MF const*> const& a_sol)
630{
631 for (int alev = finest_amr_lev; alev >= 0; --alev) {
632 const MF* crse_bcdata = (alev > 0) ? a_sol[alev-1] : nullptr;
633 linop.prepareForFluxes(alev, crse_bcdata);
634 }
635}
636
637template <typename MF>
638template <typename AMF>
639void
640MLMGT<MF>::getGradSolution (const Vector<Array<AMF*,AMREX_SPACEDIM> >& a_grad_sol, Location a_loc)
641{
642 BL_PROFILE("MLMG::getGradSolution()");
643 for (int alev = 0; alev <= finest_amr_lev; ++alev) {
644 if constexpr (std::is_same<AMF,MF>()) {
645 linop.compGrad(alev, a_grad_sol[alev], sol[alev], a_loc);
646 } else {
647            Array<MF,AMREX_SPACEDIM> grad_sol;
648            for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
649 auto const& amf = *(a_grad_sol[alev][idim]);
650 grad_sol[idim].define(boxArray(amf), DistributionMap(amf), ncomp, 0);
651 }
652 linop.compGrad(alev, GetArrOfPtrs(grad_sol), sol[alev], a_loc);
653 for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
654 LocalCopy(*a_grad_sol[alev][idim], grad_sol[idim], 0, 0, ncomp, IntVect(0));
655 }
656 }
657 }
658}
659
660template <typename MF>
661template <typename AMF>
662void
663MLMGT<MF>::getGradSolution (std::initializer_list<Array<AMF*,AMREX_SPACEDIM>> a_grad_sol, Location a_loc)
664{
665 getGradSolution(Vector<Array<AMF*,AMREX_SPACEDIM>>(std::move(a_grad_sol)), a_loc);
666}
667
668template <typename MF>
669template <typename AMF>
670void
671MLMGT<MF>::getFluxes (const Vector<Array<AMF*,AMREX_SPACEDIM> >& a_flux,
672                      Location a_loc)
673{
674 if (!linop.isCellCentered()) {
675 amrex::Abort("Calling wrong getFluxes for nodal solver");
676 }
677
678 AMREX_ASSERT(sol.size() == a_flux.size());
679
680 if constexpr (std::is_same<AMF,MF>()) {
681 getFluxes(a_flux, GetVecOfPtrs(sol), a_loc);
682 } else {
683 Vector<Array<MF,AMREX_SPACEDIM>> fluxes(namrlevs);
684 for (int ilev = 0; ilev < namrlevs; ++ilev) {
685 for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
686 auto const& amf = *(a_flux[ilev][idim]);
687 fluxes[ilev][idim].define(boxArray(amf), DistributionMap(amf), ncomp, 0);
688 }
689 }
690 getFluxes(GetVecOfArrOfPtrs(fluxes), GetVecOfPtrs(sol), a_loc);
691 for (int ilev = 0; ilev < namrlevs; ++ilev) {
692 for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
693 LocalCopy(*a_flux[ilev][idim], fluxes[ilev][idim], 0, 0, ncomp, IntVect(0));
694 }
695 }
696 }
697}
698
699template <typename MF>
700template <typename AMF>
701void
702MLMGT<MF>::getFluxes (std::initializer_list<Array<AMF*,AMREX_SPACEDIM>> a_flux,
703                      Location a_loc)
704{
705 getFluxes(Vector<Array<AMF*,AMREX_SPACEDIM>>(std::move(a_flux)), a_loc);
706}
707
708template <typename MF>
709template <typename AMF>
710void
711MLMGT<MF>::getFluxes (const Vector<Array<AMF*,AMREX_SPACEDIM> >& a_flux,
712                      const Vector<AMF*>& a_sol, Location a_loc)
713{
714 BL_PROFILE("MLMG::getFluxes()");
715
716 if (!linop.isCellCentered()) {
717 amrex::Abort("Calling wrong getFluxes for nodal solver");
718 }
719
720 if constexpr (std::is_same<AMF,MF>()) {
721 linop.getFluxes(a_flux, a_sol, a_loc);
722 } else {
723 Vector<Array<MF,AMREX_SPACEDIM>> fluxes(namrlevs);
724 for (int ilev = 0; ilev < namrlevs; ++ilev) {
725 for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
726 auto const& amf = *(a_flux[ilev][idim]);
727 fluxes[ilev][idim].define(boxArray(amf), DistributionMap(amf), ncomp, 0);
728 }
729 LocalCopy(sol[ilev], *a_sol[ilev], 0, 0, ncomp, nGrowVect(sol[ilev]));
730 }
731 linop.getFluxes(GetVecOfArrOfPtrs(fluxes), GetVecOfPtrs(sol), a_loc);
732 for (int ilev = 0; ilev < namrlevs; ++ilev) {
733 for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
734 LocalCopy(*a_flux[ilev][idim], fluxes[ilev][idim], 0, 0, ncomp, IntVect(0));
735 }
736 }
737 }
738}
739
740template <typename MF>
741template <typename AMF>
742void
744 std::initializer_list<AMF*> a_sol, Location a_loc)
745{
746 getFluxes(Vector<Array<AMF*,AMREX_SPACEDIM>>(std::move(a_flux)),
747 Vector<AMF*>(std::move(a_sol)), a_loc);
748}
749
750template <typename MF>
751template <typename AMF>
752void
753MLMGT<MF>::getFluxes (const Vector<AMF*>& a_flux, Location a_loc)
754{
755 AMREX_ASSERT(sol.size() == a_flux.size());
756 if constexpr (std::is_same<AMF,MF>()) {
757 getFluxes(a_flux, GetVecOfPtrs(sol), a_loc);
758 } else {
759 Vector<MF> fluxes(namrlevs);
760 for (int ilev = 0; ilev < namrlevs; ++ilev) {
761 auto const& amf = *a_flux[ilev];
762 fluxes[ilev].define(boxArray(amf), DistributionMap(amf), ncomp, 0);
763 }
764 getFluxes(GetVecOfPtrs(fluxes), GetVecOfPtrs(sol), a_loc);
765 for (int ilev = 0; ilev < namrlevs; ++ilev) {
766 LocalCopy(*a_flux[ilev], fluxes[ilev], 0, 0, ncomp, IntVect(0));
767 }
768 }
769}
770
771template <typename MF>
772template <typename AMF>
773void
774MLMGT<MF>::getFluxes (std::initializer_list<AMF*> a_flux, Location a_loc)
775{
776 getFluxes(Vector<AMF*>(std::move(a_flux)), a_loc);
777}
778
779template <typename MF>
780template <typename AMF>
781void
782MLMGT<MF>::getFluxes (const Vector<AMF*>& a_flux,
783                      const Vector<AMF*>& a_sol, Location /*a_loc*/)
784{
785 AMREX_ASSERT(nComp(*a_flux[0]) >= AMREX_SPACEDIM);
786
787 if constexpr (! std::is_same<AMF,MF>()) {
788 for (int alev = 0; alev < namrlevs; ++alev) {
789 LocalCopy(sol[alev], *a_sol[alev], 0, 0, ncomp, nGrowVect(sol[alev]));
790 }
791 }
792
793 if (linop.isCellCentered())
794 {
795 Vector<Array<MF,AMREX_SPACEDIM> > ffluxes(namrlevs);
796 for (int alev = 0; alev < namrlevs; ++alev) {
797 for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
798 const int mglev = 0;
799 int nghost = 0;
800 if (cf_strategy == CFStrategy::ghostnodes) { nghost = linop.getNGrow(alev); }
801 ffluxes[alev][idim].define(amrex::convert(linop.m_grids[alev][mglev],
802                                                          IntVect::TheDimensionVector(idim)),
803                                           linop.m_dmap[alev][mglev], ncomp, nghost, MFInfo(),
804 *linop.m_factory[alev][mglev]);
805 }
806 }
807 if constexpr (std::is_same<AMF,MF>()) {
808 getFluxes(amrex::GetVecOfArrOfPtrs(ffluxes), a_sol, Location::FaceCenter);
809 } else {
810 getFluxes(amrex::GetVecOfArrOfPtrs(ffluxes), GetVecOfPtrs(sol), Location::FaceCenter);
811 }
812 for (int alev = 0; alev < namrlevs; ++alev) {
813#ifdef AMREX_USE_EB
814 EB_average_face_to_cellcenter(*a_flux[alev], 0, amrex::GetArrOfConstPtrs(ffluxes[alev]));
815#else
816 average_face_to_cellcenter(*a_flux[alev], 0, amrex::GetArrOfConstPtrs(ffluxes[alev]));
817#endif
818 }
819
820 } else {
821 if constexpr (std::is_same<AMF,MF>()) {
822 linop.getFluxes(a_flux, a_sol);
823 } else {
824 Vector<MF> fluxes(namrlevs);
825 for (int ilev = 0; ilev < namrlevs; ++ilev) {
826 auto const& amf = *a_flux[ilev];
827 fluxes[ilev].define(boxArray(amf), DistributionMap(amf), ncomp, 0);
828 }
829 linop.getFluxes(GetVecOfPtrs(fluxes), GetVecOfPtrs(sol));
830 for (int ilev = 0; ilev < namrlevs; ++ilev) {
831 LocalCopy(*a_flux[ilev], fluxes[ilev], 0, 0, ncomp, IntVect(0));
832 }
833 }
834 }
835}
836
837template <typename MF>
838template <typename AMF>
839void
840MLMGT<MF>::getFluxes (std::initializer_list<AMF*> a_flux,
841 std::initializer_list<AMF*> a_sol, Location a_loc)
842{
843 getFluxes(Vector<AMF*>(std::move(a_flux)),
844 Vector<AMF*>(std::move(a_sol)), a_loc);
845}
846
847#ifdef AMREX_USE_EB
848template <typename MF>
849void
850MLMGT<MF>::getEBFluxes (const Vector<MF*>& a_eb_flux)
851{
852 if (!linop.isCellCentered()) {
853 amrex::Abort("getEBFluxes is for cell-centered only");
854 }
855
856 AMREX_ASSERT(sol.size() == a_eb_flux.size());
857 getEBFluxes(a_eb_flux, GetVecOfPtrs(sol));
858}
859
860template <typename MF>
861void
862MLMGT<MF>::getEBFluxes (const Vector<MF*>& a_eb_flux, const Vector<MF*>& a_sol)
863{
864 BL_PROFILE("MLMG::getEBFluxes()");
865
866 if (!linop.isCellCentered()) {
867 amrex::Abort("getEBFluxes is for cell-centered only");
868 }
869
870 linop.getEBFluxes(a_eb_flux, a_sol);
871}
872#endif
873
874template <typename MF>
875void
876MLMGT<MF>::compResidual (const Vector<MF*>& a_res, const Vector<MF*>& a_sol,
877                         const Vector<MF const*>& a_rhs)
878{
879 BL_PROFILE("MLMG::compResidual()");
880
881 IntVect ng_sol(1);
882 if (linop.hasHiddenDimension()) { ng_sol[linop.hiddenDirection()] = 0; }
883
884 sol.resize(namrlevs);
885 sol_is_alias.resize(namrlevs,true);
886 for (int alev = 0; alev < namrlevs; ++alev)
887 {
888 if (cf_strategy == CFStrategy::ghostnodes || nGrowVect(*a_sol[alev]) == ng_sol)
889 {
890 sol[alev] = linop.makeAlias(*a_sol[alev]);
891 sol_is_alias[alev] = true;
892 }
893 else
894 {
895 if (sol_is_alias[alev])
896 {
897 sol[alev] = linop.make(alev, 0, ng_sol);
898 }
899 LocalCopy(sol[alev], *a_sol[alev], 0, 0, ncomp, IntVect(0));
900 }
901 }
902
903 prepareLinOp();
904
905 const auto& amrrr = linop.AMRRefRatio();
906
907 for (int alev = finest_amr_lev; alev >= 0; --alev) {
908 const MF* crse_bcdata = (alev > 0) ? &(sol[alev-1]) : nullptr;
909 const MF* prhs = a_rhs[alev];
910#if (AMREX_SPACEDIM != 3)
911 int nghost = (cf_strategy == CFStrategy::ghostnodes) ? linop.getNGrow(alev) : 0;
912 MF rhstmp(boxArray(*prhs), DistributionMap(*prhs), ncomp, nghost,
913 MFInfo(), *linop.Factory(alev));
914 LocalCopy(rhstmp, *prhs, 0, 0, ncomp, IntVect(nghost));
915 linop.applyMetricTerm(alev, 0, rhstmp);
916 linop.unimposeNeumannBC(alev, rhstmp);
917 linop.applyInhomogNeumannTerm(alev, rhstmp);
918 prhs = &rhstmp;
919#endif
920 linop.solutionResidual(alev, *a_res[alev], sol[alev], *prhs, crse_bcdata);
921 if (alev < finest_amr_lev) {
922 linop.reflux(alev, *a_res[alev], sol[alev], *prhs,
923 *a_res[alev+1], sol[alev+1], *a_rhs[alev+1]);
924 if (linop.isCellCentered()) {
925#ifdef AMREX_USE_EB
926 EB_average_down(*a_res[alev+1], *a_res[alev], 0, ncomp, amrrr[alev]);
927#else
928 average_down(*a_res[alev+1], *a_res[alev], 0, ncomp, amrrr[alev]);
929#endif
930 }
931 }
932 }
933
934
935#if (AMREX_SPACEDIM != 3)
936 for (int alev = 0; alev <= finest_amr_lev; ++alev) {
937 linop.unapplyMetricTerm(alev, 0, *a_res[alev]);
938 }
939#endif
940}
941
942template <typename MF>
943void
944MLMGT<MF>::apply (const Vector<MF*>& out, const Vector<MF*>& a_in)
945{
946 BL_PROFILE("MLMG::apply()");
947
948 Vector<MF*> in(namrlevs);
949 Vector<MF> in_raii(namrlevs);
950 Vector<MF> rh(namrlevs);
951 int nghost = 0;
952 IntVect ng_sol(1);
953 if (linop.hasHiddenDimension()) { ng_sol[linop.hiddenDirection()] = 0; }
954
955 for (int alev = 0; alev < namrlevs; ++alev)
956 {
957 if (cf_strategy == CFStrategy::ghostnodes)
958 {
959 nghost = linop.getNGrow(alev);
960 in[alev] = a_in[alev];
961 }
962 else if (nGrowVect(*a_in[alev]) == ng_sol)
963 {
964 in[alev] = a_in[alev];
965 }
966 else
967 {
968 IntVect ng = ng_sol;
969 if (cf_strategy == CFStrategy::ghostnodes) { ng = IntVect(nghost); }
970 in_raii[alev] = linop.make(alev, 0, ng);
971 LocalCopy(in_raii[alev], *a_in[alev], 0, 0, ncomp, IntVect(nghost));
972 in[alev] = &(in_raii[alev]);
973 }
974 rh[alev] = linop.make(alev, 0, IntVect(nghost));
975 setVal(rh[alev], RT(0.0));
976 }
977
978 prepareLinOp();
979
980 for (int alev = 0; alev < namrlevs; ++alev) {
981 linop.applyInhomogNeumannTerm(alev, rh[alev]);
982 }
983
984 const auto& amrrr = linop.AMRRefRatio();
985
986 for (int alev = finest_amr_lev; alev >= 0; --alev) {
987 const MF* crse_bcdata = (alev > 0) ? in[alev-1] : nullptr;
988 linop.solutionResidual(alev, *out[alev], *in[alev], rh[alev], crse_bcdata);
989 if (alev < finest_amr_lev) {
990 linop.reflux(alev, *out[alev], *in[alev], rh[alev],
991 *out[alev+1], *in[alev+1], rh[alev+1]);
992 if (linop.isCellCentered()) {
993 if constexpr (IsMultiFabLike_v<MF>) {
994#ifdef AMREX_USE_EB
995 EB_average_down(*out[alev+1], *out[alev], 0, nComp(*out[alev]), amrrr[alev]);
996#else
997 average_down(*out[alev+1], *out[alev], 0, nComp(*out[alev]), amrrr[alev]);
998#endif
999 } else {
1000 amrex::Abort("MLMG: TODO average_down for non-MultiFab");
1001 }
1002 }
1003 }
1004 }
1005
1006#if (AMREX_SPACEDIM != 3)
1007 for (int alev = 0; alev <= finest_amr_lev; ++alev) {
1008 linop.unapplyMetricTerm(alev, 0, *out[alev]);
1009 }
1010#endif
1011
1012 for (int alev = 0; alev <= finest_amr_lev; ++alev) {
1013 if (cf_strategy == CFStrategy::ghostnodes) { nghost = linop.getNGrow(alev); }
1014 Scale(*out[alev], RT(-1), 0, nComp(*out[alev]), nghost);
1015 }
1016}
1017
1018template <typename MF>
1019void
1020MLMGT<MF>::applyPrecond (const Vector<MF*>& out, const Vector<MF*>& in)
1021{
1022 precond_mode = true;
1023 linop.beginPrecondBC();
1024 apply(out, in);
1025 linop.endPrecondBC();
1026 precond_mode = false;
1027}
1028
1029template <typename MF>
1030template <typename AMF>
1031void
1032MLMGT<MF>::prepareForSolve (Vector<AMF*> const& a_sol, Vector<AMF const*> const& a_rhs)
1033{
1034 BL_PROFILE("MLMG::prepareForSolve()");
1035
1036 AMREX_ASSERT(namrlevs <= a_sol.size());
1037 AMREX_ASSERT(namrlevs <= a_rhs.size());
1038
1039 timer.assign(ntimers, 0.0);
1040
1041 IntVect ng_rhs(0);
1042 IntVect ng_sol(1);
1043 if (linop.hasHiddenDimension()) { ng_sol[linop.hiddenDirection()] = 0; }
1044
1045 if (!linop_prepared) {
1046 linop.prepareForSolve();
1047 linop_prepared = true;
1048 } else if (linop.needsUpdate()) {
1049 linop.update();
1050
1051#if defined(AMREX_USE_HYPRE) && (AMREX_SPACEDIM > 1)
1052 hypre_solver.reset();
1053 hypre_bndry.reset();
1054 hypre_node_solver.reset();
1055#endif
1056
1057#if defined(AMREX_USE_PETSC) && (AMREX_SPACEDIM > 1)
1058 petsc_solver.reset();
1059 petsc_bndry.reset();
1060#endif
1061 }
1062
1063 sol.resize(namrlevs);
1064 sol_is_alias.resize(namrlevs,false);
1065 for (int alev = 0; alev < namrlevs; ++alev)
1066 {
1067 if (cf_strategy == CFStrategy::ghostnodes)
1068 {
1069 if constexpr (std::is_same<AMF,MF>()) {
1070 sol[alev] = linop.makeAlias(*a_sol[alev]);
1071 sol_is_alias[alev] = true;
1072 } else {
1073 amrex::Abort("Type conversion not supported for CFStrategy::ghostnodes");
1074 }
1075 }
1076 else
1077 {
1078 if (nGrowVect(*a_sol[alev]) == ng_sol) {
1079 if constexpr (std::is_same<AMF,MF>()) {
1080 sol[alev] = linop.makeAlias(*a_sol[alev]);
1081 sol_is_alias[alev] = true;
1082 }
1083 }
1084 if (!sol_is_alias[alev]) {
1085 if (!solve_called) {
1086 sol[alev] = linop.make(alev, 0, ng_sol);
1087 }
1088 LocalCopy(sol[alev], *a_sol[alev], 0, 0, ncomp, IntVect(0));
1089 setBndry(sol[alev], RT(0.0), 0, ncomp);
1090 }
1091 }
1092 }
1093
1094 rhs.resize(namrlevs);
1095 for (int alev = 0; alev < namrlevs; ++alev)
1096 {
1097 if (cf_strategy == CFStrategy::ghostnodes) { ng_rhs = IntVect(linop.getNGrow(alev)); }
1098 if (!solve_called) {
1099 rhs[alev] = linop.make(alev, 0, ng_rhs);
1100 }
1101 LocalCopy(rhs[alev], *a_rhs[alev], 0, 0, ncomp, ng_rhs);
1102 linop.applyMetricTerm(alev, 0, rhs[alev]);
1103 linop.unimposeNeumannBC(alev, rhs[alev]);
1104 linop.applyInhomogNeumannTerm(alev, rhs[alev]);
1105 linop.applyOverset(alev, rhs[alev]);
1106 if ( ! precond_mode) {
1107 bool r = linop.scaleRHS(alev, &(rhs[alev]));
1109 }
1110
1111#ifdef AMREX_USE_EB
1112 const auto *factory = dynamic_cast<EBFArrayBoxFactory const*>(linop.Factory(alev));
1113 if (factory && !factory->isAllRegular()) {
1114 if constexpr (std::is_same<MF,MultiFab>()) {
1115 EB_set_covered(rhs[alev], 0, ncomp, 0, RT(0.0));
1116 EB_set_covered(sol[alev], 0, ncomp, 0, RT(0.0));
1117 } else {
1118 amrex::Abort("TODO: MLMG with EB only works with MultiFab");
1119 }
1120 }
1121#endif
1122 }
1123
1124 for (int falev = finest_amr_lev; falev > 0; --falev)
1125 {
1126 linop.averageDownSolutionRHS(falev-1, sol[falev-1], rhs[falev-1], sol[falev], rhs[falev]);
1127 }
1128
1129 // enforce solvability if appropriate
1130 if (linop.isSingular(0) && linop.getEnforceSingularSolvable())
1131 {
1132 makeSolvable();
1133 }
1134
1135 IntVect ng = linop.getNGrowVectRestriction();
1136 if (cf_strategy == CFStrategy::ghostnodes) { ng = ng_rhs; }
1137 if (!solve_called) {
1138 linop.make(res, ng);
1139 linop.make(rescor, ng);
1140 }
1141 for (int alev = 0; alev <= finest_amr_lev; ++alev)
1142 {
1143 const int nmglevs = linop.NMGLevels(alev);
1144 for (int mglev = 0; mglev < nmglevs; ++mglev)
1145 {
1146 setVal(res [alev][mglev], RT(0.0));
1147 setVal(rescor[alev][mglev], RT(0.0));
1148 }
1149 }
1150
1151 if (cf_strategy != CFStrategy::ghostnodes) { ng = ng_sol; }
1152 cor.resize(namrlevs);
1153 for (int alev = 0; alev <= finest_amr_lev; ++alev)
1154 {
1155 const int nmglevs = linop.NMGLevels(alev);
1156 cor[alev].resize(nmglevs);
1157 for (int mglev = 0; mglev < nmglevs; ++mglev)
1158 {
1159 if (!solve_called) {
1160 IntVect _ng = ng;
1161 if (cf_strategy == CFStrategy::ghostnodes) { _ng=IntVect(linop.getNGrow(alev,mglev)); }
1162 cor[alev][mglev] = linop.make(alev, mglev, _ng);
1163 }
1164 setVal(cor[alev][mglev], RT(0.0));
1165 }
1166 }
1167
1168 cor_hold.resize(std::max(namrlevs-1,1));
1169 {
1170 const int alev = 0;
1171 const int nmglevs = linop.NMGLevels(alev);
1172 cor_hold[alev].resize(nmglevs);
1173 for (int mglev = 0; mglev < nmglevs-1; ++mglev)
1174 {
1175 if (!solve_called) {
1176 IntVect _ng = ng;
1177 if (cf_strategy == CFStrategy::ghostnodes) { _ng=IntVect(linop.getNGrow(alev,mglev)); }
1178 cor_hold[alev][mglev] = linop.make(alev, mglev, _ng);
1179 }
1180 setVal(cor_hold[alev][mglev], RT(0.0));
1181 }
1182 }
1183 for (int alev = 1; alev < finest_amr_lev; ++alev)
1184 {
1185 cor_hold[alev].resize(1);
1186 if (!solve_called) {
1187 IntVect _ng = ng;
1188 if (cf_strategy == CFStrategy::ghostnodes) { _ng=IntVect(linop.getNGrow(alev)); }
1189 cor_hold[alev][0] = linop.make(alev, 0, _ng);
1190 }
1191 setVal(cor_hold[alev][0], RT(0.0));
1192 }
1193
1194 if (linop.m_parent // no embedded N-Solve
1195 || !linop.supportNSolve())
1196 {
1197 do_nsolve = false;
1198 }
1199
1200 if (do_nsolve && ns_linop == nullptr)
1201 {
1202 prepareForNSolve();
1203 }
1204
1205 if (verbose >= 2) {
1206 amrex::Print() << print_ident << "MLMG: # of AMR levels: " << namrlevs << "\n"
1207 << print_ident << " # of MG levels on the coarsest AMR level: " << linop.NMGLevels(0)
1208 << "\n";
1209 if (ns_linop) {
1210 amrex::Print() << print_ident << " # of MG levels in N-Solve: " << ns_linop->NMGLevels(0) << "\n"
1211 << print_ident << " # of grids in N-Solve: " << ns_linop->m_grids[0][0].size() << "\n";
1212 }
1213 }
1214}
1215
1216template <typename MF>
1217void
1218MLMGT<MF>::prepareLinOp ()
1219{
1220 if (!linop_prepared) {
1221 linop.prepareForSolve();
1222 linop_prepared = true;
1223 } else if (linop.needsUpdate()) {
1224 linop.update();
1225 }
1226}
1227
1228template <typename MF>
1229void
1230MLMGT<MF>::preparePrecond ()
1231{
1232 prepareLinOp();
1233 linop.preparePrecond();
1234}
1235
1236template <typename MF>
1237void
1238MLMGT<MF>::prepareForNSolve ()
1239{
1240 if constexpr (IsMultiFabLike_v<MF>) {
1241 ns_linop = linop.makeNLinOp(nsolve_grid_size);
1242
1243 int nghost = 0;
1244 if (cf_strategy == CFStrategy::ghostnodes) { nghost = linop.getNGrow(); }
1245
1246 const BoxArray& ba = (*ns_linop).m_grids[0][0];
1247 const DistributionMapping& dm =(*ns_linop).m_dmap[0][0];
1248
1249 int ng = 1;
1250 if (cf_strategy == CFStrategy::ghostnodes) { ng = nghost; }
1251 ns_sol = std::make_unique<MF>(ba, dm, ncomp, ng, MFInfo(), *(ns_linop->Factory(0,0)));
1252 ng = 0;
1253 if (cf_strategy == CFStrategy::ghostnodes) { ng = nghost; }
1254 ns_rhs = std::make_unique<MF>(ba, dm, ncomp, ng, MFInfo(), *(ns_linop->Factory(0,0)));
1255 setVal(*ns_sol, RT(0.0));
1256 setVal(*ns_rhs, RT(0.0));
1257
1258 ns_linop->setLevelBC(0, ns_sol.get());
1259
1260 ns_mlmg = std::make_unique<MLMGT<MF>>(*ns_linop);
1261 ns_mlmg->setVerbose(0);
1262 ns_mlmg->setFixedIter(1);
1263 ns_mlmg->setMaxFmgIter(20);
1264 ns_mlmg->setBottomSolver(BottomSolver::smoother);
1265 }
1266}
1267
1268// in : Residual (res) on the finest AMR level
1269// out : sol on all AMR levels
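// Editorial outline of one iteration (matches the code below):
//   1. Sweep down the AMR hierarchy: run a miniCycle (a V-cycle at mglev 0)
//      on each fine level and form the composite residual on the next
//      coarser level.
//   2. On the coarsest AMR level, run an FMG cycle for the first
//      max_fmg_iters iterations, otherwise a plain V-cycle over all MG levels.
//   3. Sweep back up: interpolate the coarse correction, recompute the fine
//      residual, and run another miniCycle on each fine level.
//   4. Average down and synchronize the solution across AMR levels.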
1270template <typename MF>
1271void MLMGT<MF>::oneIter (int iter)
1272{
1273 BL_PROFILE("MLMG::oneIter()");
1274
1275 for (int alev = finest_amr_lev; alev > 0; --alev)
1276 {
1277 miniCycle(alev);
1278
1279 IntVect nghost(0);
1280 if (cf_strategy == CFStrategy::ghostnodes) { nghost = IntVect(linop.getNGrow(alev)); }
1281 LocalAdd(sol[alev], cor[alev][0], 0, 0, ncomp, nghost);
1282
1283 // compute residual for the coarse AMR level
1284 computeResWithCrseSolFineCor(alev-1,alev);
1285
1286 if (alev != finest_amr_lev) {
1287 std::swap(cor_hold[alev][0], cor[alev][0]); // save it for the up cycle
1288 }
1289 }
1290
1291 // coarsest amr level
1292 {
1293 // enforce solvability if appropriate
1294 if (linop.isSingular(0) && linop.getEnforceSingularSolvable())
1295 {
1296 makeSolvable(0,0,res[0][0]);
1297 }
1298
1299 if (iter < max_fmg_iters) {
1300 mgFcycle();
1301 } else {
1302 mgVcycle(0, 0);
1303 }
1304
1305 IntVect nghost(0);
1306 if (cf_strategy == CFStrategy::ghostnodes) { nghost = IntVect(linop.getNGrow(0)); }
1307 LocalAdd(sol[0], cor[0][0], 0, 0, ncomp, nghost);
1308 }
1309
1310 for (int alev = 1; alev <= finest_amr_lev; ++alev)
1311 {
1312 // (Fine AMR correction) = I(Coarse AMR correction)
1313 interpCorrection(alev);
1314
1315 IntVect nghost(0);
1316 if (cf_strategy == CFStrategy::ghostnodes) { nghost = IntVect(linop.getNGrow(alev)); }
1317 LocalAdd(sol[alev], cor[alev][0], 0, 0, ncomp, nghost);
1318
1319 if (alev != finest_amr_lev) {
1320 LocalAdd(cor_hold[alev][0], cor[alev][0], 0, 0, ncomp, nghost);
1321 }
1322
1323 // Update fine AMR level correction
1324 computeResWithCrseCorFineCor(alev);
1325
1326 miniCycle(alev);
1327
1328 LocalAdd(sol[alev], cor[alev][0], 0, 0, ncomp, nghost);
1329
1330 if (alev != finest_amr_lev) {
1331 LocalAdd(cor[alev][0], cor_hold[alev][0], 0, 0, ncomp, nghost);
1332 }
1333 }
1334
1335 linop.averageDownAndSync(sol);
1336}
1337
1338template <typename MF>
1339void
1340MLMGT<MF>::miniCycle (int amrlev)
1341{
1342 BL_PROFILE("MLMG::miniCycle()");
1343 const int mglev = 0;
1344 mgVcycle(amrlev, mglev);
1345}
1346
1347// in : Residual (res)
1348// out : Correction (cor) from bottom to this function's local top
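// Editorial outline (matches the code below): going down, each MG level
// applies nu1 smoothing sweeps to the correction, forms rescor = res - L(cor),
// and restricts rescor into res on the next coarser level. At the bottom,
// the bottom solver is used on AMR level 0; otherwise nu1 smoothing sweeps
// are applied. Going up, the coarse correction is interpolated and added,
// followed by nu2 smoothing sweeps.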
1349template <typename MF>
1350void
1351MLMGT<MF>::mgVcycle (int amrlev, int mglev_top)
1352{
1353 BL_PROFILE("MLMG::mgVcycle()");
1354
1355 const int mglev_bottom = linop.NMGLevels(amrlev) - 1;
1356
1357 for (int mglev = mglev_top; mglev < mglev_bottom; ++mglev)
1358 {
1359 BL_PROFILE_VAR("MLMG::mgVcycle_down::"+std::to_string(mglev), blp_mgv_down_lev);
1360
1361 if (verbose >= 4)
1362 {
1363 RT norm = norminf(res[amrlev][mglev],0,ncomp,IntVect(0));
1364 amrex::Print() << print_ident << "AT LEVEL " << amrlev << " " << mglev
1365 << " DN: Norm before smooth " << norm << "\n";
1366 }
1367
1368 setVal(cor[amrlev][mglev], RT(0.0));
1369 bool skip_fillboundary = true;
1370 linop.smooth(amrlev, mglev, cor[amrlev][mglev], res[amrlev][mglev], skip_fillboundary, nu1);
1371
1372 // rescor = res - L(cor)
1373 computeResOfCorrection(amrlev, mglev);
1374
1375 if (verbose >= 4)
1376 {
1377 RT norm = norminf(rescor[amrlev][mglev],0,ncomp,IntVect(0));
1378 amrex::Print() << print_ident << "AT LEVEL " << amrlev << " " << mglev
1379 << " DN: Norm after smooth " << norm << "\n";
1380 }
1381
1382 // res_crse = R(rescor_fine); this provides res/b to the level below
1383 linop.restriction(amrlev, mglev+1, res[amrlev][mglev+1], rescor[amrlev][mglev]);
1384 }
1385
1386 BL_PROFILE_VAR("MLMG::mgVcycle_bottom", blp_bottom);
1387 if (amrlev == 0)
1388 {
1389 if (verbose >= 4)
1390 {
1391 RT norm = norminf(res[amrlev][mglev_bottom],0,ncomp,IntVect(0));
1392 amrex::Print() << print_ident << "AT LEVEL " << amrlev << " " << mglev_bottom
1393 << " DN: Norm before bottom " << norm << "\n";
1394 }
1395 bottomSolve();
1396 if (verbose >= 4)
1397 {
1398 computeResOfCorrection(amrlev, mglev_bottom);
1399 RT norm = norminf(rescor[amrlev][mglev_bottom],0,ncomp,IntVect(0));
1400 amrex::Print() << print_ident << "AT LEVEL " << amrlev << " " << mglev_bottom
1401 << " UP: Norm after bottom " << norm << "\n";
1402 }
1403 }
1404 else
1405 {
1406 if (verbose >= 4)
1407 {
1408 RT norm = norminf(res[amrlev][mglev_bottom],0,ncomp,IntVect(0));
1409 amrex::Print() << print_ident << "AT LEVEL " << amrlev << " " << mglev_bottom
1410 << " Norm before smooth " << norm << "\n";
1411 }
1412 setVal(cor[amrlev][mglev_bottom], RT(0.0));
1413 bool skip_fillboundary = true;
1414 linop.smooth(amrlev, mglev_bottom, cor[amrlev][mglev_bottom],
1415 res[amrlev][mglev_bottom], skip_fillboundary, nu1);
1416 if (verbose >= 4)
1417 {
1418 computeResOfCorrection(amrlev, mglev_bottom);
1419 RT norm = norminf(rescor[amrlev][mglev_bottom],0,ncomp,IntVect(0));
1420 amrex::Print() << print_ident << "AT LEVEL " << amrlev << " " << mglev_bottom
1421 << " Norm after smooth " << norm << "\n";
1422 }
1423 }
1424 BL_PROFILE_VAR_STOP(blp_bottom);
1425
1426 for (int mglev = mglev_bottom-1; mglev >= mglev_top; --mglev)
1427 {
1428 BL_PROFILE_VAR("MLMG::mgVcycle_up::"+std::to_string(mglev), blp_mgv_up_lev);
1429 // cor_fine += I(cor_crse)
1430 addInterpCorrection(amrlev, mglev);
1431 if (verbose >= 4)
1432 {
1433 computeResOfCorrection(amrlev, mglev);
1434 RT norm = norminf(rescor[amrlev][mglev],0,ncomp,IntVect(0));
1435 amrex::Print() << print_ident << "AT LEVEL " << amrlev << " " << mglev
1436 << " UP: Norm before smooth " << norm << "\n";
1437 }
1438 linop.smooth(amrlev, mglev, cor[amrlev][mglev], res[amrlev][mglev], false, nu2);
1439
1440 if (cf_strategy == CFStrategy::ghostnodes) { computeResOfCorrection(amrlev, mglev); }
1441
1442 if (verbose >= 4)
1443 {
1444 computeResOfCorrection(amrlev, mglev);
1445 RT norm = norminf(rescor[amrlev][mglev],0,ncomp,IntVect(0));
1446 amrex::Print() << print_ident << "AT LEVEL " << amrlev << " " << mglev
1447 << " UP: Norm after smooth " << norm << "\n";
1448 }
1449 }
1450}
1451
1452// FMG cycle on the coarsest AMR level.
1453// in: Residual on the top MG level (i.e., 0)
1454// out: Correction (cor) on all MG levels
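// Editorial outline (matches the code below): the residual is averaged down
// through all MG levels, the bottom solver is applied, and then, from the
// bottom up, the correction is interpolated to each level, res is replaced
// by the residual of that correction, and a V-cycle starting at that level
// refines the correction further.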
1455template <typename MF>
1456void
1457MLMGT<MF>::mgFcycle ()
1458{
1459 BL_PROFILE("MLMG::mgFcycle()");
1460
1461#ifdef AMREX_USE_EB
1462 auto* pf = linop.Factory(0);
1463 auto is_all_regular = [pf] () {
1464 const auto *const f = dynamic_cast<EBFArrayBoxFactory const*>(pf);
1465 if (f) {
1466 return f->isAllRegular();
1467 } else {
1468 return true;
1469 }
1470 };
1471 amrex::ignore_unused(pf, is_all_regular);
1472 AMREX_ASSERT(linop.isCellCentered() || is_all_regular());
1473#endif
1474
1475 const int amrlev = 0;
1476 const int mg_bottom_lev = linop.NMGLevels(amrlev) - 1;
1477 IntVect nghost(0);
1478 if (cf_strategy == CFStrategy::ghostnodes) { nghost = IntVect(linop.getNGrow(amrlev)); }
1479
1480 for (int mglev = 1; mglev <= mg_bottom_lev; ++mglev)
1481 {
1482 linop.avgDownResMG(mglev, res[amrlev][mglev], res[amrlev][mglev-1]);
1483 }
1484
1485 bottomSolve();
1486
1487 for (int mglev = mg_bottom_lev-1; mglev >= 0; --mglev)
1488 {
1489 // cor_fine = I(cor_crse)
1490 interpCorrection(amrlev, mglev);
1491
1492 // rescor = res - L(cor)
1493 computeResOfCorrection(amrlev, mglev);
1494 // res = rescor; this provides b to the vcycle below
1495 LocalCopy(res[amrlev][mglev], rescor[amrlev][mglev], 0, 0, ncomp, nghost);
1496
1497 // save cor; do v-cycle; add the saved to cor
1498 std::swap(cor[amrlev][mglev], cor_hold[amrlev][mglev]);
1499 mgVcycle(amrlev, mglev);
1500 LocalAdd(cor[amrlev][mglev], cor_hold[amrlev][mglev], 0, 0, ncomp, nghost);
1501 }
1502}
1503
1504// At the true bottom of the coarsest AMR level.
1505// in : Residual (res) as b
1506// out : Correction (cor) as x
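// Editorial note on the fallback logic below: with cgbicg or bicgcg, a
// failed Krylov solve is retried with the other Krylov method, and a
// successful retry makes the switch permanent. If the bottom solve still
// fails (return code other than 0 or 9), the correction is reset to zero;
// nuf smoothing sweeps are then applied instead of the usual nub sweeps.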
1507template <typename MF>
1508void
1509MLMGT<MF>::bottomSolve ()
1510{
1511 if (do_nsolve)
1512 {
1513 NSolve(*ns_mlmg, *ns_sol, *ns_rhs);
1514 }
1515 else
1516 {
1517 actualBottomSolve();
1518 }
1519}
1520
1521template <typename MF>
1522void
1523MLMGT<MF>::NSolve (MLMGT<MF>& a_solver, MF& a_sol, MF& a_rhs)
1524{
1525 BL_PROFILE("MLMG::NSolve()");
1526
1527 setVal(a_sol, RT(0.0));
1528
1529 MF const& res_bottom = res[0].back();
1530 if (BoxArray::SameRefs(boxArray(a_rhs),boxArray(res_bottom)) &&
1531        DistributionMapping::SameRefs(DistributionMap(a_rhs),DistributionMap(res_bottom)))
1532    {
1533 LocalCopy(a_rhs, res_bottom, 0, 0, ncomp, IntVect(0));
1534 } else {
1535 setVal(a_rhs, RT(0.0));
1536 ParallelCopy(a_rhs, res_bottom, 0, 0, ncomp);
1537 }
1538
1539 a_solver.solve(Vector<MF*>{&a_sol}, Vector<MF const*>{&a_rhs},
1540 RT(-1.0), RT(-1.0));
1541
1542 linop.copyNSolveSolution(cor[0].back(), a_sol);
1543}
1544
1545template <typename MF>
1546void
1547MLMGT<MF>::actualBottomSolve ()
1548{
1549 BL_PROFILE("MLMG::actualBottomSolve()");
1550
1551 if (!linop.isBottomActive()) { return; }
1552
1553 auto bottom_start_time = amrex::second();
1554
1555 ParallelContext::push(linop.BottomCommunicator());
1556
1557 const int amrlev = 0;
1558 const int mglev = linop.NMGLevels(amrlev) - 1;
1559 auto& x = cor[amrlev][mglev];
1560 auto& b = res[amrlev][mglev];
1561
1562 setVal(x, RT(0.0));
1563
1564 if (bottom_solver == BottomSolver::smoother)
1565 {
1566 bool skip_fillboundary = true;
1567 linop.smooth(amrlev, mglev, x, b, skip_fillboundary, nuf);
1568 }
1569 else
1570 {
1571 MF* bottom_b = &b;
1572 MF raii_b;
1573 if (linop.isBottomSingular() && linop.getEnforceSingularSolvable())
1574 {
1575 const IntVect ng = nGrowVect(b);
1576 raii_b = linop.make(amrlev, mglev, ng);
1577 LocalCopy(raii_b, b, 0, 0, ncomp, ng);
1578 bottom_b = &raii_b;
1579
1580 makeSolvable(amrlev,mglev,*bottom_b);
1581 }
1582
1583 if (bottom_solver == BottomSolver::hypre)
1584 {
1585#if defined(AMREX_USE_HYPRE) && (AMREX_SPACEDIM > 1)
1586 if constexpr (std::is_same<MF,MultiFab>()) {
1587 bottomSolveWithHypre(x, *bottom_b);
1588 } else
1589#endif
1590 {
1591 amrex::Abort("Using Hypre as bottom solver not supported in this case");
1592 }
1593 }
1594 else if (bottom_solver == BottomSolver::petsc)
1595 {
1596#if defined(AMREX_USE_PETSC) && (AMREX_SPACEDIM > 1)
1597 if constexpr (std::is_same<MF,MultiFab>()) {
1598 bottomSolveWithPETSc(x, *bottom_b);
1599 } else
1600#endif
1601 {
1602 amrex::Abort("Using PETSc as bottom solver not supported in this case");
1603 }
1604 }
1605 else
1606 {
1607 typename MLCGSolverT<MF>::Type cg_type;
1608 if (bottom_solver == BottomSolver::cg ||
1609 bottom_solver == BottomSolver::cgbicg) {
1610 cg_type = MLCGSolverT<MF>::Type::CG;
1611 } else {
1612                cg_type = MLCGSolverT<MF>::Type::BiCGStab;
1613            }
1614
1615 int ret = bottomSolveWithCG(x, *bottom_b, cg_type);
1616
1617 if (ret != 0 && (bottom_solver == BottomSolver::cgbicg ||
1618 bottom_solver == BottomSolver::bicgcg))
1619 {
1620 if (bottom_solver == BottomSolver::cgbicg) {
1621 cg_type = MLCGSolverT<MF>::Type::BiCGStab; // switch to bicg
1622 } else {
1623 cg_type = MLCGSolverT<MF>::Type::CG; // switch to cg
1624 }
1625 setVal(cor[amrlev][mglev], RT(0.0));
1626 ret = bottomSolveWithCG(x, *bottom_b, cg_type);
1627 if (ret == 0) { // switch permanently
1628 if (cg_type == MLCGSolverT<MF>::Type::CG) {
1629 bottom_solver = BottomSolver::cg;
1630 } else {
1631 bottom_solver = BottomSolver::bicgstab;
1632 }
1633 }
1634 }
1635
1636 // If the bottom solve failed then set the correction to zero
1637 if (ret != 0 && ret != 9) {
1638 setVal(cor[amrlev][mglev], RT(0.0));
1639 }
1640 const int n = (ret==0) ? nub : nuf;
1641 linop.smooth(amrlev, mglev, x, b, false, n);
1642 }
1643 }
1644
1645    ParallelContext::pop();
1646
1647 if (! timer.empty()) {
1648 timer[bottom_time] += amrex::second() - bottom_start_time;
1649 }
1650}
1651
1652template <typename MF>
1653int
1654MLMGT<MF>::bottomSolveWithCG (MF& x, const MF& b, typename MLCGSolverT<MF>::Type type)
1655{
1656 MLCGSolverT<MF> cg_solver(linop);
1657 cg_solver.setSolver(type);
1658 cg_solver.setVerbose(bottom_verbose);
1659 cg_solver.setPrintIdentation(print_ident);
1660 cg_solver.setMaxIter(bottom_maxiter);
1661 cg_solver.setInitSolnZeroed(true);
1662 if (cf_strategy == CFStrategy::ghostnodes) { cg_solver.setNGhost(linop.getNGrow()); }
1663
1664 int ret = cg_solver.solve(x, b, bottom_reltol, bottom_abstol);
1665 if (ret != 0 && verbose > 1) {
1666 amrex::Print() << print_ident << "MLMG: Bottom solve failed.\n";
1667 }
1668 m_niters_cg.push_back(cg_solver.getNumIters());
1669 return ret;
1670}
1671
1672// Compute multi-level Residual (res) up to amrlevmax.
1673template <typename MF>
1674void
1675MLMGT<MF>::computeMLResidual (int amrlevmax)
1676{
1677 BL_PROFILE("MLMG::computeMLResidual()");
1678
1679 const int mglev = 0;
1680 for (int alev = amrlevmax; alev >= 0; --alev) {
1681 const MF* crse_bcdata = (alev > 0) ? &(sol[alev-1]) : nullptr;
1682 linop.solutionResidual(alev, res[alev][mglev], sol[alev], rhs[alev], crse_bcdata);
1683 if (alev < finest_amr_lev) {
1684 linop.reflux(alev, res[alev][mglev], sol[alev], rhs[alev],
1685 res[alev+1][mglev], sol[alev+1], rhs[alev+1]);
1686 }
1687 }
1688}
1689
1690// Compute single AMR level residual without masking.
1691template <typename MF>
1692void
1693MLMGT<MF>::computeResidual (int alev)
1694{
1695 BL_PROFILE("MLMG::computeResidual()");
1696 const MF* crse_bcdata = (alev > 0) ? &(sol[alev-1]) : nullptr;
1697 linop.solutionResidual(alev, res[alev][0], sol[alev], rhs[alev], crse_bcdata);
1698}
1699
1700// Compute coarse AMR level composite residual with coarse solution and fine correction
1701template <typename MF>
1702void
1703MLMGT<MF>::computeResWithCrseSolFineCor (int calev, int falev)
1704{
1705 BL_PROFILE("MLMG::computeResWithCrseSolFineCor()");
1706
1707 IntVect nghost(0);
1708 if (cf_strategy == CFStrategy::ghostnodes) {
1709 nghost = IntVect(std::min(linop.getNGrow(falev),linop.getNGrow(calev)));
1710 }
1711
1712 MF& crse_sol = sol[calev];
1713 const MF& crse_rhs = rhs[calev];
1714 MF& crse_res = res[calev][0];
1715
1716 MF& fine_sol = sol[falev];
1717 const MF& fine_rhs = rhs[falev];
1718 MF& fine_cor = cor[falev][0];
1719 MF& fine_res = res[falev][0];
1720 MF& fine_rescor = rescor[falev][0];
1721
1722 const MF* crse_bcdata = (calev > 0) ? &(sol[calev-1]) : nullptr;
1723 linop.solutionResidual(calev, crse_res, crse_sol, crse_rhs, crse_bcdata);
1724
1725 linop.correctionResidual(falev, 0, fine_rescor, fine_cor, fine_res, BCMode::Homogeneous);
1726 LocalCopy(fine_res, fine_rescor, 0, 0, ncomp, nghost);
1727
1728 linop.reflux(calev, crse_res, crse_sol, crse_rhs, fine_res, fine_sol, fine_rhs);
1729
1730 linop.avgDownResAmr(calev, crse_res, fine_res);
1731}
1732
1733// Compute fine AMR level residual fine_res = fine_res - L(fine_cor) with coarse providing BC.
1734template <typename MF>
1735void
1736MLMGT<MF>::computeResWithCrseCorFineCor (int falev)
1737{
1738 BL_PROFILE("MLMG::computeResWithCrseCorFineCor()");
1739
1740 IntVect nghost(0);
1741 if (cf_strategy == CFStrategy::ghostnodes) {
1742 nghost = IntVect(linop.getNGrow(falev));
1743 }
1744
1745 const MF& crse_cor = cor[falev-1][0];
1746
1747 MF& fine_cor = cor [falev][0];
1748 MF& fine_res = res [falev][0];
1749 MF& fine_rescor = rescor[falev][0];
1750
1751 // fine_rescor = fine_res - L(fine_cor)
1752 linop.correctionResidual(falev, 0, fine_rescor, fine_cor, fine_res,
1753 BCMode::Inhomogeneous, &crse_cor);
1754 LocalCopy(fine_res, fine_rescor, 0, 0, ncomp, nghost);
1755}
1756
1757// Interpolate correction from coarse to fine AMR level.
1758template <typename MF>
1759void
1760MLMGT<MF>::interpCorrection (int alev)
1761{
1762 BL_PROFILE("MLMG::interpCorrection_1");
1763
1764 IntVect nghost(0);
1765 if (cf_strategy == CFStrategy::ghostnodes) {
1766 nghost = IntVect(linop.getNGrow(alev));
1767 }
1768
1769 MF const& crse_cor = cor[alev-1][0];
1770 MF & fine_cor = cor[alev ][0];
1771
1772 const Geometry& crse_geom = linop.Geom(alev-1,0);
1773
1774 int ng_src = 0;
1775 int ng_dst = linop.isCellCentered() ? 1 : 0;
1776 if (cf_strategy == CFStrategy::ghostnodes)
1777 {
1778 ng_src = linop.getNGrow(alev-1);
1779 ng_dst = linop.getNGrow(alev-1);
1780 }
1781
1782 MF cfine = linop.makeCoarseAmr(alev, IntVect(ng_dst));
1783 setVal(cfine, RT(0.0));
1784 ParallelCopy(cfine, crse_cor, 0, 0, ncomp, IntVect(ng_src), IntVect(ng_dst),
1785 crse_geom.periodicity());
1786
1787 linop.interpolationAmr(alev, fine_cor, cfine, nghost); // NOLINT(readability-suspicious-call-argument)
1788}
1789
1790// Interpolate correction between MG levels
1791// inout: Correction (cor) on coarse MG lev. (out due to FillBoundary)
1792// out : Correction (cor) on fine MG lev.
1793template <typename MF>
1794void
1795MLMGT<MF>::interpCorrection (int alev, int mglev)
1796{
1797 BL_PROFILE("MLMG::interpCorrection_2");
1798
1799 MF& crse_cor = cor[alev][mglev+1];
1800 MF& fine_cor = cor[alev][mglev ];
1801 linop.interpAssign(alev, mglev, fine_cor, crse_cor);
1802}
1803
1804// (Fine MG level correction) += I(Coarse MG level correction)
1805template <typename MF>
1806void
1807MLMGT<MF>::addInterpCorrection (int alev, int mglev)
1808{
1809 BL_PROFILE("MLMG::addInterpCorrection()");
1810
1811 const MF& crse_cor = cor[alev][mglev+1];
1812 MF& fine_cor = cor[alev][mglev ];
1813
1814 MF cfine;
1815 const MF* cmf;
1816
1817 if (linop.isMFIterSafe(alev, mglev, mglev+1))
1818 {
1819 cmf = &crse_cor;
1820 }
1821 else
1822 {
1823 cfine = linop.makeCoarseMG(alev, mglev, IntVect(0));
1824 ParallelCopy(cfine, crse_cor, 0, 0, ncomp);
1825 cmf = &cfine;
1826 }
1827
1828 linop.interpolation(alev, mglev, fine_cor, *cmf);
1829}
1830
1831// Compute rescor = res - L(cor)
1832// in : res
1833// inout: cor (out due to FillBoundary in linop.correctionResidual)
1834// out : rescor
1835template <typename MF>
1836void
1837MLMGT<MF>::computeResOfCorrection (int amrlev, int mglev)
1838{
1839 BL_PROFILE("MLMG:computeResOfCorrection()");
1840 MF & x = cor[amrlev][mglev];
1841 const MF& b = res[amrlev][mglev];
1842 MF & r = rescor[amrlev][mglev];
1843 linop.correctionResidual(amrlev, mglev, r, x, b, BCMode::Homogeneous);
1844}
1845
1846// Compute single-level masked inf-norm of Residual (res).
1847template <typename MF>
1848auto
1849MLMGT<MF>::ResNormInf (int alev, bool local) -> RT
1850{
1851 BL_PROFILE("MLMG::ResNormInf()");
1852 return linop.normInf(alev, res[alev][0], local);
1853}
1854
1855// Computes multi-level masked inf-norm of Residual (res).
1856template <typename MF>
1857auto
1858MLMGT<MF>::MLResNormInf (int alevmax, bool local) -> RT
1859{
1860 BL_PROFILE("MLMG::MLResNormInf()");
1861 RT r = RT(0.0);
1862 for (int alev = 0; alev <= alevmax; ++alev)
1863 {
1864 r = std::max(r, ResNormInf(alev,true));
1865 }
1866    if (!local) { ParallelAllReduce::Max(r, ParallelContext::CommunicatorSub()); }
1867    return r;
1868}
1869
1870// Compute multi-level masked inf-norm of RHS (rhs).
1871template <typename MF>
1872auto
1873MLMGT<MF>::MLRhsNormInf (bool local) -> RT
1874{
1875 BL_PROFILE("MLMG::MLRhsNormInf()");
1876 RT r = RT(0.0);
1877 for (int alev = 0; alev <= finest_amr_lev; ++alev) {
1878 auto t = linop.normInf(alev, rhs[alev], true);
1879 r = std::max(r, t);
1880 }
1881    if (!local) { ParallelAllReduce::Max(r, ParallelContext::CommunicatorSub()); }
1882    return r;
1883}
1884
1885template <typename MF>
1886void
1887MLMGT<MF>::makeSolvable ()
1888{
1889 auto const& offset = linop.getSolvabilityOffset(0, 0, rhs[0]);
1890 if (verbose >= 4) {
1891 for (int c = 0; c < ncomp; ++c) {
1892 amrex::Print() << print_ident << "MLMG: Subtracting " << offset[c] << " from rhs component "
1893 << c << "\n";
1894 }
1895 }
1896 for (int alev = 0; alev < namrlevs; ++alev) {
1897 linop.fixSolvabilityByOffset(alev, 0, rhs[alev], offset);
1898 }
1899}
1900
1901template <typename MF>
1902void
1903MLMGT<MF>::makeSolvable (int amrlev, int mglev, MF& mf)
1904{
1905 auto const& offset = linop.getSolvabilityOffset(amrlev, mglev, mf);
1906 if (verbose >= 4) {
1907 for (int c = 0; c < ncomp; ++c) {
1908 amrex::Print() << print_ident << "MLMG: Subtracting " << offset[c]
1909 << " from mf component c = " << c
1910 << " on level (" << amrlev << ", " << mglev << ")\n";
1911 }
1912 }
1913 linop.fixSolvabilityByOffset(amrlev, mglev, mf, offset);
1914}
1915
1916#if defined(AMREX_USE_HYPRE) && (AMREX_SPACEDIM > 1)
1917template <typename MF>
1918template <class TMF,std::enable_if_t<std::is_same_v<TMF,MultiFab>,int>>
1919void
1920MLMGT<MF>::bottomSolveWithHypre (MF& x, const MF& b)
1921{
1922 const int amrlev = 0;
1923 const int mglev = linop.NMGLevels(amrlev) - 1;
1924
1925 AMREX_ALWAYS_ASSERT_WITH_MESSAGE(ncomp == 1, "bottomSolveWithHypre doesn't work with ncomp > 1");
1926
1927 if (linop.isCellCentered())
1928 {
1929 if (hypre_solver == nullptr) // We should reuse the setup
1930 {
1931 hypre_solver = linop.makeHypre(hypre_interface);
1932
1933 hypre_solver->setVerbose(bottom_verbose);
1934 if (hypre_interface == amrex::Hypre::Interface::ij) {
1935 hypre_solver->setHypreOptionsNamespace(hypre_options_namespace);
1936 } else {
1937 hypre_solver->setHypreOldDefault(hypre_old_default);
1938 hypre_solver->setHypreRelaxType(hypre_relax_type);
1939 hypre_solver->setHypreRelaxOrder(hypre_relax_order);
1940 hypre_solver->setHypreNumSweeps(hypre_num_sweeps);
1941 hypre_solver->setHypreStrongThreshold(hypre_strong_threshold);
1942 }
1943
1944 const BoxArray& ba = linop.m_grids[amrlev].back();
1945 const DistributionMapping& dm = linop.m_dmap[amrlev].back();
1946 const Geometry& geom = linop.m_geom[amrlev].back();
1947
1948 hypre_bndry = std::make_unique<MLMGBndryT<MF>>(ba, dm, ncomp, geom);
1949 hypre_bndry->setHomogValues();
1950 const Real* dx = linop.m_geom[0][0].CellSize();
1951 IntVect crse_ratio = linop.m_coarse_data_crse_ratio.allGT(0) ? linop.m_coarse_data_crse_ratio : IntVect(1);
1952 RealVect bclocation(AMREX_D_DECL(0.5*dx[0]*crse_ratio[0],
1953 0.5*dx[1]*crse_ratio[1],
1954 0.5*dx[2]*crse_ratio[2]));
1955 hypre_bndry->setLOBndryConds(linop.m_lobc, linop.m_hibc, IntVect(-1), bclocation,
1956 linop.m_coarse_fine_bc_type);
1957 }
1958
1959 // Only the IJ interface supports hypre's absolute-tolerance API; pass -1 otherwise
1960 amrex::Real hypre_abstol =
1961 (hypre_interface == amrex::Hypre::Interface::ij)
1962 ? bottom_abstol : Real(-1.0);
1963 hypre_solver->solve(
1964 x, b, bottom_reltol, hypre_abstol, bottom_maxiter, *hypre_bndry,
1965 linop.getMaxOrder());
1966 }
1967 else
1968 {
1969 if (hypre_node_solver == nullptr)
1970 {
1971 hypre_node_solver =
1972 linop.makeHypreNodeLap(bottom_verbose, hypre_options_namespace);
1973 }
1974 hypre_node_solver->solve(x, b, bottom_reltol, bottom_abstol, bottom_maxiter);
1975 }
1976
1977 // For singular problems, hypre may add a large constant to all values of the solution.
1978 // For precision reasons, enforce that the average of the correction from hypre is zero.
1979 if (linop.isSingular(amrlev) && linop.getEnforceSingularSolvable())
1980 {
1981 makeSolvable(amrlev, mglev, x);
1982 }
1983}
1984#endif
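// Illustrative usage sketch (not part of AMReX_MLMG.H): bottomSolveWithHypre()
// is reached only when hypre has been selected as the coarsest-level solver.
// A minimal caller-side setup might look like the hypothetical helper below;
// it assumes AMReX was built with hypre support and that a_linop is an
// already-configured cell-centered operator.  Hypre-specific knobs (relax
// type, strong threshold, ...) are typically supplied via ParmParse inputs.
inline Real mlmg_example_hypre_bottom (MLLinOpT<MultiFab>& a_linop,
                                       MultiFab& phi, MultiFab const& rhs)
{
    MLMGT<MultiFab> mlmg(a_linop);
    mlmg.setBottomSolver(BottomSolver::hypre); // use hypre on the coarsest MG level
    mlmg.setBottomVerbose(1);
    Vector<MultiFab*> sol_vec{&phi};
    Vector<MultiFab const*> rhs_vec{&rhs};
    return mlmg.solve(sol_vec, rhs_vec, Real(1.e-10), Real(0.0)); // relative and absolute tolerances
}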
1985
1986#if defined(AMREX_USE_PETSC) && (AMREX_SPACEDIM > 1)
1987template <typename MF>
1988template <class TMF,std::enable_if_t<std::is_same_v<TMF,MultiFab>,int>>
1989void
1990MLMGT<MF>::bottomSolveWithPETSc (MF& x, const MF& b)
1991{
1992 AMREX_ALWAYS_ASSERT_WITH_MESSAGE(ncomp == 1, "bottomSolveWithPETSc doesn't work with ncomp > 1");
1993
1994 if(petsc_solver == nullptr)
1995 {
1996 petsc_solver = linop.makePETSc();
1997 petsc_solver->setVerbose(bottom_verbose);
1998
1999 const BoxArray& ba = linop.m_grids[0].back();
2000 const DistributionMapping& dm = linop.m_dmap[0].back();
2001 const Geometry& geom = linop.m_geom[0].back();
2002
2003 petsc_bndry = std::make_unique<MLMGBndryT<MF>>(ba, dm, ncomp, geom);
2004 petsc_bndry->setHomogValues();
2005 const Real* dx = linop.m_geom[0][0].CellSize();
2006 auto crse_ratio = linop.m_coarse_data_crse_ratio.allGT(0) ? linop.m_coarse_data_crse_ratio : IntVect(1);
2007 RealVect bclocation(AMREX_D_DECL(0.5*dx[0]*crse_ratio[0],
2008 0.5*dx[1]*crse_ratio[1],
2009 0.5*dx[2]*crse_ratio[2]));
2010 petsc_bndry->setLOBndryConds(linop.m_lobc, linop.m_hibc, IntVect(-1), bclocation,
2011 linop.m_coarse_fine_bc_type);
2012 }
2013 petsc_solver->solve(x, b, bottom_reltol, Real(-1.), bottom_maxiter, *petsc_bndry,
2014 linop.getMaxOrder());
2015}
2016#endif
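// Illustrative sketch (not part of AMReX_MLMG.H): hypre and PETSc are opt-in
// alternatives for the coarsest-level solve; by default MLMG uses its built-in
// bottom solvers.  A hypothetical runtime dispatcher selecting the backend
// from a string could look like:
inline void mlmg_example_pick_bottom (MLMGT<MultiFab>& mlmg, std::string const& name)
{
    if (name == "hypre") {
        mlmg.setBottomSolver(BottomSolver::hypre);    // needs AMREX_USE_HYPRE at build time
    } else if (name == "petsc") {
        mlmg.setBottomSolver(BottomSolver::petsc);    // needs AMREX_USE_PETSC at build time
    } else if (name == "cg") {
        mlmg.setBottomSolver(BottomSolver::cg);
    } else {
        mlmg.setBottomSolver(BottomSolver::bicgstab); // a built-in Krylov bottom solver
    }
}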
2017
2018template <typename MF>
2019void
2020MLMGT<MF>::checkPoint (const Vector<MultiFab*>& a_sol,
2021 const Vector<MultiFab const*>& a_rhs,
2022 RT a_tol_rel, RT a_tol_abs, const char* a_file_name) const
2023{
2024 std::string file_name(a_file_name);
2025 UtilCreateCleanDirectory(file_name, false);
2026
2027 if (ParallelContext::IOProcessorSub())
2028 {
2029 std::string HeaderFileName(std::string(a_file_name)+"/Header");
2030 std::ofstream HeaderFile;
2031 HeaderFile.open(HeaderFileName.c_str(), std::ofstream::out |
2032 std::ofstream::trunc |
2033 std::ofstream::binary);
2034 if( ! HeaderFile.good()) {
2035 FileOpenFailed(HeaderFileName);
2036 }
2037
2038 HeaderFile.precision(17);
2039
2040 std::string norm_name = getEnumNameString(norm_type);
2041
2042 HeaderFile << linop.name() << "\n"
2043 << "a_tol_rel = " << a_tol_rel << "\n"
2044 << "a_tol_abs = " << a_tol_abs << "\n"
2045 << "verbose = " << verbose << "\n"
2046 << "max_iters = " << max_iters << "\n"
2047 << "nu1 = " << nu1 << "\n"
2048 << "nu2 = " << nu2 << "\n"
2049 << "nuf = " << nuf << "\n"
2050 << "nub = " << nub << "\n"
2051 << "max_fmg_iters = " << max_fmg_iters << "\n"
2052 << "bottom_solver = " << static_cast<int>(bottom_solver) << "\n"
2053 << "bottom_verbose = " << bottom_verbose << "\n"
2054 << "bottom_maxiter = " << bottom_maxiter << "\n"
2055 << "bottom_reltol = " << bottom_reltol << "\n"
2056 << "convergence_norm = " << norm_name << "\n"
2057 << "namrlevs = " << namrlevs << "\n"
2058 << "finest_amr_lev = " << finest_amr_lev << "\n"
2059 << "linop_prepared = " << linop_prepared << "\n"
2060 << "solve_called = " << solve_called << "\n";
2061
2062 for (int ilev = 0; ilev <= finest_amr_lev; ++ilev) {
2063 UtilCreateCleanDirectory(file_name+"/Level_"+std::to_string(ilev), false);
2064 }
2065 }
2066
2067 ParallelContext::BarrierSub();
2068
2069 for (int ilev = 0; ilev <= finest_amr_lev; ++ilev) {
2070 VisMF::Write(*a_sol[ilev], file_name+"/Level_"+std::to_string(ilev)+"/sol");
2071 VisMF::Write(*a_rhs[ilev], file_name+"/Level_"+std::to_string(ilev)+"/rhs");
2072 }
2073
2074 linop.checkPoint(file_name+"/linop");
2075}
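// Illustrative usage sketch (not part of AMReX_MLMG.H): checkPoint() is not
// usually called directly; it runs when a directory name is passed as the
// optional last argument of solve(), dumping the solver settings together
// with sol, rhs and the linear operator for offline debugging.  The helper
// and directory name below are hypothetical.
inline void mlmg_example_checkpointed_solve (MLLinOpT<MultiFab>& a_linop,
                                             MultiFab& phi, MultiFab const& rhs)
{
    MLMGT<MultiFab> mlmg(a_linop);
    Vector<MultiFab*> sol_vec{&phi};
    Vector<MultiFab const*> rhs_vec{&rhs};
    mlmg.solve(sol_vec, rhs_vec, Real(1.e-11), Real(0.0), "mlmg_checkpoint");
    // Afterwards mlmg_checkpoint/Header, mlmg_checkpoint/Level_*/{sol,rhs} and
    // mlmg_checkpoint/linop can be inspected.
}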
2076
2077template <typename MF>
2078void
2079MLMGT<MF>::incPrintIdentation ()
2080{
2081 print_ident.resize(print_ident.size()+4, ' ');
2082}
2083
2084template <typename MF>
2085void
2086MLMGT<MF>::decPrintIdentation ()
2087{
2088 if (print_ident.size() > 4) {
2089 print_ident.resize(print_ident.size()-4, ' ');
2090 } else {
2091 print_ident.clear();
2092 }
2093}
2094
2095extern template class MLMGT<MultiFab>;
2096
2097using MLMG = MLMGT<MultiFab>;
2098
2099}
2100
2101#endif