// AMReX_MLMG.H — part of the AMReX Block-Structured AMR Software Framework.
// NOTE(review): this file is a Doxygen code-listing scrape; page chrome
// ("Loading…", "Searching…", "No Matches") removed. Each code line below
// still carries its original source line number fused at the start, and a
// number of original lines were dropped by the extraction — gaps in the
// numbering mark them.
1#ifndef AMREX_ML_MG_H_
2#define AMREX_ML_MG_H_
3#include <AMReX_Config.H>
4#include <AMReX_Enum.H>
5
6#include <AMReX_MLLinOp.H>
7#include <AMReX_MLCGSolver.H>
8
9namespace amrex {
10
11// Norm type used to evaluate the target convergence criteria.
// NOTE(review): the extraction dropped original line 12 here, which defines
// MLMGNormType (values MLMGNormType::bnorm and MLMGNormType::greater are used
// below) — confirm the full enum definition against upstream AMReX.
13
15template <typename MF>
16class MLMGT
17{
18public:
19
20 class error
21 : public std::runtime_error
22 {
23 public :
24 using std::runtime_error::runtime_error;
25 };
26
27 template <typename T> friend class MLCGSolverT;
28 template <typename M> friend class GMRESMLMGT;
29
30 using MFType = MF;
31 using FAB = typename MLLinOpT<MF>::FAB;
32 using RT = typename MLLinOpT<MF>::RT;
33
34 using BCMode = typename MLLinOpT<MF>::BCMode;
36
38 enum class CFStrategy : int {none,ghostnodes};
39
40 MLMGT (MLLinOpT<MF>& a_lp);
42
43 MLMGT (MLMGT<MF> const&) = delete;
44 MLMGT (MLMGT<MF> &&) = delete;
45 MLMGT<MF>& operator= (MLMGT<MF> const&) = delete;
47
48 // Optional argument checkpoint_file is for debugging only.
49 template <typename AMF>
50 RT solve (const Vector<AMF*>& a_sol, const Vector<AMF const*>& a_rhs,
51 RT a_tol_rel, RT a_tol_abs, const char* checkpoint_file = nullptr);
52
53 template <typename AMF>
54 RT solve (std::initializer_list<AMF*> a_sol,
55 std::initializer_list<AMF const*> a_rhs,
56 RT a_tol_rel, RT a_tol_abs, const char* checkpoint_file = nullptr);
57
58 RT precond (Vector<MF*> const& a_sol, Vector<MF const*> const& a_rhs,
59 RT a_tol_rel, RT a_tol_abs);
60
61 template <typename AMF>
62 void getGradSolution (const Vector<Array<AMF*,AMREX_SPACEDIM> >& a_grad_sol,
63 Location a_loc = Location::FaceCenter);
64
65 template <typename AMF>
66 void getGradSolution (std::initializer_list<Array<AMF*,AMREX_SPACEDIM>> a_grad_sol,
67 Location a_loc = Location::FaceCenter);
68
72 template <typename AMF>
73 void getFluxes (const Vector<Array<AMF*,AMREX_SPACEDIM> >& a_flux,
74 Location a_loc = Location::FaceCenter);
75
76 template <typename AMF>
77 void getFluxes (std::initializer_list<Array<AMF*,AMREX_SPACEDIM>> a_flux,
78 Location a_loc = Location::FaceCenter);
79
80 template <typename AMF>
81 void getFluxes (const Vector<Array<AMF*,AMREX_SPACEDIM> >& a_flux,
82 const Vector<AMF*> & a_sol,
83 Location a_loc = Location::FaceCenter);
84
85 template <typename AMF>
86 void getFluxes (std::initializer_list<Array<AMF*,AMREX_SPACEDIM>> a_flux,
87 std::initializer_list<AMF*> a_sol,
88 Location a_loc = Location::FaceCenter);
89
90 template <typename AMF>
91 void getFluxes (const Vector<AMF*> & a_flux,
92 Location a_loc = Location::CellCenter);
93
94 template <typename AMF>
95 void getFluxes (std::initializer_list<AMF*> a_flux,
96 Location a_loc = Location::CellCenter);
97
98 template <typename AMF>
99 void getFluxes (const Vector<AMF*> & a_flux,
100 const Vector<AMF*> & a_sol,
101 Location a_loc = Location::CellCenter);
102
103 template <typename AMF>
104 void getFluxes (std::initializer_list<AMF*> a_flux,
105 std::initializer_list<AMF*> a_sol,
106 Location a_loc = Location::CellCenter);
107
108 void compResidual (const Vector<MF*>& a_res, const Vector<MF*>& a_sol,
109 const Vector<MF const*>& a_rhs);
110
111#ifdef AMREX_USE_EB
112 // Flux into the EB wall
113 void getEBFluxes (const Vector<MF*>& a_eb_flux);
114 void getEBFluxes (const Vector<MF*>& a_eb_flux, const Vector<MF*> & a_sol);
115#endif
116
122 void apply (const Vector<MF*>& out, const Vector<MF*>& in);
123
125 void applyPrecond (const Vector<MF*>& out, const Vector<MF*>& in);
126
127 [[nodiscard]] int getVerbose () const { return verbose; }
128 [[nodiscard]] int getBottomVerbose () const { return bottom_verbose; }
129
130 void incPrintIdentation ();
131 void decPrintIdentation ();
132
133 void setThrowException (bool t) noexcept { throw_exception = t; }
134 void setVerbose (int v) noexcept { verbose = v; }
135 void setMaxIter (int n) noexcept { max_iters = n; }
136 void setMaxFmgIter (int n) noexcept { max_fmg_iters = n; }
137 void setFixedIter (int nit) noexcept { do_fixed_number_of_iters = nit; }
138 void setPrecondIter (int nit) noexcept { max_precond_iters = nit; }
139
140 void setPreSmooth (int n) noexcept { nu1 = n; }
141 void setPostSmooth (int n) noexcept { nu2 = n; }
142 void setFinalSmooth (int n) noexcept { nuf = n; }
143 void setBottomSmooth (int n) noexcept { nub = n; }
144
145 void setBottomSolver (BottomSolver s) noexcept { bottom_solver = s; }
146 [[nodiscard]] BottomSolver getBottomSolver () const noexcept { return bottom_solver; }
147 void setCFStrategy (CFStrategy a_cf_strategy) noexcept {cf_strategy = a_cf_strategy;}
148 void setBottomVerbose (int v) noexcept { bottom_verbose = v; }
149 void setBottomMaxIter (int n) noexcept { bottom_maxiter = n; }
150 void setBottomTolerance (RT t) noexcept { bottom_reltol = t; }
151 void setBottomToleranceAbs (RT t) noexcept { bottom_abstol = t;}
152 [[nodiscard]] RT getBottomToleranceAbs () const noexcept{ return bottom_abstol; }
153
154 [[deprecated("Use MLMG::setConvergenceNormType() instead.")]]
155 void setAlwaysUseBNorm (int flag) noexcept;
156
157 void setConvergenceNormType (MLMGNormType norm) noexcept { norm_type = norm; }
158
159 void setFinalFillBC (int flag) noexcept { final_fill_bc = flag; }
160
161 [[nodiscard]] int numAMRLevels () const noexcept { return namrlevs; }
162
163 void setNSolve (int flag) noexcept { do_nsolve = flag; }
164 void setNSolveGridSize (int s) noexcept { nsolve_grid_size = s; }
165
166 void setNoGpuSync (bool do_not_sync) noexcept { do_no_sync_gpu = do_not_sync; }
167
168#if defined(AMREX_USE_HYPRE) && (AMREX_SPACEDIM > 1)
169 void setHypreInterface (Hypre::Interface f) noexcept {
170 // must use ij interface for EB
171#ifndef AMREX_USE_EB
172 hypre_interface = f;
173#else
175#endif
176 }
177
179 void setHypreOptionsNamespace(const std::string& prefix) noexcept
180 {
181 hypre_options_namespace = prefix;
182 }
183
184 void setHypreOldDefault (bool l) noexcept {hypre_old_default = l;}
185 void setHypreRelaxType (int n) noexcept {hypre_relax_type = n;}
186 void setHypreRelaxOrder (int n) noexcept {hypre_relax_order = n;}
187 void setHypreNumSweeps (int n) noexcept {hypre_num_sweeps = n;}
188 void setHypreStrongThreshold (Real t) noexcept {hypre_strong_threshold = t;}
189#endif
190
191 void prepareForFluxes (Vector<MF const*> const& a_sol);
192
193 template <typename AMF>
194 void prepareForSolve (Vector<AMF*> const& a_sol, Vector<AMF const*> const& a_rhs);
195
196 void prepareForNSolve ();
197
198 void prepareLinOp ();
199
200 void preparePrecond ();
201
202 void oneIter (int iter);
203
204 void miniCycle (int amrlev);
205
206 void mgVcycle (int amrlev, int mglev);
207 void mgFcycle ();
208
209 void bottomSolve ();
210 void NSolve (MLMGT<MF>& a_solver, MF& a_sol, MF& a_rhs);
211 void actualBottomSolve ();
212
213 void computeMLResidual (int amrlevmax);
214 void computeResidual (int alev);
215 void computeResWithCrseSolFineCor (int calev, int falev);
216 void computeResWithCrseCorFineCor (int falev);
217 void interpCorrection (int alev);
218 void interpCorrection (int alev, int mglev);
219 void addInterpCorrection (int alev, int mglev);
220
221 void computeResOfCorrection (int amrlev, int mglev);
222
223 RT ResNormInf (int alev, bool local = false);
224 RT MLResNormInf (int alevmax, bool local = false);
225 RT MLRhsNormInf (bool local = false);
226
227 void makeSolvable ();
228 void makeSolvable (int amrlev, int mglev, MF& mf);
229
230#if defined(AMREX_USE_HYPRE) && (AMREX_SPACEDIM > 1)
231 template <class TMF=MF,std::enable_if_t<std::is_same_v<TMF,MultiFab>,int> = 0>
232 void bottomSolveWithHypre (MF& x, const MF& b);
233#endif
234
235#if defined(AMREX_USE_PETSC) && (AMREX_SPACEDIM > 1)
236 template <class TMF=MF,std::enable_if_t<std::is_same_v<TMF,MultiFab>,int> = 0>
237 void bottomSolveWithPETSc (MF& x, const MF& b);
238#endif
239
240 int bottomSolveWithCG (MF& x, const MF& b, typename MLCGSolverT<MF>::Type type);
241
242 [[nodiscard]] RT getInitRHS () const noexcept { return m_rhsnorm0; }
243 // Initial composite residual
244 [[nodiscard]] RT getInitResidual () const noexcept { return m_init_resnorm0; }
245 // Final composite residual
246 [[nodiscard]] RT getFinalResidual () const noexcept { return m_final_resnorm0; }
247 // Residuals on the *finest* AMR level after each iteration
248 [[nodiscard]] Vector<RT> const& getResidualHistory () const noexcept { return m_iter_fine_resnorm0; }
249 [[nodiscard]] int getNumIters () const noexcept { return m_iter_fine_resnorm0.size(); }
250 [[nodiscard]] Vector<int> const& getNumCGIters () const noexcept { return m_niters_cg; }
251
252 MLLinOpT<MF>& getLinOp () { return linop; }
253
254private:
255
256 bool precond_mode = false;
257 bool throw_exception = false;
258 int verbose = 1;
259
260 int max_iters = 200;
261 int do_fixed_number_of_iters = 0;
262 int max_precond_iters = 1;
263
264 int nu1 = 2;
265 int nu2 = 2;
266 int nuf = 8;
267 int nub = 0;
268
269 int max_fmg_iters = 0;
270
271 BottomSolver bottom_solver = BottomSolver::Default;
272 CFStrategy cf_strategy = CFStrategy::none;
273 int bottom_verbose = 0;
274 int bottom_maxiter = 200;
275 RT bottom_reltol = std::is_same<RT,double>() ? RT(1.e-4) : RT(1.e-3);
276 RT bottom_abstol = RT(-1.0);
277
279
280 int final_fill_bc = 0;
281
282 MLLinOpT<MF>& linop;
283 int ncomp;
284 int namrlevs;
285 int finest_amr_lev;
286
287 bool linop_prepared = false;
288 Long solve_called = 0;
289
291 int do_nsolve = false;
292 int nsolve_grid_size = 16;
293 std::unique_ptr<MLLinOpT<MF>> ns_linop;
294 std::unique_ptr<MLMGT<MF>> ns_mlmg;
295 std::unique_ptr<MF> ns_sol;
296 std::unique_ptr<MF> ns_rhs;
297
298 std::string print_ident;
299
300 bool do_no_sync_gpu = false;
301
303#if defined(AMREX_USE_HYPRE) && (AMREX_SPACEDIM > 1)
304 // Hypre::Interface hypre_interface = Hypre::Interface::structed;
305 // Hypre::Interface hypre_interface = Hypre::Interface::semi_structed;
306 Hypre::Interface hypre_interface = Hypre::Interface::ij;
307
308 std::unique_ptr<Hypre> hypre_solver;
309 std::unique_ptr<MLMGBndryT<MF>> hypre_bndry;
310 std::unique_ptr<HypreNodeLap> hypre_node_solver;
311
312 std::string hypre_options_namespace = "hypre";
313 bool hypre_old_default = true; // Falgout coarsening with modified classical interpolation
314 int hypre_relax_type = 6; // G-S/Jacobi hybrid relaxation
315 int hypre_relax_order = 1; // uses C/F relaxation
316 int hypre_num_sweeps = 2; // Sweeps on each level
317 Real hypre_strong_threshold = 0.25; // Hypre default is 0.25
318#endif
319
321#if defined(AMREX_USE_PETSC) && (AMREX_SPACEDIM > 1)
322 std::unique_ptr<PETScABecLap> petsc_solver;
323 std::unique_ptr<MLMGBndryT<MF>> petsc_bndry;
324#endif
325
330 Vector<MF> sol;
331 Vector<MF> rhs;
333
334 Vector<int> sol_is_alias;
335
340 Vector<Vector<MF> > res;
341 Vector<Vector<MF> > cor;
342 Vector<Vector<MF> > cor_hold;
343 Vector<Vector<MF> > rescor;
345
346 enum timer_types { solve_time=0, iter_time, bottom_time, ntimers };
347 Vector<double> timer;
348
349 RT m_rhsnorm0 = RT(-1.0);
350 RT m_init_resnorm0 = RT(-1.0);
351 RT m_final_resnorm0 = RT(-1.0);
352 Vector<int> m_niters_cg;
353 Vector<RT> m_iter_fine_resnorm0; // Residual for each iteration at the finest level
354
355 void checkPoint (const Vector<MultiFab*>& a_sol,
356 const Vector<MultiFab const*>& a_rhs,
357 RT a_tol_rel, RT a_tol_abs, const char* a_file_name) const;
358
359};
360
361template <typename MF>
363 : linop(a_lp), ncomp(a_lp.getNComp()), namrlevs(a_lp.NAMRLevels()),
364 finest_amr_lev(a_lp.NAMRLevels()-1)
365{}
366
367template <typename MF> MLMGT<MF>::~MLMGT () = default;
368
369template <typename MF>
370void
372{
373 if (flag) {
374 norm_type = MLMGNormType::bnorm;
375 } else {
376 norm_type = MLMGNormType::greater;
377 }
378}
379
380template <typename MF>
381template <typename AMF>
382auto
383MLMGT<MF>::solve (std::initializer_list<AMF*> a_sol,
384 std::initializer_list<AMF const*> a_rhs,
385 RT a_tol_rel, RT a_tol_abs, const char* checkpoint_file) -> RT
386{
387 return solve(Vector<AMF*>(std::move(a_sol)),
388 Vector<AMF const*>(std::move(a_rhs)),
389 a_tol_rel, a_tol_abs, checkpoint_file);
390}
391
// Main entry point: solve L(sol) = rhs to the requested relative/absolute
// tolerance. Returns the final composite residual norm. Several original
// lines were dropped by the extraction; NOTE(review) comments mark each gap.
392template <typename MF>
393template <typename AMF>
394auto
// NOTE(review): extraction dropped original line 395 — the qualified
// signature line, per the in-class declaration:
//   MLMGT<MF>::solve (const Vector<AMF*>& a_sol, const Vector<AMF const*>& a_rhs,
396 RT a_tol_rel, RT a_tol_abs, const char* checkpoint_file) -> RT
397{
398 BL_PROFILE("MLMG::solve()");
399
400 bool prev_in_single_stream_region = false;
401 bool prev_in_nosync_region = false;
402
    // Optionally run the whole solve in a single-stream, no-sync GPU region;
    // the previous region states are restored before returning.
403 if (do_no_sync_gpu) {
404 prev_in_single_stream_region = Gpu::setSingleStreamRegion(true);
405 prev_in_nosync_region = Gpu::setNoSyncRegion(true);
406 }
407
    // Debug-only checkpoint of the inputs (MultiFab instantiation only).
408 if constexpr (std::is_same<AMF,MultiFab>()) {
409 if (checkpoint_file != nullptr) {
410 checkPoint(a_sol, a_rhs, a_tol_rel, a_tol_abs, checkpoint_file);
411 }
412 }
413
414 if (bottom_solver == BottomSolver::Default) {
415 bottom_solver = linop.getDefaultBottomSolver();
416 }
417
418#if (defined(AMREX_USE_HYPRE) || defined(AMREX_USE_PETSC)) && (AMREX_SPACEDIM > 1)
419 if constexpr (IsFabArray_v<AMF>) {
420 if (bottom_solver == BottomSolver::hypre || bottom_solver == BottomSolver::petsc) {
421 int mo = linop.getMaxOrder();
422 if (a_sol[0]->hasEBFabFactory()) {
423 linop.setMaxOrder(2);
424 } else {
425 linop.setMaxOrder(std::min(3,mo)); // maxorder = 4 not supported
426 }
427 }
428 }
429#endif
430
    // True when this solver is the nested N-Solve of a parent MLMG.
431 bool is_nsolve = linop.m_parent;
432
433 auto solve_start_time = amrex::second();
434
435 RT& composite_norminf = m_final_resnorm0;
436
437 m_niters_cg.clear();
438 m_iter_fine_resnorm0.clear();
439
440 prepareForSolve(a_sol, a_rhs);
441
442 computeMLResidual(finest_amr_lev);
443
    // Local norms first; a single Max reduction combines both at once.
444 bool local = true;
445 RT resnorm0 = MLResNormInf(finest_amr_lev, local);
446 RT rhsnorm0 = MLRhsNormInf(local);
447 if (!is_nsolve) {
448 ParallelAllReduce::Max<RT>({resnorm0, rhsnorm0}, ParallelContext::CommunicatorSub());
449
450 if (verbose >= 1)
451 {
452 amrex::Print() << print_ident << "MLMG: Initial rhs = " << rhsnorm0 << "\n"
453 << print_ident << "MLMG: Initial residual (resid0) = " << resnorm0 << "\n";
454 }
455 }
456
457 m_init_resnorm0 = resnorm0;
458 m_rhsnorm0 = rhsnorm0;
459
    // Pick the reference norm for the relative tolerance, per norm_type.
460 RT max_norm = resnorm0;
461 std::string norm_name = "resid0";
462 switch (norm_type) {
// NOTE(review): extraction dropped the case label at original line 463 —
// presumably `case MLMGNormType::greater:` (uses the larger of the two norms).
464 if (rhsnorm0 >= resnorm0) {
465 norm_name = "bnorm";
466 max_norm = rhsnorm0;
467 } else {
468 norm_name = "resid0";
469 max_norm = resnorm0;
470 }
471 break;
// NOTE(review): extraction dropped the case label at original line 472 —
// presumably `case MLMGNormType::bnorm:`.
473 norm_name = "bnorm";
474 max_norm = rhsnorm0;
475 break;
// NOTE(review): extraction dropped the case label at original line 476 —
// presumably `case MLMGNormType::resnorm:`. Confirm all three upstream.
477 norm_name = "resid0";
478 max_norm = resnorm0;
479 break;
480 }
481
    // Floor the relative tolerance at 1e-16 to avoid an unreachable target.
482 const RT res_target = std::max(a_tol_abs, std::max(a_tol_rel,RT(1.e-16))*max_norm);
483
484 if (!is_nsolve && resnorm0 <= res_target) {
485 composite_norminf = resnorm0;
486 if (verbose >= 1) {
487 amrex::Print() << print_ident << "MLMG: No iterations needed\n";
488 }
489 } else {
490 auto iter_start_time = amrex::second();
491 bool converged = false;
492
493 const int niters = do_fixed_number_of_iters ? do_fixed_number_of_iters : max_iters;
494 for (int iter = 0; iter < niters; ++iter)
495 {
496 oneIter(iter);
497
498 converged = false;
499
500 // Test convergence on the fine amr level
501 computeResidual(finest_amr_lev);
502
        // Nested N-Solve runs a fixed cycle count; skip convergence tests.
503 if (is_nsolve) { continue; }
504
505 RT fine_norminf = ResNormInf(finest_amr_lev);
506 m_iter_fine_resnorm0.push_back(fine_norminf);
507 composite_norminf = fine_norminf;
508 if (verbose >= 2) {
509 amrex::Print() << print_ident << "MLMG: Iteration " << std::setw(3) << iter+1 << " Fine resid/"
510 << norm_name << " = " << fine_norminf/max_norm << "\n";
511 }
512 bool fine_converged = (fine_norminf <= res_target);
513
514 if (namrlevs == 1 && fine_converged) {
515 converged = true;
516 } else if (fine_converged) {
517 // finest level is converged, but we still need to test the coarse levels
518 computeMLResidual(finest_amr_lev-1);
519 RT crse_norminf = MLResNormInf(finest_amr_lev-1);
520 if (verbose >= 2) {
521 amrex::Print() << print_ident << "MLMG: Iteration " << std::setw(3) << iter+1
522 << " Crse resid/" << norm_name << " = "
523 << crse_norminf/max_norm << "\n";
524 }
525 converged = (crse_norminf <= res_target);
526 composite_norminf = std::max(fine_norminf, crse_norminf);
527 } else {
528 converged = false;
529 }
530
531 if (converged) {
532 if (verbose >= 1) {
533 amrex::Print() << print_ident << "MLMG: Final Iter. " << iter+1
534 << " resid, resid/" << norm_name << " = "
535 << composite_norminf << ", "
536 << composite_norminf/max_norm << "\n";
537 }
538 break;
539 } else {
        // Blow-up detection: residual grew by 20 orders of magnitude.
540 if (composite_norminf > RT(1.e20)*max_norm)
541 {
542 if (verbose > 0) {
543 amrex::Print() << print_ident << "MLMG: Failing to converge after " << iter+1 << " iterations."
544 << " resid, resid/" << norm_name << " = "
545 << composite_norminf << ", "
546 << composite_norminf/max_norm << "\n";
547 }
548
549 if ( throw_exception ) {
550 throw error("MLMG blew up.");
551 } else {
552 amrex::Abort("MLMG failing so lets stop here");
553 }
554 }
555 }
556 }
557
    // Fixed-iteration mode deliberately ignores the convergence status.
558 if (!converged && do_fixed_number_of_iters == 0) {
559 if (verbose > 0) {
560 amrex::Print() << print_ident << "MLMG: Failed to converge after " << max_iters << " iterations."
561 << " resid, resid/" << norm_name << " = "
562 << composite_norminf << ", "
563 << composite_norminf/max_norm << "\n";
564 }
565
566 if ( throw_exception ) {
567 throw error("MLMG failed to converge.");
568 } else {
569 amrex::Abort("MLMG failed.");
570 }
571 }
572 timer[iter_time] = amrex::second() - iter_start_time;
573 }
574
575 linop.postSolve(GetVecOfPtrs(sol));
576
    // Copy the internal solution back to the caller, optionally filling one
    // ghost cell (final_fill_bc), except along a hidden dimension.
577 IntVect ng_back = final_fill_bc ? IntVect(1) : IntVect(0);
578 if (linop.hasHiddenDimension()) {
579 ng_back[linop.hiddenDirection()] = 0;
580 }
581 for (int alev = 0; alev < namrlevs; ++alev)
582 {
583 if (!sol_is_alias[alev]) {
584 LocalCopy(*a_sol[alev], sol[alev], 0, 0, ncomp, ng_back);
585 }
586 }
587
588 timer[solve_time] = amrex::second() - solve_start_time;
589 if (verbose >= 1) {
590 ParallelReduce::Max<double>(timer.data(), timer.size(), 0,
// NOTE(review): extraction dropped original lines 591-592 — presumably the
// remaining reduction argument (ParallelContext::CommunicatorSub()) and an
// `if (ParallelContext::MyProcSub() == 0)` guard; confirm upstream.
593 {
594 amrex::AllPrint() << print_ident << "MLMG: Timers: Solve = " << timer[solve_time]
595 << " Iter = " << timer[iter_time]
596 << " Bottom = " << timer[bottom_time] << "\n";
597 }
598 }
599
600 ++solve_called;
601
    // Restore the caller's GPU stream/sync region state.
602 if (do_no_sync_gpu) {
603 (void)Gpu::setSingleStreamRegion(prev_in_single_stream_region);
604 (void)Gpu::setNoSyncRegion(prev_in_nosync_region);
605 }
606
607 return composite_norminf;
608}
609
610template <typename MF>
611auto
613 RT a_tol_rel, RT a_tol_abs) -> RT
614{
615 precond_mode = true;
616 std::swap(max_precond_iters, do_fixed_number_of_iters);
617 linop.beginPrecondBC();
618
619 auto r = solve(a_sol, a_rhs, a_tol_rel, a_tol_abs);
620
621 linop.endPrecondBC();
622 std::swap(max_precond_iters, do_fixed_number_of_iters);
623 precond_mode = false;
624
625 return r;
626}
627
628template <typename MF>
629void
631{
632 for (int alev = finest_amr_lev; alev >= 0; --alev) {
633 const MF* crse_bcdata = (alev > 0) ? a_sol[alev-1] : nullptr;
634 linop.prepareForFluxes(alev, crse_bcdata);
635 }
636}
637
638template <typename MF>
639template <typename AMF>
640void
642{
643 BL_PROFILE("MLMG::getGradSolution()");
644 for (int alev = 0; alev <= finest_amr_lev; ++alev) {
645 if constexpr (std::is_same<AMF,MF>()) {
646 linop.compGrad(alev, a_grad_sol[alev], sol[alev], a_loc);
647 } else {
649 for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
650 auto const& amf = *(a_grad_sol[alev][idim]);
651 grad_sol[idim].define(boxArray(amf), DistributionMap(amf), ncomp, 0);
652 }
653 linop.compGrad(alev, GetArrOfPtrs(grad_sol), sol[alev], a_loc);
654 for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
655 LocalCopy(*a_grad_sol[alev][idim], grad_sol[idim], 0, 0, ncomp, IntVect(0));
656 }
657 }
658 }
659}
660
661template <typename MF>
662template <typename AMF>
663void
664MLMGT<MF>::getGradSolution (std::initializer_list<Array<AMF*,AMREX_SPACEDIM>> a_grad_sol, Location a_loc)
665{
666 getGradSolution(Vector<Array<AMF*,AMREX_SPACEDIM>>(std::move(a_grad_sol)), a_loc);
667}
668
669template <typename MF>
670template <typename AMF>
671void
673 Location a_loc)
674{
675 if (!linop.isCellCentered()) {
676 amrex::Abort("Calling wrong getFluxes for nodal solver");
677 }
678
679 AMREX_ASSERT(sol.size() == a_flux.size());
680
681 if constexpr (std::is_same<AMF,MF>()) {
682 getFluxes(a_flux, GetVecOfPtrs(sol), a_loc);
683 } else {
684 Vector<Array<MF,AMREX_SPACEDIM>> fluxes(namrlevs);
685 for (int ilev = 0; ilev < namrlevs; ++ilev) {
686 for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
687 auto const& amf = *(a_flux[ilev][idim]);
688 fluxes[ilev][idim].define(boxArray(amf), DistributionMap(amf), ncomp, 0);
689 }
690 }
691 getFluxes(GetVecOfArrOfPtrs(fluxes), GetVecOfPtrs(sol), a_loc);
692 for (int ilev = 0; ilev < namrlevs; ++ilev) {
693 for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
694 LocalCopy(*a_flux[ilev][idim], fluxes[ilev][idim], 0, 0, ncomp, IntVect(0));
695 }
696 }
697 }
698}
699
700template <typename MF>
701template <typename AMF>
702void
704 Location a_loc)
705{
706 getFluxes(Vector<Array<AMF*,AMREX_SPACEDIM>>(std::move(a_flux)), a_loc);
707}
708
709template <typename MF>
710template <typename AMF>
711void
713 const Vector<AMF*>& a_sol, Location a_loc)
714{
715 BL_PROFILE("MLMG::getFluxes()");
716
717 if (!linop.isCellCentered()) {
718 amrex::Abort("Calling wrong getFluxes for nodal solver");
719 }
720
721 if constexpr (std::is_same<AMF,MF>()) {
722 linop.getFluxes(a_flux, a_sol, a_loc);
723 } else {
724 Vector<Array<MF,AMREX_SPACEDIM>> fluxes(namrlevs);
725 for (int ilev = 0; ilev < namrlevs; ++ilev) {
726 for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
727 auto const& amf = *(a_flux[ilev][idim]);
728 fluxes[ilev][idim].define(boxArray(amf), DistributionMap(amf), ncomp, 0);
729 }
730 LocalCopy(sol[ilev], *a_sol[ilev], 0, 0, ncomp, nGrowVect(sol[ilev]));
731 }
732 linop.getFluxes(GetVecOfArrOfPtrs(fluxes), GetVecOfPtrs(sol), a_loc);
733 for (int ilev = 0; ilev < namrlevs; ++ilev) {
734 for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
735 LocalCopy(*a_flux[ilev][idim], fluxes[ilev][idim], 0, 0, ncomp, IntVect(0));
736 }
737 }
738 }
739}
740
741template <typename MF>
742template <typename AMF>
743void
745 std::initializer_list<AMF*> a_sol, Location a_loc)
746{
747 getFluxes(Vector<Array<AMF*,AMREX_SPACEDIM>>(std::move(a_flux)),
748 Vector<AMF*>(std::move(a_sol)), a_loc);
749}
750
751template <typename MF>
752template <typename AMF>
753void
755{
756 AMREX_ASSERT(sol.size() == a_flux.size());
757 if constexpr (std::is_same<AMF,MF>()) {
758 getFluxes(a_flux, GetVecOfPtrs(sol), a_loc);
759 } else {
760 Vector<MF> fluxes(namrlevs);
761 for (int ilev = 0; ilev < namrlevs; ++ilev) {
762 auto const& amf = *a_flux[ilev];
763 fluxes[ilev].define(boxArray(amf), DistributionMap(amf), ncomp, 0);
764 }
765 getFluxes(GetVecOfPtrs(fluxes), GetVecOfPtrs(sol), a_loc);
766 for (int ilev = 0; ilev < namrlevs; ++ilev) {
767 LocalCopy(*a_flux[ilev], fluxes[ilev], 0, 0, ncomp, IntVect(0));
768 }
769 }
770}
771
772template <typename MF>
773template <typename AMF>
774void
775MLMGT<MF>::getFluxes (std::initializer_list<AMF*> a_flux, Location a_loc)
776{
777 getFluxes(Vector<AMF*>(std::move(a_flux)), a_loc);
778}
779
// Compute cell-centered fluxes from a user-provided solution. For
// cell-centered operators, face fluxes are computed first and then averaged
// to cell centers; nodal operators delegate directly to the linear operator.
// The flux container must hold at least AMREX_SPACEDIM components.
780template <typename MF>
781template <typename AMF>
782void
// NOTE(review): extraction dropped original line 783 — the first signature
// line, per the in-class declaration:
//   MLMGT<MF>::getFluxes (const Vector<AMF*> & a_flux,
784 const Vector<AMF*>& a_sol, Location /*a_loc*/)
785{
786 AMREX_ASSERT(nComp(*a_flux[0]) >= AMREX_SPACEDIM);
787
    // AMF != MF: import the caller's solution into the internal hierarchy.
788 if constexpr (! std::is_same<AMF,MF>()) {
789 for (int alev = 0; alev < namrlevs; ++alev) {
790 LocalCopy(sol[alev], *a_sol[alev], 0, 0, ncomp, nGrowVect(sol[alev]));
791 }
792 }
793
794 if (linop.isCellCentered())
795 {
796 Vector<Array<MF,AMREX_SPACEDIM> > ffluxes(namrlevs);
797 for (int alev = 0; alev < namrlevs; ++alev) {
798 for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
799 const int mglev = 0;
800 int nghost = 0;
801 if (cf_strategy == CFStrategy::ghostnodes) { nghost = linop.getNGrow(alev); }
802 ffluxes[alev][idim].define(amrex::convert(linop.m_grids[alev][mglev],
// NOTE(review): extraction dropped original line 803 here — presumably the
// face index type argument, `IntVect::TheDimensionVector(idim)),` — confirm
// against upstream before compiling.
804 linop.m_dmap[alev][mglev], ncomp, nghost, MFInfo(),
805 *linop.m_factory[alev][mglev]);
806 }
807 }
808 if constexpr (std::is_same<AMF,MF>()) {
809 getFluxes(amrex::GetVecOfArrOfPtrs(ffluxes), a_sol, Location::FaceCenter);
810 } else {
811 getFluxes(amrex::GetVecOfArrOfPtrs(ffluxes), GetVecOfPtrs(sol), Location::FaceCenter);
812 }
        // Average the face-centered fluxes onto cell centers.
813 for (int alev = 0; alev < namrlevs; ++alev) {
814#ifdef AMREX_USE_EB
815 EB_average_face_to_cellcenter(*a_flux[alev], 0, amrex::GetArrOfConstPtrs(ffluxes[alev]));
816#else
817 average_face_to_cellcenter(*a_flux[alev], 0, amrex::GetArrOfConstPtrs(ffluxes[alev]));
818#endif
819 }
820
821 } else {
822 if constexpr (std::is_same<AMF,MF>()) {
823 linop.getFluxes(a_flux, a_sol);
824 } else {
825 Vector<MF> fluxes(namrlevs);
826 for (int ilev = 0; ilev < namrlevs; ++ilev) {
827 auto const& amf = *a_flux[ilev];
828 fluxes[ilev].define(boxArray(amf), DistributionMap(amf), ncomp, 0);
829 }
830 linop.getFluxes(GetVecOfPtrs(fluxes), GetVecOfPtrs(sol));
831 for (int ilev = 0; ilev < namrlevs; ++ilev) {
832 LocalCopy(*a_flux[ilev], fluxes[ilev], 0, 0, ncomp, IntVect(0));
833 }
834 }
835 }
836}
837
838template <typename MF>
839template <typename AMF>
840void
841MLMGT<MF>::getFluxes (std::initializer_list<AMF*> a_flux,
842 std::initializer_list<AMF*> a_sol, Location a_loc)
843{
844 getFluxes(Vector<AMF*>(std::move(a_flux)),
845 Vector<AMF*>(std::move(a_sol)), a_loc);
846}
847
848#ifdef AMREX_USE_EB
849template <typename MF>
850void
852{
853 if (!linop.isCellCentered()) {
854 amrex::Abort("getEBFluxes is for cell-centered only");
855 }
856
857 AMREX_ASSERT(sol.size() == a_eb_flux.size());
858 getEBFluxes(a_eb_flux, GetVecOfPtrs(sol));
859}
860
861template <typename MF>
862void
863MLMGT<MF>::getEBFluxes (const Vector<MF*>& a_eb_flux, const Vector<MF*>& a_sol)
864{
865 BL_PROFILE("MLMG::getEBFluxes()");
866
867 if (!linop.isCellCentered()) {
868 amrex::Abort("getEBFluxes is for cell-centered only");
869 }
870
871 linop.getEBFluxes(a_eb_flux, a_sol);
872}
873#endif
874
875template <typename MF>
876void
878 const Vector<MF const*>& a_rhs)
879{
880 BL_PROFILE("MLMG::compResidual()");
881
882 IntVect ng_sol(1);
883 if (linop.hasHiddenDimension()) { ng_sol[linop.hiddenDirection()] = 0; }
884
885 sol.resize(namrlevs);
886 sol_is_alias.resize(namrlevs,true);
887 for (int alev = 0; alev < namrlevs; ++alev)
888 {
889 if (cf_strategy == CFStrategy::ghostnodes || nGrowVect(*a_sol[alev]) == ng_sol)
890 {
891 sol[alev] = linop.makeAlias(*a_sol[alev]);
892 sol_is_alias[alev] = true;
893 }
894 else
895 {
896 if (sol_is_alias[alev])
897 {
898 sol[alev] = linop.make(alev, 0, ng_sol);
899 }
900 LocalCopy(sol[alev], *a_sol[alev], 0, 0, ncomp, IntVect(0));
901 }
902 }
903
904 prepareLinOp();
905
906
907 for (int alev = finest_amr_lev; alev >= 0; --alev) {
908 const MF* crse_bcdata = (alev > 0) ? &(sol[alev-1]) : nullptr;
909 const MF* prhs = a_rhs[alev];
910#if (AMREX_SPACEDIM != 3)
911 int nghost = (cf_strategy == CFStrategy::ghostnodes) ? linop.getNGrow(alev) : 0;
912 MF rhstmp(boxArray(*prhs), DistributionMap(*prhs), ncomp, nghost,
913 MFInfo(), *linop.Factory(alev));
914 LocalCopy(rhstmp, *prhs, 0, 0, ncomp, IntVect(nghost));
915 linop.applyMetricTerm(alev, 0, rhstmp);
916 linop.unimposeNeumannBC(alev, rhstmp);
917 linop.applyInhomogNeumannTerm(alev, rhstmp);
918 prhs = &rhstmp;
919#endif
920 linop.solutionResidual(alev, *a_res[alev], sol[alev], *prhs, crse_bcdata);
921 if (alev < finest_amr_lev) {
922 linop.reflux(alev, *a_res[alev], sol[alev], *prhs,
923 *a_res[alev+1], sol[alev+1], *a_rhs[alev+1]);
924 if (linop.isCellCentered()) {
925#ifdef AMREX_USE_EB
926 EB_average_down(*a_res[alev+1], *a_res[alev], 0, ncomp, linop.AMRRefRatioVect(alev));
927#else
928 average_down(*a_res[alev+1], *a_res[alev], 0, ncomp, linop.AMRRefRatioVect(alev));
929#endif
930 }
931 }
932 }
933
934
935#if (AMREX_SPACEDIM != 3)
936 for (int alev = 0; alev <= finest_amr_lev; ++alev) {
937 linop.unapplyMetricTerm(alev, 0, *a_res[alev]);
938 }
939#endif
940}
941
// Apply the operator: out = -L(in) (note the final sign flip). The input is
// aliased when its ghost layout already matches; otherwise it is copied into
// temporaries with the required ghost cells. Kept byte-identical: the
// aliasing and ghost-cell handling is order-sensitive.
942template <typename MF>
943void
944MLMGT<MF>::apply (const Vector<MF*>& out, const Vector<MF*>& a_in)
945{
946 BL_PROFILE("MLMG::apply()");
947
948 Vector<MF*> in(namrlevs);
949 Vector<MF> in_raii(namrlevs);
950 Vector<MF> rh(namrlevs);
951 int nghost = 0;
952 IntVect ng_sol(1);
953 if (linop.hasHiddenDimension()) { ng_sol[linop.hiddenDirection()] = 0; }
954
955 for (int alev = 0; alev < namrlevs; ++alev)
956 {
957 if (cf_strategy == CFStrategy::ghostnodes)
958 {
959 nghost = linop.getNGrow(alev);
960 in[alev] = a_in[alev];
961 }
962 else if (nGrowVect(*a_in[alev]) == ng_sol)
963 {
            // Ghost layout already matches; use the caller's data directly.
964 in[alev] = a_in[alev];
965 }
966 else
967 {
            // Otherwise copy into a temporary with the required ghost cells.
968 IntVect ng = ng_sol;
969 if (cf_strategy == CFStrategy::ghostnodes) { ng = IntVect(nghost); }
970 in_raii[alev] = linop.make(alev, 0, ng);
971 LocalCopy(in_raii[alev], *a_in[alev], 0, 0, ncomp, IntVect(nghost));
972 in[alev] = &(in_raii[alev]);
973 }
        // Zero rhs: the residual of L(in) against 0 yields -L(in) pre-flip.
974 rh[alev] = linop.make(alev, 0, IntVect(nghost));
975 setVal(rh[alev], RT(0.0));
976 }
977
978 prepareLinOp();
979
980 for (int alev = 0; alev < namrlevs; ++alev) {
981 linop.applyInhomogNeumannTerm(alev, rh[alev]);
982 }
983
984
985 for (int alev = finest_amr_lev; alev >= 0; --alev) {
986 const MF* crse_bcdata = (alev > 0) ? in[alev-1] : nullptr;
987 linop.solutionResidual(alev, *out[alev], *in[alev], rh[alev], crse_bcdata);
988 if (alev < finest_amr_lev) {
989 linop.reflux(alev, *out[alev], *in[alev], rh[alev],
990 *out[alev+1], *in[alev+1], rh[alev+1]);
991 if (linop.isCellCentered()) {
992 if constexpr (IsMultiFabLike_v<MF>) {
993#ifdef AMREX_USE_EB
994 EB_average_down(*out[alev+1], *out[alev], 0, nComp(*out[alev]), linop.AMRRefRatioVect(alev));
995#else
996 average_down(*out[alev+1], *out[alev], 0, nComp(*out[alev]), linop.AMRRefRatioVect(alev));
997#endif
998 } else {
999 amrex::Abort("MLMG: TODO average_down for non-MultiFab");
1000 }
1001 }
1002 }
1003 }
1004
1005#if (AMREX_SPACEDIM != 3)
1006 for (int alev = 0; alev <= finest_amr_lev; ++alev) {
1007 linop.unapplyMetricTerm(alev, 0, *out[alev]);
1008 }
1009#endif
1010
    // Residual against zero rhs is -L(in); flip the sign to return L-like
    // output with the conventional sign.
1011 for (int alev = 0; alev <= finest_amr_lev; ++alev) {
1012 if (cf_strategy == CFStrategy::ghostnodes) { nghost = linop.getNGrow(alev); }
1013 Scale(*out[alev], RT(-1), 0, nComp(*out[alev]), nghost);
1014 }
1015}
1016
1017template <typename MF>
1018void
1020{
1021 precond_mode = true;
1022 linop.beginPrecondBC();
1023 apply(out, in);
1024 linop.endPrecondBC();
1025 precond_mode = false;
1026}
1027
1028template <typename MF>
1029template <typename AMF>
1030void
// NOTE(review): original line 1031 (the signature) was dropped by the doc
// extraction — presumably MLMGT<MF>::prepareForSolve(a_sol, a_rhs); confirm
// against the header declaration.
// Purpose: one-time (and per-solve) setup — prepare/update the linop, set up
// the internal sol/rhs copies or aliases, and allocate the res/rescor/cor/
// cor_hold hierarchies used by the multigrid cycles.
1032{
1033 BL_PROFILE("MLMG::prepareForSolve()");
1034
1035 AMREX_ASSERT(namrlevs <= a_sol.size());
1036 AMREX_ASSERT(namrlevs <= a_rhs.size());
1037
1038 timer.assign(ntimers, 0.0);
1039
    // Default ghost cells: none for rhs, one for sol; no ghost cell for sol in
    // a hidden dimension.
1040 IntVect ng_rhs(0);
1041 IntVect ng_sol(1);
1042 if (linop.hasHiddenDimension()) { ng_sol[linop.hiddenDirection()] = 0; }
1043
    // Prepare the operator on first use; on later solves, an operator update
    // invalidates any cached external (hypre/petsc) bottom-solver state.
1044 if (!linop_prepared) {
1045 linop.prepareForSolve();
1046 linop_prepared = true;
1047 } else if (linop.needsUpdate()) {
1048 linop.update();
1049
1050#if defined(AMREX_USE_HYPRE) && (AMREX_SPACEDIM > 1)
1051 hypre_solver.reset();
1052 hypre_bndry.reset();
1053 hypre_node_solver.reset();
1054#endif
1055
1056#if defined(AMREX_USE_PETSC) && (AMREX_SPACEDIM > 1)
1057 petsc_solver.reset();
1058 petsc_bndry.reset();
1059#endif
1060 }
1061
    // Set up sol: alias the caller's data when the types match and the ghost
    // cells are compatible; otherwise copy into internally managed storage
    // with homogeneous boundary values.
1062 sol.resize(namrlevs);
1063 sol_is_alias.resize(namrlevs,false);
1064 for (int alev = 0; alev < namrlevs; ++alev)
1065 {
1066 if (cf_strategy == CFStrategy::ghostnodes)
1067 {
1068 if constexpr (std::is_same<AMF,MF>()) {
1069 sol[alev] = linop.makeAlias(*a_sol[alev]);
1070 sol_is_alias[alev] = true;
1071 } else {
1072 amrex::Abort("Type conversion not supported for CFStrategy::ghostnodes");
1073 }
1074 }
1075 else
1076 {
1077 if (nGrowVect(*a_sol[alev]) == ng_sol) {
1078 if constexpr (std::is_same<AMF,MF>()) {
1079 sol[alev] = linop.makeAlias(*a_sol[alev]);
1080 sol_is_alias[alev] = true;
1081 }
1082 }
1083 if (!sol_is_alias[alev]) {
1084 if (!solve_called) {
1085 sol[alev] = linop.make(alev, 0, ng_sol);
1086 }
1087 LocalCopy(sol[alev], *a_sol[alev], 0, 0, ncomp, IntVect(0));
1088 setBndry(sol[alev], RT(0.0), 0, ncomp);
1089 }
1090 }
1091 }
1092
    // Copy rhs and apply operator-specific adjustments (metric terms,
    // Neumann-BC handling, overset masking, optional RHS scaling).
1093 rhs.resize(namrlevs);
1094 for (int alev = 0; alev < namrlevs; ++alev)
1095 {
1096 if (cf_strategy == CFStrategy::ghostnodes) { ng_rhs = IntVect(linop.getNGrow(alev)); }
1097 if (!solve_called) {
1098 rhs[alev] = linop.make(alev, 0, ng_rhs);
1099 }
1100 LocalCopy(rhs[alev], *a_rhs[alev], 0, 0, ncomp, ng_rhs);
1101 linop.applyMetricTerm(alev, 0, rhs[alev]);
1102 linop.unimposeNeumannBC(alev, rhs[alev]);
1103 linop.applyInhomogNeumannTerm(alev, rhs[alev]);
1104 linop.applyOverset(alev, rhs[alev]);
1105 if ( ! precond_mode) {
1106 bool r = linop.scaleRHS(alev, &(rhs[alev]));
        // NOTE(review): original line 1107 is missing from this extraction;
        // presumably it consumes or asserts on `r` (e.g. ignore_unused) —
        // confirm against the actual header.
1108 }
1109
1110#ifdef AMREX_USE_EB
    // Zero covered cells so EB-covered regions do not pollute the solve.
1111 const auto *factory = dynamic_cast<EBFArrayBoxFactory const*>(linop.Factory(alev));
1112 if (factory && !factory->isAllRegular()) {
1113 if constexpr (std::is_same<MF,MultiFab>()) {
1114 EB_set_covered(rhs[alev], 0, ncomp, 0, RT(0.0));
1115 EB_set_covered(sol[alev], 0, ncomp, 0, RT(0.0));
1116 } else {
1117 amrex::Abort("TODO: MLMG with EB only works with MultiFab");
1118 }
1119 }
1120#endif
1121 }
1122
    // Make coarse AMR levels consistent with the fine levels above them.
1123 for (int falev = finest_amr_lev; falev > 0; --falev)
1124 {
1125 linop.averageDownSolutionRHS(falev-1, sol[falev-1], rhs[falev-1], sol[falev], rhs[falev]);
1126 }
1127
1128 // enforce solvability if appropriate
1129 if (linop.isSingular(0) && linop.getEnforceSingularSolvable())
1130 {
1131 makeSolvable();
1132 }
1133
    // Allocate the residual (res) and residual-of-correction (rescor)
    // hierarchies; storage is reused across repeated solves.
1134 IntVect ng = linop.getNGrowVectRestriction();
1135 if (cf_strategy == CFStrategy::ghostnodes) { ng = ng_rhs; }
1136 if (!solve_called) {
1137 linop.make(res, ng);
1138 linop.make(rescor, ng);
1139 }
1140 for (int alev = 0; alev <= finest_amr_lev; ++alev)
1141 {
1142 const int nmglevs = linop.NMGLevels(alev);
1143 for (int mglev = 0; mglev < nmglevs; ++mglev)
1144 {
1145 setVal(res [alev][mglev], RT(0.0));
1146 setVal(rescor[alev][mglev], RT(0.0));
1147 }
1148 }
1149
    // Allocate the correction (cor) hierarchy on every MG level.
1150 if (cf_strategy != CFStrategy::ghostnodes) { ng = ng_sol; }
1151 cor.resize(namrlevs);
1152 for (int alev = 0; alev <= finest_amr_lev; ++alev)
1153 {
1154 const int nmglevs = linop.NMGLevels(alev);
1155 cor[alev].resize(nmglevs);
1156 for (int mglev = 0; mglev < nmglevs; ++mglev)
1157 {
1158 if (!solve_called) {
1159 IntVect _ng = ng;
1160 if (cf_strategy == CFStrategy::ghostnodes) { _ng=IntVect(linop.getNGrow(alev,mglev)); }
1161 cor[alev][mglev] = linop.make(alev, mglev, _ng);
1162 }
1163 setVal(cor[alev][mglev], RT(0.0));
1164 }
1165 }
1166
    // cor_hold saves corrections across cycle legs: all-but-bottom MG levels
    // on AMR level 0, and a single slot on intermediate AMR levels.
1167 cor_hold.resize(std::max(namrlevs-1,1));
1168 {
1169 const int alev = 0;
1170 const int nmglevs = linop.NMGLevels(alev);
1171 cor_hold[alev].resize(nmglevs);
1172 for (int mglev = 0; mglev < nmglevs-1; ++mglev)
1173 {
1174 if (!solve_called) {
1175 IntVect _ng = ng;
1176 if (cf_strategy == CFStrategy::ghostnodes) { _ng=IntVect(linop.getNGrow(alev,mglev)); }
1177 cor_hold[alev][mglev] = linop.make(alev, mglev, _ng);
1178 }
1179 setVal(cor_hold[alev][mglev], RT(0.0));
1180 }
1181 }
1182 for (int alev = 1; alev < finest_amr_lev; ++alev)
1183 {
1184 cor_hold[alev].resize(1);
1185 if (!solve_called) {
1186 IntVect _ng = ng;
1187 if (cf_strategy == CFStrategy::ghostnodes) { _ng=IntVect(linop.getNGrow(alev)); }
1188 cor_hold[alev][0] = linop.make(alev, 0, _ng);
1189 }
1190 setVal(cor_hold[alev][0], RT(0.0));
1191 }
1192
    // N-Solve is disabled for nested (child) operators or when unsupported.
1193 if (linop.m_parent // no embedded N-Solve
1194 || !linop.supportNSolve())
1195 {
1196 do_nsolve = false;
1197 }
1198
1199 if (do_nsolve && ns_linop == nullptr)
1200 {
1201 prepareForNSolve();
1202 }
1203
1204 if (verbose >= 2) {
1205 amrex::Print() << print_ident << "MLMG: # of AMR levels: " << namrlevs << "\n"
1206 << print_ident << " # of MG levels on the coarsest AMR level: " << linop.NMGLevels(0)
1207 << "\n";
1208 if (ns_linop) {
1209 amrex::Print() << print_ident << " # of MG levels in N-Solve: " << ns_linop->NMGLevels(0) << "\n"
1210 << print_ident << " # of grids in N-Solve: " << ns_linop->m_grids[0][0].size() << "\n";
1211 }
1212 }
1213}
1214
1215template <typename MF>
1216void
// NOTE(review): original line 1217 (the signature) was dropped by the doc
// extraction — presumably MLMGT<MF>::prepareLinOp(); confirm in the header.
// Purpose: ensure the linear operator is ready — first call prepares it,
// later calls only refresh it when the operator reports it needs an update.
1218{
1219 if (!linop_prepared) {
1220 linop.prepareForSolve();
1221 linop_prepared = true;
1222 } else if (linop.needsUpdate()) {
1223 linop.update();
1224 }
1225}
1226
1227template <typename MF>
1228void
// NOTE(review): original line 1229 (the signature) was dropped by the doc
// extraction — presumably MLMGT<MF>::preparePrecond(); confirm in the header.
// Purpose: prepare the operator and then its preconditioner-specific state.
1230{
1231 prepareLinOp();
1232 linop.preparePrecond();
1233}
1234
1235template <typename MF>
1236void
// NOTE(review): original line 1237 (the signature) was dropped by the doc
// extraction — presumably MLMGT<MF>::prepareForNSolve(); confirm in the header.
// Purpose: build the embedded N-Solve machinery — a coarsened operator
// (ns_linop), its solution/rhs storage, and a nested single-V-cycle MLMG
// (ns_mlmg) with a smoother bottom solver.
1238{
1239 if constexpr (IsMultiFabLike_v<MF>) {
1240 ns_linop = linop.makeNLinOp(nsolve_grid_size);
1241
1242 int nghost = 0;
1243 if (cf_strategy == CFStrategy::ghostnodes) { nghost = linop.getNGrow(); }
1244
1245 const BoxArray& ba = (*ns_linop).m_grids[0][0];
1246 const DistributionMapping& dm =(*ns_linop).m_dmap[0][0];
1247
    // sol carries one ghost cell (or the ghost-node count); rhs carries none.
1248 int ng = 1;
1249 if (cf_strategy == CFStrategy::ghostnodes) { ng = nghost; }
1250 ns_sol = std::make_unique<MF>(ba, dm, ncomp, ng, MFInfo(), *(ns_linop->Factory(0,0)));
1251 ng = 0;
1252 if (cf_strategy == CFStrategy::ghostnodes) { ng = nghost; }
1253 ns_rhs = std::make_unique<MF>(ba, dm, ncomp, ng, MFInfo(), *(ns_linop->Factory(0,0)));
1254 setVal(*ns_sol, RT(0.0));
1255 setVal(*ns_rhs, RT(0.0));
1256
1257 ns_linop->setLevelBC(0, ns_sol.get());
1258
    // One fixed iteration, silent, smoother at the bottom: N-Solve is used as
    // an inner solver, not run to convergence.
1259 ns_mlmg = std::make_unique<MLMGT<MF>>(*ns_linop);
1260 ns_mlmg->setVerbose(0);
1261 ns_mlmg->setFixedIter(1);
1262 ns_mlmg->setMaxFmgIter(20);
1263 ns_mlmg->setBottomSolver(BottomSolver::smoother);
1264 }
1265}
1266
1267// in : Residual (res) on the finest AMR level
1268// out : sol on all AMR levels
// One full multi-level iteration: a down-sweep of mini V-cycles from the
// finest AMR level, an F- or V-cycle on the coarsest AMR level, then an
// up-sweep that interpolates and re-smooths corrections back to the finest
// level. Statement order is significant throughout.
1269template <typename MF>
1270void MLMGT<MF>::oneIter (int iter)
1271{
1272 BL_PROFILE("MLMG::oneIter()");
1273
    // Down-sweep: relax each fine AMR level and push its residual down.
1274 for (int alev = finest_amr_lev; alev > 0; --alev)
1275 {
1276 miniCycle(alev);
1277
1278 IntVect nghost(0);
1279 if (cf_strategy == CFStrategy::ghostnodes) { nghost = IntVect(linop.getNGrow(alev)); }
1280 LocalAdd(sol[alev], cor[alev][0], 0, 0, ncomp, nghost);
1281
1282 // compute residual for the coarse AMR level
1283 computeResWithCrseSolFineCor(alev-1,alev);
1284
1285 if (alev != finest_amr_lev) {
1286 std::swap(cor_hold[alev][0], cor[alev][0]); // save it for the up cycle
1287 }
1288 }
1289
1290 // coarsest amr level
1291 {
1292 // enforce solvability if appropriate
1293 if (linop.isSingular(0) && linop.getEnforceSingularSolvable())
1294 {
1295 makeSolvable(0,0,res[0][0]);
1296 }
1297
    // Early iterations use the (stronger) F-cycle; later ones a V-cycle.
1298 if (iter < max_fmg_iters) {
1299 mgFcycle();
1300 } else {
1301 mgVcycle(0, 0);
1302 }
1303
1304 IntVect nghost(0);
1305 if (cf_strategy == CFStrategy::ghostnodes) { nghost = IntVect(linop.getNGrow(0)); }
1306 LocalAdd(sol[0], cor[0][0], 0, 0, ncomp, nghost);
1307 }
1308
    // Up-sweep: interpolate the coarse correction up, recompute the fine
    // residual, relax again, and accumulate into sol.
1309 for (int alev = 1; alev <= finest_amr_lev; ++alev)
1310 {
1311 // (Fine AMR correction) = I(Coarse AMR correction)
1312 interpCorrection(alev);
1313
1314 IntVect nghost(0);
1315 if (cf_strategy == CFStrategy::ghostnodes) { nghost = IntVect(linop.getNGrow(alev)); }
1316 LocalAdd(sol[alev], cor[alev][0], 0, 0, ncomp, nghost);
1317
1318 if (alev != finest_amr_lev) {
1319 LocalAdd(cor_hold[alev][0], cor[alev][0], 0, 0, ncomp, nghost);
1320 }
1321
1322 // Update fine AMR level correction
1323 computeResWithCrseCorFineCor(alev);
1324
1325 miniCycle(alev);
1326
1327 LocalAdd(sol[alev], cor[alev][0], 0, 0, ncomp, nghost);
1328
1329 if (alev != finest_amr_lev) {
1330 LocalAdd(cor[alev][0], cor_hold[alev][0], 0, 0, ncomp, nghost);
1331 }
1332 }
1333
    // Make the composite solution consistent across AMR levels.
1334 linop.averageDownAndSync(sol);
1335}
1336
1337template <typename MF>
1338void
// NOTE(review): original line 1339 (the signature) was dropped by the doc
// extraction — presumably MLMGT<MF>::miniCycle(int amrlev); confirm in header.
// Purpose: a V-cycle restricted to a single AMR level, starting at MG level 0.
1340{
1341 BL_PROFILE("MLMG::miniCycle()");
1342 const int mglev = 0;
1343 mgVcycle(amrlev, mglev);
1344}
1345
1346// in : Residual (res)
1347// out : Correction (cor) from bottom to this function's local top
// Classic V-cycle between mglev_top and the bottom MG level of one AMR level:
// pre-smooth + restrict on the way down, bottom solve (true bottom solver on
// AMR level 0, plain smoothing otherwise), then interpolate + post-smooth on
// the way up. verbose >= 4 prints residual norms around every stage.
1348template <typename MF>
1349void
1350MLMGT<MF>::mgVcycle (int amrlev, int mglev_top)
1351{
1352 BL_PROFILE("MLMG::mgVcycle()");
1353
1354 const int mglev_bottom = linop.NMGLevels(amrlev) - 1;
1355
    // Down-sweep: nu1 pre-smoothing sweeps, then restrict the residual.
1356 for (int mglev = mglev_top; mglev < mglev_bottom; ++mglev)
1357 {
1358 BL_PROFILE_VAR("MLMG::mgVcycle_down::"+std::to_string(mglev), blp_mgv_down_lev);
1359
1360 if (verbose >= 4)
1361 {
1362 RT norm = norminf(res[amrlev][mglev],0,ncomp,IntVect(0));
1363 amrex::Print() << print_ident << "AT LEVEL " << amrlev << " " << mglev
1364 << " DN: Norm before smooth " << norm << "\n";
1365 }
1366
    // Correction starts from zero, so the first smooth can skip FillBoundary.
1367 setVal(cor[amrlev][mglev], RT(0.0));
1368 bool skip_fillboundary = true;
1369 linop.smooth(amrlev, mglev, cor[amrlev][mglev], res[amrlev][mglev], skip_fillboundary, nu1);
1370
1371 // rescor = res - L(cor)
1372 computeResOfCorrection(amrlev, mglev);
1373
1374 if (verbose >= 4)
1375 {
1376 RT norm = norminf(rescor[amrlev][mglev],0,ncomp,IntVect(0));
1377 amrex::Print() << print_ident << "AT LEVEL " << amrlev << " " << mglev
1378 << " DN: Norm after smooth " << norm << "\n";
1379 }
1380
1381 // res_crse = R(rescor_fine); this provides res/b to the level below
1382 linop.restriction(amrlev, mglev+1, res[amrlev][mglev+1], rescor[amrlev][mglev]);
1383 }
1384
1385 BL_PROFILE_VAR("MLMG::mgVcycle_bottom", blp_bottom);
1386 if (amrlev == 0)
1387 {
    // True bottom of the hierarchy: delegate to the configured bottom solver.
1388 if (verbose >= 4)
1389 {
1390 RT norm = norminf(res[amrlev][mglev_bottom],0,ncomp,IntVect(0));
1391 amrex::Print() << print_ident << "AT LEVEL " << amrlev << " " << mglev_bottom
1392 << " DN: Norm before bottom " << norm << "\n";
1393 }
1394 bottomSolve();
1395 if (verbose >= 4)
1396 {
1397 computeResOfCorrection(amrlev, mglev_bottom);
1398 RT norm = norminf(rescor[amrlev][mglev_bottom],0,ncomp,IntVect(0));
1399 amrex::Print() << print_ident << "AT LEVEL " << amrlev << " " << mglev_bottom
1400 << " UP: Norm after bottom " << norm << "\n";
1401 }
1402 }
1403 else
1404 {
    // Not the coarsest AMR level: just smooth at this level's bottom.
1405 if (verbose >= 4)
1406 {
1407 RT norm = norminf(res[amrlev][mglev_bottom],0,ncomp,IntVect(0));
1408 amrex::Print() << print_ident << "AT LEVEL " << amrlev << " " << mglev_bottom
1409 << " Norm before smooth " << norm << "\n";
1410 }
1411 setVal(cor[amrlev][mglev_bottom], RT(0.0));
1412 bool skip_fillboundary = true;
1413 linop.smooth(amrlev, mglev_bottom, cor[amrlev][mglev_bottom],
1414 res[amrlev][mglev_bottom], skip_fillboundary, nu1);
1415 if (verbose >= 4)
1416 {
1417 computeResOfCorrection(amrlev, mglev_bottom);
1418 RT norm = norminf(rescor[amrlev][mglev_bottom],0,ncomp,IntVect(0));
1419 amrex::Print() << print_ident << "AT LEVEL " << amrlev << " " << mglev_bottom
1420 << " Norm after smooth " << norm << "\n";
1421 }
1422 }
1423 BL_PROFILE_VAR_STOP(blp_bottom);
1424
    // Up-sweep: add the interpolated coarse correction, then nu2 post-smooths.
1425 for (int mglev = mglev_bottom-1; mglev >= mglev_top; --mglev)
1426 {
1427 BL_PROFILE_VAR("MLMG::mgVcycle_up::"+std::to_string(mglev), blp_mgv_up_lev);
1428 // cor_fine += I(cor_crse)
1429 addInterpCorrection(amrlev, mglev);
1430 if (verbose >= 4)
1431 {
1432 computeResOfCorrection(amrlev, mglev);
1433 RT norm = norminf(rescor[amrlev][mglev],0,ncomp,IntVect(0));
1434 amrex::Print() << print_ident << "AT LEVEL " << amrlev << " " << mglev
1435 << " UP: Norm before smooth " << norm << "\n";
1436 }
1437 linop.smooth(amrlev, mglev, cor[amrlev][mglev], res[amrlev][mglev], false, nu2);
1438
    // ghostnodes strategy needs rescor kept current after the post-smooth.
1439 if (cf_strategy == CFStrategy::ghostnodes) { computeResOfCorrection(amrlev, mglev); }
1440
1441 if (verbose >= 4)
1442 {
1443 computeResOfCorrection(amrlev, mglev);
1444 RT norm = norminf(rescor[amrlev][mglev],0,ncomp,IntVect(0));
1445 amrex::Print() << print_ident << "AT LEVEL " << amrlev << " " << mglev
1446 << " UP: Norm after smooth " << norm << "\n";
1447 }
1448 }
1449}
1450
1451// FMG cycle on the coarsest AMR level.
1452// in: Residual on the top MG level (i.e., 0)
1453// out: Correction (cor) on all MG levels
// Full-multigrid: average the residual down to the bottom, solve there, then
// on each finer level interpolate, recompute the residual, and run a V-cycle,
// accumulating the saved correction via cor_hold.
1454template <typename MF>
1455void
// NOTE(review): original line 1456 (the signature) was dropped by the doc
// extraction — presumably MLMGT<MF>::mgFcycle(); confirm in the header.
1457{
1458 BL_PROFILE("MLMG::mgFcycle()");
1459
1460#ifdef AMREX_USE_EB
    // FMG restriction below assumes cell-centered data when EB cut cells are
    // present; assert that combination is valid.
1461 auto* pf = linop.Factory(0);
1462 auto is_all_regular = [pf] () {
1463 const auto *const f = dynamic_cast<EBFArrayBoxFactory const*>(pf);
1464 if (f) {
1465 return f->isAllRegular();
1466 } else {
1467 return true;
1468 }
1469 };
1470 amrex::ignore_unused(pf, is_all_regular);
1471 AMREX_ASSERT(linop.isCellCentered() || is_all_regular());
1472#endif
1473
1474 const int amrlev = 0;
1475 const int mg_bottom_lev = linop.NMGLevels(amrlev) - 1;
1476 IntVect nghost(0);
1477 if (cf_strategy == CFStrategy::ghostnodes) { nghost = IntVect(linop.getNGrow(amrlev)); }
1478
    // Push the top-level residual down the whole MG hierarchy.
1479 for (int mglev = 1; mglev <= mg_bottom_lev; ++mglev)
1480 {
1481 linop.avgDownResMG(mglev, res[amrlev][mglev], res[amrlev][mglev-1]);
1482 }
1483
1484 bottomSolve();
1485
1486 for (int mglev = mg_bottom_lev-1; mglev >= 0; --mglev)
1487 {
1488 // cor_fine = I(cor_crse)
1489 interpCorrection(amrlev, mglev);
1490
1491 // rescor = res - L(cor)
1492 computeResOfCorrection(amrlev, mglev);
1493 // res = rescor; this provides b to the vcycle below
1494 LocalCopy(res[amrlev][mglev], rescor[amrlev][mglev], 0, 0, ncomp, nghost);
1495
1496 // save cor; do v-cycle; add the saved to cor
1497 std::swap(cor[amrlev][mglev], cor_hold[amrlev][mglev]);
1498 mgVcycle(amrlev, mglev);
1499 LocalAdd(cor[amrlev][mglev], cor_hold[amrlev][mglev], 0, 0, ncomp, nghost);
1500 }
1501}
1502
1503// At the true bottom of the coarsest AMR level.
1504// in : Residual (res) as b
1505// out : Correction (cor) as x
// Dispatch: use the embedded N-Solve when enabled, else the configured
// bottom solver via actualBottomSolve().
1506template <typename MF>
1507void
// NOTE(review): original line 1508 (the signature) was dropped by the doc
// extraction — presumably MLMGT<MF>::bottomSolve(); confirm in the header.
1509{
1510 if (do_nsolve)
1511 {
1512 NSolve(*ns_mlmg, *ns_sol, *ns_rhs);
1513 }
1514 else
1515 {
1516 actualBottomSolve();
1517 }
1518}
1519
1520template <typename MF>
1521void
1522MLMGT<MF>::NSolve (MLMGT<MF>& a_solver, MF& a_sol, MF& a_rhs)
// Run the embedded N-Solve: move the bottom residual into a_rhs, solve with
// the nested MLMG (fixed single iteration), and copy the result into the
// bottom correction.
1523{
1524 BL_PROFILE("MLMG::NSolve()");
1525
1526 setVal(a_sol, RT(0.0));
1527
    // Fast path when a_rhs and the bottom residual share layout; otherwise a
    // parallel copy is needed.
1528 MF const& res_bottom = res[0].back();
1529 if (BoxArray::SameRefs(boxArray(a_rhs),boxArray(res_bottom)) &&
// NOTE(review): original line 1530 — the second half of this condition — was
// dropped by the doc extraction; presumably a DistributionMapping::SameRefs
// check on the two distribution maps. Confirm against the actual header.
1531 {
1532 LocalCopy(a_rhs, res_bottom, 0, 0, ncomp, IntVect(0));
1533 } else {
1534 setVal(a_rhs, RT(0.0));
1535 ParallelCopy(a_rhs, res_bottom, 0, 0, ncomp);
1536 }
1537
    // Negative tolerances: run the fixed iteration count, not to convergence.
1538 a_solver.solve(Vector<MF*>{&a_sol}, Vector<MF const*>{&a_rhs},
1539 RT(-1.0), RT(-1.0));
1540
1541 linop.copyNSolveSolution(cor[0].back(), a_sol);
1542}
1543
1544template <typename MF>
1545void
// NOTE(review): original line 1546 (the signature) was dropped by the doc
// extraction — presumably MLMGT<MF>::actualBottomSolve(); confirm in header.
// Purpose: solve L(x) = b on the bottom MG level of AMR level 0 with the
// configured bottom solver (smoother, CG/BiCGStab with automatic fallback,
// hypre, or PETSc), inside the bottom communicator.
1547{
1548 BL_PROFILE("MLMG::actualBottomSolve()");
1549
1550 if (!linop.isBottomActive()) { return; }
1551
1552 auto bottom_start_time = amrex::second();
1553
1554 ParallelContext::push(linop.BottomCommunicator());
1555
1556 const int amrlev = 0;
1557 const int mglev = linop.NMGLevels(amrlev) - 1;
1558 auto& x = cor[amrlev][mglev];
1559 auto& b = res[amrlev][mglev];
1560
1561 setVal(x, RT(0.0));
1562
1563 if (bottom_solver == BottomSolver::smoother)
1564 {
1565 bool skip_fillboundary = true;
1566 linop.smooth(amrlev, mglev, x, b, skip_fillboundary, nuf);
1567 }
1568 else
1569 {
    // For singular bottom problems, work on a solvable copy of b so the
    // original residual is left untouched.
1570 MF* bottom_b = &b;
1571 MF raii_b;
1572 if (linop.isBottomSingular() && linop.getEnforceSingularSolvable())
1573 {
1574 const IntVect ng = nGrowVect(b);
1575 raii_b = linop.make(amrlev, mglev, ng);
1576 LocalCopy(raii_b, b, 0, 0, ncomp, ng);
1577 bottom_b = &raii_b;
1578
1579 makeSolvable(amrlev,mglev,*bottom_b);
1580 }
1581
1582 if (bottom_solver == BottomSolver::hypre)
1583 {
1584#if defined(AMREX_USE_HYPRE) && (AMREX_SPACEDIM > 1)
1585 if constexpr (std::is_same<MF,MultiFab>()) {
1586 bottomSolveWithHypre(x, *bottom_b);
1587 } else
1588#endif
1589 {
1590 amrex::Abort("Using Hypre as bottom solver not supported in this case");
1591 }
1592 }
1593 else if (bottom_solver == BottomSolver::petsc)
1594 {
1595#if defined(AMREX_USE_PETSC) && (AMREX_SPACEDIM > 1)
1596 if constexpr (std::is_same<MF,MultiFab>()) {
1597 bottomSolveWithPETSc(x, *bottom_b);
1598 } else
1599#endif
1600 {
1601 amrex::Abort("Using PETSc as bottom solver not supported in this case");
1602 }
1603 }
1604 else
1605 {
1606 typename MLCGSolverT<MF>::Type cg_type;
1607 if (bottom_solver == BottomSolver::cg ||
1608 bottom_solver == BottomSolver::cgbicg) {
1609 cg_type = MLCGSolverT<MF>::Type::CG;
1610 } else {
// NOTE(review): original line 1611 was dropped by the doc extraction —
// presumably `cg_type = MLCGSolverT<MF>::Type::BiCGStab;`. Confirm.
1612 }
1613
1614 int ret = bottomSolveWithCG(x, *bottom_b, cg_type);
1615
    // cgbicg/bicgcg: on failure retry once with the other Krylov method,
    // and adopt it permanently if the retry succeeds.
1616 if (ret != 0 && (bottom_solver == BottomSolver::cgbicg ||
1617 bottom_solver == BottomSolver::bicgcg))
1618 {
1619 if (bottom_solver == BottomSolver::cgbicg) {
1620 cg_type = MLCGSolverT<MF>::Type::BiCGStab; // switch to bicg
1621 } else {
1622 cg_type = MLCGSolverT<MF>::Type::CG; // switch to cg
1623 }
1624 setVal(cor[amrlev][mglev], RT(0.0));
1625 ret = bottomSolveWithCG(x, *bottom_b, cg_type);
1626 if (ret == 0) { // switch permanently
1627 if (cg_type == MLCGSolverT<MF>::Type::CG) {
1628 bottom_solver = BottomSolver::cg;
1629 } else {
1630 bottom_solver = BottomSolver::bicgstab;
1631 }
1632 }
1633 }
1634
1635 // If the bottom solve failed then set the correction to zero
1636 if (ret != 0 && ret != 9) {
1637 setVal(cor[amrlev][mglev], RT(0.0));
1638 }
1639 const int n = (ret==0) ? nub : nuf;
1640 linop.smooth(amrlev, mglev, x, b, false, n);
1641 }
1642 }
1643
// NOTE(review): original line 1644 was dropped by the doc extraction —
// presumably `ParallelContext::pop();` matching the push above. Confirm.
1645
1646 if (! timer.empty()) {
1647 timer[bottom_time] += amrex::second() - bottom_start_time;
1648 }
1649}
1650
1651template <typename MF>
1652int
// NOTE(review): original line 1653 (the signature) was dropped by the doc
// extraction — presumably MLMGT<MF>::bottomSolveWithCG(MF& x, const MF& b,
// typename MLCGSolverT<MF>::Type type); confirm in the header.
// Purpose: run one Krylov (CG or BiCGStab) bottom solve; returns the solver's
// status code (0 on success) and records the iteration count.
1654{
1655 MLCGSolverT<MF> cg_solver(linop);
1656 cg_solver.setSolver(type);
1657 cg_solver.setVerbose(bottom_verbose);
1658 cg_solver.setPrintIdentation(print_ident);
1659 cg_solver.setMaxIter(bottom_maxiter);
1660 cg_solver.setInitSolnZeroed(true);
1661 if (cf_strategy == CFStrategy::ghostnodes) { cg_solver.setNGhost(linop.getNGrow()); }
1662
1663 int ret = cg_solver.solve(x, b, bottom_reltol, bottom_abstol);
1664 if (ret != 0 && verbose > 1) {
1665 amrex::Print() << print_ident << "MLMG: Bottom solve failed.\n";
1666 }
1667 m_niters_cg.push_back(cg_solver.getNumIters());
1668 return ret;
1669}
1670
1671// Compute multi-level Residual (res) up to amrlevmax.
1672template <typename MF>
1673void
// NOTE(review): original line 1674 (the signature) was dropped by the doc
// extraction — presumably MLMGT<MF>::computeMLResidual(int amrlevmax);
// confirm in the header.
1675{
1676 BL_PROFILE("MLMG::computeMLResidual()");
1677
    // Sweep fine-to-coarse so each level's reflux sees the level above it.
1678 const int mglev = 0;
1679 for (int alev = amrlevmax; alev >= 0; --alev) {
1680 const MF* crse_bcdata = (alev > 0) ? &(sol[alev-1]) : nullptr;
1681 linop.solutionResidual(alev, res[alev][mglev], sol[alev], rhs[alev], crse_bcdata);
1682 if (alev < finest_amr_lev) {
1683 linop.reflux(alev, res[alev][mglev], sol[alev], rhs[alev],
1684 res[alev+1][mglev], sol[alev+1], rhs[alev+1]);
1685 }
1686 }
1687}
1688
1689// Compute single AMR level residual without masking.
1690template <typename MF>
1691void
// NOTE(review): original line 1692 (the signature) was dropped by the doc
// extraction — presumably MLMGT<MF>::computeResidual(int alev); confirm.
1693{
1694 BL_PROFILE("MLMG::computeResidual()");
    // The coarser level's solution (if any) supplies Dirichlet boundary data.
1695 const MF* crse_bcdata = (alev > 0) ? &(sol[alev-1]) : nullptr;
1696 linop.solutionResidual(alev, res[alev][0], sol[alev], rhs[alev], crse_bcdata);
1697}
1698
1699// Compute coarse AMR level composite residual with coarse solution and fine correction
// calev/falev are adjacent coarse/fine AMR levels. Updates crse res with
// solution residual + reflux, and replaces fine res by the residual of the
// fine correction; finally averages the fine residual down onto the coarse.
1700template <typename MF>
1701void
// NOTE(review): original line 1702 (the signature) was dropped by the doc
// extraction — presumably MLMGT<MF>::computeResWithCrseSolFineCor(int calev,
// int falev); confirm in the header.
1703{
1704 BL_PROFILE("MLMG::computeResWithCrseSolFineCor()");
1705
1706 IntVect nghost(0);
1707 if (cf_strategy == CFStrategy::ghostnodes) {
1708 nghost = IntVect(std::min(linop.getNGrow(falev),linop.getNGrow(calev)));
1709 }
1710
1711 MF& crse_sol = sol[calev];
1712 const MF& crse_rhs = rhs[calev];
1713 MF& crse_res = res[calev][0];
1714
1715 MF& fine_sol = sol[falev];
1716 const MF& fine_rhs = rhs[falev];
1717 MF& fine_cor = cor[falev][0];
1718 MF& fine_res = res[falev][0];
1719 MF& fine_rescor = rescor[falev][0];
1720
1721 const MF* crse_bcdata = (calev > 0) ? &(sol[calev-1]) : nullptr;
1722 linop.solutionResidual(calev, crse_res, crse_sol, crse_rhs, crse_bcdata);
1723
    // fine_res <- fine_res - L(fine_cor) with homogeneous BC.
1724 linop.correctionResidual(falev, 0, fine_rescor, fine_cor, fine_res, BCMode::Homogeneous);
1725 LocalCopy(fine_res, fine_rescor, 0, 0, ncomp, nghost);
1726
1727 linop.reflux(calev, crse_res, crse_sol, crse_rhs, fine_res, fine_sol, fine_rhs);
1728
1729 linop.avgDownResAmr(calev, crse_res, fine_res);
1730}
1731
1732// Compute fine AMR level residual fine_res = fine_res - L(fine_cor) with coarse providing BC.
1733template <typename MF>
1734void
// NOTE(review): original line 1735 (the signature) was dropped by the doc
// extraction — presumably MLMGT<MF>::computeResWithCrseCorFineCor(int falev);
// confirm in the header.
1736{
1737 BL_PROFILE("MLMG::computeResWithCrseCorFineCor()");
1738
1739 IntVect nghost(0);
1740 if (cf_strategy == CFStrategy::ghostnodes) {
1741 nghost = IntVect(linop.getNGrow(falev));
1742 }
1743
    // The coarse-level correction supplies inhomogeneous boundary data.
1744 const MF& crse_cor = cor[falev-1][0];
1745
1746 MF& fine_cor = cor [falev][0];
1747 MF& fine_res = res [falev][0];
1748 MF& fine_rescor = rescor[falev][0];
1749
1750 // fine_rescor = fine_res - L(fine_cor)
1751 linop.correctionResidual(falev, 0, fine_rescor, fine_cor, fine_res,
1752 BCMode::Inhomogeneous, &crse_cor);
1753 LocalCopy(fine_res, fine_rescor, 0, 0, ncomp, nghost);
1754}
1755
1756// Interpolate correction from coarse to fine AMR level.
1757template <typename MF>
1758void
// NOTE(review): original line 1759 (the signature) was dropped by the doc
// extraction — presumably MLMGT<MF>::interpCorrection(int alev); confirm.
1760{
1761 BL_PROFILE("MLMG::interpCorrection_1");
1762
1763 IntVect nghost(0);
1764 if (cf_strategy == CFStrategy::ghostnodes) {
1765 nghost = IntVect(linop.getNGrow(alev));
1766 }
1767
1768 MF & crse_cor = cor[alev-1][0];
1769 MF & fine_cor = cor[alev ][0];
1770
1771 const Geometry& crse_geom = linop.Geom(alev-1,0);
1772
    // ghostnodes needs ghost data filled and carried through the copy;
    // otherwise ghost cells are only needed on the destination for
    // cell-centered operators.
1773 int ng_src = 0;
1774 int ng_dst = linop.isCellCentered() ? 1 : 0;
1775 if (cf_strategy == CFStrategy::ghostnodes)
1776 {
1777 ng_src = linop.getNGrow(alev-1);
1778 ng_dst = linop.getNGrow(alev-1);
1779 if constexpr (IsMultiFabLike_v<MF>) {
1780 crse_cor.FillBoundary(0, ncomp, IntVect(ng_src), crse_geom.periodicity());
1781 } else {
1782 amrex::Abort("MLMG: CFStrategy::ghostnodes not supported for non-MultiFab like types");
1783 }
1784 }
1785
    // Bring the coarse correction onto a BoxArray coarsened from the fine
    // level, then interpolate up.
1786 MF cfine = linop.makeCoarseAmr(alev, IntVect(ng_dst));
1787 setVal(cfine, RT(0.0));
1788 ParallelCopy(cfine, crse_cor, 0, 0, ncomp, IntVect(ng_src), IntVect(ng_dst),
1789 crse_geom.periodicity());
1790
1791 linop.interpolationAmr(alev, fine_cor, cfine, nghost); // NOLINT(readability-suspicious-call-argument)
1792}
1793
1794// Interpolate correction between MG levels
1795// inout: Correction (cor) on coarse MG lev. (out due to FillBoundary)
1796// out : Correction (cor) on fine MG lev.
1797template <typename MF>
1798void
1799MLMGT<MF>::interpCorrection (int alev, int mglev)
1800{
1801 BL_PROFILE("MLMG::interpCorrection_2");
1802
1803 MF& crse_cor = cor[alev][mglev+1];
1804 MF& fine_cor = cor[alev][mglev ];
1805 linop.interpAssign(alev, mglev, fine_cor, crse_cor);
1806}
1807
1808// (Fine MG level correction) += I(Coarse MG level correction)
1809template <typename MF>
1810void
// NOTE(review): original line 1811 (the signature) was dropped by the doc
// extraction — presumably MLMGT<MF>::addInterpCorrection(int alev, int mglev);
// confirm in the header.
1812{
1813 BL_PROFILE("MLMG::addInterpCorrection()");
1814
1815 const MF& crse_cor = cor[alev][mglev+1];
1816 MF& fine_cor = cor[alev][mglev ];
1817
    // Use the coarse data directly when the two levels share an iteration-safe
    // layout; otherwise stage it through a parallel copy.
1818 MF cfine;
1819 const MF* cmf;
1820
1821 if (linop.isMFIterSafe(alev, mglev, mglev+1))
1822 {
1823 cmf = &crse_cor;
1824 }
1825 else
1826 {
1827 cfine = linop.makeCoarseMG(alev, mglev, IntVect(0));
1828 ParallelCopy(cfine, crse_cor, 0, 0, ncomp);
1829 cmf = &cfine;
1830 }
1831
1832 linop.interpolation(alev, mglev, fine_cor, *cmf);
1833}
1834
1835// Compute rescor = res - L(cor)
1836// in : res
1837// inout: cor (out due to FillBoundary in linop.correctionResidual)
1838// out : rescor
1839template <typename MF>
1840void
// NOTE(review): original line 1841 (the signature) was dropped by the doc
// extraction — presumably MLMGT<MF>::computeResOfCorrection(int amrlev,
// int mglev); confirm in the header.
1842{
1843 BL_PROFILE("MLMG:computeResOfCorrection()");
1844 MF & x = cor[amrlev][mglev];
1845 const MF& b = res[amrlev][mglev];
1846 MF & r = rescor[amrlev][mglev];
1847 linop.correctionResidual(amrlev, mglev, r, x, b, BCMode::Homogeneous);
1848}
1849
1850// Compute single-level masked inf-norm of Residual (res).
1851template <typename MF>
1852auto
1853MLMGT<MF>::ResNormInf (int alev, bool local) -> RT
1854{
1855 BL_PROFILE("MLMG::ResNormInf()");
1856 return linop.normInf(alev, res[alev][0], local);
1857}
1858
1859// Computes multi-level masked inf-norm of Residual (res).
1860template <typename MF>
1861auto
1862MLMGT<MF>::MLResNormInf (int alevmax, bool local) -> RT
1863{
1864 BL_PROFILE("MLMG::MLResNormInf()");
1865 RT r = RT(0.0);
    // Take the max of the per-level local norms ...
1866 for (int alev = 0; alev <= alevmax; ++alev)
1867 {
1868 r = std::max(r, ResNormInf(alev,true));
1869 }
// NOTE(review): original line 1870 was dropped by the doc extraction —
// presumably the global reduction guarded by `local` (e.g.
// ParallelAllReduce::Max). Confirm against the actual header.
1871 return r;
1872}
1873
1874// Compute multi-level masked inf-norm of RHS (rhs).
1875template <typename MF>
1876auto
// NOTE(review): original line 1877 (the signature) was dropped by the doc
// extraction — presumably MLMGT<MF>::MLRhsNormInf(bool local) -> RT; confirm.
1878{
1879 BL_PROFILE("MLMG::MLRhsNormInf()");
1880 RT r = RT(0.0);
1881 for (int alev = 0; alev <= finest_amr_lev; ++alev) {
1882 auto t = linop.normInf(alev, rhs[alev], true);
1883 r = std::max(r, t);
1884 }
// NOTE(review): original line 1885 was dropped by the doc extraction —
// presumably the global reduction guarded by `local`. Confirm.
1886 return r;
1887}
1888
1889template <typename MF>
1890void
// NOTE(review): original line 1891 (the signature) was dropped by the doc
// extraction — presumably MLMGT<MF>::makeSolvable(); confirm in the header.
// Purpose: subtract the level-0 solvability offset from the rhs on every AMR
// level so a singular problem has a consistent right-hand side.
1892{
1893 auto const& offset = linop.getSolvabilityOffset(0, 0, rhs[0]);
1894 if (verbose >= 4) {
1895 for (int c = 0; c < ncomp; ++c) {
1896 amrex::Print() << print_ident << "MLMG: Subtracting " << offset[c] << " from rhs component "
1897 << c << "\n";
1898 }
1899 }
1900 for (int alev = 0; alev < namrlevs; ++alev) {
1901 linop.fixSolvabilityByOffset(alev, 0, rhs[alev], offset);
1902 }
1903}
1904
1905template <typename MF>
1906void
1907MLMGT<MF>::makeSolvable (int amrlev, int mglev, MF& mf)
1908{
1909 auto const& offset = linop.getSolvabilityOffset(amrlev, mglev, mf);
1910 if (verbose >= 4) {
1911 for (int c = 0; c < ncomp; ++c) {
1912 amrex::Print() << print_ident << "MLMG: Subtracting " << offset[c]
1913 << " from mf component c = " << c
1914 << " on level (" << amrlev << ", " << mglev << ")\n";
1915 }
1916 }
1917 linop.fixSolvabilityByOffset(amrlev, mglev, mf, offset);
1918}
1919
#if defined(AMREX_USE_HYPRE) && (AMREX_SPACEDIM > 1)
1921template <typename MF>
1922template <class TMF,std::enable_if_t<std::is_same_v<TMF,MultiFab>,int>>
1923void
1924MLMGT<MF>::bottomSolveWithHypre (MF& x, const MF& b)
// Solve the bottom problem with hypre, lazily constructing and caching the
// hypre solver and boundary object on first use (they are reset when the
// linop is updated; see prepareForSolve). Single-component only.
1925{
1926 const int amrlev = 0;
1927 const int mglev = linop.NMGLevels(amrlev) - 1;
1928
1929 AMREX_ALWAYS_ASSERT_WITH_MESSAGE(ncomp == 1, "bottomSolveWithHypre doesn't work with ncomp > 1");
1930
1931 if (linop.isCellCentered())
1932 {
1933 if (hypre_solver == nullptr) // We should reuse the setup
1934 {
1935 hypre_solver = linop.makeHypre(hypre_interface);
1936
1937 hypre_solver->setVerbose(bottom_verbose);
1938 if (hypre_interface == amrex::Hypre::Interface::ij) {
1939 hypre_solver->setHypreOptionsNamespace(hypre_options_namespace);
1940 } else {
1941 hypre_solver->setHypreOldDefault(hypre_old_default);
1942 hypre_solver->setHypreRelaxType(hypre_relax_type);
1943 hypre_solver->setHypreRelaxOrder(hypre_relax_order);
1944 hypre_solver->setHypreNumSweeps(hypre_num_sweeps);
1945 hypre_solver->setHypreStrongThreshold(hypre_strong_threshold);
1946 }
1947
    // Boundary object with homogeneous values on the bottom-level grids.
1948 const BoxArray& ba = linop.m_grids[amrlev].back();
1949 const DistributionMapping& dm = linop.m_dmap[amrlev].back();
1950 const Geometry& geom = linop.m_geom[amrlev].back();
1951
1952 hypre_bndry = std::make_unique<MLMGBndryT<MF>>(ba, dm, ncomp, geom);
1953 hypre_bndry->setHomogValues();
1954 const Real* dx = linop.m_geom[0][0].CellSize();
1955 IntVect crse_ratio = linop.m_coarse_data_crse_ratio.allGT(0) ? linop.m_coarse_data_crse_ratio : IntVect(1);
1956 RealVect bclocation(AMREX_D_DECL(0.5*dx[0]*crse_ratio[0],
1957 0.5*dx[1]*crse_ratio[1],
1958 0.5*dx[2]*crse_ratio[2]));
1959 hypre_bndry->setLOBndryConds(linop.m_lobc, linop.m_hibc, IntVect(-1), bclocation,
1960 linop.m_coarse_fine_bc_type);
1961 }
1962
1963 // IJ interface understands absolute tolerance API of hypre
1964 amrex::Real hypre_abstol =
1965 (hypre_interface == amrex::Hypre::Interface::ij)
1966 ? bottom_abstol : Real(-1.0);
1967 hypre_solver->solve(
1968 x, b, bottom_reltol, hypre_abstol, bottom_maxiter, *hypre_bndry,
1969 linop.getMaxOrder());
1970 }
1971 else
1972 {
    // Nodal problems go through the hypre nodal-Laplacian wrapper.
1973 if (hypre_node_solver == nullptr)
1974 {
1975 hypre_node_solver =
1976 linop.makeHypreNodeLap(bottom_verbose, hypre_options_namespace);
1977 }
1978 hypre_node_solver->solve(x, b, bottom_reltol, bottom_abstol, bottom_maxiter);
1979 }
1980
1981 // For singular problems there may be a large constant added to all values of the solution
1982 // For precision reasons we enforce that the average of the correction from hypre is 0
1983 if (linop.isSingular(amrlev) && linop.getEnforceSingularSolvable())
1984 {
1985 makeSolvable(amrlev, mglev, x);
1986 }
1987}
#endif
1989
#if defined(AMREX_USE_PETSC) && (AMREX_SPACEDIM > 1)
1991template <typename MF>
1992template <class TMF,std::enable_if_t<std::is_same_v<TMF,MultiFab>,int>>
1993void
1994MLMGT<MF>::bottomSolveWithPETSc (MF& x, const MF& b)
// Solve the bottom problem with PETSc, lazily constructing and caching the
// PETSc solver and boundary object on first use (reset when the linop is
// updated; see prepareForSolve). Single-component only.
1995{
1996 AMREX_ALWAYS_ASSERT_WITH_MESSAGE(ncomp == 1, "bottomSolveWithPETSc doesn't work with ncomp > 1");
1997
1998 if(petsc_solver == nullptr)
1999 {
2000 petsc_solver = linop.makePETSc();
2001 petsc_solver->setVerbose(bottom_verbose);
2002
2003 const BoxArray& ba = linop.m_grids[0].back();
2004 const DistributionMapping& dm = linop.m_dmap[0].back();
2005 const Geometry& geom = linop.m_geom[0].back();
2006
2007 petsc_bndry = std::make_unique<MLMGBndryT<MF>>(ba, dm, ncomp, geom);
2008 petsc_bndry->setHomogValues();
2009 const Real* dx = linop.m_geom[0][0].CellSize();
2010 auto crse_ratio = linop.m_coarse_data_crse_ratio.allGT(0) ? linop.m_coarse_data_crse_ratio : IntVect(1);
2011 RealVect bclocation(AMREX_D_DECL(0.5*dx[0]*crse_ratio[0],
2012 0.5*dx[1]*crse_ratio[1],
2013 0.5*dx[2]*crse_ratio[2]));
2014 petsc_bndry->setLOBndryConds(linop.m_lobc, linop.m_hibc, IntVect(-1), bclocation,
2015 linop.m_coarse_fine_bc_type);
2016 }
    // Negative absolute tolerance: PETSc is driven by the relative tolerance.
2017 petsc_solver->solve(x, b, bottom_reltol, Real(-1.), bottom_maxiter, *petsc_bndry,
2018 linop.getMaxOrder());
2019}
#endif
2021
2022template <typename MF>
2023void
2024MLMGT<MF>::checkPoint (const Vector<MultiFab*>& a_sol,
2025 const Vector<MultiFab const*>& a_rhs,
2026 RT a_tol_rel, RT a_tol_abs, const char* a_file_name) const
// Debugging aid: dump the solver configuration (Header), the per-level sol
// and rhs MultiFabs, and the linop state into a checkpoint directory.
2027{
2028 std::string file_name(a_file_name);
2029 UtilCreateCleanDirectory(file_name, false);
2030
// NOTE(review): original line 2031 was dropped by the doc extraction —
// presumably an I/O-processor guard (e.g. if (ParallelContext::
// IOProcessorSub())) so only one rank writes the Header. Confirm.
2032 {
2033 std::string HeaderFileName(std::string(a_file_name)+"/Header");
2034 std::ofstream HeaderFile;
2035 HeaderFile.open(HeaderFileName.c_str(), std::ofstream::out |
2036 std::ofstream::trunc |
2037 std::ofstream::binary);
2038 if( ! HeaderFile.good()) {
2039 FileOpenFailed(HeaderFileName);
2040 }
2041
2042 HeaderFile.precision(17);
2043
2044 std::string norm_name = getEnumNameString(norm_type);
2045
2046 HeaderFile << linop.name() << "\n"
2047 << "a_tol_rel = " << a_tol_rel << "\n"
2048 << "a_tol_abs = " << a_tol_abs << "\n"
2049 << "verbose = " << verbose << "\n"
2050 << "max_iters = " << max_iters << "\n"
2051 << "nu1 = " << nu1 << "\n"
2052 << "nu2 = " << nu2 << "\n"
2053 << "nuf = " << nuf << "\n"
2054 << "nub = " << nub << "\n"
2055 << "max_fmg_iters = " << max_fmg_iters << "\n"
2056 << "bottom_solver = " << static_cast<int>(bottom_solver) << "\n"
2057 << "bottom_verbose = " << bottom_verbose << "\n"
2058 << "bottom_maxiter = " << bottom_maxiter << "\n"
2059 << "bottom_reltol = " << bottom_reltol << "\n"
2060 << "convergence_norm = " << norm_name << "\n"
2061 << "namrlevs = " << namrlevs << "\n"
2062 << "finest_amr_lev = " << finest_amr_lev << "\n"
2063 << "linop_prepared = " << linop_prepared << "\n"
2064 << "solve_called = " << solve_called << "\n";
2065
2066 for (int ilev = 0; ilev <= finest_amr_lev; ++ilev) {
2067 UtilCreateCleanDirectory(file_name+"/Level_"+std::to_string(ilev), false);
2068 }
2069 }
2070
// NOTE(review): original line 2071 was dropped by the doc extraction —
// presumably a barrier so directories exist before all ranks write. Confirm.
2072
2073 for (int ilev = 0; ilev <= finest_amr_lev; ++ilev) {
2074 VisMF::Write(*a_sol[ilev], file_name+"/Level_"+std::to_string(ilev)+"/sol");
2075 VisMF::Write(*a_rhs[ilev], file_name+"/Level_"+std::to_string(ilev)+"/rhs");
2076 }
2077
2078 linop.checkPoint(file_name+"/linop");
2079}
2080
2081template <typename MF>
2082void
// NOTE(review): original line 2083 (the signature) was dropped by the doc
// extraction; name unknown from this view. The body grows the print
// indentation by four spaces — presumably used for nested/child solvers.
2084{
2085 print_ident.resize(print_ident.size()+4, ' ');
2086}
2087
2088template <typename MF>
2089void
// NOTE(review): original line 2090 (the signature) was dropped by the doc
// extraction; name unknown from this view. The body undoes one four-space
// indentation step, clearing entirely rather than going negative.
2091{
2092 if (print_ident.size() > 4) {
2093 print_ident.resize(print_ident.size()-4, ' ');
2094 } else {
2095 print_ident.clear();
2096 }
2097}
2098
2099extern template class MLMGT<MultiFab>;
2100
2103
2104}
2105
2106#endif
#define BL_PROFILE(a)
Definition AMReX_BLProfiler.H:551
#define BL_PROFILE_VAR_STOP(vname)
Definition AMReX_BLProfiler.H:563
#define BL_PROFILE_VAR(fname, vname)
Definition AMReX_BLProfiler.H:560
#define AMREX_ALWAYS_ASSERT_WITH_MESSAGE(EX, MSG)
Definition AMReX_BLassert.H:49
#define AMREX_ASSERT(EX)
Definition AMReX_BLassert.H:38
#define AMREX_ENUM(CLASS,...)
Definition AMReX_Enum.H:208
Array4< int const > offset
Definition AMReX_HypreMLABecLap.cpp:1139
#define AMREX_D_DECL(a, b, c)
Definition AMReX_SPACE.H:171
Print on all processors of the default communicator.
Definition AMReX_Print.H:113
A collection of Boxes stored in an Array.
Definition AMReX_BoxArray.H:568
static bool SameRefs(const BoxArray &lhs, const BoxArray &rhs)
whether two BoxArrays share the same data
Definition AMReX_BoxArray.H:841
Calculates the distribution of FABs to MPI processes.
Definition AMReX_DistributionMapping.H:43
static bool SameRefs(const DistributionMapping &lhs, const DistributionMapping &rhs)
Definition AMReX_DistributionMapping.H:189
Definition AMReX_EBFabFactory.H:25
bool isAllRegular() const noexcept
Definition AMReX_EBFabFactory.cpp:148
Solve using GMRES with multigrid as preconditioner.
Definition AMReX_GMRES_MLMG.H:22
Rectangular problem domain geometry.
Definition AMReX_Geometry.H:74
Periodicity periodicity() const noexcept
Definition AMReX_Geometry.H:356
Interface
HYPRE interface modes supported.
Definition AMReX_Hypre.H:37
__host__ __device__ constexpr bool allGT(const IntVectND< dim > &rhs) const noexcept
Returns true if this is greater than argument for all components. NOTE: This is NOT a strict weak ord...
Definition AMReX_IntVect.H:425
__host__ __device__ constexpr IntVectND< new_dim > resize(int fill_extra=0) const noexcept
Returns a new IntVectND of size new_dim by either shrinking or expanding this IntVectND.
Definition AMReX_IntVect.H:775
__host__ static __device__ constexpr IntVectND< dim > TheDimensionVector(int d) noexcept
This static member function returns a reference to a constant IntVectND object, all of whose dim argu...
Definition AMReX_IntVect.H:698
Definition AMReX_MLCGSolver.H:12
void setSolver(Type _typ) noexcept
Definition AMReX_MLCGSolver.H:28
void setVerbose(int _verbose)
Definition AMReX_MLCGSolver.H:39
int getNumIters() const noexcept
Definition AMReX_MLCGSolver.H:64
void setInitSolnZeroed(bool _sol_zeroed)
Definition AMReX_MLCGSolver.H:53
void setPrintIdentation(std::string s)
Definition AMReX_MLCGSolver.H:45
int solve(MF &solnL, const MF &rhsL, RT eps_rel, RT eps_abs)
Definition AMReX_MLCGSolver.H:89
Type
Definition AMReX_MLCGSolver.H:18
void setNGhost(int _nghost)
Definition AMReX_MLCGSolver.H:56
void setMaxIter(int _maxiter)
Definition AMReX_MLCGSolver.H:42
Definition AMReX_MLLinOp.H:102
typename FabDataType< MF >::fab_type FAB
Definition AMReX_MLLinOp.H:112
typename FabDataType< MF >::value_type RT
Definition AMReX_MLLinOp.H:113
Definition AMReX_MLMG.H:22
Definition AMReX_MLMG.H:17
void prepareForFluxes(Vector< MF const * > const &a_sol)
Definition AMReX_MLMG.H:630
void setBottomVerbose(int v) noexcept
Definition AMReX_MLMG.H:148
void setMaxFmgIter(int n) noexcept
Definition AMReX_MLMG.H:136
RT MLResNormInf(int alevmax, bool local=false)
Definition AMReX_MLMG.H:1862
RT MLRhsNormInf(bool local=false)
Definition AMReX_MLMG.H:1877
void setNoGpuSync(bool do_not_sync) noexcept
Definition AMReX_MLMG.H:166
MLMGT(MLMGT< MF > &&)=delete
void actualBottomSolve()
Definition AMReX_MLMG.H:1546
MF MFType
Definition AMReX_MLMG.H:30
BottomSolver getBottomSolver() const noexcept
Definition AMReX_MLMG.H:146
void setPreSmooth(int n) noexcept
Definition AMReX_MLMG.H:140
void setBottomToleranceAbs(RT t) noexcept
Definition AMReX_MLMG.H:151
RT getFinalResidual() const noexcept
Definition AMReX_MLMG.H:246
void interpCorrection(int alev)
Definition AMReX_MLMG.H:1759
void getEBFluxes(const Vector< MF * > &a_eb_flux)
Definition AMReX_MLMG.H:851
void getGradSolution(const Vector< Array< AMF *, 3 > > &a_grad_sol, Location a_loc=Location::FaceCenter)
Definition AMReX_MLMG.H:641
void setBottomSmooth(int n) noexcept
Definition AMReX_MLMG.H:143
void setNSolve(int flag) noexcept
Definition AMReX_MLMG.H:163
int getBottomVerbose() const
Definition AMReX_MLMG.H:128
void computeResOfCorrection(int amrlev, int mglev)
Definition AMReX_MLMG.H:1841
void applyPrecond(const Vector< MF * > &out, const Vector< MF * > &in)
out = L(in) as a preconditioner
Definition AMReX_MLMG.H:1019
void setCFStrategy(CFStrategy a_cf_strategy) noexcept
Definition AMReX_MLMG.H:147
void computeResWithCrseCorFineCor(int falev)
Definition AMReX_MLMG.H:1735
void NSolve(MLMGT< MF > &a_solver, MF &a_sol, MF &a_rhs)
Definition AMReX_MLMG.H:1522
typename MLLinOpT< MF >::Location Location
Definition AMReX_MLMG.H:35
void apply(const Vector< MF * > &out, const Vector< MF * > &in)
out = L(in). Note that, if no actual solve is needed, one could turn off multigrid coarsening by cons...
Definition AMReX_MLMG.H:944
void getFluxes(const Vector< Array< AMF *, 3 > > &a_flux, Location a_loc=Location::FaceCenter)
For (alpha * a - beta * (del dot b grad)) phi = rhs, flux means -b grad phi
Definition AMReX_MLMG.H:672
void setNSolveGridSize(int s) noexcept
Definition AMReX_MLMG.H:164
void setVerbose(int v) noexcept
Definition AMReX_MLMG.H:134
void computeMLResidual(int amrlevmax)
Definition AMReX_MLMG.H:1674
RT getInitResidual() const noexcept
Definition AMReX_MLMG.H:244
int getNumIters() const noexcept
Definition AMReX_MLMG.H:249
void setPostSmooth(int n) noexcept
Definition AMReX_MLMG.H:141
void mgVcycle(int amrlev, int mglev)
Definition AMReX_MLMG.H:1350
void prepareForNSolve()
Definition AMReX_MLMG.H:1237
RT precond(Vector< MF * > const &a_sol, Vector< MF const * > const &a_rhs, RT a_tol_rel, RT a_tol_abs)
Definition AMReX_MLMG.H:612
void makeSolvable()
Definition AMReX_MLMG.H:1891
void setBottomSolver(BottomSolver s) noexcept
Definition AMReX_MLMG.H:145
void preparePrecond()
Definition AMReX_MLMG.H:1229
void incPrintIdentation()
Definition AMReX_MLMG.H:2083
typename MLLinOpT< MF >::RT RT
Definition AMReX_MLMG.H:32
void setThrowException(bool t) noexcept
Definition AMReX_MLMG.H:133
void decPrintIdentation()
Definition AMReX_MLMG.H:2090
void setFixedIter(int nit) noexcept
Definition AMReX_MLMG.H:137
Vector< RT > const & getResidualHistory() const noexcept
Definition AMReX_MLMG.H:248
void prepareLinOp()
Definition AMReX_MLMG.H:1217
void setPrecondIter(int nit) noexcept
Definition AMReX_MLMG.H:138
CFStrategy
Definition AMReX_MLMG.H:38
void prepareForSolve(Vector< AMF * > const &a_sol, Vector< AMF const * > const &a_rhs)
Definition AMReX_MLMG.H:1031
int bottomSolveWithCG(MF &x, const MF &b, typename MLCGSolverT< MF >::Type type)
Definition AMReX_MLMG.H:1653
void setAlwaysUseBNorm(int flag) noexcept
Definition AMReX_MLMG.H:371
void compResidual(const Vector< MF * > &a_res, const Vector< MF * > &a_sol, const Vector< MF const * > &a_rhs)
Definition AMReX_MLMG.H:877
void miniCycle(int amrlev)
Definition AMReX_MLMG.H:1339
RT solve(std::initializer_list< AMF * > a_sol, std::initializer_list< AMF const * > a_rhs, RT a_tol_rel, RT a_tol_abs, const char *checkpoint_file=nullptr)
void setFinalFillBC(int flag) noexcept
Definition AMReX_MLMG.H:159
typename MLLinOpT< MF >::BCMode BCMode
Definition AMReX_MLMG.H:34
void setConvergenceNormType(MLMGNormType norm) noexcept
Definition AMReX_MLMG.H:157
void computeResWithCrseSolFineCor(int calev, int falev)
Definition AMReX_MLMG.H:1702
MLMGT< MF > & operator=(MLMGT< MF > const &)=delete
MLMGT(MLLinOpT< MF > &a_lp)
Definition AMReX_MLMG.H:362
void computeResidual(int alev)
Definition AMReX_MLMG.H:1692
MLLinOpT< MF > & getLinOp()
Definition AMReX_MLMG.H:252
typename MLLinOpT< MF >::FAB FAB
Definition AMReX_MLMG.H:31
RT getBottomToleranceAbs() const noexcept
Definition AMReX_MLMG.H:152
int numAMRLevels() const noexcept
Definition AMReX_MLMG.H:161
MLMGT(MLMGT< MF > const &)=delete
void mgFcycle()
Definition AMReX_MLMG.H:1456
RT getInitRHS() const noexcept
Definition AMReX_MLMG.H:242
RT ResNormInf(int alev, bool local=false)
Definition AMReX_MLMG.H:1853
Vector< int > const & getNumCGIters() const noexcept
Definition AMReX_MLMG.H:250
void bottomSolve()
Definition AMReX_MLMG.H:1508
void setBottomTolerance(RT t) noexcept
Definition AMReX_MLMG.H:150
void setFinalSmooth(int n) noexcept
Definition AMReX_MLMG.H:142
void addInterpCorrection(int alev, int mglev)
Definition AMReX_MLMG.H:1811
int getVerbose() const
Definition AMReX_MLMG.H:127
RT solve(const Vector< AMF * > &a_sol, const Vector< AMF const * > &a_rhs, RT a_tol_rel, RT a_tol_abs, const char *checkpoint_file=nullptr)
void setBottomMaxIter(int n) noexcept
Definition AMReX_MLMG.H:149
void oneIter(int iter)
Definition AMReX_MLMG.H:1270
void setMaxIter(int n) noexcept
Definition AMReX_MLMG.H:135
This class provides the user with a few print options.
Definition AMReX_Print.H:35
This class is a thin wrapper around std::vector. Unlike vector, Vector::operator[] provides bound che...
Definition AMReX_Vector.H:28
Long size() const noexcept
Definition AMReX_Vector.H:53
static Long Write(const FabArray< FArrayBox > &mf, const std::string &name, VisMF::How how=NFiles, bool set_ghost=false)
Write a FabArray<FArrayBox> to disk in a "smart" way. Returns the total number of bytes written on th...
Definition AMReX_VisMF.cpp:979
amrex_real Real
Floating Point Type for Fields.
Definition AMReX_REAL.H:79
amrex_long Long
Definition AMReX_INT.H:30
std::array< T, N > Array
Definition AMReX_Array.H:26
void Max(KeyValuePair< K, V > &vi, MPI_Comm comm)
Definition AMReX_ParallelReduce.H:133
bool setNoSyncRegion(bool b) noexcept
Definition AMReX_GpuControl.H:158
bool setSingleStreamRegion(bool b) noexcept
Definition AMReX_GpuControl.H:154
void push(MPI_Comm c)
Definition AMReX_ParallelContext.H:102
void BarrierSub() noexcept
Definition AMReX_ParallelContext.H:88
MPI_Comm CommunicatorSub() noexcept
sub-communicator for current frame
Definition AMReX_ParallelContext.H:70
int MyProcSub() noexcept
my sub-rank in current frame
Definition AMReX_ParallelContext.H:76
void pop()
Note that it's the user's responsibility to free the MPI_Comm.
Definition AMReX_ParallelContext.H:108
bool IOProcessorSub() noexcept
Am IO processor for current frame?
Definition AMReX_ParallelContext.H:80
int verbose
Definition AMReX.cpp:109
Definition AMReX_Amr.cpp:49
MF::value_type norminf(MF const &mf, int scomp, int ncomp, IntVect const &nghost, bool local=false)
Definition AMReX_FabArrayUtility.H:2029
__host__ __device__ void ignore_unused(const Ts &...)
This shuts up the compiler about unused variables.
Definition AMReX.H:139
__host__ __device__ BoxND< dim > convert(const BoxND< dim > &b, const IntVectND< dim > &typ) noexcept
Return a BoxND with different type.
Definition AMReX_Box.H:1558
int nComp(FabArrayBase const &fa)
Definition AMReX_FabArrayBase.cpp:2851
void FileOpenFailed(const std::string &file)
Output a message and abort when couldn't open the file.
Definition AMReX_Utility.cpp:137
void EB_average_face_to_cellcenter(MultiFab &ccmf, int dcomp, const Array< MultiFab const *, 3 > &fmf)
Definition AMReX_EBMultiFabUtil.cpp:806
std::array< T const *, 3 > GetArrOfConstPtrs(const std::array< T, 3 > &a) noexcept
Create an array of const-qualified pointers from an array of objects.
Definition AMReX_Array.H:1047
__host__ __device__ T norm(const GpuComplex< T > &a_z) noexcept
Return the norm (magnitude squared) of a complex number.
Definition AMReX_GpuComplex.H:349
DistributionMapping const & DistributionMap(FabArrayBase const &fa)
Definition AMReX_FabArrayBase.cpp:2866
void average_down(const MultiFab &S_fine, MultiFab &S_crse, const Geometry &fgeom, const Geometry &cgeom, int scomp, int ncomp, int rr)
Definition AMReX_MultiFabUtil.cpp:336
std::string getEnumNameString(T const &v)
Definition AMReX_Enum.H:156
IntVect nGrowVect(FabArrayBase const &fa)
Definition AMReX_FabArrayBase.cpp:2856
void average_face_to_cellcenter(MultiFab &cc, int dcomp, const Vector< const MultiFab * > &fc, IntVect const &ng_vect)
Definition AMReX_MultiFabUtil.cpp:155
void LocalCopy(DMF &dst, SMF const &src, int scomp, int dcomp, int ncomp, IntVect const &nghost)
dst = src
Definition AMReX_FabArrayUtility.H:1950
void EB_set_covered(MultiFab &mf, Real val)
Definition AMReX_EBMultiFabUtil.cpp:21
void LocalAdd(MF &dst, MF const &src, int scomp, int dcomp, int ncomp, IntVect const &nghost)
dst += src
Definition AMReX_FabArrayUtility.H:1958
double second() noexcept
Definition AMReX_Utility.cpp:940
std::array< T *, 3 > GetArrOfPtrs(std::array< T, 3 > &a) noexcept
Create an array of pointers from an array of objects.
Definition AMReX_Array.H:1001
void UtilCreateCleanDirectory(const std::string &path, bool callbarrier=true)
Create a new directory, renaming the old one if it exists.
Definition AMReX_Utility.cpp:167
void EB_average_down(const MultiFab &S_fine, MultiFab &S_crse, const MultiFab &vol_fine, const MultiFab &vfrac_fine, int scomp, int ncomp, const IntVect &ratio)
Definition AMReX_EBMultiFabUtil.cpp:336
void computeResidual(AlgVector< T, AllocV > &res, SpMatrix< T, AllocM > const &A, AlgVector< T, AllocV > const &x, AlgVector< T, AllocV > const &b)
res = b - A*x
Definition AMReX_SpMV.H:226
BottomSolver
Definition AMReX_MLLinOp.H:31
IntVectND< 3 > IntVect
IntVect is an alias for amrex::IntVectND instantiated with AMREX_SPACEDIM.
Definition AMReX_BaseFwd.H:33
RealVectND< 3 > RealVect
Definition AMReX_ParmParse.H:35
void setBndry(MF &dst, typename MF::value_type val, int scomp, int ncomp)
dst = val in ghost cells.
Definition AMReX_FabArrayUtility.H:1934
Vector< T * > GetVecOfPtrs(Vector< T > &a)
Definition AMReX_Vector.H:64
void Scale(MF &dst, typename MF::value_type val, int scomp, int ncomp, int nghost)
dst *= val
Definition AMReX_FabArrayUtility.H:1941
Vector< std::array< T *, 3 > > GetVecOfArrOfPtrs(const Vector< std::array< std::unique_ptr< T >, 3 > > &a)
Definition AMReX_Vector.H:141
void Abort(const std::string &msg)
Print out message to cerr and exit via abort().
Definition AMReX.cpp:240
MLMGNormType
Definition AMReX_MLMG.H:12
void ParallelCopy(MF &dst, MF const &src, int scomp, int dcomp, int ncomp, IntVect const &ng_src=IntVect(0), IntVect const &ng_dst=IntVect(0), Periodicity const &period=Periodicity::NonPeriodic())
dst = src w/ MPI communication
Definition AMReX_FabArrayUtility.H:2019
void setVal(MF &dst, typename MF::value_type val)
dst = val
Definition AMReX_FabArrayUtility.H:1927
BoxArray const & boxArray(FabArrayBase const &fa)
Definition AMReX_FabArrayBase.cpp:2861
BCMode
Definition AMReX_MLLinOp.H:88
Location
Definition AMReX_MLLinOp.H:90
FabArray memory allocation information.
Definition AMReX_FabArray.H:66