Block-Structured AMR Software Framework
AMReX_MLMG.H
1#ifndef AMREX_ML_MG_H_
2#define AMREX_ML_MG_H_
3#include <AMReX_Config.H>
4#include <AMReX_Enum.H>
5
6#include <AMReX_MLLinOp.H>
7#include <AMReX_MLCGSolver.H>
8
9namespace amrex {
10
11// Norm used to evaluate the target convergence criteria
12AMREX_ENUM(MLMGNormType, bnorm, resnorm0, greater);
13
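// For example, to always measure convergence against the norm of the
// right-hand side (the behavior of the deprecated setAlwaysUseBNorm(1)),
// one would call, on a hypothetical solver object `mlmg`:
//
//     mlmg.setConvergenceNormType(MLMGNormType::bnorm);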
15template <typename MF>
16class MLMGT
17{
18public:
19
20 class error
21 : public std::runtime_error
22 {
23 public :
24 using std::runtime_error::runtime_error;
25 };
26
27 template <typename T> friend class MLCGSolverT;
28 template <typename M> friend class GMRESMLMGT;
29
30 using MFType = MF;
31 using FAB = typename MLLinOpT<MF>::FAB;
32 using RT = typename MLLinOpT<MF>::RT;
33
34 using BCMode = typename MLLinOpT<MF>::BCMode;
35 using Location = typename MLLinOpT<MF>::Location;
36
38 enum class CFStrategy : int {none,ghostnodes};
39
40 MLMGT (MLLinOpT<MF>& a_lp);
41 ~MLMGT ();
42
43 MLMGT (MLMGT<MF> const&) = delete;
44 MLMGT (MLMGT<MF> &&) = delete;
45 MLMGT<MF>& operator= (MLMGT<MF> const&) = delete;
46 MLMGT<MF>& operator= (MLMGT<MF> &&) = delete;
47
48 // Optional argument checkpoint_file is for debugging only.
49 template <typename AMF>
50 RT solve (const Vector<AMF*>& a_sol, const Vector<AMF const*>& a_rhs,
51 RT a_tol_rel, RT a_tol_abs, const char* checkpoint_file = nullptr);
52
53 template <typename AMF>
54 RT solve (std::initializer_list<AMF*> a_sol,
55 std::initializer_list<AMF const*> a_rhs,
56 RT a_tol_rel, RT a_tol_abs, const char* checkpoint_file = nullptr);
57
58 RT precond (Vector<MF*> const& a_sol, Vector<MF const*> const& a_rhs,
59 RT a_tol_rel, RT a_tol_abs);
60
61 template <typename AMF>
62 void getGradSolution (const Vector<Array<AMF*,AMREX_SPACEDIM> >& a_grad_sol,
63 Location a_loc = Location::FaceCenter);
64
65 template <typename AMF>
66 void getGradSolution (std::initializer_list<Array<AMF*,AMREX_SPACEDIM>> a_grad_sol,
67 Location a_loc = Location::FaceCenter);
68
72 template <typename AMF>
73 void getFluxes (const Vector<Array<AMF*,AMREX_SPACEDIM> >& a_flux,
74 Location a_loc = Location::FaceCenter);
75
76 template <typename AMF>
77 void getFluxes (std::initializer_list<Array<AMF*,AMREX_SPACEDIM>> a_flux,
78 Location a_loc = Location::FaceCenter);
79
80 template <typename AMF>
81 void getFluxes (const Vector<Array<AMF*,AMREX_SPACEDIM> >& a_flux,
82 const Vector<AMF*> & a_sol,
83 Location a_loc = Location::FaceCenter);
84
85 template <typename AMF>
86 void getFluxes (std::initializer_list<Array<AMF*,AMREX_SPACEDIM>> a_flux,
87 std::initializer_list<AMF*> a_sol,
88 Location a_loc = Location::FaceCenter);
89
90 template <typename AMF>
91 void getFluxes (const Vector<AMF*> & a_flux,
92 Location a_loc = Location::CellCenter);
93
94 template <typename AMF>
95 void getFluxes (std::initializer_list<AMF*> a_flux,
96 Location a_loc = Location::CellCenter);
97
98 template <typename AMF>
99 void getFluxes (const Vector<AMF*> & a_flux,
100 const Vector<AMF*> & a_sol,
101 Location a_loc = Location::CellCenter);
102
103 template <typename AMF>
104 void getFluxes (std::initializer_list<AMF*> a_flux,
105 std::initializer_list<AMF*> a_sol,
106 Location a_loc = Location::CellCenter);
107
108 void compResidual (const Vector<MF*>& a_res, const Vector<MF*>& a_sol,
109 const Vector<MF const*>& a_rhs);
110
111#ifdef AMREX_USE_EB
112 // Flux into the EB wall
113 void getEBFluxes (const Vector<MF*>& a_eb_flux);
114 void getEBFluxes (const Vector<MF*>& a_eb_flux, const Vector<MF*> & a_sol);
115#endif
116
122 void apply (const Vector<MF*>& out, const Vector<MF*>& in);
123
125 void applyPrecond (const Vector<MF*>& out, const Vector<MF*>& in);
126
127 [[nodiscard]] int getVerbose () const { return verbose; }
128 [[nodiscard]] int getBottomVerbose () const { return bottom_verbose; }
129
130 void incPrintIdentation ();
131 void decPrintIdentation ();
132
133 void setThrowException (bool t) noexcept { throw_exception = t; }
134 void setVerbose (int v) noexcept { verbose = v; }
135 void setMaxIter (int n) noexcept { max_iters = n; }
136 void setMaxFmgIter (int n) noexcept { max_fmg_iters = n; }
137 void setFixedIter (int nit) noexcept { do_fixed_number_of_iters = nit; }
138 void setPrecondIter (int nit) noexcept { max_precond_iters = nit; }
139
140 void setPreSmooth (int n) noexcept { nu1 = n; }
141 void setPostSmooth (int n) noexcept { nu2 = n; }
142 void setFinalSmooth (int n) noexcept { nuf = n; }
143 void setBottomSmooth (int n) noexcept { nub = n; }
144
145 void setBottomSolver (BottomSolver s) noexcept { bottom_solver = s; }
146 [[nodiscard]] BottomSolver getBottomSolver () const noexcept { return bottom_solver; }
147 void setCFStrategy (CFStrategy a_cf_strategy) noexcept {cf_strategy = a_cf_strategy;}
148 void setBottomVerbose (int v) noexcept { bottom_verbose = v; }
149 void setBottomMaxIter (int n) noexcept { bottom_maxiter = n; }
150 void setBottomTolerance (RT t) noexcept { bottom_reltol = t; }
151 void setBottomToleranceAbs (RT t) noexcept { bottom_abstol = t;}
152 [[nodiscard]] RT getBottomToleranceAbs () const noexcept{ return bottom_abstol; }
153
154 [[deprecated("Use MLMG::setConvergenceNormType() instead.")]]
155 void setAlwaysUseBNorm (int flag) noexcept;
156
157 void setConvergenceNormType (MLMGNormType norm) noexcept { norm_type = norm; }
158
159 void setFinalFillBC (int flag) noexcept { final_fill_bc = flag; }
160
161 [[nodiscard]] int numAMRLevels () const noexcept { return namrlevs; }
162
163 void setNSolve (int flag) noexcept { do_nsolve = flag; }
164 void setNSolveGridSize (int s) noexcept { nsolve_grid_size = s; }
165
166 void setNoGpuSync (bool do_not_sync) noexcept { do_no_sync_gpu = do_not_sync; }
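// For example, a caller that manages its own GPU synchronization can ask
// the solver to run the whole solve in a single-stream, no-sync region
// (hypothetical object `mlmg`):
//
//     mlmg.setNoGpuSync(true);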
167
168#if defined(AMREX_USE_HYPRE) && (AMREX_SPACEDIM > 1)
169 void setHypreInterface (Hypre::Interface f) noexcept {
170 // must use ij interface for EB
171#ifndef AMREX_USE_EB
172 hypre_interface = f;
173#else
174 amrex::ignore_unused(f);
175#endif
176 }
177
179 void setHypreOptionsNamespace(const std::string& prefix) noexcept
180 {
181 hypre_options_namespace = prefix;
182 }
183
184 void setHypreOldDefault (bool l) noexcept {hypre_old_default = l;}
185 void setHypreRelaxType (int n) noexcept {hypre_relax_type = n;}
186 void setHypreRelaxOrder (int n) noexcept {hypre_relax_order = n;}
187 void setHypreNumSweeps (int n) noexcept {hypre_num_sweeps = n;}
188 void setHypreStrongThreshold (Real t) noexcept {hypre_strong_threshold = t;}
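// A configuration sketch (hypothetical object `mlmg`): use hypre at the
// bottom of the V-cycle through its IJ interface, reading its options from
// the "hypre" options namespace:
//
//     mlmg.setBottomSolver(BottomSolver::hypre);
//     mlmg.setHypreInterface(Hypre::Interface::ij);
//     mlmg.setHypreOptionsNamespace("hypre");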
189#endif
190
191 void prepareForFluxes (Vector<MF const*> const& a_sol);
192
193 template <typename AMF>
194 void prepareForSolve (Vector<AMF*> const& a_sol, Vector<AMF const*> const& a_rhs);
195
196 void prepareForNSolve ();
197
198 void prepareLinOp ();
199
200 void preparePrecond ();
201
202 void oneIter (int iter);
203
204 void miniCycle (int amrlev);
205
206 void mgVcycle (int amrlev, int mglev);
207 void mgFcycle ();
208
209 void bottomSolve ();
210 void NSolve (MLMGT<MF>& a_solver, MF& a_sol, MF& a_rhs);
211 void actualBottomSolve ();
212
213 void computeMLResidual (int amrlevmax);
214 void computeResidual (int alev);
215 void computeResWithCrseSolFineCor (int calev, int falev);
216 void computeResWithCrseCorFineCor (int falev);
217 void interpCorrection (int alev);
218 void interpCorrection (int alev, int mglev);
219 void addInterpCorrection (int alev, int mglev);
220
221 void computeResOfCorrection (int amrlev, int mglev);
222
223 RT ResNormInf (int alev, bool local = false);
224 RT MLResNormInf (int alevmax, bool local = false);
225 RT MLRhsNormInf (bool local = false);
226
227 void makeSolvable ();
228 void makeSolvable (int amrlev, int mglev, MF& mf);
229
230#if defined(AMREX_USE_HYPRE) && (AMREX_SPACEDIM > 1)
231 template <class TMF=MF,std::enable_if_t<std::is_same_v<TMF,MultiFab>,int> = 0>
232 void bottomSolveWithHypre (MF& x, const MF& b);
233#endif
234
235#if defined(AMREX_USE_PETSC) && (AMREX_SPACEDIM > 1)
236 template <class TMF=MF,std::enable_if_t<std::is_same_v<TMF,MultiFab>,int> = 0>
237 void bottomSolveWithPETSc (MF& x, const MF& b);
238#endif
239
240 int bottomSolveWithCG (MF& x, const MF& b, typename MLCGSolverT<MF>::Type type);
241
242 [[nodiscard]] RT getInitRHS () const noexcept { return m_rhsnorm0; }
243 // Initial composite residual
244 [[nodiscard]] RT getInitResidual () const noexcept { return m_init_resnorm0; }
245 // Final composite residual
246 [[nodiscard]] RT getFinalResidual () const noexcept { return m_final_resnorm0; }
247 // Residuals on the *finest* AMR level after each iteration
248 [[nodiscard]] Vector<RT> const& getResidualHistory () const noexcept { return m_iter_fine_resnorm0; }
249 [[nodiscard]] int getNumIters () const noexcept { return m_iter_fine_resnorm0.size(); }
250 [[nodiscard]] Vector<int> const& getNumCGIters () const noexcept { return m_niters_cg; }
251
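// For example (hypothetical object `mlmg`), after a solve the convergence
// history can be inspected:
//
//     int niters = mlmg.getNumIters();
//     Real r0 = mlmg.getInitResidual();
//     Real rn = mlmg.getFinalResidual();
//     amrex::Print() << "residual reduced by " << rn/r0
//                    << " in " << niters << " iterations\n";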
252 MLLinOpT<MF>& getLinOp () { return linop; }
253
254private:
255
256 bool precond_mode = false;
257 bool throw_exception = false;
258 int verbose = 1;
259
260 int max_iters = 200;
261 int do_fixed_number_of_iters = 0;
262 int max_precond_iters = 1;
263
264 int nu1 = 2;
265 int nu2 = 2;
266 int nuf = 8;
267 int nub = 0;
268
269 int max_fmg_iters = 0;
270
271 BottomSolver bottom_solver = BottomSolver::Default;
272 CFStrategy cf_strategy = CFStrategy::none;
273 int bottom_verbose = 0;
274 int bottom_maxiter = 200;
275 RT bottom_reltol = std::is_same<RT,double>() ? RT(1.e-4) : RT(1.e-3);
276 RT bottom_abstol = RT(-1.0);
277
278 MLMGNormType norm_type = MLMGNormType::greater;
279
280 int final_fill_bc = 0;
281
282 MLLinOpT<MF>& linop;
283 int ncomp;
284 int namrlevs;
285 int finest_amr_lev;
286
287 bool linop_prepared = false;
288 Long solve_called = 0;
289
291 int do_nsolve = false;
292 int nsolve_grid_size = 16;
293 std::unique_ptr<MLLinOpT<MF>> ns_linop;
294 std::unique_ptr<MLMGT<MF>> ns_mlmg;
295 std::unique_ptr<MF> ns_sol;
296 std::unique_ptr<MF> ns_rhs;
297
298 std::string print_ident;
299
300 bool do_no_sync_gpu = false;
301
303#if defined(AMREX_USE_HYPRE) && (AMREX_SPACEDIM > 1)
304 // Hypre::Interface hypre_interface = Hypre::Interface::structed;
305 // Hypre::Interface hypre_interface = Hypre::Interface::semi_structed;
306 Hypre::Interface hypre_interface = Hypre::Interface::ij;
307
308 std::unique_ptr<Hypre> hypre_solver;
309 std::unique_ptr<MLMGBndryT<MF>> hypre_bndry;
310 std::unique_ptr<HypreNodeLap> hypre_node_solver;
311
312 std::string hypre_options_namespace = "hypre";
313 bool hypre_old_default = true; // Falgout coarsening with modified classical interpolation
314 int hypre_relax_type = 6; // G-S/Jacobi hybrid relaxation
315 int hypre_relax_order = 1; // uses C/F relaxation
316 int hypre_num_sweeps = 2; // Sweeps on each level
317 Real hypre_strong_threshold = 0.25; // Hypre default is 0.25
318#endif
319
321#if defined(AMREX_USE_PETSC) && (AMREX_SPACEDIM > 1)
322 std::unique_ptr<PETScABecLap> petsc_solver;
323 std::unique_ptr<MLMGBndryT<MF>> petsc_bndry;
324#endif
325
330 Vector<MF> sol;
331 Vector<MF> rhs;
333
334 Vector<int> sol_is_alias;
335
340 Vector<Vector<MF> > res;
341 Vector<Vector<MF> > cor;
342 Vector<Vector<MF> > cor_hold;
343 Vector<Vector<MF> > rescor;
345
346 enum timer_types { solve_time=0, iter_time, bottom_time, ntimers };
347 Vector<double> timer;
348
349 RT m_rhsnorm0 = RT(-1.0);
350 RT m_init_resnorm0 = RT(-1.0);
351 RT m_final_resnorm0 = RT(-1.0);
352 Vector<int> m_niters_cg;
353 Vector<RT> m_iter_fine_resnorm0; // Residual for each iteration at the finest level
354
355 void checkPoint (const Vector<MultiFab*>& a_sol,
356 const Vector<MultiFab const*>& a_rhs,
357 RT a_tol_rel, RT a_tol_abs, const char* a_file_name) const;
358
359};
360
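// A minimal usage sketch, assuming an already set-up linear operator
// `mlabec` (e.g. an MLABecLaplacian) and MultiFabs `solution` and `rhs`
// (all hypothetical names):
//
//     MLMGT<MultiFab> mlmg(mlabec);
//     mlmg.setVerbose(1);
//     mlmg.setMaxIter(100);
//     mlmg.setBottomSolver(BottomSolver::bicgstab);
//     Real final_resid = mlmg.solve({&solution}, {&rhs},
//                                   Real(1.e-11), Real(0.0));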
361template <typename MF>
362MLMGT<MF>::MLMGT (MLLinOpT<MF>& a_lp)
363 : linop(a_lp), ncomp(a_lp.getNComp()), namrlevs(a_lp.NAMRLevels()),
364 finest_amr_lev(a_lp.NAMRLevels()-1)
365{}
366
367template <typename MF> MLMGT<MF>::~MLMGT () = default;
368
369template <typename MF>
370void
371MLMGT<MF>::setAlwaysUseBNorm (int flag) noexcept
372{
373 if (flag) {
374 norm_type = MLMGNormType::bnorm;
375 } else {
376 norm_type = MLMGNormType::greater;
377 }
378}
379
380template <typename MF>
381template <typename AMF>
382auto
383MLMGT<MF>::solve (std::initializer_list<AMF*> a_sol,
384 std::initializer_list<AMF const*> a_rhs,
385 RT a_tol_rel, RT a_tol_abs, const char* checkpoint_file) -> RT
386{
387 return solve(Vector<AMF*>(std::move(a_sol)),
388 Vector<AMF const*>(std::move(a_rhs)),
389 a_tol_rel, a_tol_abs, checkpoint_file);
390}
391
392template <typename MF>
393template <typename AMF>
394auto
395MLMGT<MF>::solve (const Vector<AMF*>& a_sol, const Vector<AMF const*>& a_rhs,
396 RT a_tol_rel, RT a_tol_abs, const char* checkpoint_file) -> RT
397{
398 BL_PROFILE("MLMG::solve()");
399
400 bool prev_in_single_stream_region = false;
401 bool prev_in_nosync_region = false;
402
403 if (do_no_sync_gpu) {
404 prev_in_single_stream_region = Gpu::setSingleStreamRegion(true);
405 prev_in_nosync_region = Gpu::setNoSyncRegion(true);
406 }
407
408 if constexpr (std::is_same<AMF,MultiFab>()) {
409 if (checkpoint_file != nullptr) {
410 checkPoint(a_sol, a_rhs, a_tol_rel, a_tol_abs, checkpoint_file);
411 }
412 }
413
414 if (bottom_solver == BottomSolver::Default) {
415 bottom_solver = linop.getDefaultBottomSolver();
416 }
417
418#if (defined(AMREX_USE_HYPRE) || defined(AMREX_USE_PETSC)) && (AMREX_SPACEDIM > 1)
419 if constexpr (IsFabArray_v<AMF>) {
420 if (bottom_solver == BottomSolver::hypre || bottom_solver == BottomSolver::petsc) {
421 int mo = linop.getMaxOrder();
422 if (a_sol[0]->hasEBFabFactory()) {
423 linop.setMaxOrder(2);
424 } else {
425 linop.setMaxOrder(std::min(3,mo)); // maxorder = 4 not supported
426 }
427 }
428 }
429#endif
430
431 bool is_nsolve = linop.m_parent;
432
433 auto solve_start_time = amrex::second();
434
435 RT& composite_norminf = m_final_resnorm0;
436
437 m_niters_cg.clear();
438 m_iter_fine_resnorm0.clear();
439
440 prepareForSolve(a_sol, a_rhs);
441
442 computeMLResidual(finest_amr_lev);
443
444 bool local = true;
445 RT resnorm0 = MLResNormInf(finest_amr_lev, local);
446 RT rhsnorm0 = MLRhsNormInf(local);
447 if (!is_nsolve) {
448 ParallelAllReduce::Max<RT>({resnorm0, rhsnorm0}, ParallelContext::CommunicatorSub());
449
450 if (verbose >= 1)
451 {
452 amrex::Print() << print_ident << "MLMG: Initial rhs = " << rhsnorm0 << "\n"
453 << print_ident << "MLMG: Initial residual (resid0) = " << resnorm0 << "\n";
454 }
455 }
456
457 m_init_resnorm0 = resnorm0;
458 m_rhsnorm0 = rhsnorm0;
459
460 RT max_norm = resnorm0;
461 std::string norm_name = "resid0";
462 switch (norm_type) {
463 case MLMGNormType::greater:
464 if (rhsnorm0 >= resnorm0) {
465 norm_name = "bnorm";
466 max_norm = rhsnorm0;
467 } else {
468 norm_name = "resid0";
469 max_norm = resnorm0;
470 }
471 break;
472 case MLMGNormType::bnorm:
473 norm_name = "bnorm";
474 max_norm = rhsnorm0;
475 break;
476 case MLMGNormType::resnorm0:
477 norm_name = "resid0";
478 max_norm = resnorm0;
479 break;
480 }
481
482 const RT res_target = std::max(a_tol_abs, std::max(a_tol_rel,RT(1.e-16))*max_norm);
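// Illustrative numbers: with a_tol_rel = 1.e-10, a_tol_abs = 0.0 and
// max_norm = 1.e3, this gives res_target = max(0.0, 1.e-10 * 1.e3) = 1.e-7,
// so the iteration below stops once the residual norm falls to 1.e-7 or less.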
483
484 if (!is_nsolve && resnorm0 <= res_target) {
485 composite_norminf = resnorm0;
486 if (verbose >= 1) {
487 amrex::Print() << print_ident << "MLMG: No iterations needed\n";
488 }
489 } else {
490 auto iter_start_time = amrex::second();
491 bool converged = false;
492
493 const int niters = do_fixed_number_of_iters ? do_fixed_number_of_iters : max_iters;
494 for (int iter = 0; iter < niters; ++iter)
495 {
496 oneIter(iter);
497
498 converged = false;
499
500 // Test convergence on the fine amr level
501 computeResidual(finest_amr_lev);
502
503 if (is_nsolve) { continue; }
504
505 RT fine_norminf = ResNormInf(finest_amr_lev);
506 m_iter_fine_resnorm0.push_back(fine_norminf);
507 composite_norminf = fine_norminf;
508 if (verbose >= 2) {
509 amrex::Print() << print_ident << "MLMG: Iteration " << std::setw(3) << iter+1 << " Fine resid/"
510 << norm_name << " = " << fine_norminf/max_norm << "\n";
511 }
512 bool fine_converged = (fine_norminf <= res_target);
513
514 if (namrlevs == 1 && fine_converged) {
515 converged = true;
516 } else if (fine_converged) {
517 // finest level is converged, but we still need to test the coarse levels
518 computeMLResidual(finest_amr_lev-1);
519 RT crse_norminf = MLResNormInf(finest_amr_lev-1);
520 if (verbose >= 2) {
521 amrex::Print() << print_ident << "MLMG: Iteration " << std::setw(3) << iter+1
522 << " Crse resid/" << norm_name << " = "
523 << crse_norminf/max_norm << "\n";
524 }
525 converged = (crse_norminf <= res_target);
526 composite_norminf = std::max(fine_norminf, crse_norminf);
527 } else {
528 converged = false;
529 }
530
531 if (converged) {
532 if (verbose >= 1) {
533 amrex::Print() << print_ident << "MLMG: Final Iter. " << iter+1
534 << " resid, resid/" << norm_name << " = "
535 << composite_norminf << ", "
536 << composite_norminf/max_norm << "\n";
537 }
538 break;
539 } else {
540 if (composite_norminf > RT(1.e20)*max_norm)
541 {
542 if (verbose > 0) {
543 amrex::Print() << print_ident << "MLMG: Failing to converge after " << iter+1 << " iterations."
544 << " resid, resid/" << norm_name << " = "
545 << composite_norminf << ", "
546 << composite_norminf/max_norm << "\n";
547 }
548
549 if ( throw_exception ) {
550 throw error("MLMG blew up.");
551 } else {
552 amrex::Abort("MLMG failing so lets stop here");
553 }
554 }
555 }
556 }
557
558 if (!converged && do_fixed_number_of_iters == 0) {
559 if (verbose > 0) {
560 amrex::Print() << print_ident << "MLMG: Failed to converge after " << max_iters << " iterations."
561 << " resid, resid/" << norm_name << " = "
562 << composite_norminf << ", "
563 << composite_norminf/max_norm << "\n";
564 }
565
566 if ( throw_exception ) {
567 throw error("MLMG failed to converge.");
568 } else {
569 amrex::Abort("MLMG failed.");
570 }
571 }
572 timer[iter_time] = amrex::second() - iter_start_time;
573 }
574
575 linop.postSolve(GetVecOfPtrs(sol));
576
577 IntVect ng_back = final_fill_bc ? IntVect(1) : IntVect(0);
578 if (linop.hasHiddenDimension()) {
579 ng_back[linop.hiddenDirection()] = 0;
580 }
581 for (int alev = 0; alev < namrlevs; ++alev)
582 {
583 if (!sol_is_alias[alev]) {
584 LocalCopy(*a_sol[alev], sol[alev], 0, 0, ncomp, ng_back);
585 }
586 }
587
588 timer[solve_time] = amrex::second() - solve_start_time;
589 if (verbose >= 1) {
590 ParallelReduce::Max<double>(timer.data(), timer.size(), 0,
591 ParallelContext::CommunicatorSub());
592 if (ParallelContext::MyProcSub() == 0)
593 {
594 amrex::AllPrint() << print_ident << "MLMG: Timers: Solve = " << timer[solve_time]
595 << " Iter = " << timer[iter_time]
596 << " Bottom = " << timer[bottom_time] << "\n";
597 }
598 }
599
600 ++solve_called;
601
602 if (do_no_sync_gpu) {
603 (void)Gpu::setSingleStreamRegion(prev_in_single_stream_region);
604 (void)Gpu::setNoSyncRegion(prev_in_nosync_region);
605 }
606
607 return composite_norminf;
608}
609
610template <typename MF>
611auto
612MLMGT<MF>::precond (Vector<MF*> const& a_sol, Vector<MF const*> const& a_rhs,
613 RT a_tol_rel, RT a_tol_abs) -> RT
614{
615 precond_mode = true;
616 std::swap(max_precond_iters, do_fixed_number_of_iters);
617 linop.beginPrecondBC();
618
619 auto r = solve(a_sol, a_rhs, a_tol_rel, a_tol_abs);
620
621 linop.endPrecondBC();
622 std::swap(max_precond_iters, do_fixed_number_of_iters);
623 precond_mode = false;
624
625 return r;
626}
627
628template <typename MF>
629void
630MLMGT<MF>::prepareForFluxes (Vector<MF const*> const& a_sol)
631{
632 for (int alev = finest_amr_lev; alev >= 0; --alev) {
633 const MF* crse_bcdata = (alev > 0) ? a_sol[alev-1] : nullptr;
634 linop.prepareForFluxes(alev, crse_bcdata);
635 }
636}
637
638template <typename MF>
639template <typename AMF>
640void
641MLMGT<MF>::getGradSolution (const Vector<Array<AMF*,AMREX_SPACEDIM> >& a_grad_sol, Location a_loc)
642{
643 BL_PROFILE("MLMG::getGradSolution()");
644 for (int alev = 0; alev <= finest_amr_lev; ++alev) {
645 if constexpr (std::is_same<AMF,MF>()) {
646 linop.compGrad(alev, a_grad_sol[alev], sol[alev], a_loc);
647 } else {
648 Array<MF,AMREX_SPACEDIM> grad_sol;
649 for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
650 auto const& amf = *(a_grad_sol[alev][idim]);
651 grad_sol[idim].define(boxArray(amf), DistributionMap(amf), ncomp, 0);
652 }
653 linop.compGrad(alev, GetArrOfPtrs(grad_sol), sol[alev], a_loc);
654 for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
655 LocalCopy(*a_grad_sol[alev][idim], grad_sol[idim], 0, 0, ncomp, IntVect(0));
656 }
657 }
658 }
659}
660
661template <typename MF>
662template <typename AMF>
663void
664MLMGT<MF>::getGradSolution (std::initializer_list<Array<AMF*,AMREX_SPACEDIM>> a_grad_sol, Location a_loc)
665{
666 getGradSolution(Vector<Array<AMF*,AMREX_SPACEDIM>>(std::move(a_grad_sol)), a_loc);
667}
668
669template <typename MF>
670template <typename AMF>
671void
672MLMGT<MF>::getFluxes (const Vector<Array<AMF*,AMREX_SPACEDIM> >& a_flux,
673 Location a_loc)
674{
675 if (!linop.isCellCentered()) {
676 amrex::Abort("Calling wrong getFluxes for nodal solver");
677 }
678
679 AMREX_ASSERT(sol.size() == a_flux.size());
680
681 if constexpr (std::is_same<AMF,MF>()) {
682 getFluxes(a_flux, GetVecOfPtrs(sol), a_loc);
683 } else {
684 Vector<Array<MF,AMREX_SPACEDIM>> fluxes(namrlevs);
685 for (int ilev = 0; ilev < namrlevs; ++ilev) {
686 for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
687 auto const& amf = *(a_flux[ilev][idim]);
688 fluxes[ilev][idim].define(boxArray(amf), DistributionMap(amf), ncomp, 0);
689 }
690 }
691 getFluxes(GetVecOfArrOfPtrs(fluxes), GetVecOfPtrs(sol), a_loc);
692 for (int ilev = 0; ilev < namrlevs; ++ilev) {
693 for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
694 LocalCopy(*a_flux[ilev][idim], fluxes[ilev][idim], 0, 0, ncomp, IntVect(0));
695 }
696 }
697 }
698}
699
700template <typename MF>
701template <typename AMF>
702void
703MLMGT<MF>::getFluxes (std::initializer_list<Array<AMF*,AMREX_SPACEDIM>> a_flux,
704 Location a_loc)
705{
706 getFluxes(Vector<Array<AMF*,AMREX_SPACEDIM>>(std::move(a_flux)), a_loc);
707}
708
709template <typename MF>
710template <typename AMF>
711void
712MLMGT<MF>::getFluxes (const Vector<Array<AMF*,AMREX_SPACEDIM> >& a_flux,
713 const Vector<AMF*>& a_sol, Location a_loc)
714{
715 BL_PROFILE("MLMG::getFluxes()");
716
717 if (!linop.isCellCentered()) {
718 amrex::Abort("Calling wrong getFluxes for nodal solver");
719 }
720
721 if constexpr (std::is_same<AMF,MF>()) {
722 linop.getFluxes(a_flux, a_sol, a_loc);
723 } else {
724 Vector<Array<MF,AMREX_SPACEDIM>> fluxes(namrlevs);
725 for (int ilev = 0; ilev < namrlevs; ++ilev) {
726 for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
727 auto const& amf = *(a_flux[ilev][idim]);
728 fluxes[ilev][idim].define(boxArray(amf), DistributionMap(amf), ncomp, 0);
729 }
730 LocalCopy(sol[ilev], *a_sol[ilev], 0, 0, ncomp, nGrowVect(sol[ilev]));
731 }
732 linop.getFluxes(GetVecOfArrOfPtrs(fluxes), GetVecOfPtrs(sol), a_loc);
733 for (int ilev = 0; ilev < namrlevs; ++ilev) {
734 for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
735 LocalCopy(*a_flux[ilev][idim], fluxes[ilev][idim], 0, 0, ncomp, IntVect(0));
736 }
737 }
738 }
739}
740
741template <typename MF>
742template <typename AMF>
743void
744MLMGT<MF>::getFluxes (std::initializer_list<Array<AMF*,AMREX_SPACEDIM>> a_flux,
745 std::initializer_list<AMF*> a_sol, Location a_loc)
746{
747 getFluxes(Vector<Array<AMF*,AMREX_SPACEDIM>>(std::move(a_flux)),
748 Vector<AMF*>(std::move(a_sol)), a_loc);
749}
750
751template <typename MF>
752template <typename AMF>
753void
754MLMGT<MF>::getFluxes (const Vector<AMF*>& a_flux, Location a_loc)
755{
756 AMREX_ASSERT(sol.size() == a_flux.size());
757 if constexpr (std::is_same<AMF,MF>()) {
758 getFluxes(a_flux, GetVecOfPtrs(sol), a_loc);
759 } else {
760 Vector<MF> fluxes(namrlevs);
761 for (int ilev = 0; ilev < namrlevs; ++ilev) {
762 auto const& amf = *a_flux[ilev];
763 fluxes[ilev].define(boxArray(amf), DistributionMap(amf), ncomp, 0);
764 }
765 getFluxes(GetVecOfPtrs(fluxes), GetVecOfPtrs(sol), a_loc);
766 for (int ilev = 0; ilev < namrlevs; ++ilev) {
767 LocalCopy(*a_flux[ilev], fluxes[ilev], 0, 0, ncomp, IntVect(0));
768 }
769 }
770}
771
772template <typename MF>
773template <typename AMF>
774void
775MLMGT<MF>::getFluxes (std::initializer_list<AMF*> a_flux, Location a_loc)
776{
777 getFluxes(Vector<AMF*>(std::move(a_flux)), a_loc);
778}
779
780template <typename MF>
781template <typename AMF>
782void
783MLMGT<MF>::getFluxes (const Vector<AMF*>& a_flux,
784 const Vector<AMF*>& a_sol, Location /*a_loc*/)
785{
786 AMREX_ASSERT(nComp(*a_flux[0]) >= AMREX_SPACEDIM);
787
788 if constexpr (! std::is_same<AMF,MF>()) {
789 for (int alev = 0; alev < namrlevs; ++alev) {
790 LocalCopy(sol[alev], *a_sol[alev], 0, 0, ncomp, nGrowVect(sol[alev]));
791 }
792 }
793
794 if (linop.isCellCentered())
795 {
796 Vector<Array<MF,AMREX_SPACEDIM> > ffluxes(namrlevs);
797 for (int alev = 0; alev < namrlevs; ++alev) {
798 for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
799 const int mglev = 0;
800 int nghost = 0;
801 if (cf_strategy == CFStrategy::ghostnodes) { nghost = linop.getNGrow(alev); }
802 ffluxes[alev][idim].define(amrex::convert(linop.m_grids[alev][mglev],
803 IntVect::TheDimensionVector(idim)),
804 linop.m_dmap[alev][mglev], ncomp, nghost, MFInfo(),
805 *linop.m_factory[alev][mglev]);
806 }
807 }
808 if constexpr (std::is_same<AMF,MF>()) {
809 getFluxes(amrex::GetVecOfArrOfPtrs(ffluxes), a_sol, Location::FaceCenter);
810 } else {
811 getFluxes(amrex::GetVecOfArrOfPtrs(ffluxes), GetVecOfPtrs(sol), Location::FaceCenter);
812 }
813 for (int alev = 0; alev < namrlevs; ++alev) {
814#ifdef AMREX_USE_EB
815 EB_average_face_to_cellcenter(*a_flux[alev], 0, amrex::GetArrOfConstPtrs(ffluxes[alev]));
816#else
817 average_face_to_cellcenter(*a_flux[alev], 0, amrex::GetArrOfConstPtrs(ffluxes[alev]));
818#endif
819 }
820
821 } else {
822 if constexpr (std::is_same<AMF,MF>()) {
823 linop.getFluxes(a_flux, a_sol);
824 } else {
825 Vector<MF> fluxes(namrlevs);
826 for (int ilev = 0; ilev < namrlevs; ++ilev) {
827 auto const& amf = *a_flux[ilev];
828 fluxes[ilev].define(boxArray(amf), DistributionMap(amf), ncomp, 0);
829 }
830 linop.getFluxes(GetVecOfPtrs(fluxes), GetVecOfPtrs(sol));
831 for (int ilev = 0; ilev < namrlevs; ++ilev) {
832 LocalCopy(*a_flux[ilev], fluxes[ilev], 0, 0, ncomp, IntVect(0));
833 }
834 }
835 }
836}
837
838template <typename MF>
839template <typename AMF>
840void
841MLMGT<MF>::getFluxes (std::initializer_list<AMF*> a_flux,
842 std::initializer_list<AMF*> a_sol, Location a_loc)
843{
844 getFluxes(Vector<AMF*>(std::move(a_flux)),
845 Vector<AMF*>(std::move(a_sol)), a_loc);
846}
847
848#ifdef AMREX_USE_EB
849template <typename MF>
850void
851MLMGT<MF>::getEBFluxes (const Vector<MF*>& a_eb_flux)
852{
853 if (!linop.isCellCentered()) {
854 amrex::Abort("getEBFluxes is for cell-centered only");
855 }
856
857 AMREX_ASSERT(sol.size() == a_eb_flux.size());
858 getEBFluxes(a_eb_flux, GetVecOfPtrs(sol));
859}
860
861template <typename MF>
862void
863MLMGT<MF>::getEBFluxes (const Vector<MF*>& a_eb_flux, const Vector<MF*>& a_sol)
864{
865 BL_PROFILE("MLMG::getEBFluxes()");
866
867 if (!linop.isCellCentered()) {
868 amrex::Abort("getEBFluxes is for cell-centered only");
869 }
870
871 linop.getEBFluxes(a_eb_flux, a_sol);
872}
873#endif
874
875template <typename MF>
876void
877MLMGT<MF>::compResidual (const Vector<MF*>& a_res, const Vector<MF*>& a_sol,
878 const Vector<MF const*>& a_rhs)
879{
880 BL_PROFILE("MLMG::compResidual()");
881
882 IntVect ng_sol(1);
883 if (linop.hasHiddenDimension()) { ng_sol[linop.hiddenDirection()] = 0; }
884
885 sol.resize(namrlevs);
886 sol_is_alias.resize(namrlevs,true);
887 for (int alev = 0; alev < namrlevs; ++alev)
888 {
889 if (cf_strategy == CFStrategy::ghostnodes || nGrowVect(*a_sol[alev]) == ng_sol)
890 {
891 sol[alev] = linop.makeAlias(*a_sol[alev]);
892 sol_is_alias[alev] = true;
893 }
894 else
895 {
896 if (sol_is_alias[alev])
897 {
898 sol[alev] = linop.make(alev, 0, ng_sol);
899 }
900 LocalCopy(sol[alev], *a_sol[alev], 0, 0, ncomp, IntVect(0));
901 }
902 }
903
904 prepareLinOp();
905
906 const auto& amrrr = linop.AMRRefRatio();
907
908 for (int alev = finest_amr_lev; alev >= 0; --alev) {
909 const MF* crse_bcdata = (alev > 0) ? &(sol[alev-1]) : nullptr;
910 const MF* prhs = a_rhs[alev];
911#if (AMREX_SPACEDIM != 3)
912 int nghost = (cf_strategy == CFStrategy::ghostnodes) ? linop.getNGrow(alev) : 0;
913 MF rhstmp(boxArray(*prhs), DistributionMap(*prhs), ncomp, nghost,
914 MFInfo(), *linop.Factory(alev));
915 LocalCopy(rhstmp, *prhs, 0, 0, ncomp, IntVect(nghost));
916 linop.applyMetricTerm(alev, 0, rhstmp);
917 linop.unimposeNeumannBC(alev, rhstmp);
918 linop.applyInhomogNeumannTerm(alev, rhstmp);
919 prhs = &rhstmp;
920#endif
921 linop.solutionResidual(alev, *a_res[alev], sol[alev], *prhs, crse_bcdata);
922 if (alev < finest_amr_lev) {
923 linop.reflux(alev, *a_res[alev], sol[alev], *prhs,
924 *a_res[alev+1], sol[alev+1], *a_rhs[alev+1]);
925 if (linop.isCellCentered()) {
926#ifdef AMREX_USE_EB
927 EB_average_down(*a_res[alev+1], *a_res[alev], 0, ncomp, amrrr[alev]);
928#else
929 average_down(*a_res[alev+1], *a_res[alev], 0, ncomp, amrrr[alev]);
930#endif
931 }
932 }
933 }
934
935
936#if (AMREX_SPACEDIM != 3)
937 for (int alev = 0; alev <= finest_amr_lev; ++alev) {
938 linop.unapplyMetricTerm(alev, 0, *a_res[alev]);
939 }
940#endif
941}
942
943template <typename MF>
944void
945MLMGT<MF>::apply (const Vector<MF*>& out, const Vector<MF*>& a_in)
946{
947 BL_PROFILE("MLMG::apply()");
948
949 Vector<MF*> in(namrlevs);
950 Vector<MF> in_raii(namrlevs);
951 Vector<MF> rh(namrlevs);
952 int nghost = 0;
953 IntVect ng_sol(1);
954 if (linop.hasHiddenDimension()) { ng_sol[linop.hiddenDirection()] = 0; }
955
956 for (int alev = 0; alev < namrlevs; ++alev)
957 {
958 if (cf_strategy == CFStrategy::ghostnodes)
959 {
960 nghost = linop.getNGrow(alev);
961 in[alev] = a_in[alev];
962 }
963 else if (nGrowVect(*a_in[alev]) == ng_sol)
964 {
965 in[alev] = a_in[alev];
966 }
967 else
968 {
969 IntVect ng = ng_sol;
970 if (cf_strategy == CFStrategy::ghostnodes) { ng = IntVect(nghost); }
971 in_raii[alev] = linop.make(alev, 0, ng);
972 LocalCopy(in_raii[alev], *a_in[alev], 0, 0, ncomp, IntVect(nghost));
973 in[alev] = &(in_raii[alev]);
974 }
975 rh[alev] = linop.make(alev, 0, IntVect(nghost));
976 setVal(rh[alev], RT(0.0));
977 }
978
979 prepareLinOp();
980
981 for (int alev = 0; alev < namrlevs; ++alev) {
982 linop.applyInhomogNeumannTerm(alev, rh[alev]);
983 }
984
985 const auto& amrrr = linop.AMRRefRatio();
986
987 for (int alev = finest_amr_lev; alev >= 0; --alev) {
988 const MF* crse_bcdata = (alev > 0) ? in[alev-1] : nullptr;
989 linop.solutionResidual(alev, *out[alev], *in[alev], rh[alev], crse_bcdata);
990 if (alev < finest_amr_lev) {
991 linop.reflux(alev, *out[alev], *in[alev], rh[alev],
992 *out[alev+1], *in[alev+1], rh[alev+1]);
993 if (linop.isCellCentered()) {
994 if constexpr (IsMultiFabLike_v<MF>) {
995#ifdef AMREX_USE_EB
996 EB_average_down(*out[alev+1], *out[alev], 0, nComp(*out[alev]), amrrr[alev]);
997#else
998 average_down(*out[alev+1], *out[alev], 0, nComp(*out[alev]), amrrr[alev]);
999#endif
1000 } else {
1001 amrex::Abort("MLMG: TODO average_down for non-MultiFab");
1002 }
1003 }
1004 }
1005 }
1006
1007#if (AMREX_SPACEDIM != 3)
1008 for (int alev = 0; alev <= finest_amr_lev; ++alev) {
1009 linop.unapplyMetricTerm(alev, 0, *out[alev]);
1010 }
1011#endif
1012
1013 for (int alev = 0; alev <= finest_amr_lev; ++alev) {
1014 if (cf_strategy == CFStrategy::ghostnodes) { nghost = linop.getNGrow(alev); }
1015 Scale(*out[alev], RT(-1), 0, nComp(*out[alev]), nghost);
1016 }
1017}
1018
1019template <typename MF>
1020void
1021MLMGT<MF>::applyPrecond (const Vector<MF*>& out, const Vector<MF*>& in)
1022{
1023 precond_mode = true;
1024 linop.beginPrecondBC();
1025 apply(out, in);
1026 linop.endPrecondBC();
1027 precond_mode = false;
1028}
1029
1030template <typename MF>
1031template <typename AMF>
1032void
1033MLMGT<MF>::prepareForSolve (Vector<AMF*> const& a_sol, Vector<AMF const*> const& a_rhs)
1034{
1035 BL_PROFILE("MLMG::prepareForSolve()");
1036
1037 AMREX_ASSERT(namrlevs <= a_sol.size());
1038 AMREX_ASSERT(namrlevs <= a_rhs.size());
1039
1040 timer.assign(ntimers, 0.0);
1041
1042 IntVect ng_rhs(0);
1043 IntVect ng_sol(1);
1044 if (linop.hasHiddenDimension()) { ng_sol[linop.hiddenDirection()] = 0; }
1045
1046 if (!linop_prepared) {
1047 linop.prepareForSolve();
1048 linop_prepared = true;
1049 } else if (linop.needsUpdate()) {
1050 linop.update();
1051
1052#if defined(AMREX_USE_HYPRE) && (AMREX_SPACEDIM > 1)
1053 hypre_solver.reset();
1054 hypre_bndry.reset();
1055 hypre_node_solver.reset();
1056#endif
1057
1058#if defined(AMREX_USE_PETSC) && (AMREX_SPACEDIM > 1)
1059 petsc_solver.reset();
1060 petsc_bndry.reset();
1061#endif
1062 }
1063
1064 sol.resize(namrlevs);
1065 sol_is_alias.resize(namrlevs,false);
1066 for (int alev = 0; alev < namrlevs; ++alev)
1067 {
1068 if (cf_strategy == CFStrategy::ghostnodes)
1069 {
1070 if constexpr (std::is_same<AMF,MF>()) {
1071 sol[alev] = linop.makeAlias(*a_sol[alev]);
1072 sol_is_alias[alev] = true;
1073 } else {
1074 amrex::Abort("Type conversion not supported for CFStrategy::ghostnodes");
1075 }
1076 }
1077 else
1078 {
1079 if (nGrowVect(*a_sol[alev]) == ng_sol) {
1080 if constexpr (std::is_same<AMF,MF>()) {
1081 sol[alev] = linop.makeAlias(*a_sol[alev]);
1082 sol_is_alias[alev] = true;
1083 }
1084 }
1085 if (!sol_is_alias[alev]) {
1086 if (!solve_called) {
1087 sol[alev] = linop.make(alev, 0, ng_sol);
1088 }
1089 LocalCopy(sol[alev], *a_sol[alev], 0, 0, ncomp, IntVect(0));
1090 setBndry(sol[alev], RT(0.0), 0, ncomp);
1091 }
1092 }
1093 }
1094
1095 rhs.resize(namrlevs);
1096 for (int alev = 0; alev < namrlevs; ++alev)
1097 {
1098 if (cf_strategy == CFStrategy::ghostnodes) { ng_rhs = IntVect(linop.getNGrow(alev)); }
1099 if (!solve_called) {
1100 rhs[alev] = linop.make(alev, 0, ng_rhs);
1101 }
1102 LocalCopy(rhs[alev], *a_rhs[alev], 0, 0, ncomp, ng_rhs);
1103 linop.applyMetricTerm(alev, 0, rhs[alev]);
1104 linop.unimposeNeumannBC(alev, rhs[alev]);
1105 linop.applyInhomogNeumannTerm(alev, rhs[alev]);
1106 linop.applyOverset(alev, rhs[alev]);
1107 if ( ! precond_mode) {
1108 bool r = linop.scaleRHS(alev, &(rhs[alev]));
1109 amrex::ignore_unused(r);
1110 }
1111
1112#ifdef AMREX_USE_EB
1113 const auto *factory = dynamic_cast<EBFArrayBoxFactory const*>(linop.Factory(alev));
1114 if (factory && !factory->isAllRegular()) {
1115 if constexpr (std::is_same<MF,MultiFab>()) {
1116 EB_set_covered(rhs[alev], 0, ncomp, 0, RT(0.0));
1117 EB_set_covered(sol[alev], 0, ncomp, 0, RT(0.0));
1118 } else {
1119 amrex::Abort("TODO: MLMG with EB only works with MultiFab");
1120 }
1121 }
1122#endif
1123 }
1124
1125 for (int falev = finest_amr_lev; falev > 0; --falev)
1126 {
1127 linop.averageDownSolutionRHS(falev-1, sol[falev-1], rhs[falev-1], sol[falev], rhs[falev]);
1128 }
1129
1130 // enforce solvability if appropriate
1131 if (linop.isSingular(0) && linop.getEnforceSingularSolvable())
1132 {
1133 makeSolvable();
1134 }
1135
1136 IntVect ng = linop.getNGrowVectRestriction();
1137 if (cf_strategy == CFStrategy::ghostnodes) { ng = ng_rhs; }
1138 if (!solve_called) {
1139 linop.make(res, ng);
1140 linop.make(rescor, ng);
1141 }
1142 for (int alev = 0; alev <= finest_amr_lev; ++alev)
1143 {
1144 const int nmglevs = linop.NMGLevels(alev);
1145 for (int mglev = 0; mglev < nmglevs; ++mglev)
1146 {
1147 setVal(res [alev][mglev], RT(0.0));
1148 setVal(rescor[alev][mglev], RT(0.0));
1149 }
1150 }
1151
1152 if (cf_strategy != CFStrategy::ghostnodes) { ng = ng_sol; }
1153 cor.resize(namrlevs);
1154 for (int alev = 0; alev <= finest_amr_lev; ++alev)
1155 {
1156 const int nmglevs = linop.NMGLevels(alev);
1157 cor[alev].resize(nmglevs);
1158 for (int mglev = 0; mglev < nmglevs; ++mglev)
1159 {
1160 if (!solve_called) {
1161 IntVect _ng = ng;
1162 if (cf_strategy == CFStrategy::ghostnodes) { _ng=IntVect(linop.getNGrow(alev,mglev)); }
1163 cor[alev][mglev] = linop.make(alev, mglev, _ng);
1164 }
1165 setVal(cor[alev][mglev], RT(0.0));
1166 }
1167 }
1168
1169 cor_hold.resize(std::max(namrlevs-1,1));
1170 {
1171 const int alev = 0;
1172 const int nmglevs = linop.NMGLevels(alev);
1173 cor_hold[alev].resize(nmglevs);
1174 for (int mglev = 0; mglev < nmglevs-1; ++mglev)
1175 {
1176 if (!solve_called) {
1177 IntVect _ng = ng;
1178 if (cf_strategy == CFStrategy::ghostnodes) { _ng=IntVect(linop.getNGrow(alev,mglev)); }
1179 cor_hold[alev][mglev] = linop.make(alev, mglev, _ng);
1180 }
1181 setVal(cor_hold[alev][mglev], RT(0.0));
1182 }
1183 }
1184 for (int alev = 1; alev < finest_amr_lev; ++alev)
1185 {
1186 cor_hold[alev].resize(1);
1187 if (!solve_called) {
1188 IntVect _ng = ng;
1189 if (cf_strategy == CFStrategy::ghostnodes) { _ng=IntVect(linop.getNGrow(alev)); }
1190 cor_hold[alev][0] = linop.make(alev, 0, _ng);
1191 }
1192 setVal(cor_hold[alev][0], RT(0.0));
1193 }
1194
1195 if (linop.m_parent // no embedded N-Solve
1196 || !linop.supportNSolve())
1197 {
1198 do_nsolve = false;
1199 }
1200
1201 if (do_nsolve && ns_linop == nullptr)
1202 {
1203 prepareForNSolve();
1204 }
1205
1206 if (verbose >= 2) {
1207 amrex::Print() << print_ident << "MLMG: # of AMR levels: " << namrlevs << "\n"
1208 << print_ident << " # of MG levels on the coarsest AMR level: " << linop.NMGLevels(0)
1209 << "\n";
1210 if (ns_linop) {
1211 amrex::Print() << print_ident << " # of MG levels in N-Solve: " << ns_linop->NMGLevels(0) << "\n"
1212 << print_ident << " # of grids in N-Solve: " << ns_linop->m_grids[0][0].size() << "\n";
1213 }
1214 }
1215}
1216
1217template <typename MF>
1218void
1219MLMGT<MF>::prepareLinOp ()
1220{
1221 if (!linop_prepared) {
1222 linop.prepareForSolve();
1223 linop_prepared = true;
1224 } else if (linop.needsUpdate()) {
1225 linop.update();
1226 }
1227}
1228
1229template <typename MF>
1230void
1231MLMGT<MF>::preparePrecond ()
1232{
1233 prepareLinOp();
1234 linop.preparePrecond();
1235}
1236
1237template <typename MF>
1238void
1239MLMGT<MF>::prepareForNSolve ()
1240{
1241 if constexpr (IsMultiFabLike_v<MF>) {
1242 ns_linop = linop.makeNLinOp(nsolve_grid_size);
1243
1244 int nghost = 0;
1245 if (cf_strategy == CFStrategy::ghostnodes) { nghost = linop.getNGrow(); }
1246
1247 const BoxArray& ba = (*ns_linop).m_grids[0][0];
1248 const DistributionMapping& dm =(*ns_linop).m_dmap[0][0];
1249
1250 int ng = 1;
1251 if (cf_strategy == CFStrategy::ghostnodes) { ng = nghost; }
1252 ns_sol = std::make_unique<MF>(ba, dm, ncomp, ng, MFInfo(), *(ns_linop->Factory(0,0)));
1253 ng = 0;
1254 if (cf_strategy == CFStrategy::ghostnodes) { ng = nghost; }
1255 ns_rhs = std::make_unique<MF>(ba, dm, ncomp, ng, MFInfo(), *(ns_linop->Factory(0,0)));
1256 setVal(*ns_sol, RT(0.0));
1257 setVal(*ns_rhs, RT(0.0));
1258
1259 ns_linop->setLevelBC(0, ns_sol.get());
1260
1261 ns_mlmg = std::make_unique<MLMGT<MF>>(*ns_linop);
1262 ns_mlmg->setVerbose(0);
1263 ns_mlmg->setFixedIter(1);
1264 ns_mlmg->setMaxFmgIter(20);
1265 ns_mlmg->setBottomSolver(BottomSolver::smoother);
1266 }
1267}
1268
1269// in : Residual (res) on the finest AMR level
1270// out : sol on all AMR levels
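// Roughly: sweep down the AMR hierarchy doing a mini V-cycle per level and
// forming the coarse residual, do an F- or V-cycle on the coarsest AMR
// level, then interpolate corrections back up, re-smoothing on each level.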
1271template <typename MF>
1272void MLMGT<MF>::oneIter (int iter)
1273{
1274 BL_PROFILE("MLMG::oneIter()");
1275
1276 for (int alev = finest_amr_lev; alev > 0; --alev)
1277 {
1278 miniCycle(alev);
1279
1280 IntVect nghost(0);
1281 if (cf_strategy == CFStrategy::ghostnodes) { nghost = IntVect(linop.getNGrow(alev)); }
1282 LocalAdd(sol[alev], cor[alev][0], 0, 0, ncomp, nghost);
1283
1284 // compute residual for the coarse AMR level
1285 computeResWithCrseSolFineCor(alev-1,alev);
1286
1287 if (alev != finest_amr_lev) {
1288 std::swap(cor_hold[alev][0], cor[alev][0]); // save it for the up cycle
1289 }
1290 }
1291
1292 // coarsest amr level
1293 {
1294 // enforce solvability if appropriate
1295 if (linop.isSingular(0) && linop.getEnforceSingularSolvable())
1296 {
1297 makeSolvable(0,0,res[0][0]);
1298 }
1299
1300 if (iter < max_fmg_iters) {
1301 mgFcycle();
1302 } else {
1303 mgVcycle(0, 0);
1304 }
1305
1306 IntVect nghost(0);
1307 if (cf_strategy == CFStrategy::ghostnodes) { nghost = IntVect(linop.getNGrow(0)); }
1308 LocalAdd(sol[0], cor[0][0], 0, 0, ncomp, nghost);
1309 }
1310
1311 for (int alev = 1; alev <= finest_amr_lev; ++alev)
1312 {
1313 // (Fine AMR correction) = I(Coarse AMR correction)
1314 interpCorrection(alev);
1315
1316 IntVect nghost(0);
1317 if (cf_strategy == CFStrategy::ghostnodes) { nghost = IntVect(linop.getNGrow(alev)); }
1318 LocalAdd(sol[alev], cor[alev][0], 0, 0, ncomp, nghost);
1319
1320 if (alev != finest_amr_lev) {
1321 LocalAdd(cor_hold[alev][0], cor[alev][0], 0, 0, ncomp, nghost);
1322 }
1323
1324 // Update fine AMR level correction
1325 computeResWithCrseCorFineCor(alev);
1326
1327 miniCycle(alev);
1328
1329 LocalAdd(sol[alev], cor[alev][0], 0, 0, ncomp, nghost);
1330
1331 if (alev != finest_amr_lev) {
1332 LocalAdd(cor[alev][0], cor_hold[alev][0], 0, 0, ncomp, nghost);
1333 }
1334 }
1335
1336 linop.averageDownAndSync(sol);
1337}
1338
1339template <typename MF>
1340void
1341MLMGT<MF>::miniCycle (int amrlev)
1342{
1343 BL_PROFILE("MLMG::miniCycle()");
1344 const int mglev = 0;
1345 mgVcycle(amrlev, mglev);
1346}
1347
1348// in : Residual (res)
1349// out : Correction (cor) from bottom to this function's local top
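// Down sweep: nu1 smoothing passes, rescor = res - L(cor), restrict rescor.
// Bottom: bottomSolve() on AMR level 0, otherwise nu1 smoothing passes.
// Up sweep: add the interpolated coarse correction, then nu2 smoothing passes.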
1350template <typename MF>
1351void
1352MLMGT<MF>::mgVcycle (int amrlev, int mglev_top)
1353{
1354 BL_PROFILE("MLMG::mgVcycle()");
1355
1356 const int mglev_bottom = linop.NMGLevels(amrlev) - 1;
1357
1358 for (int mglev = mglev_top; mglev < mglev_bottom; ++mglev)
1359 {
1360 BL_PROFILE_VAR("MLMG::mgVcycle_down::"+std::to_string(mglev), blp_mgv_down_lev);
1361
1362 if (verbose >= 4)
1363 {
1364 RT norm = norminf(res[amrlev][mglev],0,ncomp,IntVect(0));
1365 amrex::Print() << print_ident << "AT LEVEL " << amrlev << " " << mglev
1366 << " DN: Norm before smooth " << norm << "\n";
1367 }
1368
1369 setVal(cor[amrlev][mglev], RT(0.0));
1370 bool skip_fillboundary = true;
1371 linop.smooth(amrlev, mglev, cor[amrlev][mglev], res[amrlev][mglev], skip_fillboundary, nu1);
1372
1373 // rescor = res - L(cor)
1374 computeResOfCorrection(amrlev, mglev);
1375
1376 if (verbose >= 4)
1377 {
1378 RT norm = norminf(rescor[amrlev][mglev],0,ncomp,IntVect(0));
1379 amrex::Print() << print_ident << "AT LEVEL " << amrlev << " " << mglev
1380 << " DN: Norm after smooth " << norm << "\n";
1381 }
1382
1383 // res_crse = R(rescor_fine); this provides res/b to the level below
1384 linop.restriction(amrlev, mglev+1, res[amrlev][mglev+1], rescor[amrlev][mglev]);
1385 }
1386
1387 BL_PROFILE_VAR("MLMG::mgVcycle_bottom", blp_bottom);
1388 if (amrlev == 0)
1389 {
1390 if (verbose >= 4)
1391 {
1392 RT norm = norminf(res[amrlev][mglev_bottom],0,ncomp,IntVect(0));
1393 amrex::Print() << print_ident << "AT LEVEL " << amrlev << " " << mglev_bottom
1394 << " DN: Norm before bottom " << norm << "\n";
1395 }
1396 bottomSolve();
1397 if (verbose >= 4)
1398 {
1399 computeResOfCorrection(amrlev, mglev_bottom);
1400 RT norm = norminf(rescor[amrlev][mglev_bottom],0,ncomp,IntVect(0));
1401 amrex::Print() << print_ident << "AT LEVEL " << amrlev << " " << mglev_bottom
1402 << " UP: Norm after bottom " << norm << "\n";
1403 }
1404 }
1405 else
1406 {
1407 if (verbose >= 4)
1408 {
1409 RT norm = norminf(res[amrlev][mglev_bottom],0,ncomp,IntVect(0));
1410 amrex::Print() << print_ident << "AT LEVEL " << amrlev << " " << mglev_bottom
1411 << " Norm before smooth " << norm << "\n";
1412 }
1413 setVal(cor[amrlev][mglev_bottom], RT(0.0));
1414 bool skip_fillboundary = true;
1415 linop.smooth(amrlev, mglev_bottom, cor[amrlev][mglev_bottom],
1416 res[amrlev][mglev_bottom], skip_fillboundary, nu1);
1417 if (verbose >= 4)
1418 {
1419 computeResOfCorrection(amrlev, mglev_bottom);
1420 RT norm = norminf(rescor[amrlev][mglev_bottom],0,ncomp,IntVect(0));
1421 amrex::Print() << print_ident << "AT LEVEL " << amrlev << " " << mglev_bottom
1422 << " Norm after smooth " << norm << "\n";
1423 }
1424 }
1425 BL_PROFILE_VAR_STOP(blp_bottom);
1426
1427 for (int mglev = mglev_bottom-1; mglev >= mglev_top; --mglev)
1428 {
1429 BL_PROFILE_VAR("MLMG::mgVcycle_up::"+std::to_string(mglev), blp_mgv_up_lev);
1430 // cor_fine += I(cor_crse)
1431 addInterpCorrection(amrlev, mglev);
1432 if (verbose >= 4)
1433 {
1434 computeResOfCorrection(amrlev, mglev);
1435 RT norm = norminf(rescor[amrlev][mglev],0,ncomp,IntVect(0));
1436 amrex::Print() << print_ident << "AT LEVEL " << amrlev << " " << mglev
1437 << " UP: Norm before smooth " << norm << "\n";
1438 }
1439 linop.smooth(amrlev, mglev, cor[amrlev][mglev], res[amrlev][mglev], false, nu2);
1440
1441 if (cf_strategy == CFStrategy::ghostnodes) { computeResOfCorrection(amrlev, mglev); }
1442
1443 if (verbose >= 4)
1444 {
1445 computeResOfCorrection(amrlev, mglev);
1446 RT norm = norminf(rescor[amrlev][mglev],0,ncomp,IntVect(0));
1447 amrex::Print() << print_ident << "AT LEVEL " << amrlev << " " << mglev
1448 << " UP: Norm after smooth " << norm << "\n";
1449 }
1450 }
1451}
1452
1453// FMG cycle on the coarsest AMR level.
1454// in: Residual on the top MG level (i.e., 0)
1455// out: Correction (cor) on all MG levels
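// Roughly: average the residual down to every MG level, bottom-solve, then
// on each level going up interpolate the correction, recompute the residual,
// and run a V-cycle that starts from that level.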
1456template <typename MF>
1457void
1458MLMGT<MF>::mgFcycle ()
1459{
1460 BL_PROFILE("MLMG::mgFcycle()");
1461
1462#ifdef AMREX_USE_EB
1463 auto* pf = linop.Factory(0);
1464 auto is_all_regular = [pf] () {
1465 const auto *const f = dynamic_cast<EBFArrayBoxFactory const*>(pf);
1466 if (f) {
1467 return f->isAllRegular();
1468 } else {
1469 return true;
1470 }
1471 };
1472 amrex::ignore_unused(pf, is_all_regular);
1473 AMREX_ASSERT(linop.isCellCentered() || is_all_regular());
1474#endif
1475
1476 const int amrlev = 0;
1477 const int mg_bottom_lev = linop.NMGLevels(amrlev) - 1;
1478 IntVect nghost(0);
1479 if (cf_strategy == CFStrategy::ghostnodes) { nghost = IntVect(linop.getNGrow(amrlev)); }
1480
1481 for (int mglev = 1; mglev <= mg_bottom_lev; ++mglev)
1482 {
1483 linop.avgDownResMG(mglev, res[amrlev][mglev], res[amrlev][mglev-1]);
1484 }
1485
1486 bottomSolve();
1487
1488 for (int mglev = mg_bottom_lev-1; mglev >= 0; --mglev)
1489 {
1490 // cor_fine = I(cor_crse)
1491 interpCorrection(amrlev, mglev);
1492
1493 // rescor = res - L(cor)
1494 computeResOfCorrection(amrlev, mglev);
1495 // res = rescor; this provides b to the vcycle below
1496 LocalCopy(res[amrlev][mglev], rescor[amrlev][mglev], 0, 0, ncomp, nghost);
1497
1498 // save cor; do v-cycle; add the saved to cor
1499 std::swap(cor[amrlev][mglev], cor_hold[amrlev][mglev]);
1500 mgVcycle(amrlev, mglev);
1501 LocalAdd(cor[amrlev][mglev], cor_hold[amrlev][mglev], 0, 0, ncomp, nghost);
1502 }
1503}
1504
1505// At the true bottom of the coarsest AMR level.
1506// in : Residual (res) as b
1507// out : Correction (cor) as x
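// The actual method is chosen with setBottomSolver(); the cgbicg/bicgcg
// variants fall back to the other Krylov solver if the first one fails
// (see actualBottomSolve below).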
1508template <typename MF>
1509void
1510MLMGT<MF>::bottomSolve ()
1511{
1512 if (do_nsolve)
1513 {
1514 NSolve(*ns_mlmg, *ns_sol, *ns_rhs);
1515 }
1516 else
1517 {
1518 actualBottomSolve();
1519 }
1520}
1521
1522template <typename MF>
1523void
1524MLMGT<MF>::NSolve (MLMGT<MF>& a_solver, MF& a_sol, MF& a_rhs)
1525{
1526 BL_PROFILE("MLMG::NSolve()");
1527
1528 setVal(a_sol, RT(0.0));
1529
1530 MF const& res_bottom = res[0].back();
1531 if (BoxArray::SameRefs(boxArray(a_rhs),boxArray(res_bottom)) &&
1532 DistributionMapping::SameRefs(DistributionMap(a_rhs),DistributionMap(res_bottom)))
1533 {
1534 LocalCopy(a_rhs, res_bottom, 0, 0, ncomp, IntVect(0));
1535 } else {
1536 setVal(a_rhs, RT(0.0));
1537 ParallelCopy(a_rhs, res_bottom, 0, 0, ncomp);
1538 }
1539
1540 a_solver.solve(Vector<MF*>{&a_sol}, Vector<MF const*>{&a_rhs},
1541 RT(-1.0), RT(-1.0));
1542
1543 linop.copyNSolveSolution(cor[0].back(), a_sol);
1544}
1545
1546template <typename MF>
1547void
1548MLMGT<MF>::actualBottomSolve ()
1549{
1550 BL_PROFILE("MLMG::actualBottomSolve()");
1551
1552 if (!linop.isBottomActive()) { return; }
1553
1554 auto bottom_start_time = amrex::second();
1555
1556 ParallelContext::push(linop.BottomCommunicator());
1557
1558 const int amrlev = 0;
1559 const int mglev = linop.NMGLevels(amrlev) - 1;
1560 auto& x = cor[amrlev][mglev];
1561 auto& b = res[amrlev][mglev];
1562
1563 setVal(x, RT(0.0));
1564
1565 if (bottom_solver == BottomSolver::smoother)
1566 {
1567 bool skip_fillboundary = true;
1568 linop.smooth(amrlev, mglev, x, b, skip_fillboundary, nuf);
1569 }
1570 else
1571 {
1572 MF* bottom_b = &b;
1573 MF raii_b;
1574 if (linop.isBottomSingular() && linop.getEnforceSingularSolvable())
1575 {
1576 const IntVect ng = nGrowVect(b);
1577 raii_b = linop.make(amrlev, mglev, ng);
1578 LocalCopy(raii_b, b, 0, 0, ncomp, ng);
1579 bottom_b = &raii_b;
1580
1581 makeSolvable(amrlev,mglev,*bottom_b);
1582 }
1583
1584 if (bottom_solver == BottomSolver::hypre)
1585 {
1586#if defined(AMREX_USE_HYPRE) && (AMREX_SPACEDIM > 1)
1587 if constexpr (std::is_same<MF,MultiFab>()) {
1588 bottomSolveWithHypre(x, *bottom_b);
1589 } else
1590#endif
1591 {
1592 amrex::Abort("Using Hypre as bottom solver not supported in this case");
1593 }
1594 }
1595 else if (bottom_solver == BottomSolver::petsc)
1596 {
1597#if defined(AMREX_USE_PETSC) && (AMREX_SPACEDIM > 1)
1598 if constexpr (std::is_same<MF,MultiFab>()) {
1599 bottomSolveWithPETSc(x, *bottom_b);
1600 } else
1601#endif
1602 {
1603 amrex::Abort("Using PETSc as bottom solver not supported in this case");
1604 }
1605 }
1606 else
1607 {
1608 typename MLCGSolverT<MF>::Type cg_type;
1609 if (bottom_solver == BottomSolver::cg ||
1610 bottom_solver == BottomSolver::cgbicg) {
1611 cg_type = MLCGSolverT<MF>::Type::CG;
1612 } else {
1613 cg_type = MLCGSolverT<MF>::Type::BiCGStab;
1614 }
1615
1616 int ret = bottomSolveWithCG(x, *bottom_b, cg_type);
1617
1618 if (ret != 0 && (bottom_solver == BottomSolver::cgbicg ||
1619 bottom_solver == BottomSolver::bicgcg))
1620 {
1621 if (bottom_solver == BottomSolver::cgbicg) {
1622 cg_type = MLCGSolverT<MF>::Type::BiCGStab; // switch to bicg
1623 } else {
1624 cg_type = MLCGSolverT<MF>::Type::CG; // switch to cg
1625 }
1626 setVal(cor[amrlev][mglev], RT(0.0));
1627 ret = bottomSolveWithCG(x, *bottom_b, cg_type);
1628 if (ret == 0) { // switch permanently
1629 if (cg_type == MLCGSolverT<MF>::Type::CG) {
1630 bottom_solver = BottomSolver::cg;
1631 } else {
1632 bottom_solver = BottomSolver::bicgstab;
1633 }
1634 }
1635 }
1636
1637 // If the bottom solve failed then set the correction to zero
1638 if (ret != 0 && ret != 9) {
1639 setVal(cor[amrlev][mglev], RT(0.0));
1640 }
1641 const int n = (ret==0) ? nub : nuf;
1642 linop.smooth(amrlev, mglev, x, b, false, n);
1643 }
1644 }
1645
1646 ParallelContext::pop();
1647
1648 if (! timer.empty()) {
1649 timer[bottom_time] += amrex::second() - bottom_start_time;
1650 }
1651}
1652
1653template <typename MF>
1654int
1655MLMGT<MF>::bottomSolveWithCG (MF& x, const MF& b, typename MLCGSolverT<MF>::Type type)
1656{
1657 MLCGSolverT<MF> cg_solver(linop);
1658 cg_solver.setSolver(type);
1659 cg_solver.setVerbose(bottom_verbose);
1660 cg_solver.setPrintIdentation(print_ident);
1661 cg_solver.setMaxIter(bottom_maxiter);
1662 cg_solver.setInitSolnZeroed(true);
1663 if (cf_strategy == CFStrategy::ghostnodes) { cg_solver.setNGhost(linop.getNGrow()); }
1664
1665 int ret = cg_solver.solve(x, b, bottom_reltol, bottom_abstol);
1666 if (ret != 0 && verbose > 1) {
1667 amrex::Print() << print_ident << "MLMG: Bottom solve failed.\n";
1668 }
1669 m_niters_cg.push_back(cg_solver.getNumIters());
1670 return ret;
1671}
1672
1673// Compute multi-level Residual (res) up to amrlevmax.
1674template <typename MF>
1675void
1676MLMGT<MF>::computeMLResidual (int amrlevmax)
1677{
1678 BL_PROFILE("MLMG::computeMLResidual()");
1679
1680 const int mglev = 0;
1681 for (int alev = amrlevmax; alev >= 0; --alev) {
1682 const MF* crse_bcdata = (alev > 0) ? &(sol[alev-1]) : nullptr;
1683 linop.solutionResidual(alev, res[alev][mglev], sol[alev], rhs[alev], crse_bcdata);
1684 if (alev < finest_amr_lev) {
1685 linop.reflux(alev, res[alev][mglev], sol[alev], rhs[alev],
1686 res[alev+1][mglev], sol[alev+1], rhs[alev+1]);
1687 }
1688 }
1689}
1690
1691// Compute single AMR level residual without masking.
1692template <typename MF>
1693void
1694MLMGT<MF>::computeResidual (int alev)
1695{
1696 BL_PROFILE("MLMG::computeResidual()");
1697 const MF* crse_bcdata = (alev > 0) ? &(sol[alev-1]) : nullptr;
1698 linop.solutionResidual(alev, res[alev][0], sol[alev], rhs[alev], crse_bcdata);
1699}
1700
1701// Compute coarse AMR level composite residual with coarse solution and fine correction
1702template <typename MF>
1703void
1704MLMGT<MF>::computeResWithCrseSolFineCor (int calev, int falev)
1705{
1706 BL_PROFILE("MLMG::computeResWithCrseSolFineCor()");
1707
1708 IntVect nghost(0);
1709 if (cf_strategy == CFStrategy::ghostnodes) {
1710 nghost = IntVect(std::min(linop.getNGrow(falev),linop.getNGrow(calev)));
1711 }
1712
1713 MF& crse_sol = sol[calev];
1714 const MF& crse_rhs = rhs[calev];
1715 MF& crse_res = res[calev][0];
1716
1717 MF& fine_sol = sol[falev];
1718 const MF& fine_rhs = rhs[falev];
1719 MF& fine_cor = cor[falev][0];
1720 MF& fine_res = res[falev][0];
1721 MF& fine_rescor = rescor[falev][0];
1722
1723 const MF* crse_bcdata = (calev > 0) ? &(sol[calev-1]) : nullptr;
1724 linop.solutionResidual(calev, crse_res, crse_sol, crse_rhs, crse_bcdata);
1725
1726 linop.correctionResidual(falev, 0, fine_rescor, fine_cor, fine_res, BCMode::Homogeneous);
1727 LocalCopy(fine_res, fine_rescor, 0, 0, ncomp, nghost);
1728
1729 linop.reflux(calev, crse_res, crse_sol, crse_rhs, fine_res, fine_sol, fine_rhs);
1730
1731 linop.avgDownResAmr(calev, crse_res, fine_res);
1732}
1733
1734// Compute fine AMR level residual fine_res = fine_res - L(fine_cor) with coarse providing BC.
1735template <typename MF>
1736void
1737MLMGT<MF>::computeResWithCrseCorFineCor (int falev)
1738{
1739 BL_PROFILE("MLMG::computeResWithCrseCorFineCor()");
1740
1741 IntVect nghost(0);
1742 if (cf_strategy == CFStrategy::ghostnodes) {
1743 nghost = IntVect(linop.getNGrow(falev));
1744 }
1745
1746 const MF& crse_cor = cor[falev-1][0];
1747
1748 MF& fine_cor = cor [falev][0];
1749 MF& fine_res = res [falev][0];
1750 MF& fine_rescor = rescor[falev][0];
1751
1752 // fine_rescor = fine_res - L(fine_cor)
1753 linop.correctionResidual(falev, 0, fine_rescor, fine_cor, fine_res,
1754 BCMode::Inhomogeneous, &crse_cor);
1755 LocalCopy(fine_res, fine_rescor, 0, 0, ncomp, nghost);
1756}
1757
1758// Interpolate correction from coarse to fine AMR level.
1759template <typename MF>
1760void
1761MLMGT<MF>::interpCorrection (int alev)
1762{
1763 BL_PROFILE("MLMG::interpCorrection_1");
1764
1765 IntVect nghost(0);
1766 if (cf_strategy == CFStrategy::ghostnodes) {
1767 nghost = IntVect(linop.getNGrow(alev));
1768 }
1769
1770 MF & crse_cor = cor[alev-1][0];
1771 MF & fine_cor = cor[alev ][0];
1772
1773 const Geometry& crse_geom = linop.Geom(alev-1,0);
1774
1775 int ng_src = 0;
1776 int ng_dst = linop.isCellCentered() ? 1 : 0;
1777 if (cf_strategy == CFStrategy::ghostnodes)
1778 {
1779 ng_src = linop.getNGrow(alev-1);
1780 ng_dst = linop.getNGrow(alev-1);
1781 if constexpr (IsMultiFabLike_v<MF>) {
1782 crse_cor.FillBoundary(0, ncomp, IntVect(ng_src), crse_geom.periodicity());
1783 } else {
1784 amrex::Abort("MLMG: CFStrategy::ghostnodes not supported for non-MultiFab like types");
1785 }
1786 }
1787
1788 MF cfine = linop.makeCoarseAmr(alev, IntVect(ng_dst));
1789 setVal(cfine, RT(0.0));
1790 ParallelCopy(cfine, crse_cor, 0, 0, ncomp, IntVect(ng_src), IntVect(ng_dst),
1791 crse_geom.periodicity());
1792
1793 linop.interpolationAmr(alev, fine_cor, cfine, nghost); // NOLINT(readability-suspicious-call-argument)
1794}
1795
1796// Interpolate correction between MG levels
1797// inout: Correction (cor) on coarse MG lev. (out due to FillBoundary)
1798// out : Correction (cor) on fine MG lev.
1799template <typename MF>
1800void
1801MLMGT<MF>::interpCorrection (int alev, int mglev)
1802{
1803 BL_PROFILE("MLMG::interpCorrection_2");
1804
1805 MF& crse_cor = cor[alev][mglev+1];
1806 MF& fine_cor = cor[alev][mglev ];
1807 linop.interpAssign(alev, mglev, fine_cor, crse_cor);
1808}
1809
1810// (Fine MG level correction) += I(Coarse MG level correction)
1811template <typename MF>
1812void
1813MLMGT<MF>::addInterpCorrection (int alev, int mglev)
1814{
1815 BL_PROFILE("MLMG::addInterpCorrection()");
1816
1817 const MF& crse_cor = cor[alev][mglev+1];
1818 MF& fine_cor = cor[alev][mglev ];
1819
1820 MF cfine;
1821 const MF* cmf;
1822
1823 if (linop.isMFIterSafe(alev, mglev, mglev+1))
1824 {
1825 cmf = &crse_cor;
1826 }
1827 else
1828 {
1829 cfine = linop.makeCoarseMG(alev, mglev, IntVect(0));
1830 ParallelCopy(cfine, crse_cor, 0, 0, ncomp);
1831 cmf = &cfine;
1832 }
1833
1834 linop.interpolation(alev, mglev, fine_cor, *cmf);
1835}
1836
1837// Compute rescor = res - L(cor)
1838// in : res
1839// inout: cor (out due to FillBoundary in linop.correctionResidual)
1840// out : rescor
1841template <typename MF>
1842void
1843MLMGT<MF>::computeResOfCorrection (int amrlev, int mglev)
1844{
1845 BL_PROFILE("MLMG:computeResOfCorrection()");
1846 MF & x = cor[amrlev][mglev];
1847 const MF& b = res[amrlev][mglev];
1848 MF & r = rescor[amrlev][mglev];
1849 linop.correctionResidual(amrlev, mglev, r, x, b, BCMode::Homogeneous);
1850}
1851
1852// Compute single-level masked inf-norm of Residual (res).
1853template <typename MF>
1854auto
1855MLMGT<MF>::ResNormInf (int alev, bool local) -> RT
1856{
1857 BL_PROFILE("MLMG::ResNormInf()");
1858 return linop.normInf(alev, res[alev][0], local);
1859}
1860
1861// Computes multi-level masked inf-norm of Residual (res).
1862template <typename MF>
1863auto
1864MLMGT<MF>::MLResNormInf (int alevmax, bool local) -> RT
1865{
1866 BL_PROFILE("MLMG::MLResNormInf()");
1867 RT r = RT(0.0);
1868 for (int alev = 0; alev <= alevmax; ++alev)
1869 {
1870 r = std::max(r, ResNormInf(alev,true));
1871 }
1872 if (!local) { ParallelAllReduce::Max(r, ParallelContext::CommunicatorSub()); }
1873 return r;
1874}
1875
1876// Compute multi-level masked inf-norm of RHS (rhs).
1877template <typename MF>
1878auto
1879MLMGT<MF>::MLRhsNormInf (bool local) -> RT
1880{
1881 BL_PROFILE("MLMG::MLRhsNormInf()");
1882 RT r = RT(0.0);
1883 for (int alev = 0; alev <= finest_amr_lev; ++alev) {
1884 auto t = linop.normInf(alev, rhs[alev], true);
1885 r = std::max(r, t);
1886 }
1887 if (!local) { ParallelAllReduce::Max(r, ParallelContext::CommunicatorSub()); }
1888 return r;
1889}
1890
1891template <typename MF>
1892void
1893MLMGT<MF>::makeSolvable ()
1894{
1895 auto const& offset = linop.getSolvabilityOffset(0, 0, rhs[0]);
1896 if (verbose >= 4) {
1897 for (int c = 0; c < ncomp; ++c) {
1898 amrex::Print() << print_ident << "MLMG: Subtracting " << offset[c] << " from rhs component "
1899 << c << "\n";
1900 }
1901 }
1902 for (int alev = 0; alev < namrlevs; ++alev) {
1903 linop.fixSolvabilityByOffset(alev, 0, rhs[alev], offset);
1904 }
1905}
1906
1907template <typename MF>
1908void
1909MLMGT<MF>::makeSolvable (int amrlev, int mglev, MF& mf)
1910{
1911 auto const& offset = linop.getSolvabilityOffset(amrlev, mglev, mf);
1912 if (verbose >= 4) {
1913 for (int c = 0; c < ncomp; ++c) {
1914 amrex::Print() << print_ident << "MLMG: Subtracting " << offset[c]
1915 << " from mf component c = " << c
1916 << " on level (" << amrlev << ", " << mglev << ")\n";
1917 }
1918 }
1919 linop.fixSolvabilityByOffset(amrlev, mglev, mf, offset);
1920}
1921
1922#if defined(AMREX_USE_HYPRE) && (AMREX_SPACEDIM > 1)
1923template <typename MF>
1924template <class TMF,std::enable_if_t<std::is_same_v<TMF,MultiFab>,int>>
1925void
1926MLMGT<MF>::bottomSolveWithHypre (MF& x, const MF& b)
1927{
1928 const int amrlev = 0;
1929 const int mglev = linop.NMGLevels(amrlev) - 1;
1930
1931 AMREX_ALWAYS_ASSERT_WITH_MESSAGE(ncomp == 1, "bottomSolveWithHypre doesn't work with ncomp > 1");
1932
1933 if (linop.isCellCentered())
1934 {
1935 if (hypre_solver == nullptr) // We should reuse the setup
1936 {
1937 hypre_solver = linop.makeHypre(hypre_interface);
1938
1939 hypre_solver->setVerbose(bottom_verbose);
1940 if (hypre_interface == amrex::Hypre::Interface::ij) {
1941 hypre_solver->setHypreOptionsNamespace(hypre_options_namespace);
1942 } else {
1943 hypre_solver->setHypreOldDefault(hypre_old_default);
1944 hypre_solver->setHypreRelaxType(hypre_relax_type);
1945 hypre_solver->setHypreRelaxOrder(hypre_relax_order);
1946 hypre_solver->setHypreNumSweeps(hypre_num_sweeps);
1947 hypre_solver->setHypreStrongThreshold(hypre_strong_threshold);
1948 }
1949
1950 const BoxArray& ba = linop.m_grids[amrlev].back();
1951 const DistributionMapping& dm = linop.m_dmap[amrlev].back();
1952 const Geometry& geom = linop.m_geom[amrlev].back();
1953
1954 hypre_bndry = std::make_unique<MLMGBndryT<MF>>(ba, dm, ncomp, geom);
1955 hypre_bndry->setHomogValues();
1956 const Real* dx = linop.m_geom[0][0].CellSize();
1957 IntVect crse_ratio = linop.m_coarse_data_crse_ratio.allGT(0) ? linop.m_coarse_data_crse_ratio : IntVect(1);
1958 RealVect bclocation(AMREX_D_DECL(0.5*dx[0]*crse_ratio[0],
1959 0.5*dx[1]*crse_ratio[1],
1960 0.5*dx[2]*crse_ratio[2]));
1961 hypre_bndry->setLOBndryConds(linop.m_lobc, linop.m_hibc, IntVect(-1), bclocation,
1962 linop.m_coarse_fine_bc_type);
1963 }
1964
1965 // IJ interface understands absolute tolerance API of hypre
1966 amrex::Real hypre_abstol =
1967 (hypre_interface == amrex::Hypre::Interface::ij)
1968 ? bottom_abstol : Real(-1.0);
1969 hypre_solver->solve(
1970 x, b, bottom_reltol, hypre_abstol, bottom_maxiter, *hypre_bndry,
1971 linop.getMaxOrder());
1972 }
1973 else
1974 {
1975 if (hypre_node_solver == nullptr)
1976 {
1977 hypre_node_solver =
1978 linop.makeHypreNodeLap(bottom_verbose, hypre_options_namespace);
1979 }
1980 hypre_node_solver->solve(x, b, bottom_reltol, bottom_abstol, bottom_maxiter);
1981 }
1982
1983 // For singular problems there may be a large constant added to all values of the solution
1984 // For precision reasons we enforce that the average of the correction from hypre is 0
1985 if (linop.isSingular(amrlev) && linop.getEnforceSingularSolvable())
1986 {
1987 makeSolvable(amrlev, mglev, x);
1988 }
1989}
1990#endif
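// Usage sketch (hedged; assumes AMReX built with hypre support and a linear
// operator `linop` plus MultiFabs `phi` and `rhs` set up elsewhere):
//
//     MLMG mlmg(linop);
//     mlmg.setBottomSolver(BottomSolver::hypre);  // send the coarsest level to hypre
//     mlmg.setBottomVerbose(1);
//     mlmg.setBottomTolerance(Real(1.e-4));       // relative tolerance of the bottom solve
//     mlmg.solve({&phi}, {&rhs}, Real(1.e-11), Real(0.0));
//
// Note the assertion above: this path currently requires ncomp == 1.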
1991
1992#if defined(AMREX_USE_PETSC) && (AMREX_SPACEDIM > 1)
1993template <typename MF>
1994template <class TMF,std::enable_if_t<std::is_same_v<TMF,MultiFab>,int>>
1995void
1996MLMGT<MF>::bottomSolveWithPETSc (MF& x, const MF& b)
1997{
1998 AMREX_ALWAYS_ASSERT_WITH_MESSAGE(ncomp == 1, "bottomSolveWithPETSc doesn't work with ncomp > 1");
1999
2000 if(petsc_solver == nullptr)
2001 {
2002 petsc_solver = linop.makePETSc();
2003 petsc_solver->setVerbose(bottom_verbose);
2004
2005 const BoxArray& ba = linop.m_grids[0].back();
2006 const DistributionMapping& dm = linop.m_dmap[0].back();
2007 const Geometry& geom = linop.m_geom[0].back();
2008
2009 petsc_bndry = std::make_unique<MLMGBndryT<MF>>(ba, dm, ncomp, geom);
2010 petsc_bndry->setHomogValues();
2011 const Real* dx = linop.m_geom[0][0].CellSize();
2012 auto crse_ratio = linop.m_coarse_data_crse_ratio.allGT(0) ? linop.m_coarse_data_crse_ratio : IntVect(1);
2013 RealVect bclocation(AMREX_D_DECL(0.5*dx[0]*crse_ratio[0],
2014 0.5*dx[1]*crse_ratio[1],
2015 0.5*dx[2]*crse_ratio[2]));
2016 petsc_bndry->setLOBndryConds(linop.m_lobc, linop.m_hibc, IntVect(-1), bclocation,
2017 linop.m_coarse_fine_bc_type);
2018 }
2019 petsc_solver->solve(x, b, bottom_reltol, Real(-1.), bottom_maxiter, *petsc_bndry,
2020 linop.getMaxOrder());
2021}
2022#endif
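// The PETSc path is selected analogously, e.g. (hedged sketch)
//
//     mlmg.setBottomSolver(BottomSolver::petsc);
//     mlmg.setBottomMaxIter(200);
//     mlmg.setBottomTolerance(Real(1.e-4));
//
// and has the same ncomp == 1 restriction.  Only the relative tolerance is
// forwarded to PETSc here (the absolute tolerance argument is -1), so
// setBottomTolerance()/setBottomMaxIter() are the relevant knobs.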
2023
2024template <typename MF>
2025void
2026MLMGT<MF>::checkPoint (const Vector<MultiFab*>& a_sol,
2027 const Vector<MultiFab const*>& a_rhs,
2028 RT a_tol_rel, RT a_tol_abs, const char* a_file_name) const
2029{
2030 std::string file_name(a_file_name);
2031 UtilCreateCleanDirectory(file_name, false);
2032
2033 if (ParallelContext::IOProcessorSub())
2034 {
2035 std::string HeaderFileName(std::string(a_file_name)+"/Header");
2036 std::ofstream HeaderFile;
2037 HeaderFile.open(HeaderFileName.c_str(), std::ofstream::out |
2038 std::ofstream::trunc |
2039 std::ofstream::binary);
2040 if( ! HeaderFile.good()) {
2041 FileOpenFailed(HeaderFileName);
2042 }
2043
2044 HeaderFile.precision(17);
2045
2046 std::string norm_name = getEnumNameString(norm_type);
2047
2048 HeaderFile << linop.name() << "\n"
2049 << "a_tol_rel = " << a_tol_rel << "\n"
2050 << "a_tol_abs = " << a_tol_abs << "\n"
2051 << "verbose = " << verbose << "\n"
2052 << "max_iters = " << max_iters << "\n"
2053 << "nu1 = " << nu1 << "\n"
2054 << "nu2 = " << nu2 << "\n"
2055 << "nuf = " << nuf << "\n"
2056 << "nub = " << nub << "\n"
2057 << "max_fmg_iters = " << max_fmg_iters << "\n"
2058 << "bottom_solver = " << static_cast<int>(bottom_solver) << "\n"
2059 << "bottom_verbose = " << bottom_verbose << "\n"
2060 << "bottom_maxiter = " << bottom_maxiter << "\n"
2061 << "bottom_reltol = " << bottom_reltol << "\n"
2062 << "convergence_norm = " << norm_name << "\n"
2063 << "namrlevs = " << namrlevs << "\n"
2064 << "finest_amr_lev = " << finest_amr_lev << "\n"
2065 << "linop_prepared = " << linop_prepared << "\n"
2066 << "solve_called = " << solve_called << "\n";
2067
2068 for (int ilev = 0; ilev <= finest_amr_lev; ++ilev) {
2069 UtilCreateCleanDirectory(file_name+"/Level_"+std::to_string(ilev), false);
2070 }
2071 }
2072
2073 ParallelContext::BarrierSub();
2074
2075 for (int ilev = 0; ilev <= finest_amr_lev; ++ilev) {
2076 VisMF::Write(*a_sol[ilev], file_name+"/Level_"+std::to_string(ilev)+"/sol");
2077 VisMF::Write(*a_rhs[ilev], file_name+"/Level_"+std::to_string(ilev)+"/rhs");
2078 }
2079
2080 linop.checkPoint(file_name+"/linop");
2081}
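// Usage sketch (hedged): this checkpoint is normally triggered by passing the
// optional file name to solve(), e.g.
//
//     mlmg.solve({&phi}, {&rhs}, Real(1.e-11), Real(0.0), "mlmg_chkpt");
//
// which writes a Header with the solver settings plus per-level sol/rhs data
// and the linop state under the hypothetical directory "mlmg_chkpt" for
// offline debugging.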
2082
2083template <typename MF>
2084void
2085MLMGT<MF>::incPrintIdentation ()
2086{
2087 print_ident.resize(print_ident.size()+4, ' ');
2088}
2089
2090template <typename MF>
2091void
2092MLMGT<MF>::decPrintIdentation ()
2093{
2094 if (print_ident.size() > 4) {
2095 print_ident.resize(print_ident.size()-4, ' ');
2096 } else {
2097 print_ident.clear();
2098 }
2099}
2100
2101extern template class MLMGT<MultiFab>;
2102
2103using MLMG = MLMGT<MultiFab>;
2104
2105
2106}
2107
2108#endif