3 #include <AMReX_Config.H>
10 template <typename MF>
16     : public std::runtime_error
19     using std::runtime_error::runtime_error;
44     template <typename AMF>
46               RT a_tol_rel, RT a_tol_abs, const char* checkpoint_file = nullptr);
48     template <typename AMF>
49     RT solve (std::initializer_list<AMF*> a_sol,
50               std::initializer_list<AMF const*> a_rhs,
51               RT a_tol_rel, RT a_tol_abs, const char* checkpoint_file = nullptr);
53     template <typename AMF>
55                           Location a_loc = Location::FaceCenter);
57     template <typename AMF>
59                           Location a_loc = Location::FaceCenter);
64     template <typename AMF>
66                     Location a_loc = Location::FaceCenter);
68     template <typename AMF>
70                     Location a_loc = Location::FaceCenter);
72     template <typename AMF>
75                     Location a_loc = Location::FaceCenter);
77     template <typename AMF>
79                     std::initializer_list<AMF*> a_sol,
80                     Location a_loc = Location::FaceCenter);
82     template <typename AMF>
84                     Location a_loc = Location::CellCenter);
86     template <typename AMF>
87     void getFluxes (std::initializer_list<AMF*> a_flux,
88                     Location a_loc = Location::CellCenter);
90     template <typename AMF>
93                     Location a_loc = Location::CellCenter);
95     template <typename AMF>
96     void getFluxes (std::initializer_list<AMF*> a_flux,
97                     std::initializer_list<AMF*> a_sol,
98                     Location a_loc = Location::CellCenter);
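// Usage sketch (illustrative, not part of this header): a typical way to drive
// the solve() and getFluxes() members declared above for the MultiFab
// instantiation MLMG (= MLMGT<MultiFab>). The operator `mlabec`, the MultiFabs
// `phi` and `rhs`, the face-based Array<MultiFab,AMREX_SPACEDIM> `flux`, and
// the tolerances are assumptions made only for this example.
//
//     MLMG mlmg(mlabec);                 // wrap an already configured MLABecLaplacian
//     mlmg.setVerbose(1);
//     mlmg.setMaxIter(100);
//     // solve() returns the final composite residual norm; the
//     // initializer_list overloads forward to the Vector-based ones.
//     Real err = mlmg.solve({&phi}, {&rhs}, Real(1.e-11), Real(0.0));
//
//     // Face-centered fluxes (-b grad phi for an ABecLaplacian-type operator)
//     // can be retrieved after the solve:
//     mlmg.getFluxes({GetArrOfPtrs(flux)});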
144 #if defined(AMREX_USE_HYPRE) && (AMREX_SPACEDIM > 1)
155     void setHypreOptionsNamespace(const std::string& prefix) noexcept
157         hypre_options_namespace = prefix;
160     void setHypreOldDefault (bool l) noexcept {hypre_old_default = l;}
161     void setHypreRelaxType (int n) noexcept {hypre_relax_type = n;}
162     void setHypreRelaxOrder (int n) noexcept {hypre_relax_order = n;}
163     void setHypreNumSweeps (int n) noexcept {hypre_num_sweeps = n;}
164 void setHypreStrongThreshold (Real t) noexcept {hypre_strong_threshold = t;}
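// Configuration sketch (illustrative, not part of this header): when AMReX is
// built with Hypre, the setters above tune the Hypre bottom solver (these
// particular knobs map to BoomerAMG settings). The solver object `mlmg` and
// the values below are assumptions chosen only for the example.
//
//     mlmg.setBottomSolver(amrex::BottomSolver::hypre);
//     mlmg.setHypreOptionsNamespace("hypre");   // prefix for run-time Hypre options
//     mlmg.setHypreOldDefault(false);           // opt out of the legacy default settings
//     mlmg.setHypreStrongThreshold(0.57);       // a common BoomerAMG choice for 3D problems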
169     template <typename AMF>
170     void prepareForSolve (Vector<AMF*> const& a_sol, Vector<AMF const*> const& a_rhs);
184     void mgVcycle (int amrlev, int mglev);
188 void NSolve (MLMGT<MF>& a_solver, MF& a_sol, MF& a_rhs);
208 #if defined(AMREX_USE_HYPRE) && (AMREX_SPACEDIM > 1)
209     template <class TMF=MF,std::enable_if_t<std::is_same_v<TMF,MultiFab>,int> = 0>
210     void bottomSolveWithHypre (MF& x, const MF& b);
213 #if defined(AMREX_USE_PETSC) && (AMREX_SPACEDIM > 1)
214     template <class TMF=MF,std::enable_if_t<std::is_same_v<TMF,MultiFab>,int> = 0>
215     void bottomSolveWithPETSc (MF& x, const MF& b);
275 #if defined(AMREX_USE_HYPRE) && (AMREX_SPACEDIM > 1)
280 std::unique_ptr<Hypre> hypre_solver;
281 std::unique_ptr<MLMGBndryT<MF>> hypre_bndry;
282 std::unique_ptr<HypreNodeLap> hypre_node_solver;
284     std::string hypre_options_namespace = "hypre";
285     bool hypre_old_default = true;
286 int hypre_relax_type = 6;
287 int hypre_relax_order = 1;
288 int hypre_num_sweeps = 2;
289 Real hypre_strong_threshold = 0.25;
293 #if defined(AMREX_USE_PETSC) && (AMREX_SPACEDIM > 1)
294 std::unique_ptr<PETScABecLap> petsc_solver;
295 std::unique_ptr<MLMGBndryT<MF>> petsc_bndry;
329                      RT a_tol_rel, RT a_tol_abs, const char* a_file_name) const;
333 template <typename MF>
335 : linop(a_lp), ncomp(a_lp.getNComp()), namrlevs(a_lp.NAMRLevels()),
336 finest_amr_lev(a_lp.NAMRLevels()-1)
341 template <typename MF>
342 template <typename AMF>
345                     std::initializer_list<AMF const*> a_rhs,
346                     RT a_tol_rel, RT a_tol_abs, const char* checkpoint_file) -> RT
350                  a_tol_rel, a_tol_abs, checkpoint_file);
353 template <typename MF>
354 template <typename AMF>
357                     RT a_tol_rel, RT a_tol_abs, const char* checkpoint_file) -> RT
361 if constexpr (std::is_same<AMF,MultiFab>()) {
362         if (checkpoint_file != nullptr) {
363 checkPoint(a_sol, a_rhs, a_tol_rel, a_tol_abs, checkpoint_file);
368 bottom_solver = linop.getDefaultBottomSolver();
371 #if (defined(AMREX_USE_HYPRE) || defined(AMREX_USE_PETSC)) && (AMREX_SPACEDIM > 1)
373 int mo = linop.getMaxOrder();
374 if (a_sol[0]->hasEBFabFactory()) {
375 linop.setMaxOrder(2);
382 bool is_nsolve = linop.m_parent;
386 RT& composite_norminf = m_final_resnorm0;
389 m_iter_fine_resnorm0.clear();
391 prepareForSolve(a_sol, a_rhs);
393 computeMLResidual(finest_amr_lev);
396 RT resnorm0 = MLResNormInf(finest_amr_lev, local);
397 RT rhsnorm0 = MLRhsNormInf(local);
403         amrex::Print() << "MLMG: Initial rhs = " << rhsnorm0 << "\n"
404                        << "MLMG: Initial residual (resid0) = " << resnorm0 << "\n";
408 m_init_resnorm0 = resnorm0;
409 m_rhsnorm0 = rhsnorm0;
412 std::string norm_name;
413 if (always_use_bnorm || rhsnorm0 >= resnorm0) {
417         norm_name = "resid0";
422 if (!is_nsolve && resnorm0 <= res_target) {
423 composite_norminf = resnorm0;
429     bool converged = false;
431 const int niters = do_fixed_number_of_iters ? do_fixed_number_of_iters : max_iters;
432     for (int iter = 0; iter < niters; ++iter)
439 computeResidual(finest_amr_lev);
441         if (is_nsolve) { continue; }
443 RT fine_norminf = ResNormInf(finest_amr_lev);
444 m_iter_fine_resnorm0.push_back(fine_norminf);
445 composite_norminf = fine_norminf;
447             amrex::Print() << "MLMG: Iteration " << std::setw(3) << iter+1 << " Fine resid/"
448                            << norm_name << " = " << fine_norminf/max_norm << "\n";
450 bool fine_converged = (fine_norminf <= res_target);
452 if (namrlevs == 1 && fine_converged) {
454         } else if (fine_converged) {
456 computeMLResidual(finest_amr_lev-1);
457 RT crse_norminf = MLResNormInf(finest_amr_lev-1);
459                 amrex::Print() << "MLMG: Iteration " << std::setw(3) << iter+1
460                                << " Crse resid/" << norm_name << " = "
461                                << crse_norminf/max_norm << "\n";
463 converged = (crse_norminf <= res_target);
464             composite_norminf = std::max(fine_norminf, crse_norminf);
472                                << " resid, resid/" << norm_name << " = "
473                                << composite_norminf << ", "
474                                << composite_norminf/max_norm << "\n";
478         if (composite_norminf > RT(1.e20)*max_norm)
481                 amrex::Print() << "MLMG: Failing to converge after " << iter+1 << " iterations."
482                                << " resid, resid/" << norm_name << " = "
483                                << composite_norminf << ", "
484                                << composite_norminf/max_norm << "\n";
488                 throw error("MLMG blew up.");
496 if (!converged && do_fixed_number_of_iters == 0) {
498             amrex::Print() << "MLMG: Failed to converge after " << max_iters << " iterations."
499                            << " resid, resid/" << norm_name << " = "
500                            << composite_norminf << ", "
501                            << composite_norminf/max_norm << "\n";
505             throw error("MLMG failed to converge.");
513 linop.postSolve(sol);
516 if (linop.hasHiddenDimension()) {
517 ng_back[linop.hiddenDirection()] = 0;
519     for (int alev = 0; alev < namrlevs; ++alev)
521 if (!sol_is_alias[alev]) {
522 LocalCopy(*a_sol[alev], sol[alev], 0, 0, ncomp, ng_back);
528 ParallelReduce::Max<double>(timer.data(), timer.size(), 0,
533                        << " Iter = " << timer[iter_time]
534                        << " Bottom = " << timer[bottom_time] << "\n";
540 return composite_norminf;
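// Note on the solve() fragment above: convergence is declared once the residual
// norms fall below res_target, which combines a_tol_abs with a_tol_rel scaled by
// max_norm (the initial rhs norm when always_use_bnorm is set or rhsnorm0 >=
// resnorm0, otherwise the initial residual norm, as reported via norm_name).
// The returned composite_norminf is the final residual norm, also available
// afterwards through getFinalResidual().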
543 template <typename MF>
547     for (int alev = finest_amr_lev; alev >= 0; --alev) {
548         const MF* crse_bcdata = (alev > 0) ? a_sol[alev-1] : nullptr;
549 linop.prepareForFluxes(alev, crse_bcdata);
553 template <typename MF>
554 template <typename AMF>
559     for (int alev = 0; alev <= finest_amr_lev; ++alev) {
560 if constexpr (std::is_same<AMF,MF>()) {
561 linop.compGrad(alev, a_grad_sol[alev], sol[alev], a_loc);
564             for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
565                 auto const& amf = *(a_grad_sol[alev][idim]);
568             linop.compGrad(alev, GetArrOfPtrs(grad_sol), sol[alev], a_loc);
569             for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
570                 LocalCopy(*a_grad_sol[alev][idim], grad_sol[idim], 0, 0, ncomp, IntVect(0));
576 template <typename MF>
577 template <typename AMF>
584 template <typename MF>
585 template <typename AMF>
590 if (!linop.isCellCentered()) {
591         amrex::Abort("Calling wrong getFluxes for nodal solver");
596 if constexpr (std::is_same<AMF,MF>()) {
600         for (int ilev = 0; ilev < namrlevs; ++ilev) {
601             for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
602 auto const& amf = *(a_flux[ilev][idim]);
607         for (int ilev = 0; ilev < namrlevs; ++ilev) {
608             for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
609                 LocalCopy(*a_flux[ilev][idim], fluxes[ilev][idim], 0, 0, ncomp, IntVect(0));
615 template <typename MF>
616 template <typename AMF>
624 template <typename MF>
625 template <typename AMF>
632 if (!linop.isCellCentered()) {
633         amrex::Abort("Calling wrong getFluxes for nodal solver");
636 if constexpr (std::is_same<AMF,MF>()) {
637 linop.getFluxes(a_flux, a_sol, a_loc);
640         for (int ilev = 0; ilev < namrlevs; ++ilev) {
641             for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
642 auto const& amf = *(a_flux[ilev][idim]);
648         for (int ilev = 0; ilev < namrlevs; ++ilev) {
649             for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
650                 LocalCopy(*a_flux[ilev][idim], fluxes[ilev][idim], 0, 0, ncomp, IntVect(0));
656 template <typename MF>
657 template <typename AMF>
660                          std::initializer_list<AMF*> a_sol, Location a_loc)
666 template <typename MF>
667 template <typename AMF>
672 if constexpr (std::is_same<AMF,MF>()) {
676         for (int ilev = 0; ilev < namrlevs; ++ilev) {
677 auto const& amf = *a_flux[ilev];
681         for (int ilev = 0; ilev < namrlevs; ++ilev) {
687 template <typename MF>
688 template <typename AMF>
695 template <typename MF>
696 template <typename AMF>
703 if constexpr (! std::is_same<AMF,MF>()) {
704         for (int alev = 0; alev < namrlevs; ++alev) {
709 if (linop.isCellCentered())
712         for (int alev = 0; alev < namrlevs; ++alev) {
713             for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
716 if (cf_strategy == CFStrategy::ghostnodes) { nghost = linop.getNGrow(alev); }
717                 ffluxes[alev][idim].define(amrex::convert(linop.m_grids[alev][mglev],
719                                            linop.m_dmap[alev][mglev], ncomp, nghost, MFInfo(),
720                                            *linop.m_factory[alev][mglev]);
723 if constexpr (std::is_same<AMF,MF>()) {
728         for (int alev = 0; alev < namrlevs; ++alev) {
737 if constexpr (std::is_same<AMF,MF>()) {
738 linop.getFluxes(a_flux, a_sol);
741         for (int ilev = 0; ilev < namrlevs; ++ilev) {
742 auto const& amf = *a_flux[ilev];
746         for (int ilev = 0; ilev < namrlevs; ++ilev) {
753 template <typename MF>
754 template <typename AMF>
757                          std::initializer_list<AMF*> a_sol, Location a_loc)
764 template <typename MF>
768 if (!linop.isCellCentered()) {
776 template <typename MF>
778 MLMGT<MF>::getEBFluxes (const Vector<MF*>& a_eb_flux, const Vector<MF*>& a_sol)
782 if (!linop.isCellCentered()) {
786 linop.getEBFluxes(a_eb_flux, a_sol);
790 template <typename MF>
798 if (linop.hasHiddenDimension()) { ng_sol[linop.hiddenDirection()] = 0; }
801     sol_is_alias.resize(namrlevs, true);
802     for (int alev = 0; alev < namrlevs; ++alev)
804         if (cf_strategy == CFStrategy::ghostnodes || nGrowVect(*a_sol[alev]) == ng_sol)
806             sol[alev] = linop.makeAlias(*a_sol[alev]);
807             sol_is_alias[alev] = true;
811 if (sol_is_alias[alev])
813 sol[alev] = linop.make(alev, 0, ng_sol);
821 const auto& amrrr = linop.AMRRefRatio();
823     for (int alev = finest_amr_lev; alev >= 0; --alev) {
824         const MF* crse_bcdata = (alev > 0) ? &(sol[alev-1]) : nullptr;
825 const MF* prhs = a_rhs[alev];
826 #if (AMREX_SPACEDIM != 3)
827 int nghost = (cf_strategy == CFStrategy::ghostnodes) ? linop.getNGrow(alev) : 0;
829 MFInfo(), *linop.Factory(alev));
831 linop.applyMetricTerm(alev, 0, rhstmp);
832 linop.unimposeNeumannBC(alev, rhstmp);
833 linop.applyInhomogNeumannTerm(alev, rhstmp);
836 linop.solutionResidual(alev, *a_res[alev], sol[alev], *prhs, crse_bcdata);
837 if (alev < finest_amr_lev) {
838 linop.reflux(alev, *a_res[alev], sol[alev], *prhs,
839 *a_res[alev+1], sol[alev+1], *a_rhs[alev+1]);
840 if (linop.isCellCentered()) {
844 average_down(*a_res[alev+1], *a_res[alev], 0, ncomp, amrrr[alev]);
851 #if (AMREX_SPACEDIM != 3)
852     for (int alev = 0; alev <= finest_amr_lev; ++alev) {
853 linop.unapplyMetricTerm(alev, 0, *a_res[alev]);
858 template <typename MF>
869 if (linop.hasHiddenDimension()) { ng_sol[linop.hiddenDirection()] = 0; }
871     for (int alev = 0; alev < namrlevs; ++alev)
873 if (cf_strategy == CFStrategy::ghostnodes)
875 nghost = linop.getNGrow(alev);
876 in[alev] = a_in[alev];
878         else if (nGrowVect(*a_in[alev]) == ng_sol)
880             in[alev] = a_in[alev];
880 in[alev] = a_in[alev];
885             if (cf_strategy == CFStrategy::ghostnodes) { ng = IntVect(nghost); }
886 in_raii[alev] = linop.make(alev, 0, ng);
888 in[alev] = &(in_raii[alev]);
890         rh[alev] = linop.make(alev, 0, IntVect(nghost));
896     for (int alev = 0; alev < namrlevs; ++alev) {
897 linop.applyInhomogNeumannTerm(alev, rh[alev]);
900 const auto& amrrr = linop.AMRRefRatio();
902     for (int alev = finest_amr_lev; alev >= 0; --alev) {
903         const MF* crse_bcdata = (alev > 0) ? in[alev-1] : nullptr;
904 linop.solutionResidual(alev, *out[alev], *in[alev], rh[alev], crse_bcdata);
905 if (alev < finest_amr_lev) {
906 linop.reflux(alev, *out[alev], *in[alev], rh[alev],
907 *out[alev+1], *in[alev+1], rh[alev+1]);
908 if (linop.isCellCentered()) {
909 if constexpr (IsMultiFabLike_v<MF>) {
916                     amrex::Abort("MLMG: TODO average_down for non-MultiFab");
922 #if (AMREX_SPACEDIM != 3)
923     for (int alev = 0; alev <= finest_amr_lev; ++alev) {
924 linop.unapplyMetricTerm(alev, 0, *out[alev]);
928     for (int alev = 0; alev <= finest_amr_lev; ++alev) {
929         if (cf_strategy == CFStrategy::ghostnodes) { nghost = linop.getNGrow(alev); }
930         Scale(*out[alev], RT(-1), 0, nComp(*out[alev]), nghost);
934 template <typename MF>
935 template <typename AMF>
944 timer.assign(ntimers, 0.0);
948 if (linop.hasHiddenDimension()) { ng_sol[linop.hiddenDirection()] = 0; }
950 if (!linop_prepared) {
951 linop.prepareForSolve();
952         linop_prepared = true;
953     } else if (linop.needsUpdate()) {
956 #if defined(AMREX_USE_HYPRE) && (AMREX_SPACEDIM > 1)
957 hypre_solver.reset();
959 hypre_node_solver.reset();
962 #ifdef AMREX_USE_PETSC
963 petsc_solver.reset();
969     sol_is_alias.resize(namrlevs, false);
970     for (int alev = 0; alev < namrlevs; ++alev)
972 if (cf_strategy == CFStrategy::ghostnodes)
974 if constexpr (std::is_same<AMF,MF>()) {
975 sol[alev] = linop.makeAlias(*a_sol[alev]);
976                 sol_is_alias[alev] = true;
978                 amrex::Abort("Type conversion not supported for CFStrategy::ghostnodes");
984 if constexpr (std::is_same<AMF,MF>()) {
985 sol[alev] = linop.makeAlias(*a_sol[alev]);
986                 sol_is_alias[alev] = true;
989 if (!sol_is_alias[alev]) {
991 sol[alev] = linop.make(alev, 0, ng_sol);
999 rhs.resize(namrlevs);
1000     for (int alev = 0; alev < namrlevs; ++alev)
1002         if (cf_strategy == CFStrategy::ghostnodes) { ng_rhs = IntVect(linop.getNGrow(alev)); }
1003 if (!solve_called) {
1004 rhs[alev] = linop.make(alev, 0, ng_rhs);
1006 LocalCopy(rhs[alev], *a_rhs[alev], 0, 0, ncomp, ng_rhs);
1007 linop.applyMetricTerm(alev, 0, rhs[alev]);
1008 linop.unimposeNeumannBC(alev, rhs[alev]);
1009 linop.applyInhomogNeumannTerm(alev, rhs[alev]);
1010 linop.applyOverset(alev, rhs[alev]);
1011 linop.scaleRHS(alev, rhs[alev]);
1015 if (factory && !factory->isAllRegular()) {
1016 if constexpr (std::is_same<MF,MultiFab>()) {
1020             amrex::Abort("TODO: MLMG with EB only works with MultiFab");
1026     for (int falev = finest_amr_lev; falev > 0; --falev)
1028 linop.averageDownSolutionRHS(falev-1, sol[falev-1], rhs[falev-1], sol[falev], rhs[falev]);
1032 if (linop.isSingular(0) && linop.getEnforceSingularSolvable())
1037 IntVect ng = linop.getNGrowVectRestriction();
1038 if (cf_strategy == CFStrategy::ghostnodes) { ng = ng_rhs; }
1039 if (!solve_called) {
1040 linop.make(res, ng);
1041 linop.make(rescor, ng);
1043     for (int alev = 0; alev <= finest_amr_lev; ++alev)
1045         const int nmglevs = linop.NMGLevels(alev);
1046         for (int mglev = 0; mglev < nmglevs; ++mglev)
1048             setVal(res   [alev][mglev], RT(0.0));
1049             setVal(rescor[alev][mglev], RT(0.0));
1053 if (cf_strategy != CFStrategy::ghostnodes) { ng = ng_sol; }
1055     for (int alev = 0; alev <= finest_amr_lev; ++alev)
1057         const int nmglevs = linop.NMGLevels(alev);
1058         cor[alev].resize(nmglevs);
1059         for (int mglev = 0; mglev < nmglevs; ++mglev)
1061             if (!solve_called) {
1063                 if (cf_strategy == CFStrategy::ghostnodes) { _ng = IntVect(linop.getNGrow(alev,mglev)); }
1064                 cor[alev][mglev] = linop.make(alev, mglev, _ng);
1070     cor_hold.resize(std::max(namrlevs-1,1));
1073 const int nmglevs = linop.NMGLevels(alev);
1074 cor_hold[alev].resize(nmglevs);
1075         for (int mglev = 0; mglev < nmglevs-1; ++mglev)
1077             if (!solve_called) {
1079                 if (cf_strategy == CFStrategy::ghostnodes) { _ng = IntVect(linop.getNGrow(alev,mglev)); }
1080                 cor_hold[alev][mglev] = linop.make(alev, mglev, _ng);
1082             setVal(cor_hold[alev][mglev], RT(0.0));
1085     for (int alev = 1; alev < finest_amr_lev; ++alev)
1087 cor_hold[alev].resize(1);
1088 if (!solve_called) {
1090             if (cf_strategy == CFStrategy::ghostnodes) { _ng = IntVect(linop.getNGrow(alev)); }
1091             cor_hold[alev][0] = linop.make(alev, 0, _ng);
1093         setVal(cor_hold[alev][0], RT(0.0));
1097 || !linop.supportNSolve())
1102     if (do_nsolve && ns_linop == nullptr)
1108         amrex::Print() << "MLMG: # of AMR levels: " << namrlevs << "\n"
1109                        << " # of MG levels on the coarsest AMR level: " << linop.NMGLevels(0)
1112             amrex::Print() << " # of MG levels in N-Solve: " << ns_linop->NMGLevels(0) << "\n"
1113                            << " # of grids in N-Solve: " << ns_linop->m_grids[0][0].size() << "\n";
1118 template <typename MF>
1122 if (!linop_prepared) {
1123 linop.prepareForSolve();
1124         linop_prepared = true;
1125     } else if (linop.needsUpdate()) {
1130 template <typename MF>
1135 linop.prepareForGMRES();
1138 template <typename MF>
1143 IntVect ng = linop.getNGrowVectRestriction();
1144 linop.make(res, ng);
1145 linop.make(rescor, ng);
1147     for (int alev = 0; alev <= finest_amr_lev; ++alev)
1149         const int nmglevs = linop.NMGLevels(alev);
1150         for (int mglev = 0; mglev < nmglevs; ++mglev)
1152             setVal(res   [alev][mglev], RT(0.0));
1153             setVal(rescor[alev][mglev], RT(0.0));
1158 if (linop.hasHiddenDimension()) { ng_sol[linop.hiddenDirection()] = 0; }
1162     for (int alev = 0; alev <= finest_amr_lev; ++alev)
1164         const int nmglevs = linop.NMGLevels(alev);
1165         cor[alev].resize(nmglevs);
1166         for (int mglev = 0; mglev < nmglevs; ++mglev)
1168 cor[alev][mglev] = linop.make(alev, mglev, ng);
1173     cor_hold.resize(std::max(namrlevs-1,1));
1176 const int nmglevs = linop.NMGLevels(alev);
1177 cor_hold[alev].resize(nmglevs);
1178         for (int mglev = 0; mglev < nmglevs-1; ++mglev)
1180             cor_hold[alev][mglev] = linop.make(alev, mglev, ng);
1181             setVal(cor_hold[alev][mglev], RT(0.0));
1184     for (int alev = 1; alev < finest_amr_lev; ++alev)
1186 cor_hold[alev].resize(1);
1187 cor_hold[alev][0] = linop.make(alev, 0, ng);
1188         setVal(cor_hold[alev][0], RT(0.0));
1193 template <typename MF>
1197 if constexpr (IsMultiFabLike_v<MF>) {
1198 ns_linop = linop.makeNLinOp(nsolve_grid_size);
1201 if (cf_strategy == CFStrategy::ghostnodes) { nghost = linop.getNGrow(); }
1203 const BoxArray& ba = (*ns_linop).m_grids[0][0];
1207 if (cf_strategy == CFStrategy::ghostnodes) { ng = nghost; }
1208         ns_sol = std::make_unique<MF>(ba, dm, ncomp, ng, MFInfo(), *(ns_linop->Factory(0,0)));
1210 if (cf_strategy == CFStrategy::ghostnodes) { ng = nghost; }
1211         ns_rhs = std::make_unique<MF>(ba, dm, ncomp, ng, MFInfo(), *(ns_linop->Factory(0,0)));
1215 ns_linop->setLevelBC(0, ns_sol.get());
1217 ns_mlmg = std::make_unique<MLMGT<MF>>(*ns_linop);
1218 ns_mlmg->setVerbose(0);
1219 ns_mlmg->setFixedIter(1);
1220 ns_mlmg->setMaxFmgIter(20);
1227 template <typename MF>
1232     for (int alev = finest_amr_lev; alev > 0; --alev)
1237         if (cf_strategy == CFStrategy::ghostnodes) { nghost = IntVect(linop.getNGrow(alev)); }
1238 LocalAdd(sol[alev], cor[alev][0], 0, 0, ncomp, nghost);
1241 computeResWithCrseSolFineCor(alev-1,alev);
1243 if (alev != finest_amr_lev) {
1244 std::swap(cor_hold[alev][0], cor[alev][0]);
1251 if (linop.isSingular(0) && linop.getEnforceSingularSolvable())
1253 makeSolvable(0,0,res[0][0]);
1256 if (iter < max_fmg_iters) {
1263         if (cf_strategy == CFStrategy::ghostnodes) { nghost = IntVect(linop.getNGrow(0)); }
1264 LocalAdd(sol[0], cor[0][0], 0, 0, ncomp, nghost);
1267         for (int alev = 1; alev <= finest_amr_lev; ++alev)
1270 interpCorrection(alev);
1273             if (cf_strategy == CFStrategy::ghostnodes) { nghost = IntVect(linop.getNGrow(alev)); }
1274 LocalAdd(sol[alev], cor[alev][0], 0, 0, ncomp, nghost);
1276 if (alev != finest_amr_lev) {
1277 LocalAdd(cor_hold[alev][0], cor[alev][0], 0, 0, ncomp, nghost);
1281 computeResWithCrseCorFineCor(alev);
1285 LocalAdd(sol[alev], cor[alev][0], 0, 0, ncomp, nghost);
1287 if (alev != finest_amr_lev) {
1288 LocalAdd(cor[alev][0], cor_hold[alev][0], 0, 0, ncomp, nghost);
1292 linop.averageDownAndSync(sol);
1295 template <typename MF>
1300 const int mglev = 0;
1301 mgVcycle(amrlev, mglev);
1306 template <typename MF>
1312 const int mglev_bottom = linop.NMGLevels(amrlev) - 1;
1314     for (int mglev = mglev_top; mglev < mglev_bottom; ++mglev)
1316         BL_PROFILE_VAR("MLMG::mgVcycle_down::"+std::to_string(mglev), blp_mgv_down_lev);
1321             amrex::Print() << "AT LEVEL " << amrlev << " " << mglev
1322                            << " DN: Norm before smooth " << norm << "\n";
1325         setVal(cor[amrlev][mglev], RT(0.0));
1326         bool skip_fillboundary = true;
1327         for (int i = 0; i < nu1; ++i) {
1328             linop.smooth(amrlev, mglev, cor[amrlev][mglev], res[amrlev][mglev], skip_fillboundary);
1329             skip_fillboundary = false;
1333 computeResOfCorrection(amrlev, mglev);
1338             amrex::Print() << "AT LEVEL " << amrlev << " " << mglev
1339                            << " DN: Norm after smooth " << norm << "\n";
1343 linop.restriction(amrlev, mglev+1, res[amrlev][mglev+1], rescor[amrlev][mglev]);
1352             amrex::Print() << "AT LEVEL " << amrlev << " " << mglev_bottom
1353                            << " DN: Norm before bottom " << norm << "\n";
1358 computeResOfCorrection(amrlev, mglev_bottom);
1360             amrex::Print() << "AT LEVEL " << amrlev << " " << mglev_bottom
1361                            << " UP: Norm after bottom " << norm << "\n";
1369             amrex::Print() << "AT LEVEL " << amrlev << " " << mglev_bottom
1370                            << " Norm before smooth " << norm << "\n";
1372         setVal(cor[amrlev][mglev_bottom], RT(0.0));
1373         bool skip_fillboundary = true;
1374         for (int i = 0; i < nu1; ++i) {
1375             linop.smooth(amrlev, mglev_bottom, cor[amrlev][mglev_bottom],
1376                          res[amrlev][mglev_bottom], skip_fillboundary);
1377             skip_fillboundary = false;
1381 computeResOfCorrection(amrlev, mglev_bottom);
1383             amrex::Print() << "AT LEVEL " << amrlev << " " << mglev_bottom
1384                            << " Norm after smooth " << norm << "\n";
1389     for (int mglev = mglev_bottom-1; mglev >= mglev_top; --mglev)
1391         BL_PROFILE_VAR("MLMG::mgVcycle_up::"+std::to_string(mglev), blp_mgv_up_lev);
1393 addInterpCorrection(amrlev, mglev);
1396 computeResOfCorrection(amrlev, mglev);
1398             amrex::Print() << "AT LEVEL " << amrlev << " " << mglev
1399                            << " UP: Norm before smooth " << norm << "\n";
1401         for (int i = 0; i < nu2; ++i) {
1402 linop.smooth(amrlev, mglev, cor[amrlev][mglev], res[amrlev][mglev]);
1405 if (cf_strategy == CFStrategy::ghostnodes) { computeResOfCorrection(amrlev, mglev); }
1409 computeResOfCorrection(amrlev, mglev);
1411             amrex::Print() << "AT LEVEL " << amrlev << " " << mglev
1412                            << " UP: Norm after smooth " << norm << "\n";
1420 template <typename MF>
1430 const int amrlev = 0;
1431 const int mg_bottom_lev = linop.NMGLevels(amrlev) - 1;
1433     if (cf_strategy == CFStrategy::ghostnodes) { nghost = IntVect(linop.getNGrow(amrlev)); }
1435     for (int mglev = 1; mglev <= mg_bottom_lev; ++mglev)
1437 linop.avgDownResMG(mglev, res[amrlev][mglev], res[amrlev][mglev-1]);
1442     for (int mglev = mg_bottom_lev-1; mglev >= 0; --mglev)
1445 interpCorrection(amrlev, mglev);
1448 computeResOfCorrection(amrlev, mglev);
1450 LocalCopy(res[amrlev][mglev], rescor[amrlev][mglev], 0, 0, ncomp, nghost);
1453 std::swap(cor[amrlev][mglev], cor_hold[amrlev][mglev]);
1454 mgVcycle(amrlev, mglev);
1455 LocalAdd(cor[amrlev][mglev], cor_hold[amrlev][mglev], 0, 0, ncomp, nghost);
1462 template <typename MF>
1468 NSolve(*ns_mlmg, *ns_sol, *ns_rhs);
1472 actualBottomSolve();
1476 template <typename MF>
1484     MF const& res_bottom = res[0].back();
1495                     RT(-1.0), RT(-1.0));
1497 linop.copyNSolveSolution(cor[0].back(), a_sol);
1500 template <typename MF>
1506     if (!linop.isBottomActive()) { return; }
1512 const int amrlev = 0;
1513 const int mglev = linop.NMGLevels(amrlev) - 1;
1514     auto& x = cor[amrlev][mglev];
1515     auto& b = res[amrlev][mglev];
1521         bool skip_fillboundary = true;
1522         for (int i = 0; i < nuf; ++i) {
1523             linop.smooth(amrlev, mglev, x, b, skip_fillboundary);
1524             skip_fillboundary = false;
1531 if (linop.isBottomSingular() && linop.getEnforceSingularSolvable())
1534 raii_b = linop.make(amrlev, mglev, ng);
1538 makeSolvable(amrlev,mglev,*bottom_b);
1543 #if defined(AMREX_USE_HYPRE) && (AMREX_SPACEDIM > 1)
1544 if constexpr (std::is_same<MF,MultiFab>()) {
1545             bottomSolveWithHypre(x, *bottom_b);
1549             amrex::Abort("Using Hypre as bottom solver not supported in this case");
1554 #if defined(AMREX_USE_PETSC) && (AMREX_SPACEDIM > 1)
1555 if constexpr (std::is_same<MF,MultiFab>()) {
1556             bottomSolveWithPETSc(x, *bottom_b);
1560             amrex::Abort("Using PETSc as bottom solver not supported in this case");
1573         int ret = bottomSolveWithCG(x, *bottom_b, cg_type);
1583             setVal(cor[amrlev][mglev], RT(0.0));
1584             ret = bottomSolveWithCG(x, *bottom_b, cg_type);
1595 if (ret != 0 && ret != 9) {
1596             setVal(cor[amrlev][mglev], RT(0.0));
1598         const int n = (ret==0) ? nub : nuf;
1599         for (int i = 0; i < n; ++i) {
1600             linop.smooth(amrlev, mglev, x, b);
1607 if (! timer.empty()) {
1612 template <typename MF>
1621     if (cf_strategy == CFStrategy::ghostnodes) { cg_solver.setNGhost(linop.getNGrow()); }
1623     int ret = cg_solver.solve(x, b, bottom_reltol, bottom_abstol);
1624     if (ret != 0 && verbose > 1) {
1632 template <typename MF>
1638 const int mglev = 0;
1639     for (int alev = amrlevmax; alev >= 0; --alev) {
1640         const MF* crse_bcdata = (alev > 0) ? &(sol[alev-1]) : nullptr;
1641 linop.solutionResidual(alev, res[alev][mglev], sol[alev], rhs[alev], crse_bcdata);
1642 if (alev < finest_amr_lev) {
1643 linop.reflux(alev, res[alev][mglev], sol[alev], rhs[alev],
1644 res[alev+1][mglev], sol[alev+1], rhs[alev+1]);
1650 template <typename MF>
1655     const MF* crse_bcdata = (alev > 0) ? &(sol[alev-1]) : nullptr;
1656 linop.solutionResidual(alev, res[alev][0], sol[alev], rhs[alev], crse_bcdata);
1660 template <typename MF>
1664     BL_PROFILE("MLMG::computeResWithCrseSolFineCor()");
1667     if (cf_strategy == CFStrategy::ghostnodes) {
1668         nghost = IntVect(std::min(linop.getNGrow(falev),linop.getNGrow(calev)));
1671 MF& crse_sol = sol[calev];
1672 const MF& crse_rhs = rhs[calev];
1673 MF& crse_res = res[calev][0];
1675 MF& fine_sol = sol[falev];
1676 const MF& fine_rhs = rhs[falev];
1677 MF& fine_cor = cor[falev][0];
1678 MF& fine_res = res[falev][0];
1679 MF& fine_rescor = rescor[falev][0];
1681     const MF* crse_bcdata = (calev > 0) ? &(sol[calev-1]) : nullptr;
1682 linop.solutionResidual(calev, crse_res, crse_sol, crse_rhs, crse_bcdata);
1684 linop.correctionResidual(falev, 0, fine_rescor, fine_cor, fine_res, BCMode::Homogeneous);
1685 LocalCopy(fine_res, fine_rescor, 0, 0, ncomp, nghost);
1687 linop.reflux(calev, crse_res, crse_sol, crse_rhs, fine_res, fine_sol, fine_rhs);
1689 linop.avgDownResAmr(calev, crse_res, fine_res);
1693 template <typename MF>
1697     BL_PROFILE("MLMG::computeResWithCrseCorFineCor()");
1700     if (cf_strategy == CFStrategy::ghostnodes) {
1701         nghost = IntVect(linop.getNGrow(falev));
1704 const MF& crse_cor = cor[falev-1][0];
1706 MF& fine_cor = cor [falev][0];
1707 MF& fine_res = res [falev][0];
1708 MF& fine_rescor = rescor[falev][0];
1711 linop.correctionResidual(falev, 0, fine_rescor, fine_cor, fine_res,
1712 BCMode::Inhomogeneous, &crse_cor);
1713 LocalCopy(fine_res, fine_rescor, 0, 0, ncomp, nghost);
1717 template <typename MF>
1724     if (cf_strategy == CFStrategy::ghostnodes) {
1725         nghost = IntVect(linop.getNGrow(alev));
1728     MF const& crse_cor = cor[alev-1][0];
1729 MF & fine_cor = cor[alev ][0];
1731 const Geometry& crse_geom = linop.Geom(alev-1,0);
1734 int ng_dst = linop.isCellCentered() ? 1 : 0;
1735 if (cf_strategy == CFStrategy::ghostnodes)
1737 ng_src = linop.getNGrow(alev-1);
1738 ng_dst = linop.getNGrow(alev-1);
1741     MF cfine = linop.makeCoarseAmr(alev, IntVect(ng_dst));
1746 linop.interpolationAmr(alev, fine_cor, cfine, nghost);
1752 template <typename MF>
1758 MF& crse_cor = cor[alev][mglev+1];
1759 MF& fine_cor = cor[alev][mglev ];
1760 linop.interpAssign(alev, mglev, fine_cor, crse_cor);
1764 template <typename MF>
1770 const MF& crse_cor = cor[alev][mglev+1];
1771 MF& fine_cor = cor[alev][mglev ];
1776 if (linop.isMFIterSafe(alev, mglev, mglev+1))
1782         cfine = linop.makeCoarseMG(alev, mglev, IntVect(0));
1787 linop.interpolation(alev, mglev, fine_cor, *cmf);
1794 template <typename MF>
1799     MF&       x = cor[amrlev][mglev];
1800     const MF& b = res[amrlev][mglev];
1801     MF&       r = rescor[amrlev][mglev];
1802     linop.correctionResidual(amrlev, mglev, r, x, b, BCMode::Homogeneous);
1806 template <typename MF>
1811 return linop.normInf(alev, res[alev][0], local);
1815 template <typename MF>
1821     for (int alev = 0; alev <= alevmax; ++alev)
1830 template <typename MF>
1836     for (int alev = 0; alev <= finest_amr_lev; ++alev) {
1837         auto t = linop.normInf(alev, rhs[alev], true);
1844 template <typename MF>
1848     auto const& offset = linop.getSolvabilityOffset(0, 0, rhs[0]);
1850         for (int c = 0; c < ncomp; ++c) {
1855     for (int alev = 0; alev < namrlevs; ++alev) {
1856         linop.fixSolvabilityByOffset(alev, 0, rhs[alev], offset);
1860 template <typename MF>
1864     auto const& offset = linop.getSolvabilityOffset(amrlev, mglev, mf);
1866         for (int c = 0; c < ncomp; ++c) {
1868                            << " from mf component c = " << c
1869                            << " on level (" << amrlev << ", " << mglev << ")\n";
1872     linop.fixSolvabilityByOffset(amrlev, mglev, mf, offset);
1875 #if defined(AMREX_USE_HYPRE) && (AMREX_SPACEDIM > 1)
1876 template <typename MF>
1877 template <class TMF,std::enable_if_t<std::is_same_v<TMF,MultiFab>,int>>
1881 const int amrlev = 0;
1882 const int mglev = linop.NMGLevels(amrlev) - 1;
1886 if (linop.isCellCentered())
1888     if (hypre_solver == nullptr)
1890 hypre_solver = linop.makeHypre(hypre_interface);
1892 hypre_solver->setVerbose(bottom_verbose);
1894 hypre_solver->setHypreOptionsNamespace(hypre_options_namespace);
1896 hypre_solver->setHypreOldDefault(hypre_old_default);
1897 hypre_solver->setHypreRelaxType(hypre_relax_type);
1898 hypre_solver->setHypreRelaxOrder(hypre_relax_order);
1899 hypre_solver->setHypreNumSweeps(hypre_num_sweeps);
1900 hypre_solver->setHypreStrongThreshold(hypre_strong_threshold);
1903 const BoxArray& ba = linop.m_grids[amrlev].back();
1904 const DistributionMapping& dm = linop.m_dmap[amrlev].back();
1905 const Geometry& geom = linop.m_geom[amrlev].back();
1907 hypre_bndry = std::make_unique<MLMGBndryT<MF>>(ba, dm, ncomp, geom);
1908 hypre_bndry->setHomogValues();
1909 const Real* dx = linop.m_geom[0][0].CellSize();
1910         IntVect crse_ratio = linop.m_coarse_data_crse_ratio.allGT(0) ? linop.m_coarse_data_crse_ratio : IntVect(1);
1911         RealVect bclocation(AMREX_D_DECL(0.5*dx[0]*crse_ratio[0],
1912 0.5*dx[1]*crse_ratio[1],
1913 0.5*dx[2]*crse_ratio[2]));
1914         hypre_bndry->setLOBndryConds(linop.m_lobc, linop.m_hibc, IntVect(-1), bclocation,
1915                                      linop.m_coarse_fine_bc_type);
1919 amrex::Real hypre_abstol =
1921 ? bottom_abstol : Real(-1.0);
1922 hypre_solver->solve(
1923 x, b, bottom_reltol, hypre_abstol, bottom_maxiter, *hypre_bndry,
1924 linop.getMaxOrder());
1928     if (hypre_node_solver == nullptr)
1931 linop.makeHypreNodeLap(bottom_verbose, hypre_options_namespace);
1933 hypre_node_solver->solve(x, b, bottom_reltol, bottom_abstol, bottom_maxiter);
1938 if (linop.isSingular(amrlev) && linop.getEnforceSingularSolvable())
1940 makeSolvable(amrlev, mglev, x);
1945 #if defined(AMREX_USE_PETSC) && (AMREX_SPACEDIM > 1)
1946 template <typename MF>
1947 template <class TMF,std::enable_if_t<std::is_same_v<TMF,MultiFab>,int>>
1949 MLMGT<MF>::bottomSolveWithPETSc (MF& x, const MF& b)
1953     if (petsc_solver == nullptr)
1955 petsc_solver = linop.makePETSc();
1956 petsc_solver->setVerbose(bottom_verbose);
1958 const BoxArray& ba = linop.m_grids[0].back();
1959 const DistributionMapping& dm = linop.m_dmap[0].back();
1960 const Geometry& geom = linop.m_geom[0].back();
1962 petsc_bndry = std::make_unique<MLMGBndryT<MF>>(ba, dm, ncomp, geom);
1963 petsc_bndry->setHomogValues();
1964 const Real* dx = linop.m_geom[0][0].CellSize();
1965     auto crse_ratio = linop.m_coarse_data_crse_ratio.allGT(0) ? linop.m_coarse_data_crse_ratio : IntVect(1);
1966     RealVect bclocation(AMREX_D_DECL(0.5*dx[0]*crse_ratio[0],
1967 0.5*dx[1]*crse_ratio[1],
1968 0.5*dx[2]*crse_ratio[2]));
1969     petsc_bndry->setLOBndryConds(linop.m_lobc, linop.m_hibc, IntVect(-1), bclocation,
1970                                  linop.m_coarse_fine_bc_type);
1972 petsc_solver->solve(x, b, bottom_reltol, Real(-1.), bottom_maxiter, *petsc_bndry,
1973 linop.getMaxOrder());
1977 template <typename MF>
1981                          RT a_tol_rel, RT a_tol_abs, const char* a_file_name) const
1983 std::string file_name(a_file_name);
1988         std::string HeaderFileName(std::string(a_file_name)+"/Header");
1989 std::ofstream HeaderFile;
1990 HeaderFile.open(HeaderFileName.c_str(), std::ofstream::out |
1991 std::ofstream::trunc |
1992 std::ofstream::binary);
1993 if( ! HeaderFile.good()) {
1997 HeaderFile.precision(17);
1999         HeaderFile << linop.name() << "\n"
2000                    << "a_tol_rel = " << a_tol_rel << "\n"
2001                    << "a_tol_abs = " << a_tol_abs << "\n"
2002                    << "verbose = " << verbose << "\n"
2003                    << "max_iters = " << max_iters << "\n"
2004                    << "nu1 = " << nu1 << "\n"
2005                    << "nu2 = " << nu2 << "\n"
2006                    << "nuf = " << nuf << "\n"
2007                    << "nub = " << nub << "\n"
2008                    << "max_fmg_iters = " << max_fmg_iters << "\n"
2009                    << "bottom_solver = " << static_cast<int>(bottom_solver) << "\n"
2010                    << "bottom_verbose = " << bottom_verbose << "\n"
2011                    << "bottom_maxiter = " << bottom_maxiter << "\n"
2012                    << "bottom_reltol = " << bottom_reltol << "\n"
2013                    << "always_use_bnorm = " << always_use_bnorm << "\n"
2014                    << "namrlevs = " << namrlevs << "\n"
2015                    << "finest_amr_lev = " << finest_amr_lev << "\n"
2016                    << "linop_prepared = " << linop_prepared << "\n"
2017                    << "solve_called = " << solve_called << "\n";
2019     for (int ilev = 0; ilev <= finest_amr_lev; ++ilev) {
2026     for (int ilev = 0; ilev <= finest_amr_lev; ++ilev) {
2027         VisMF::Write(*a_sol[ilev], file_name+"/Level_"+std::to_string(ilev)+"/sol");
2028         VisMF::Write(*a_rhs[ilev], file_name+"/Level_"+std::to_string(ilev)+"/rhs");
2031     linop.checkPoint(file_name+"/linop");