1 #ifndef AMREX_ML_LINOP_H_
2 #define AMREX_ML_LINOP_H_
3 #include <AMReX_Config.H>
5 #if defined(AMREX_USE_HYPRE) && (AMREX_SPACEDIM > 1)
10 #if defined(AMREX_USE_PETSC) && (AMREX_SPACEDIM > 1)
90 template <typename T> class MLMGT;
91 template <typename T> class MLCGSolverT;
92 template <typename T> class MLPoissonT;
93 template <typename T> class MLABecLaplacianT;
94 template <typename T> class GMRESMLMGT;
96 template <typename MF>
101 template <typename T> friend class MLMGT;
129 bool eb_limit_coarsening = true);
131 [[nodiscard]] virtual std::string name () const { return std::string("Unspecified"); }
201 LinOpBCType bc_type = LinOpBCType::Dirichlet) noexcept;
204 LinOpBCType bc_type = LinOpBCType::Dirichlet) noexcept;
206 template <typename AMF, std::enable_if_t<!std::is_same_v<MF,AMF>, int> = 0>
208 LinOpBCType bc_type = LinOpBCType::Dirichlet) noexcept;
210 template <typename AMF, std::enable_if_t<!std::is_same_v<MF,AMF>, int> = 0>
212 LinOpBCType bc_type = LinOpBCType::Dirichlet) noexcept;
235 const MF* = nullptr) = 0;
237 template <typename AMF, std::enable_if_t<!std::is_same_v<MF,AMF>, int> = 0>
239 const AMF* robinbc_a = nullptr,
240 const AMF* robinbc_b = nullptr,
241 const AMF* robinbc_f = nullptr);
261 [[nodiscard]] virtual int getNComp () const { return 1; }
263 [[nodiscard]] virtual int getNGrow (int = 0, int = 0) const { return 0; }
302 amrex::Abort("MLLinOpT::interpAssign: Must be implemented for FMG cycle");
317 amrex::Abort("MLLinOpT::interpolationAmr: Must be implemented for composite solves across multiple AMR levels");
330 const MF& fine_sol, const MF& fine_rhs)
333 amrex::Abort("MLLinOpT::averageDownSolutionRHS: Must be implemented for composite solves across multiple AMR levels");
347 virtual void apply (int amrlev, int mglev, MF& out, MF& in, BCMode bc_mode,
359 virtual void smooth (int amrlev, int mglev, MF& sol, const MF& rhs,
360 bool skip_fillboundary=false) const = 0;
363 virtual void normalize (int amrlev, int mglev, MF& mf) const {
377 const MF* crse_bcdata=nullptr) = 0;
393 BCMode bc_mode, const MF* crse_bcdata=nullptr) = 0;
407 MF& res, const MF& crse_sol, const MF& crse_rhs,
408 MF& fine_res, MF& fine_sol, const MF& fine_rhs) const
412 amrex::Abort("MLLinOpT::reflux: Must be implemented for composite solves across multiple AMR levels");
426 amrex::Abort("AMReX_MLLinOp::compFlux::How did we get here?");
440 amrex::Abort("AMReX_MLLinOp::compGrad::How did we get here?");
462 MF const& ) const { return {}; }
477 amrex::Warning("This function might need to be implemented for GMRES to work with this LinOp.");
481 [[nodiscard]] virtual bool isSingular (int amrlev) const = 0;
486 virtual RT xdoty (int amrlev, int mglev, const MF& x, const MF& y, bool local) const = 0;
488 virtual std::unique_ptr<MLLinOpT<MF>> makeNLinOp (int ) const
490 amrex::Abort("MLLinOp::makeNLinOp: N-Solve not supported");
497 amrex::Abort("MLLinOp::getFluxes: How did we get here?");
501 amrex::Abort("MLLinOp::getFluxes: How did we get here?");
507 amrex::Abort("MLLinOp::getEBFluxes: How did we get here?");
511 #if defined(AMREX_USE_HYPRE) && (AMREX_SPACEDIM > 1)
513 amrex::Abort("MLLinOp::makeHypre: How did we get here?");
516 [[nodiscard]] virtual std::unique_ptr<HypreNodeLap> makeHypreNodeLap(
518 const std::string& ) const
520 amrex::Abort("MLLinOp::makeHypreNodeLap: How did we get here?");
525 #if defined(AMREX_USE_PETSC) && (AMREX_SPACEDIM > 1)
526 [[nodiscard]] virtual std::unique_ptr<PETScABecLap> makePETSc () const {
527 amrex::Abort("MLLinOp::makePETSc: How did we get here?");
538 [[nodiscard]] virtual RT normInf (int amrlev, MF const& mf, bool local) const = 0;
545 amrex::Abort("MLLinOpT::avgDownResAmr: Must be implemented for composite solves across multiple AMR levels");
551 [[nodiscard]] bool isMFIterSafe (int amrlev, int mglev1, int mglev2) const;
559 [[nodiscard]] const Geometry& Geom (int amr_lev, int mglev=0) const noexcept { return m_geom[amr_lev][mglev]; }
681 [[nodiscard]] virtual MF make (int amrlev, int mglev, IntVect const& ng) const;
683 [[nodiscard]] virtual MF makeAlias (MF const& mf) const;
689 [[nodiscard]] virtual std::unique_ptr<FabFactory<FAB> > makeFactory (int , int ) const {
690 return std::make_unique<DefaultFabFactory<FAB>>();
699 template <typename T>
703 return Array4<T>(a.dataPtr(), {a.begin.y,a.begin.z,0}, {a.end.y,a.end.z,1}, a.nComp());
705 return Array4<T>(a.dataPtr(), {a.begin.x,a.begin.z,0}, {a.end.x,a.end.z,1}, a.nComp());
707 return Array4<T>(a.dataPtr(), {a.begin.x,a.begin.y,0}, {a.end.x,a.end.y,1}, a.nComp());
713 template <typename T>
714 [[nodiscard]] T get_d0 (T const& dx, T const& dy, T const&) const noexcept
723 template <typename T>
724 [[nodiscard]] T get_d1 (T const&, T const& dy, T const& dz) const noexcept
742 int ratio, int strategy);
755 template <typename MF>
762 [[maybe_unused]] bool eb_limit_coarsening)
771 if (info.con_grid_size <= 0) { info.con_grid_size = AMREX_D_PICK(32, 16, 8); }
781 if (!a_factory.empty() && eb_limit_coarsening) {
784 info.max_coarsening_level = std::min(info.max_coarsening_level,
785                                      f->maxCoarseningLevel());
789 defineGrids(a_geom, a_grids, a_dmap, a_factory);
793 template <typename MF>
803 if ( ! a_factory.empty() ) {
805 if (ebf && !(ebf->isAllRegular())) {
806 mg_domain_min_width = 4;
811 m_num_amr_levels = 0;
812 for (int amrlev = 0; amrlev < a_geom.size(); amrlev++) {
813 if (!a_grids[amrlev].empty()) {
818 m_amr_ref_ratio.resize(m_num_amr_levels);
819 m_num_mg_levels.resize(m_num_amr_levels);
821 m_geom.resize(m_num_amr_levels);
822 m_grids.resize(m_num_amr_levels);
823 m_dmap.resize(m_num_amr_levels);
824 m_factory.resize(m_num_amr_levels);
828 const RealBox& rb = a_geom[0].ProbDomain();
829 const int coord = a_geom[0].Coord();
832 IntVect mg_coarsen_ratio_v(mg_coarsen_ratio);
833 IntVect mg_box_min_width_v(mg_box_min_width);
834 IntVect mg_domain_min_width_v(mg_domain_min_width);
835 if (hasHiddenDimension()) {
837 "Hidden direction only supported for 3d level solve");
838 mg_coarsen_ratio_v[info.hidden_direction] = 1;
839 mg_box_min_width_v[info.hidden_direction] = 0;
840 mg_domain_min_width_v[info.hidden_direction] = 0;
844 for (int amrlev = m_num_amr_levels-1; amrlev > 0; --amrlev)
846 m_num_mg_levels[amrlev] = 1;
847 m_geom[amrlev].push_back(a_geom[amrlev]);
848 m_grids[amrlev].push_back(a_grids[amrlev]);
849 m_dmap[amrlev].push_back(a_dmap[amrlev]);
850 if (amrlev < a_factory.size()) {
851 m_factory[amrlev].emplace_back(a_factory[amrlev]->clone());
856 IntVect rr = mg_coarsen_ratio_v;
857 const Box& dom = a_geom[amrlev].Domain();
858 for (int i = 0; i < 2; ++i)
863 if (cdom == a_geom[amrlev-1].Domain()) { break; }
865 ++(m_num_mg_levels[amrlev]);
867 m_geom[amrlev].emplace_back(cdom, rb, coord, is_per);
869 m_grids[amrlev].push_back(a_grids[amrlev]);
871 m_grids[amrlev].back().coarsen(rr);
873 m_dmap[amrlev].push_back(a_dmap[amrlev]);
875 rr *= mg_coarsen_ratio_v;
878 #if (AMREX_SPACEDIM > 1)
879 if (hasHiddenDimension()) {
880 m_amr_ref_ratio[amrlev-1] = rr[AMREX_SPACEDIM-info.hidden_direction];
884 m_amr_ref_ratio[amrlev-1] = rr[0];
889 m_num_mg_levels[0] = 1;
890 m_geom[0].push_back(a_geom[0]);
891 m_grids[0].push_back(a_grids[0]);
892 m_dmap[0].push_back(a_dmap[0]);
893 if (!a_factory.empty()) {
894 m_factory[0].emplace_back(a_factory[0]->clone());
899 m_domain_covered.resize(m_num_amr_levels, false);
900 auto npts0 = m_grids[0][0].numPts();
901 m_domain_covered[0] = (npts0 == compactify(m_geom[0][0].Domain()).numPts());
902 for (int amrlev = 1; amrlev < m_num_amr_levels; ++amrlev)
904 if (!m_domain_covered[amrlev-1]) { break; }
905 m_domain_covered[amrlev] = (m_grids[amrlev][0].numPts() ==
906 compactify(m_geom[amrlev][0].Domain()).numPts());
910 bool aggable = false;
912 if (m_grids[0][0].size() > 1 && info.do_agglomeration)
914 if (m_domain_covered[0])
916 aggbox = m_geom[0][0].Domain();
917 if (hasHiddenDimension()) {
918 aggbox.makeSlab(hiddenDirection(), m_grids[0][0][0].smallEnd(hiddenDirection()));
924 aggbox = m_grids[0][0].minimalBox();
925 aggable = (aggbox.numPts() == npts0);
931 int agg_lev = 0, con_lev = 0;
934 && info.semicoarsening_direction >= -1
935 && info.semicoarsening_direction < AMREX_SPACEDIM );
937 if (info.do_agglomeration && aggable)
939 Box dbx = m_geom[0][0].Domain();
941 Real const nbxs = static_cast<Real>(m_grids[0][0].size());
942 Real const threshold_npts = static_cast<Real>(AMREX_D_TERM(info.agg_grid_size,
944                                                             *info.agg_grid_size));
951 for (int lev = 0; lev < info.max_coarsening_level; ++lev)
953 IntVect rr_level = mg_coarsen_ratio_v;
954 bool const do_semicoarsening_level = info.do_semicoarsening
955 && numsclevs < info.max_semicoarsening_level;
956 if (do_semicoarsening_level
957 && info.semicoarsening_direction != -1)
959 rr_level[info.semicoarsening_direction] = 1;
962 for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
964 rr_dir[idim] = rr_level[idim];
965 is_coarsenable[idim] = dbx.coarsenable(rr_dir, mg_domain_min_width_v)
967 if (!is_coarsenable[idim] && do_semicoarsening_level
968 && info.semicoarsening_direction == -1)
970 is_coarsenable[idim] = true;
977 if (do_semicoarsening_level && info.semicoarsening_direction == -1) {
979 int n_ones = AMREX_D_TERM(  static_cast<int>(rr_level[0] == 1),
980                           + static_cast<int>(rr_level[1] == 1),
981                           + static_cast<int>(rr_level[2] == 1));
982 if (n_ones > 1) { break; }
984 if (rr_level != mg_coarsen_ratio_v) {
988 accum_coarsen_ratio.push_back(accum_coarsen_ratio.back()*rr_level);
989 domainboxes.push_back(dbx.coarsen(rr_level));
990 boundboxes.push_back(bbx.coarsen(rr_level));
991 bool to_agg = (bbx.d_numPts() / nbxs) < 0.999*threshold_npts;
992 agg_flag.push_back(to_agg);
995 for (int lev = 1, nlevs = static_cast<int>(domainboxes.size()); lev < nlevs; ++lev) {
996 if (!agged && !agg_flag[lev] &&
997 a_grids[0].coarsenable(accum_coarsen_ratio[lev], mg_box_min_width_v))
999 m_grids[0].push_back(amrex::coarsen(a_grids[0], accum_coarsen_ratio[lev]));
1000 m_dmap[0].push_back(a_dmap[0]);
1002 IntVect cr = domainboxes[lev-1].length() / domainboxes[lev].length();
1003 if (!m_grids[0].back().coarsenable(cr)) {
1006 m_grids[0].emplace_back(boundboxes[lev]);
1008 if (info.do_semicoarsening && info.max_semicoarsening_level >= lev
1009 && info.semicoarsening_direction != -1)
1025 m_geom[0].emplace_back(domainboxes[lev],rb,coord,is_per);
1030 Long consolidation_threshold = 0;
1031 Real avg_npts = 0.0;
1032 if (info.do_consolidation) {
1034 consolidation_threshold = AMREX_D_TERM(Long(info.con_grid_size),
1035 *info.con_grid_size,
1036 *info.con_grid_size);
1039 Box const& dom0 = a_geom[0].Domain();
1042 for (int lev = 0; lev < info.max_coarsening_level; ++lev)
1044 IntVect rr_level = mg_coarsen_ratio_v;
1045 bool do_semicoarsening_level = info.do_semicoarsening
1046 && numsclevs < info.max_semicoarsening_level;
1047 if (do_semicoarsening_level
1048 && info.semicoarsening_direction != -1)
1050 rr_level[info.semicoarsening_direction] = 1;
1053 for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
1055 rr_dir[idim] = rr_vec[idim] * rr_level[idim];
1056 is_coarsenable[idim] = dom0.coarsenable(rr_dir, mg_domain_min_width_v)
1057 && a_grids[0].coarsenable(rr_dir, mg_box_min_width_v);
1058 if (!is_coarsenable[idim] && do_semicoarsening_level
1059 && info.semicoarsening_direction == -1)
1061 is_coarsenable[idim] = true;
1068 if (do_semicoarsening_level && info.semicoarsening_direction == -1) {
1070 int n_ones = AMREX_D_TERM(  static_cast<int>(rr_level[0] == 1),
1071                           + static_cast<int>(rr_level[1] == 1),
1072                           + static_cast<int>(rr_level[2] == 1));
1073 if (n_ones > 1) { break; }
1075 if (rr_level != mg_coarsen_ratio_v) {
1080 m_geom[0].emplace_back(amrex::coarsen(dom0, rr_vec), rb, coord, is_per);
1083 if (info.do_consolidation)
1085 if (avg_npts/static_cast<Real>(AMREX_D_TERM(rr_vec[0], *rr_vec[1], *rr_vec[2]))
1086     < Real(0.999)*static_cast<Real>(consolidation_threshold))
1089 con_lev = m_dmap[0].size();
1094 m_dmap[0].push_back(m_dmap[0].back());
1099 m_dmap[0].push_back(a_dmap[0]);
1104 m_num_mg_levels[0] = m_grids[0].size();
1106 for (int mglev = 0; mglev < m_num_mg_levels[0] - 1; mglev++) {
1107 const Box& fine_domain = m_geom[0][mglev].Domain();
1108 const Box& crse_domain = m_geom[0][mglev+1].Domain();
1109 mg_coarsen_ratio_vec.push_back(fine_domain.length()/crse_domain.length());
1112 for (int amrlev = 0; amrlev < m_num_amr_levels; ++amrlev) {
1113 if (AMRRefRatio(amrlev) == 4 && mg_coarsen_ratio_vec.empty()) {
1114 mg_coarsen_ratio_vec.push_back(IntVect(2));
1120 makeAgglomeratedDMap(m_grids[0], m_dmap[0]);
1124 makeConsolidatedDMap(m_grids[0], m_dmap[0], info.con_ratio, info.con_strategy);
1129 m_bottom_comm = makeSubCommunicator(m_dmap[0].back());
1133 m_bottom_comm = m_default_comm;
1136 m_do_agglomeration = agged;
1137 m_do_consolidation = coned;
1141 Print() << "MLLinOp::defineGrids(): agglomerated AMR level 0 starting at MG level "
1142          << agg_lev << " of " << m_num_mg_levels[0] << "\n";
1144 Print() << "MLLinOp::defineGrids(): consolidated AMR level 0 starting at MG level "
1145          << con_lev << " of " << m_num_mg_levels[0]
1146          << " (ratio = " << info.con_ratio << ")" << "\n";
1148 Print() << "MLLinOp::defineGrids(): no agglomeration or consolidation of AMR level 0\n";
1152 for (int amrlev = 0; amrlev < m_num_amr_levels; ++amrlev)
1154 for (int mglev = 1; mglev < m_num_mg_levels[amrlev]; ++mglev)
1156 m_factory[amrlev].emplace_back(makeFactory(amrlev,mglev));
1160 for (int amrlev = 1; amrlev < m_num_amr_levels; ++amrlev)
1163 "MLLinOp: grids not coarsenable between AMR levels");
1167 template <typename MF>
1171 m_needs_coarse_data_for_bc = !m_domain_covered[0];
1173 levelbc_raii.resize(m_num_amr_levels);
1174 robin_a_raii.resize(m_num_amr_levels);
1175 robin_b_raii.resize(m_num_amr_levels);
1176 robin_f_raii.resize(m_num_amr_levels);
1179 template <typename MF>
1184 const int ncomp = getNComp();
1189 template <typename MF>
1194 const int ncomp = getNComp();
1196 "MLLinOp::setDomainBC: wrong size");
1199 m_lobc_orig = m_lobc;
1200 m_hibc_orig = m_hibc;
1201 for (int icomp = 0; icomp < ncomp; ++icomp) {
1202 for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
1203 if (m_geom[0][0].isPeriodic(idim)) {
1205 m_hibc[icomp][idim] == BCType::Periodic);
1208 m_hibc[icomp][idim] != BCType::Periodic);
1211 if (m_lobc[icomp][idim] == LinOpBCType::inhomogNeumann ||
1212 m_lobc[icomp][idim] == LinOpBCType::Robin)
1214 m_lobc[icomp][idim] = LinOpBCType::Neumann;
1217 if (m_hibc[icomp][idim] == LinOpBCType::inhomogNeumann ||
1218 m_hibc[icomp][idim] == LinOpBCType::Robin)
1220 m_hibc[icomp][idim] = LinOpBCType::Neumann;
1225 if (hasHiddenDimension()) {
1226 const int hd = hiddenDirection();
1227 for (int n = 0; n < ncomp; ++n) {
1228 m_lobc[n][hd] = LinOpBCType::Neumann;
1229 m_hibc[n][hd] = LinOpBCType::Neumann;
1233 if (hasInhomogNeumannBC() && !supportInhomogNeumannBC()) {
1234 amrex::Abort("Inhomogeneous Neumann BC not supported");
1236 if (hasRobinBC() && !supportRobinBC()) {
1241 template <typename MF>
1245 int ncomp = m_lobc_orig.size();
1246 for (int n = 0; n < ncomp; ++n) {
1247 for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
1248 if (m_lobc_orig[n][idim] == bct || m_hibc_orig[n][idim] == bct) {
1256 template <typename MF>
1260 return hasBC(BCType::inhomogNeumann);
1263 template <typename MF>
1267 return hasBC(BCType::Robin);
1270 template <typename MF>
1274 #if (AMREX_SPACEDIM == 3)
1275 if (info.hasHiddenDimension()) {
1276 const auto& lo = b.smallEnd();
1277 const auto& hi = b.bigEnd();
1278 if (info.hidden_direction == 0) {
1280 } else if (info.hidden_direction == 1) {
1292 template <typename MF>
1300 for (int i = 1, N=static_cast<int>(ba.size()); i < N; ++i)
1310 for (int iproc = 0; iproc < nprocs; ++iproc) {
1312 for (int ibox : sfc[iproc]) {
1316 dm[i].define(std::move(pmap));
1321 template <typename MF>
1325 int ratio, int strategy)
1327 BL_PROFILE("MLLinOp::makeConsolidatedDMap()");
1331 for (int i = 1, N=static_cast<int>(ba.size()); i < N; ++i)
1338 const auto& pmap_fine = dm[i-1].ProcessorMap();
1341 if (strategy == 1) {
1342 for (auto& x: pmap) {
1345 } else if (strategy == 2) {
1346 int nprocs_con = static_cast<int>(std::ceil(static_cast<Real>(nprocs)
1347                                             / static_cast<Real>(factor)));
1348 for (auto& x: pmap) {
1349 auto d = std::div(x,nprocs_con);
1352 } else if (strategy == 3) {
1353 if (factor == ratio) {
1355 for (int iproc = 0; iproc < nprocs; ++iproc) {
1356 for (int ibox : sfc[iproc]) {
1361 for (auto& x: pmap) {
1367 dm[i].define(std::move(pmap));
1371 dm[i].define(std::move(pmap_g));
1377 template <typename MF>
1381 BL_PROFILE("MLLinOp::makeSubCommunicator()");
1386 std::sort(newgrp_ranks.begin(), newgrp_ranks.end());
1387 auto last = std::unique(newgrp_ranks.begin(), newgrp_ranks.end());
1388 newgrp_ranks.erase(last, newgrp_ranks.end());
1392 MPI_Comm_group(m_default_comm, &defgrp);
1394 MPI_Group_incl(defgrp, static_cast<int>(newgrp_ranks.size()), newgrp_ranks.data(), &newgrp);
1398 newgrp_ranks.data(), static_cast<int>(newgrp_ranks.size()));
1399 MPI_Group_incl(defgrp, static_cast<int>(local_newgrp_ranks.size()), local_newgrp_ranks.data(), &newgrp);
1402 MPI_Comm_create(m_default_comm, newgrp, &newcomm);
1404 m_raii_comm = std::make_unique<CommContainer>(newcomm);
1406 MPI_Group_free(&defgrp);
1407 MPI_Group_free(&newgrp);
1412 return m_default_comm;
1416 template <typename MF>
1421 m_domain_bloc_lo = lo_bcloc;
1422 m_domain_bloc_hi = hi_bcloc;
1425 template <typename MF>
1428 LinOpBCType bc_type) noexcept
1430 setCoarseFineBC(crse, IntVect(crse_ratio), bc_type);
1433 template <typename MF>
1436 LinOpBCType bc_type) noexcept
1438 m_coarse_data_for_bc = crse;
1439 m_coarse_data_crse_ratio = crse_ratio;
1440 m_coarse_fine_bc_type = bc_type;
1443 template <typename MF>
1444 template <typename AMF, std::enable_if_t<!std::is_same_v<MF,AMF>, int>>
1447 LinOpBCType bc_type) noexcept
1449 setCoarseFineBC(crse, IntVect(crse_ratio), bc_type);
1452 template <typename MF>
1453 template <typename AMF, std::enable_if_t<!std::is_same_v<MF,AMF>, int>>
1456 LinOpBCType bc_type) noexcept
1458 m_coarse_data_for_bc_raii = MF(crse->boxArray(), crse->DistributionMap(),
1460 m_coarse_data_for_bc_raii.LocalCopy(*crse, 0, 0, crse->nComp(),
1462 m_coarse_data_for_bc = &m_coarse_data_for_bc_raii;
1463 m_coarse_data_crse_ratio = crse_ratio;
1464 m_coarse_fine_bc_type = bc_type;
1467 template <typename MF>
1472 mf.resize(m_num_amr_levels);
1473 for (int alev = 0; alev < m_num_amr_levels; ++alev) {
1474 mf[alev].resize(m_num_mg_levels[alev]);
1475 for (int mlev = 0; mlev < m_num_mg_levels[alev]; ++mlev) {
1476 mf[alev][mlev] = make(alev, mlev, ng);
1481 template <typename MF>
1485 if constexpr (IsMultiFabLike_v<MF>) {
1487 m_dmap[amrlev][mglev], getNComp(), ng, MFInfo(),
1488 *m_factory[amrlev][mglev]);
1496 template <typename MF>
1500 if constexpr (IsMultiFabLike_v<MF>) {
1504 amrex::Abort("MLLinOpT::makeAlias: how did we get here?");
1509 template <typename MF>
1513 if constexpr (IsMultiFabLike_v<MF>) {
1514 BoxArray cba = m_grids[amrlev][mglev];
1515 IntVect ratio = (amrlev > 0) ? IntVect(2) : mg_coarsen_ratio_vec[mglev];
1518 return MF(cba, m_dmap[amrlev][mglev], getNComp(), ng);
1521 amrex::Abort("MLLinOpT::makeCoarseMG: how did we get here?");
1526 template <typename MF>
1530 if constexpr (IsMultiFabLike_v<MF>) {
1531 BoxArray cba = m_grids[famrlev][0];
1532 IntVect ratio(AMRRefRatio(famrlev-1));
1535 return MF(cba, m_dmap[famrlev][0], getNComp(), ng);
1538 amrex::Abort("MLLinOpT::makeCoarseAmr: how did we get here?");
1543 template <typename MF>
1547 if (new_size <= 0 || new_size >= m_num_mg_levels[0]) { return; }
1549 m_num_mg_levels[0] = new_size;
1551 m_geom[0].resize(new_size);
1552 m_grids[0].resize(new_size);
1553 m_dmap[0].resize(new_size);
1554 m_factory[0].resize(new_size);
1556 if (m_bottom_comm != m_default_comm) {
1557 m_bottom_comm = makeSubCommunicator(m_dmap[0].back());
1561 template <typename MF>
1567 const int ncomp = this->getNComp();
1569 if (!fres.isAllRegular()) {
1570 if constexpr (std::is_same<MF,MultiFab>()) {
1572 mg_coarsen_ratio_vec[clev-1]);
1574 amrex::Abort("EB_average_down only works with MultiFab");
1582 amrex::Abort("For non-FabArray, MLLinOpT<MF>::avgDownResMG should be overridden.");
1586 template <typename MF>
1590 return m_dmap[amrlev][mglev1] == m_dmap[amrlev][mglev2]
1594 template <typename MF>
1595 template <typename AMF, std::enable_if_t<!std::is_same_v<MF,AMF>, int>>
1598 const AMF* robinbc_a, const AMF* robinbc_b,
1599 const AMF* robinbc_f)
1601 const int ncomp = this->getNComp();
1603 levelbc_raii[amrlev] = std::make_unique<MF>(levelbcdata->boxArray(),
1604 levelbcdata->DistributionMap(),
1605 ncomp, levelbcdata->nGrowVect());
1606 levelbc_raii[amrlev]->LocalCopy(*levelbcdata, 0, 0, ncomp,
1607 levelbcdata->nGrowVect());
1609 levelbc_raii[amrlev].reset();
1613 robin_a_raii[amrlev] = std::make_unique<MF>(robinbc_a->boxArray(),
1614 robinbc_a->DistributionMap(),
1615 ncomp, robinbc_a->nGrowVect());
1616 robin_a_raii[amrlev]->LocalCopy(*robinbc_a, 0, 0, ncomp,
1617 robinbc_a->nGrowVect());
1619 robin_a_raii[amrlev].reset();
1623 robin_b_raii[amrlev] = std::make_unique<MF>(robinbc_b->boxArray(),
1624 robinbc_b->DistributionMap(),
1625 ncomp, robinbc_b->nGrowVect());
1626 robin_b_raii[amrlev]->LocalCopy(*robinbc_b, 0, 0, ncomp,
1627 robinbc_b->nGrowVect());
1629 robin_b_raii[amrlev].reset();
1633 robin_f_raii[amrlev] = std::make_unique<MF>(robinbc_f->boxArray(),
1634 robinbc_f->DistributionMap(),
1635 ncomp, robinbc_f->nGrowVect());
1636 robin_f_raii[amrlev]->LocalCopy(*robinbc_f, 0, 0, ncomp,
1637 robinbc_f->nGrowVect());
1639 robin_f_raii[amrlev].reset();
1642 this->setLevelBC(amrlev, levelbc_raii[amrlev].get(), robin_a_raii[amrlev].get(),
1643                  robin_b_raii[amrlev].get(), robin_f_raii[amrlev].get());
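For orientation, the sketch below shows how an operator derived from MLLinOpT is typically configured and handed to MLMG. It is a minimal, hedged single-level example assuming an MLABecLaplacian solve; the names geom, grids, dmap, phi, rhs, acoef, and bcoef are placeholders for the caller's own data, not identifiers from this header.

// Minimal usage sketch (not part of this header). Assumes a single-level
// MLABecLaplacian solve; geom, grids, dmap, phi, rhs, acoef, bcoef are
// placeholders for the caller's data.
#include <AMReX_MLABecLaplacian.H>
#include <AMReX_MLMG.H>

void solve_poisson_like (const amrex::Geometry& geom,
                         const amrex::BoxArray& grids,
                         const amrex::DistributionMapping& dmap,
                         amrex::MultiFab& phi,
                         const amrex::MultiFab& rhs,
                         const amrex::MultiFab& acoef,
                         const amrex::Array<amrex::MultiFab const*,AMREX_SPACEDIM>& bcoef)
{
    amrex::LPInfo info;
    info.setAgglomeration(true).setConsolidation(true);

    amrex::MLABecLaplacian mlabec({geom}, {grids}, {dmap}, info);

    // Domain boundary conditions (see setDomainBC in the listing above).
    mlabec.setDomainBC({AMREX_D_DECL(amrex::LinOpBCType::Dirichlet,
                                     amrex::LinOpBCType::Dirichlet,
                                     amrex::LinOpBCType::Dirichlet)},
                       {AMREX_D_DECL(amrex::LinOpBCType::Dirichlet,
                                     amrex::LinOpBCType::Dirichlet,
                                     amrex::LinOpBCType::Dirichlet)});

    // Level boundary data; nullptr means homogeneous Dirichlet values.
    mlabec.setLevelBC(0, nullptr);

    // Operator is alpha*a*phi - beta*div(b*grad(phi)).
    mlabec.setScalars(1.0, 1.0);
    mlabec.setACoeffs(0, acoef);
    mlabec.setBCoeffs(0, bcoef);

    amrex::MLMG mlmg(mlabec);
    mlmg.setVerbose(1);
    mlmg.solve({&phi}, {&rhs}, 1.e-10, 0.0);
}

Other MLLinOpT subclasses such as MLPoissonT follow the same pattern: set domain and level boundary conditions on the operator, then pass it to MLMGT (or GMRESMLMGT) to solve.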
#define BL_PROFILE(a)
Definition: AMReX_BLProfiler.H:551
#define BL_ASSERT(EX)
Definition: AMReX_BLassert.H:39
#define AMREX_ASSERT_WITH_MESSAGE(EX, MSG)
Definition: AMReX_BLassert.H:37
#define AMREX_ASSERT(EX)
Definition: AMReX_BLassert.H:38
#define AMREX_ALWAYS_ASSERT(EX)
Definition: AMReX_BLassert.H:50
Array4< Real > fine
Definition: AMReX_InterpFaceRegister.cpp:90
Array4< Real const > crse
Definition: AMReX_InterpFaceRegister.cpp:92
#define AMREX_D_TERM(a, b, c)
Definition: AMReX_SPACE.H:129
#define AMREX_D_PICK(a, b, c)
Definition: AMReX_SPACE.H:151
#define AMREX_D_DECL(a, b, c)
Definition: AMReX_SPACE.H:104
int MPI_Comm
Definition: AMReX_ccse-mpi.H:47
int MPI_Group
Definition: AMReX_ccse-mpi.H:48
static constexpr int MPI_COMM_NULL
Definition: AMReX_ccse-mpi.H:55
A collection of Boxes stored in an Array.
Definition: AMReX_BoxArray.H:549
BoxArray & coarsen(int refinement_ratio)
Coarsen each Box in the BoxArray to the specified ratio.
static bool SameRefs(const BoxArray &lhs, const BoxArray &rhs)
whether two BoxArrays share the same data
Definition: AMReX_BoxArray.H:819
BoxArray & convert(IndexType typ)
Apply Box::convert(IndexType) to each Box in the BoxArray.
AMREX_GPU_HOST_DEVICE double d_numPts() const noexcept
Returns the number of points contained in the BoxND. This is intended for use only in diagnostic mess...
Definition: AMReX_Box.H:366
AMREX_GPU_HOST_DEVICE BoxND & makeSlab(int direction, int slab_index) noexcept
Definition: AMReX_Box.H:779
AMREX_GPU_HOST_DEVICE IntVectND< dim > length() const noexcept
Return the length of the BoxND.
Definition: AMReX_Box.H:146
AMREX_GPU_HOST_DEVICE bool coarsenable(const IntVectND< dim > &refrat, const IntVectND< dim > &min_width) const noexcept
Definition: AMReX_Box.H:745
AMREX_GPU_HOST_DEVICE BoxND & coarsen(int ref_ratio) noexcept
Coarsen BoxND by given (positive) refinement ratio. NOTE: if type(dir) = CELL centered: lo <- lo/rati...
Definition: AMReX_Box.H:708
AMREX_GPU_HOST_DEVICE Long numPts() const noexcept
Returns the number of points contained in the BoxND.
Definition: AMReX_Box.H:346
Definition: AMReX_FabFactory.H:76
Calculates the distribution of FABs to MPI processes.
Definition: AMReX_DistributionMapping.H:41
static DistributionMapping makeSFC(const MultiFab &weight, bool sort=true)
Definition: AMReX_DistributionMapping.cpp:1757
const Vector< int > & ProcessorMap() const noexcept
Returns a constant reference to the mapping of boxes in the underlying BoxArray to the CPU that holds...
Definition: AMReX_DistributionMapping.cpp:47
Definition: AMReX_EBFabFactory.H:22
Definition: AMReX_FabFactory.H:50
Solve using GMRES with multigrid as preconditioner.
Definition: AMReX_GMRES_MLMG.H:20
Rectangular problem domain geometry.
Definition: AMReX_Geometry.H:73
Interface
Definition: AMReX_Hypre.H:21
AMREX_GPU_HOST_DEVICE static constexpr AMREX_FORCE_INLINE std::size_t size() noexcept
Definition: AMReX_IntVect.H:725
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE IntVectND< new_dim > resize(int fill_extra=0) const noexcept
Returns a new IntVectND of size new_dim by either shrinking or expanding this IntVectND.
Definition: AMReX_IntVect.H:768
Definition: AMReX_MLABecLaplacian.H:15
Definition: AMReX_MLCGSolver.H:12
Definition: AMReX_MLLinOp.H:98
const Geometry & Geom(int amr_lev, int mglev=0) const noexcept
Definition: AMReX_MLLinOp.H:559
const MF * m_coarse_data_for_bc
Definition: AMReX_MLLinOp.H:628
virtual void smooth(int amrlev, int mglev, MF &sol, const MF &rhs, bool skip_fillboundary=false) const =0
Smooth.
void setDomainBC(const Array< BCType, AMREX_SPACEDIM > &lobc, const Array< BCType, AMREX_SPACEDIM > &hibc) noexcept
Boundary of the whole domain.
Definition: AMReX_MLLinOp.H:1181
Vector< Vector< std::unique_ptr< FabFactory< FAB > > > > m_factory
Definition: AMReX_MLLinOp.H:601
virtual void avgDownResMG(int clev, MF &cres, MF const &fres) const
Definition: AMReX_MLLinOp.H:1563
int NAMRLevels() const noexcept
Return the number of AMR levels.
Definition: AMReX_MLLinOp.H:554
bool m_do_consolidation
Definition: AMReX_MLLinOp.H:592
virtual void compGrad(int, const Array< MF *, AMREX_SPACEDIM > &, MF &, Location) const
Compute gradients of the solution.
Definition: AMReX_MLLinOp.H:437
bool isCellCentered() const noexcept
Definition: AMReX_MLLinOp.H:673
const Vector< int > & AMRRefRatio() const noexcept
Return AMR refinement ratios.
Definition: AMReX_MLLinOp.H:632
IntVect m_ixtype
Definition: AMReX_MLLinOp.H:589
virtual std::unique_ptr< MLLinOpT< MF > > makeNLinOp(int) const
Definition: AMReX_MLLinOp.H:488
void setVerbose(int v) noexcept
Set verbosity.
Definition: AMReX_MLLinOp.H:244
Array< Real, AMREX_SPACEDIM > m_domain_bloc_hi
Definition: AMReX_MLLinOp.H:622
bool isMFIterSafe(int amrlev, int mglev1, int mglev2) const
Definition: AMReX_MLLinOp.H:1588
RealVect m_coarse_bc_loc
Definition: AMReX_MLLinOp.H:627
virtual bool needsUpdate() const
Does it need update if it's reused?
Definition: AMReX_MLLinOp.H:266
virtual void interpolation(int amrlev, int fmglev, MF &fine, const MF &crse) const =0
Add interpolated coarse MG level data to fine MG level data.
virtual void setLevelBC(int, const MF *, const MF *=nullptr, const MF *=nullptr, const MF *=nullptr)=0
Set boundary conditions for given level. For cell-centered solves only.
virtual MF make(int amrlev, int mglev, IntVect const &ng) const
Definition: AMReX_MLLinOp.H:1483
T get_d0(T const &dx, T const &dy, T const &) const noexcept
Definition: AMReX_MLLinOp.H:714
MPI_Comm BottomCommunicator() const noexcept
Definition: AMReX_MLLinOp.H:664
void setEnforceSingularSolvable(bool o) noexcept
Definition: AMReX_MLLinOp.H:253
MPI_Comm Communicator() const noexcept
Definition: AMReX_MLLinOp.H:665
virtual void getFluxes(const Vector< Array< MF *, AMREX_SPACEDIM > > &, const Vector< MF * > &, Location) const
Definition: AMReX_MLLinOp.H:494
int mg_domain_min_width
Definition: AMReX_MLLinOp.H:573
void setMaxOrder(int o) noexcept
Set order of interpolation at coarse/fine boundary.
Definition: AMReX_MLLinOp.H:247
Vector< Array< BCType, AMREX_SPACEDIM > > m_hibc_orig
Definition: AMReX_MLLinOp.H:567
virtual void interpAssign(int amrlev, int fmglev, MF &fine, MF &crse) const
Overwrite fine MG level data with interpolated coarse data.
Definition: AMReX_MLLinOp.H:299
virtual std::string name() const
Definition: AMReX_MLLinOp.H:131
void setCoarseFineBC(const AMF *crse, IntVect const &crse_ratio, LinOpBCType bc_type=LinOpBCType::Dirichlet) noexcept
Definition: AMReX_MLLinOp.H:1455
bool doAgglomeration() const noexcept
Definition: AMReX_MLLinOp.H:669
Vector< std::unique_ptr< MF > > levelbc_raii
Definition: AMReX_MLLinOp.H:749
MF m_coarse_data_for_bc_raii
Definition: AMReX_MLLinOp.H:629
virtual void setDirichletNodesToZero(int, int, MF &) const
Definition: AMReX_MLLinOp.H:474
FabFactory< FAB > const * Factory(int amr_lev, int mglev=0) const noexcept
Definition: AMReX_MLLinOp.H:637
std::unique_ptr< CommContainer > m_raii_comm
Definition: AMReX_MLLinOp.H:619
bool m_do_semicoarsening
Definition: AMReX_MLLinOp.H:594
Array4< T > compactify(Array4< T > const &a) const noexcept
Definition: AMReX_MLLinOp.H:700
bool hasRobinBC() const noexcept
Definition: AMReX_MLLinOp.H:1265
Array< Real, AMREX_SPACEDIM > m_domain_bloc_lo
Definition: AMReX_MLLinOp.H:621
virtual void resizeMultiGrid(int new_size)
Definition: AMReX_MLLinOp.H:1545
Vector< Vector< BoxArray > > m_grids
Definition: AMReX_MLLinOp.H:599
virtual MF makeCoarseAmr(int famrlev, IntVect const &ng) const
Definition: AMReX_MLLinOp.H:1528
Vector< std::unique_ptr< MF > > robin_f_raii
Definition: AMReX_MLLinOp.H:752
bool m_do_agglomeration
Definition: AMReX_MLLinOp.H:591
void setCoarseFineBC(const AMF *crse, int crse_ratio, LinOpBCType bc_type=LinOpBCType::Dirichlet) noexcept
Definition: AMReX_MLLinOp.H:1446
virtual MF makeAlias(MF const &mf) const
Definition: AMReX_MLLinOp.H:1498
virtual std::unique_ptr< FabFactory< FAB > > makeFactory(int, int) const
Definition: AMReX_MLLinOp.H:689
void setDomainBC(const Vector< Array< BCType, AMREX_SPACEDIM > > &lobc, const Vector< Array< BCType, AMREX_SPACEDIM > > &hibc) noexcept
Boundary of the whole domain.
Definition: AMReX_MLLinOp.H:1191
static constexpr int mg_coarsen_ratio
Definition: AMReX_MLLinOp.H:571
virtual void copyNSolveSolution(MF &, MF const &) const
Definition: AMReX_MLLinOp.H:534
virtual void solutionResidual(int amrlev, MF &resid, MF &x, const MF &b, const MF *crse_bcdata=nullptr)=0
Compute residual for solution.
int getMaxOrder() const noexcept
Get order of interpolation at coarse/fine boundary.
Definition: AMReX_MLLinOp.H:249
virtual int getNComp() const
Return number of components.
Definition: AMReX_MLLinOp.H:261
virtual void getFluxes(const Vector< MF * > &, const Vector< MF * > &) const
Definition: AMReX_MLLinOp.H:499
void defineBC()
Definition: AMReX_MLLinOp.H:1169
void setCoarseFineBCLocation(const RealVect &cloc) noexcept
Definition: AMReX_MLLinOp.H:667
Vector< int > m_amr_ref_ratio
Definition: AMReX_MLLinOp.H:584
MPI_Comm m_default_comm
Definition: AMReX_MLLinOp.H:604
static void makeAgglomeratedDMap(const Vector< BoxArray > &ba, Vector< DistributionMapping > &dm)
Definition: AMReX_MLLinOp.H:1294
bool isBottomActive() const noexcept
Definition: AMReX_MLLinOp.H:662
void defineGrids(const Vector< Geometry > &a_geom, const Vector< BoxArray > &a_grids, const Vector< DistributionMapping > &a_dmap, const Vector< FabFactory< FAB > const * > &a_factory)
Definition: AMReX_MLLinOp.H:795
virtual BottomSolver getDefaultBottomSolver() const
Definition: AMReX_MLLinOp.H:258
GpuArray< BCType, AMREX_SPACEDIM > LoBC(int icomp=0) const noexcept
Definition: AMReX_MLLinOp.H:641
typename FabDataType< MF >::fab_type FAB
Definition: AMReX_MLLinOp.H:108
virtual RT normInf(int amrlev, MF const &mf, bool local) const =0
Vector< int > m_num_mg_levels
Definition: AMReX_MLLinOp.H:586
bool hasBC(BCType bct) const noexcept
Definition: AMReX_MLLinOp.H:1243
static void makeConsolidatedDMap(const Vector< BoxArray > &ba, Vector< DistributionMapping > &dm, int ratio, int strategy)
Definition: AMReX_MLLinOp.H:1323
Vector< Vector< DistributionMapping > > m_dmap
Definition: AMReX_MLLinOp.H:600
int verbose
Definition: AMReX_MLLinOp.H:577
virtual void checkPoint(std::string const &) const
Definition: AMReX_MLLinOp.H:745
IntVect m_coarse_data_crse_ratio
Definition: AMReX_MLLinOp.H:626
LinOpBCType BCType
Definition: AMReX_MLLinOp.H:111
virtual void correctionResidual(int amrlev, int mglev, MF &resid, MF &x, const MF &b, BCMode bc_mode, const MF *crse_bcdata=nullptr)=0
Compute residual for the residual-correction form, resid = b - L(x)
virtual void compFlux(int, const Array< MF *, AMREX_SPACEDIM > &, MF &, Location) const
Compute fluxes.
Definition: AMReX_MLLinOp.H:423
MLLinOpT(MLLinOpT< MF > &&)=delete
Vector< Array< BCType, AMREX_SPACEDIM > > m_lobc_orig
Definition: AMReX_MLLinOp.H:566
virtual void postSolve(Vector< MF > &) const
Definition: AMReX_MLLinOp.H:536
void setCoarseFineBC(const MF *crse, int crse_ratio, LinOpBCType bc_type=LinOpBCType::Dirichlet) noexcept
Set coarse/fine boundary conditions. For cell-centered solves only.
Definition: AMReX_MLLinOp.H:1427
virtual void apply(int amrlev, int mglev, MF &out, MF &in, BCMode bc_mode, StateMode s_mode, const MLMGBndryT< MF > *bndry=nullptr) const =0
Apply the linear operator, out = L(in)
void setDomainBCLoc(const Array< Real, AMREX_SPACEDIM > &lo_bcloc, const Array< Real, AMREX_SPACEDIM > &hi_bcloc) noexcept
Set location of domain boundaries.
Definition: AMReX_MLLinOp.H:1418
bool needsCoarseDataForBC() const noexcept
Needs coarse data for bc?
Definition: AMReX_MLLinOp.H:177
typename FabDataType< MF >::value_type RT
Definition: AMReX_MLLinOp.H:109
virtual void update()
Update for reuse.
Definition: AMReX_MLLinOp.H:268
virtual void applyMetricTerm(int, int, MF &) const
apply metric terms if there are any
Definition: AMReX_MLLinOp.H:444
virtual void unapplyMetricTerm(int, int, MF &) const
unapply metric terms if there are any
Definition: AMReX_MLLinOp.H:446
bool hasHiddenDimension() const noexcept
Definition: AMReX_MLLinOp.H:695
virtual bool isBottomSingular() const =0
Is the bottom of MG singular?
virtual void reflux(int crse_amrlev, MF &res, const MF &crse_sol, const MF &crse_rhs, MF &fine_res, MF &fine_sol, const MF &fine_rhs) const
Reflux at AMR coarse/fine boundary.
Definition: AMReX_MLLinOp.H:406
virtual IntVect getNGrowVectRestriction() const
Definition: AMReX_MLLinOp.H:675
Vector< std::unique_ptr< MF > > robin_b_raii
Definition: AMReX_MLLinOp.H:751
virtual Vector< RT > getSolvabilityOffset(int, int, MF const &) const
get offset for fixing solvability
Definition: AMReX_MLLinOp.H:461
static constexpr int mg_box_min_width
Definition: AMReX_MLLinOp.H:572
virtual MF makeCoarseMG(int amrlev, int mglev, IntVect const &ng) const
Definition: AMReX_MLLinOp.H:1511
virtual void fixSolvabilityByOffset(int, int, MF &, Vector< RT > const &) const
fix solvability by subtracting offset from RHS
Definition: AMReX_MLLinOp.H:465
Vector< Array< BCType, AMREX_SPACEDIM > > m_lobc
Definition: AMReX_MLLinOp.H:562
int hiddenDirection() const noexcept
Definition: AMReX_MLLinOp.H:696
Vector< int > m_domain_covered
Definition: AMReX_MLLinOp.H:602
const MLLinOpT< MF > * m_parent
Definition: AMReX_MLLinOp.H:587
bool doSemicoarsening() const noexcept
Definition: AMReX_MLLinOp.H:671
virtual bool supportNSolve() const
Definition: AMReX_MLLinOp.H:532
virtual bool supportRobinBC() const noexcept
Definition: AMReX_MLLinOp.H:656
virtual void normalize(int amrlev, int mglev, MF &mf) const
Divide mf by the diagonal component of the operator. Used by bicgstab.
Definition: AMReX_MLLinOp.H:363
virtual void applyInhomogNeumannTerm(int, MF &) const
Extra terms introduced when we treat inhomogeneous Neumann BC as homogeneous.
Definition: AMReX_MLLinOp.H:452
virtual void avgDownResAmr(int clev, MF &cres, MF const &fres) const
Definition: AMReX_MLLinOp.H:542
Vector< Vector< Geometry > > m_geom
The first Vector index is the AMR level and the second is the MG level.
Definition: AMReX_MLLinOp.H:598
MLLinOpT(const MLLinOpT< MF > &)=delete
virtual void averageDownAndSync(Vector< MF > &sol) const =0
void setCoarseFineBC(const MF *crse, IntVect const &crse_ratio, LinOpBCType bc_type=LinOpBCType::Dirichlet) noexcept
Definition: AMReX_MLLinOp.H:1435
MLLinOpT< MF > & operator=(const MLLinOpT< MF > &)=delete
Box compactify(Box const &b) const noexcept
Definition: AMReX_MLLinOp.H:1272
bool m_needs_coarse_data_for_bc
Definition: AMReX_MLLinOp.H:624
int maxorder
Definition: AMReX_MLLinOp.H:579
void define(const Vector< Geometry > &a_geom, const Vector< BoxArray > &a_grids, const Vector< DistributionMapping > &a_dmap, const LPInfo &a_info, const Vector< FabFactory< FAB > const * > &a_factory, bool eb_limit_coarsening=true)
Definition: AMReX_MLLinOp.H:757
Vector< IntVect > mg_coarsen_ratio_vec
Definition: AMReX_MLLinOp.H:595
MF MFType
Definition: AMReX_MLLinOp.H:107
virtual ~MLLinOpT()=default
virtual void averageDownSolutionRHS(int camrlev, MF &crse_sol, MF &crse_rhs, const MF &fine_sol, const MF &fine_rhs)
Average-down data from fine AMR level to coarse AMR level.
Definition: AMReX_MLLinOp.H:329
LPInfo info
Definition: AMReX_MLLinOp.H:575
MPI_Comm makeSubCommunicator(const DistributionMapping &dm)
Definition: AMReX_MLLinOp.H:1379
Vector< Array< BCType, AMREX_SPACEDIM > > m_hibc
Definition: AMReX_MLLinOp.H:563
int NMGLevels(int amrlev) const noexcept
Return the number of MG levels at given AMR level.
Definition: AMReX_MLLinOp.H:557
virtual void scaleRHS(int, MF &) const
scale RHS to fix solvability
Definition: AMReX_MLLinOp.H:458
T get_d1(T const &, T const &dy, T const &dz) const noexcept
Definition: AMReX_MLLinOp.H:724
bool enforceSingularSolvable
Definition: AMReX_MLLinOp.H:581
void setLevelBC(int amrlev, const AMF *levelbcdata, const AMF *robinbc_a=nullptr, const AMF *robinbc_b=nullptr, const AMF *robinbc_f=nullptr)
Definition: AMReX_MLLinOp.H:1597
virtual RT xdoty(int amrlev, int mglev, const MF &x, const MF &y, bool local) const =0
x dot y, used by the bottom solver
virtual void interpolationAmr(int famrlev, MF &fine, const MF &crse, IntVect const &nghost) const
Interpolation between AMR levels.
Definition: AMReX_MLLinOp.H:313
bool doConsolidation() const noexcept
Definition: AMReX_MLLinOp.H:670
virtual bool supportInhomogNeumannBC() const noexcept
Definition: AMReX_MLLinOp.H:657
virtual void prepareForFluxes(int, const MF *=nullptr)
Definition: AMReX_MLLinOp.H:379
LinOpBCType m_coarse_fine_bc_type
Definition: AMReX_MLLinOp.H:625
virtual bool isSingular(int amrlev) const =0
Is it singular on given AMR level?
GpuArray< BCType, AMREX_SPACEDIM > HiBC(int icomp=0) const noexcept
Definition: AMReX_MLLinOp.H:646
int AMRRefRatio(int amr_lev) const noexcept
Return AMR refinement ratio at given AMR level.
Definition: AMReX_MLLinOp.H:635
MPI_Comm m_bottom_comm
Definition: AMReX_MLLinOp.H:605
virtual void restriction(int amrlev, int cmglev, MF &crse, MF &fine) const =0
Restriction onto coarse MG level.
virtual void applyOverset(int, MF &) const
for overset solver only
Definition: AMReX_MLLinOp.H:455
virtual void unimposeNeumannBC(int, MF &) const
This is needed for our nodal projection solver.
Definition: AMReX_MLLinOp.H:449
virtual void prepareForGMRES()
Definition: AMReX_MLLinOp.H:470
bool getEnforceSingularSolvable() const noexcept
Definition: AMReX_MLLinOp.H:256
virtual void prepareForSolve()=0
virtual void make(Vector< Vector< MF > > &mf, IntVect const &ng) const
Definition: AMReX_MLLinOp.H:1469
bool hasInhomogNeumannBC() const noexcept
Definition: AMReX_MLLinOp.H:1258
Vector< std::unique_ptr< MF > > robin_a_raii
Definition: AMReX_MLLinOp.H:750
virtual int getNGrow(int=0, int=0) const
Definition: AMReX_MLLinOp.H:263
int m_num_amr_levels
Definition: AMReX_MLLinOp.H:583
Definition: AMReX_MLMGBndry.H:12
Definition: AMReX_MLMG.H:12
Definition: AMReX_MLPoisson.H:16
This class provides the user with a few print options.
Definition: AMReX_Print.H:35
A Box with real dimensions. A RealBox is OK iff volume >= 0.
Definition: AMReX_RealBox.H:21
A Real vector in SpaceDim-dimensional space.
Definition: AMReX_RealVect.H:32
This class is a thin wrapper around std::vector. Unlike vector, Vector::operator[] provides bound che...
Definition: AMReX_Vector.H:27
Long size() const noexcept
Definition: AMReX_Vector.H:50
AMREX_EXPORT int max_grid_size
Definition: AMReX_EB2.cpp:23
AMREX_GPU_HOST_DEVICE Long size(T const &b) noexcept
integer version
Definition: AMReX_GpuRange.H:26
bool notInLaunchRegion() noexcept
Definition: AMReX_GpuControl.H:87
MPI_Comm CommunicatorSub() noexcept
sub-communicator for current frame
Definition: AMReX_ParallelContext.H:70
int local_to_global_rank(int rank) noexcept
translate between local rank and global rank
Definition: AMReX_ParallelContext.H:95
int global_to_local_rank(int rank) noexcept
Definition: AMReX_ParallelContext.H:98
int NProcsSub() noexcept
number of ranks in current frame
Definition: AMReX_ParallelContext.H:74
MPI_Comm Communicator() noexcept
Definition: AMReX_ParallelDescriptor.H:210
static int f(amrex::Real t, N_Vector y_data, N_Vector y_rhs, void *user_data)
Definition: AMReX_SundialsIntegrator.H:44
@ min
Definition: AMReX_ParallelReduce.H:18
Definition: AMReX_Amr.cpp:49
@ make_alias
Definition: AMReX_MakeType.H:7
void average_down(const MultiFab &S_fine, MultiFab &S_crse, const Geometry &fgeom, const Geometry &cgeom, int scomp, int ncomp, int rr)
Definition: AMReX_MultiFabUtil.cpp:314
BoxND< AMREX_SPACEDIM > Box
Definition: AMReX_BaseFwd.H:27
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > enclosedCells(const BoxND< dim > &b, int dir) noexcept
Returns a BoxND with CELL based coordinates in direction dir that is enclosed by b....
Definition: AMReX_Box.H:1463
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > convert(const BoxND< dim > &b, const IntVectND< dim > &typ) noexcept
Returns a BoxND with different type.
Definition: AMReX_Box.H:1435
constexpr AMREX_GPU_HOST_DEVICE GpuTupleElement< I, GpuTuple< Ts... > >::type & get(GpuTuple< Ts... > &tup) noexcept
Definition: AMReX_Tuple.H:179
void EB_average_down(const MultiFab &S_fine, MultiFab &S_crse, const MultiFab &vol_fine, const MultiFab &vfrac_fine, int scomp, int ncomp, const IntVect &ratio)
Definition: AMReX_EBMultiFabUtil.cpp:336
IntVectND< AMREX_SPACEDIM > IntVect
Definition: AMReX_BaseFwd.H:30
BottomSolver
Definition: AMReX_MLLinOp.H:30
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void ignore_unused(const Ts &...)
This shuts up the compiler about unused variables.
Definition: AMReX.H:111
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > coarsen(const BoxND< dim > &b, int ref_ratio) noexcept
Coarsen BoxND by given (positive) refinement ratio. NOTE: if type(dir) = CELL centered: lo <- lo/rati...
Definition: AMReX_Box.H:1304
void Warning(const std::string &msg)
Print out warning message to cerr.
Definition: AMReX.cpp:231
void Abort(const std::string &msg)
Print out message to cerr and exit via abort().
Definition: AMReX.cpp:225
const int[]
Definition: AMReX_BLProfiler.cpp:1664
int verbose
Definition: AMReX_DistributionMapping.cpp:36
std::unique_ptr< Hypre > makeHypre(const BoxArray &grids, const DistributionMapping &dmap, const Geometry &geom, MPI_Comm comm_, Hypre::Interface interface, const iMultiFab *overset_mask)
Definition: AMReX_Hypre.cpp:12
std::array< T, N > Array
Definition: AMReX_Array.H:24
Definition: AMReX_Array4.H:61
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE int nComp() const noexcept
Definition: AMReX_Array4.H:248
Definition: AMReX_FabDataType.H:9
Definition: AMReX_Array.H:34
Definition: AMReX_TypeTraits.H:29
Definition: AMReX_MLLinOp.H:35
int con_strategy
Definition: AMReX_MLLinOp.H:42
bool do_semicoarsening
Definition: AMReX_MLLinOp.H:38
bool has_metric_term
Definition: AMReX_MLLinOp.H:43
LPInfo & setAgglomerationGridSize(int x) noexcept
Definition: AMReX_MLLinOp.H:52
int max_semicoarsening_level
Definition: AMReX_MLLinOp.H:45
bool hasHiddenDimension() const noexcept
Definition: AMReX_MLLinOp.H:62
LPInfo & setSemicoarsening(bool x) noexcept
Definition: AMReX_MLLinOp.H:51
LPInfo & setMaxCoarseningLevel(int n) noexcept
Definition: AMReX_MLLinOp.H:57
LPInfo & setConsolidationGridSize(int x) noexcept
Definition: AMReX_MLLinOp.H:53
LPInfo & setAgglomeration(bool x) noexcept
Definition: AMReX_MLLinOp.H:49
int con_ratio
Definition: AMReX_MLLinOp.H:41
bool do_consolidation
Definition: AMReX_MLLinOp.H:37
int con_grid_size
Definition: AMReX_MLLinOp.H:40
bool do_agglomeration
Definition: AMReX_MLLinOp.H:36
LPInfo & setHiddenDirection(int n) noexcept
Definition: AMReX_MLLinOp.H:60
static constexpr int getDefaultConsolidationGridSize()
Definition: AMReX_MLLinOp.H:74
LPInfo & setConsolidation(bool x) noexcept
Definition: AMReX_MLLinOp.H:50
int max_coarsening_level
Definition: AMReX_MLLinOp.H:44
LPInfo & setMaxSemicoarseningLevel(int n) noexcept
Definition: AMReX_MLLinOp.H:58
int agg_grid_size
Definition: AMReX_MLLinOp.H:39
static constexpr int getDefaultAgglomerationGridSize()
Definition: AMReX_MLLinOp.H:66
LPInfo & setConsolidationStrategy(int x) noexcept
Definition: AMReX_MLLinOp.H:55
int hidden_direction
Definition: AMReX_MLLinOp.H:47
LPInfo & setMetricTerm(bool x) noexcept
Definition: AMReX_MLLinOp.H:56
LPInfo & setSemicoarseningDirection(int n) noexcept
Definition: AMReX_MLLinOp.H:59
LPInfo & setConsolidationRatio(int x) noexcept
Definition: AMReX_MLLinOp.H:54
int semicoarsening_direction
Definition: AMReX_MLLinOp.H:46
Definition: AMReX_MLLinOp.H:84
StateMode
Definition: AMReX_MLLinOp.H:86
BCMode
Definition: AMReX_MLLinOp.H:85
Location
Definition: AMReX_MLLinOp.H:87
FabArray memory allocation information.
Definition: AMReX_FabArray.H:66
Definition: AMReX_MLLinOp.H:606
void operator=(const CommContainer &)=delete
~CommContainer()
Definition: AMReX_MLLinOp.H:613
MPI_Comm comm
Definition: AMReX_MLLinOp.H:607
CommContainer(const CommContainer &)=delete
CommContainer(CommContainer &&)=delete
CommContainer(MPI_Comm m) noexcept
Definition: AMReX_MLLinOp.H:608
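As a companion to the LPInfo entries above, the short sketch below shows the chained setters in use. It is illustrative only; the numeric values are example choices, not library defaults.

// Illustrative only: building an LPInfo with the setters documented above.
#include <AMReX_MLLinOp.H>

amrex::LPInfo make_solver_info ()
{
    amrex::LPInfo info;
    info.setAgglomeration(true)         // merge level-0 grids on coarse MG levels
        .setConsolidation(true)         // move coarse MG levels onto fewer MPI ranks
        .setAgglomerationGridSize(32)   // example grid size, not the default
        .setConsolidationGridSize(32)   // example grid size, not the default
        .setMaxCoarseningLevel(30)      // cap the number of MG coarsening levels
        .setSemicoarsening(false)
        .setMetricTerm(false);
    return info;
}

Each setter returns LPInfo&, which is what makes the chained style above possible; the resulting info object is then passed to the MLLinOpT::define overload shown in the listing.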