AMReX_MLCellLinOp.H
1 #ifndef AMREX_ML_CELL_LINOP_H_
2 #define AMREX_ML_CELL_LINOP_H_
3 #include <AMReX_Config.H>
4 
5 #include <AMReX_MLLinOp.H>
6 #include <AMReX_iMultiFab.H>
7 #include <AMReX_MultiFabUtil.H>
8 #include <AMReX_YAFluxRegister.H>
9 #include <AMReX_MLLinOp_K.H>
10 #include <AMReX_MLMG_K.H>
11 
12 #ifndef BL_NO_FORT
13 #include <AMReX_MLLinOp_F.H>
14 #endif
15 
16 namespace amrex {
17 
18 template <typename MF>
19 class MLCellLinOpT // NOLINT(cppcoreguidelines-virtual-class-destructor)
20  : public MLLinOpT<MF>
21 {
22 public:
23 
24  using FAB = typename MF::fab_type;
25  using RT = typename MF::value_type;
26 
27  using BCType = LinOpBCType;
28  using BCMode = typename MLLinOpT<MF>::BCMode;
29  using StateMode = typename MLLinOpT<MF>::StateMode;
30  using Location = typename MLLinOpT<MF>::Location;
31 
32  MLCellLinOpT ();
33  ~MLCellLinOpT () override = default;
34 
35  MLCellLinOpT (const MLCellLinOpT<MF>&) = delete;
36  MLCellLinOpT (MLCellLinOpT<MF>&&) = delete;
37  MLCellLinOpT<MF>& operator= (const MLCellLinOpT<MF>&) = delete;
38  MLCellLinOpT<MF>& operator= (MLCellLinOpT<MF>&&) = delete;
39 
40  void define (const Vector<Geometry>& a_geom,
41  const Vector<BoxArray>& a_grids,
42  const Vector<DistributionMapping>& a_dmap,
43  const LPInfo& a_info = LPInfo(),
44  const Vector<FabFactory<FAB> const*>& a_factory = {});
45 
46  void setLevelBC (int amrlev, const MF* levelbcdata,
47  const MF* robinbc_a = nullptr,
48  const MF* robinbc_b = nullptr,
49  const MF* robinbc_f = nullptr) final;
50 
51  using MLLinOpT<MF>::setLevelBC;
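 // Example (editorial sketch; names are illustrative, not part of the original
 // header): a driver sets the domain BCs first, then calls setLevelBC once per
 // AMR level. For Robin faces, a*phi + b*dphi/dn = f, the optional arguments
 // supply the a, b, f coefficients.
 //
 //     op.setDomainBC(lobc, hibc);                              // from MLLinOpT
 //     op.setLevelBC(0, &phi_bc);                               // BC values in ghost cells
 //     op.setLevelBC(0, &phi_bc, &robin_a, &robin_b, &robin_f); // if any face is Robin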
52 
53  bool needsUpdate () const override {
54  return MLLinOpT<MF>::needsUpdate();
55  }
56  void update () override;
57 
58  void setGaussSeidel (bool flag) noexcept { m_use_gauss_seidel = flag; }
59 
60  virtual bool isCrossStencil () const { return true; }
61  virtual bool isTensorOp () const { return false; }
62 
63  void updateSolBC (int amrlev, const MF& crse_bcdata) const;
64  void updateCorBC (int amrlev, const MF& crse_bcdata) const;
65 
66  virtual void applyBC (int amrlev, int mglev, MF& in, BCMode bc_mode, StateMode s_mode,
67  const MLMGBndryT<MF>* bndry=nullptr, bool skip_fillboundary=false) const;
68 
69  BoxArray makeNGrids (int grid_size) const;
70 
71  void restriction (int, int, MF& crse, MF& fine) const override;
72 
73  void interpolation (int amrlev, int fmglev, MF& fine, const MF& crse) const override;
74 
75  void interpAssign (int amrlev, int fmglev, MF& fine, MF& crse) const override;
76 
77  void interpolationAmr (int famrlev, MF& fine, const MF& crse,
78  IntVect const& nghost) const override;
79 
80  void averageDownSolutionRHS (int camrlev, MF& crse_sol, MF& crse_rhs,
81  const MF& fine_sol, const MF& fine_rhs) override;
82 
83  void apply (int amrlev, int mglev, MF& out, MF& in, BCMode bc_mode,
84  StateMode s_mode, const MLMGBndryT<MF>* bndry=nullptr) const override;
85  void smooth (int amrlev, int mglev, MF& sol, const MF& rhs,
86  bool skip_fillboundary=false) const final;
87 
88  void solutionResidual (int amrlev, MF& resid, MF& x, const MF& b,
89  const MF* crse_bcdata=nullptr) override;
90 
91  void prepareForFluxes (int amrlev, const MF* crse_bcdata = nullptr) override;
92 
93  void correctionResidual (int amrlev, int mglev, MF& resid, MF& x, const MF& b,
94  BCMode bc_mode, const MF* crse_bcdata=nullptr) final;
95 
96  // The assumption is crse_sol's boundary has been filled, but not fine_sol.
97  void reflux (int crse_amrlev,
98  MF& res, const MF& crse_sol, const MF&,
99  MF&, MF& fine_sol, const MF&) const final;
100  void compFlux (int amrlev, const Array<MF*,AMREX_SPACEDIM>& fluxes,
101  MF& sol, Location loc) const override;
102  void compGrad (int amrlev, const Array<MF*,AMREX_SPACEDIM>& grad,
103  MF& sol, Location loc) const override;
104 
105  void applyMetricTerm (int amrlev, int mglev, MF& rhs) const final;
106  void unapplyMetricTerm (int amrlev, int mglev, MF& rhs) const final;
107  Vector<RT> getSolvabilityOffset (int amrlev, int mglev,
108  MF const& rhs) const override;
109  void fixSolvabilityByOffset (int amrlev, int mglev, MF& rhs,
110  Vector<RT> const& offset) const override;
111 
112  void prepareForSolve () override;
113 
114  RT xdoty (int amrlev, int mglev, const MF& x, const MF& y, bool local) const final;
115 
116  virtual void Fapply (int amrlev, int mglev, MF& out, const MF& in) const = 0;
117  virtual void Fsmooth (int amrlev, int mglev, MF& sol, const MF& rhs, int redblack) const = 0;
118  virtual void FFlux (int amrlev, const MFIter& mfi,
119  const Array<FAB*,AMREX_SPACEDIM>& flux,
120  const FAB& sol, Location loc, int face_only=0) const = 0;
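 // Editorial sketch (hedged; skeleton only, not from the original header): a
 // concrete cell-centered operator supplies the three pure virtuals above and
 // inherits BC handling, restriction, interpolation, and reflux from this class.
 //
 //     class MyPoissonOp : public MLCellLinOpT<MultiFab> {
 //         void Fapply (int amrlev, int mglev, MultiFab& out,
 //                      const MultiFab& in) const override;   // out = L(in)
 //         void Fsmooth (int amrlev, int mglev, MultiFab& sol,
 //                       const MultiFab& rhs, int redblack) const override;
 //         void FFlux (int amrlev, const MFIter& mfi,
 //                     const Array<FArrayBox*,AMREX_SPACEDIM>& flux,
 //                     const FArrayBox& sol, Location loc,
 //                     int face_only=0) const override;
 //     };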
121 
122  virtual void addInhomogNeumannFlux (int /*amrlev*/,
123  const Array<MF*,AMREX_SPACEDIM>& /*grad*/,
124  MF const& /*sol*/,
125  bool /*mult_bcoef*/) const {}
126 
127  RT normInf (int amrlev, MF const& mf, bool local) const override;
128 
129  void averageDownAndSync (Vector<MF>& sol) const override;
130 
131  void avgDownResAmr (int clev, MF& cres, MF const& fres) const override;
132 
133  struct BCTL {
134  BoundCond type;
135  RT location;
136  };
137 
138  Vector<std::unique_ptr<MF> > m_robin_bcval;
139 
140 #ifdef AMREX_USE_HYPRE
141  void setInterpBndryHalfWidth (int w) { m_interpbndry_halfwidth = w; }
142 #endif
143 
144 protected:
145 
146  bool m_has_metric_term = false;
147 
148  Vector<std::unique_ptr<MLMGBndryT<MF> > > m_bndry_sol;
149  Vector<std::unique_ptr<BndryRegisterT<MF> > > m_crse_sol_br;
150 
151  Vector<std::unique_ptr<MLMGBndryT<MF> > > m_bndry_cor;
152  Vector<std::unique_ptr<BndryRegisterT<MF> > > m_crse_cor_br;
153 
154  // In case of agglomeration, coarse MG grids on amr level 0 are
155  // not simply coarsened from fine MG grids. So we need to build
156  // bcond and bcloc for each MG level.
157  using RealTuple = typename BndryDataT<MF>::RealTuple;
158  using BCTuple = typename MLMGBndryT<MF>::BCTuple;
159  class BndryCondLoc
160  {
161  public:
162  BndryCondLoc (const BoxArray& ba, const DistributionMapping& dm, int ncomp);
163 
164  void setLOBndryConds (const Geometry& geom, const Real* dx,
165  const Vector<Array<BCType,AMREX_SPACEDIM> >& lobc,
166  const Vector<Array<BCType,AMREX_SPACEDIM> >& hibc,
167  IntVect const& ratio, const RealVect& interior_bloc,
168  const Array<Real,AMREX_SPACEDIM>& domain_bloc_lo,
169  const Array<Real,AMREX_SPACEDIM>& domain_bloc_hi,
170  LinOpBCType crse_fine_bc_type);
171 
172  const Vector<BCTuple>& bndryConds (const MFIter& mfi) const noexcept {
173  return bcond[mfi];
174  }
175  const Vector<RealTuple>& bndryLocs (const MFIter& mfi) const noexcept {
176  return bcloc[mfi];
177  }
178  const BCTuple& bndryConds (const MFIter& mfi, int icomp) const noexcept {
179  return bcond[mfi][icomp];
180  }
181  const RealTuple& bndryLocs (const MFIter& mfi, int icomp) const noexcept {
182  return bcloc[mfi][icomp];
183  }
184  GpuArray<BCTL,2*AMREX_SPACEDIM> const* getBCTLPtr (const MFIter& mfi) const noexcept {
185  return bctl[mfi];
186  }
187  private:
188  LayoutData<Vector<BCTuple> > bcond;
189  LayoutData<Vector<RealTuple> > bcloc;
190  LayoutData<GpuArray<BCTL,2*AMREX_SPACEDIM> const*> bctl;
191  Gpu::DeviceVector<GpuArray<BCTL,2*AMREX_SPACEDIM> > bctl_dv;
192  int m_ncomp;
193  };
194  Vector<Vector<std::unique_ptr<BndryCondLoc> > > m_bcondloc;
195 
196  // used to save interpolation coefficients of the first interior cells
197  Vector<Vector<BndryRegisterT<MF> > > m_undrrelxr;
198 
199  // boundary cell flags for covered, not_covered, outside_domain
200  Vector<Vector<Array<MultiMask,2*AMREX_SPACEDIM> > > m_maskvals;
201 
202  Vector<std::unique_ptr<iMultiFab> > m_norm_fine_mask;
203 
204  mutable Vector<YAFluxRegisterT<MF> > m_fluxreg;
205 
206  bool m_use_gauss_seidel = true; // use red-black Gauss-Seidel by default
207 
208 private:
209 
210  void defineAuxData ();
211  void defineBC ();
212 
213  void computeVolInv () const;
214  mutable Vector<Vector<RT> > m_volinv; // used by solvability fix
215 
216  int m_interpbndry_halfwidth = 2;
217 };
218 
219 template <typename T>
220 struct MLMGABCTag {
221  Array4<T> fab;
222  Array4<T const> bcval_lo;
223  Array4<T const> bcval_hi;
224  Array4<int const> mask_lo;
225  Array4<int const> mask_hi;
226  T bcloc_lo;
227  T bcloc_hi;
228  Box bx;
229  BoundCond bctype_lo;
230  BoundCond bctype_hi;
231  int blen;
232  int comp;
233  int dir;
234 
235  AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
236  Box const& box() const noexcept { return bx; }
237 };
238 
239 template <typename T>
240 struct MLMGPSTag {
241  Array4<T> flo;
242  Array4<T> fhi;
243  Array4<int const> mlo;
244  Array4<int const> mhi;
245  T bcllo;
246  T bclhi;
247  Box bx;
248  BoundCond bctlo;
249  BoundCond bcthi;
250  int blen;
251  int comp;
252  int dir;
253 
254  AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
255  Box const& box() const noexcept { return bx; }
256 };
257 
258 #ifdef AMREX_USE_EB
259 template <typename T>
260 struct MLMGPSEBTag {
261  Array4<T> flo;
262  Array4<T> fhi;
263  Array4<T const> ap;
264  Array4<int const> mlo;
265  Array4<int const> mhi;
266  T bcllo;
267  T bclhi;
268  Box bx;
269  BoundCond bctlo;
270  BoundCond bcthi;
271  int blen;
272  int comp;
273  int dir;
274 
275  AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
276  Box const& box() const noexcept { return bx; }
277 };
278 #endif
279 
280 template <typename MF>
281 MLCellLinOpT<MF>::BndryCondLoc::BndryCondLoc (const BoxArray& ba,
282  const DistributionMapping& dm,
283  int ncomp)
284  : bcond(ba, dm),
285  bcloc(ba, dm),
286  bctl(ba, dm),
287  bctl_dv(bctl.local_size()*ncomp),
288  m_ncomp(ncomp)
289 {
290  auto* dp = bctl_dv.data();
291  for (MFIter mfi(bcloc); mfi.isValid(); ++mfi) {
292  bcond[mfi].resize(ncomp);
293  bcloc[mfi].resize(ncomp);
294  bctl[mfi] = dp;
295  dp += ncomp;
296  }
297 }
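 // Note: bctl_dv is one flat device buffer holding ncomp
 // GpuArray<BCTL,2*AMREX_SPACEDIM> entries per local box; the loop above hands
 // each box a pointer to its first entry, so component icomp of local box b
 // lives at dp + b*ncomp + icomp.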
298 
299 template <typename MF>
300 void
301 MLCellLinOpT<MF>::BndryCondLoc::
302 setLOBndryConds (const Geometry& geom, const Real* dx,
303  const Vector<Array<BCType,AMREX_SPACEDIM> >& lobc,
304  const Vector<Array<BCType,AMREX_SPACEDIM> >& hibc,
305  IntVect const& ratio, const RealVect& interior_bloc,
306  const Array<Real,AMREX_SPACEDIM>& domain_bloc_lo,
307  const Array<Real,AMREX_SPACEDIM>& domain_bloc_hi,
308  LinOpBCType crse_fine_bc_type)
309 {
310  const Box& domain = geom.Domain();
311 
312 #ifdef AMREX_USE_OMP
313 #pragma omp parallel
314 #endif
315  for (MFIter mfi(bcloc); mfi.isValid(); ++mfi)
316  {
317  const Box& bx = mfi.validbox();
318  for (int icomp = 0; icomp < m_ncomp; ++icomp) {
319  RealTuple & bloc = bcloc[mfi][icomp];
320  BCTuple & bctag = bcond[mfi][icomp];
321  MLMGBndryT<MF>::setBoxBC(bloc, bctag, bx, domain,
322  lobc[icomp], hibc[icomp],
323  dx, ratio, interior_bloc,
324  domain_bloc_lo, domain_bloc_hi,
325  geom.isPeriodicArray(),
326  crse_fine_bc_type);
327  }
328  }
329 
330  Gpu::PinnedVector<GpuArray<BCTL,2*AMREX_SPACEDIM> > hv;
331  hv.reserve(bctl_dv.size());
332  for (MFIter mfi(bctl); mfi.isValid(); ++mfi)
333  {
334  for (int icomp = 0; icomp < m_ncomp; ++icomp) {
335  GpuArray<BCTL,2*AMREX_SPACEDIM> tmp;
336  for (int m = 0; m < 2*AMREX_SPACEDIM; ++m) {
337  tmp[m].type = bcond[mfi][icomp][m];
338  tmp[m].location = bcloc[mfi][icomp][m];
339  }
340  hv.push_back(std::move(tmp));
341  }
342  }
343  Gpu::copyAsync(Gpu::hostToDevice, hv.begin(), hv.end(), bctl_dv.begin());
344  Gpu::streamSynchronize();
345 }
346 
347 template <typename MF>
348 MLCellLinOpT<MF>::MLCellLinOpT ()
349 {
350  this->m_ixtype = IntVect::TheCellVector();
351 }
352 
353 template <typename MF>
354 void
355 MLCellLinOpT<MF>::define (const Vector<Geometry>& a_geom,
356  const Vector<BoxArray>& a_grids,
357  const Vector<DistributionMapping>& a_dmap,
358  const LPInfo& a_info,
359  const Vector<FabFactory<FAB> const*>& a_factory)
360 {
361  MLLinOpT<MF>::define(a_geom, a_grids, a_dmap, a_info, a_factory);
362  defineAuxData();
363  defineBC();
364 }
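 // Example (editorial sketch; MLABecLaplacian and MLMG are the usual entry
 // points, and all names here are illustrative): define() is normally reached
 // through a concrete operator before handing it to the MLMG driver.
 //
 //     MLABecLaplacian mlabec;
 //     mlabec.define({geom}, {grids}, {dmap}, LPInfo());
 //     mlabec.setDomainBC(lobc, hibc);
 //     mlabec.setLevelBC(0, &phi);
 //     mlabec.setScalars(ascalar, bscalar);
 //     MLMG mlmg(mlabec);
 //     mlmg.solve({&phi}, {&rhs}, reltol, abstol);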
365 
366 template <typename MF>
367 void
368 MLCellLinOpT<MF>::defineAuxData ()
369 {
370  BL_PROFILE("MLCellLinOp::defineAuxData()");
371 
372  m_undrrelxr.resize(this->m_num_amr_levels);
373  m_maskvals.resize(this->m_num_amr_levels);
374  m_fluxreg.resize(this->m_num_amr_levels-1);
375  m_norm_fine_mask.resize(this->m_num_amr_levels-1);
376 
377  const int ncomp = this->getNComp();
378 
379  for (int amrlev = 0; amrlev < this->m_num_amr_levels; ++amrlev)
380  {
381  m_undrrelxr[amrlev].resize(this->m_num_mg_levels[amrlev]);
382  for (int mglev = 0; mglev < this->m_num_mg_levels[amrlev]; ++mglev)
383  {
384  m_undrrelxr[amrlev][mglev].define(this->m_grids[amrlev][mglev],
385  this->m_dmap[amrlev][mglev],
386  1, 0, 0, ncomp);
387  }
388  }
389 
390  for (int amrlev = 0; amrlev < this->m_num_amr_levels; ++amrlev)
391  {
392  m_maskvals[amrlev].resize(this->m_num_mg_levels[amrlev]);
393  for (int mglev = 0; mglev < this->m_num_mg_levels[amrlev]; ++mglev)
394  {
395  for (OrientationIter oitr; oitr; ++oitr)
396  {
397  const Orientation face = oitr();
398  const int ngrow = 1;
399  const int extent = this->isCrossStencil() ? 0 : 1; // extend to corners
400  m_maskvals[amrlev][mglev][face].define(this->m_grids[amrlev][mglev],
401  this->m_dmap[amrlev][mglev],
402  this->m_geom[amrlev][mglev],
403  face, 0, ngrow, extent, 1, true);
404  }
405  }
406  }
407 
408  for (int amrlev = 0; amrlev < this->m_num_amr_levels-1; ++amrlev)
409  {
410  const IntVect ratio{this->m_amr_ref_ratio[amrlev]};
411  m_fluxreg[amrlev].define(this->m_grids[amrlev+1][0],
412  this->m_grids[amrlev][0],
413  this->m_dmap[amrlev+1][0],
414  this->m_dmap[amrlev][0],
415  this->m_geom[amrlev+1][0],
416  this->m_geom[amrlev][0],
417  ratio, amrlev+1, ncomp);
418  m_norm_fine_mask[amrlev] = std::make_unique<iMultiFab>
419  (makeFineMask(this->m_grids[amrlev][0], this->m_dmap[amrlev][0],
420  this->m_grids[amrlev+1][0],
421  ratio, 1, 0));
422  }
423 
424 #if (AMREX_SPACEDIM != 3)
425  m_has_metric_term = !this->m_geom[0][0].IsCartesian() && this->info.has_metric_term;
426 #endif
427 }
428 
429 template <typename MF>
430 void
431 MLCellLinOpT<MF>::defineBC ()
432 {
433  BL_PROFILE("MLCellLinOp::defineBC()");
434 
435  const int ncomp = this->getNComp();
436 
437  m_bndry_sol.resize(this->m_num_amr_levels);
438  m_crse_sol_br.resize(this->m_num_amr_levels);
439 
440  m_bndry_cor.resize(this->m_num_amr_levels);
441  m_crse_cor_br.resize(this->m_num_amr_levels);
442 
443  m_robin_bcval.resize(this->m_num_amr_levels);
444 
445  for (int amrlev = 0; amrlev < this->m_num_amr_levels; ++amrlev)
446  {
447  m_bndry_sol[amrlev] = std::make_unique<MLMGBndryT<MF>>(this->m_grids[amrlev][0],
448  this->m_dmap[amrlev][0],
449  ncomp,
450  this->m_geom[amrlev][0]);
451  }
452 
453  for (int amrlev = 1; amrlev < this->m_num_amr_levels; ++amrlev)
454  {
455  const int in_rad = 0;
456  const int out_rad = 1;
457  const int extent_rad = 2;
458  const int crse_ratio = this->m_amr_ref_ratio[amrlev-1];
459  BoxArray cba = this->m_grids[amrlev][0];
460  cba.coarsen(crse_ratio);
461  m_crse_sol_br[amrlev] = std::make_unique<BndryRegisterT<MF>>
462  (cba, this->m_dmap[amrlev][0], in_rad, out_rad, extent_rad, ncomp);
463  }
464 
465  for (int amrlev = 1; amrlev < this->m_num_amr_levels; ++amrlev)
466  {
467  const int in_rad = 0;
468  const int out_rad = 1;
469  const int extent_rad = 2;
470  const int crse_ratio = this->m_amr_ref_ratio[amrlev-1];
471  BoxArray cba = this->m_grids[amrlev][0];
472  cba.coarsen(crse_ratio);
473  m_crse_cor_br[amrlev] = std::make_unique<BndryRegisterT<MF>>
474  (cba, this->m_dmap[amrlev][0], in_rad, out_rad, extent_rad, ncomp);
475  m_crse_cor_br[amrlev]->setVal(RT(0.0));
476  }
477 
478  // This has to be done after m_crse_cor_br is defined.
479  for (int amrlev = 1; amrlev < this->m_num_amr_levels; ++amrlev)
480  {
481  m_bndry_cor[amrlev] = std::make_unique<MLMGBndryT<MF>>
482  (this->m_grids[amrlev][0], this->m_dmap[amrlev][0], ncomp, this->m_geom[amrlev][0]);
483  MF bc_data(this->m_grids[amrlev][0], this->m_dmap[amrlev][0], ncomp, 1);
484  bc_data.setVal(0.0);
485 
486  m_bndry_cor[amrlev]->setBndryValues(*m_crse_cor_br[amrlev], 0, bc_data, 0, 0, ncomp,
487  IntVect(this->m_amr_ref_ratio[amrlev-1]),
490 
491  Vector<Array<LinOpBCType,AMREX_SPACEDIM> > bclohi
492  (ncomp,Array<LinOpBCType,AMREX_SPACEDIM>{{AMREX_D_DECL(BCType::Dirichlet,
493  BCType::Dirichlet,
494  BCType::Dirichlet)}});
495  m_bndry_cor[amrlev]->setLOBndryConds(bclohi, bclohi, this->m_amr_ref_ratio[amrlev-1], RealVect{});
496  }
497 
498  m_bcondloc.resize(this->m_num_amr_levels);
499  for (int amrlev = 0; amrlev < this->m_num_amr_levels; ++amrlev)
500  {
501  m_bcondloc[amrlev].resize(this->m_num_mg_levels[amrlev]);
502  for (int mglev = 0; mglev < this->m_num_mg_levels[amrlev]; ++mglev)
503  {
504  m_bcondloc[amrlev][mglev] = std::make_unique<BndryCondLoc>(this->m_grids[amrlev][mglev],
505  this->m_dmap[amrlev][mglev],
506  ncomp);
507  }
508  }
509 }
510 
511 template <typename MF>
512 void
513 MLCellLinOpT<MF>::setLevelBC (int amrlev, const MF* a_levelbcdata, const MF* robinbc_a,
514  const MF* robinbc_b, const MF* robinbc_f)
515 {
516  BL_PROFILE("MLCellLinOp::setLevelBC()");
517 
518  AMREX_ALWAYS_ASSERT(amrlev >= 0 && amrlev < this->m_num_amr_levels);
519 
520  const int ncomp = this->getNComp();
521 
522  MF zero;
523  IntVect ng(1);
524  if (this->hasHiddenDimension()) { ng[this->hiddenDirection()] = 0; }
525  if (a_levelbcdata == nullptr) {
526  zero.define(this->m_grids[amrlev][0], this->m_dmap[amrlev][0], ncomp, ng);
527  zero.setVal(RT(0.0));
528  } else {
529  AMREX_ALWAYS_ASSERT(a_levelbcdata->nGrowVect().allGE(ng));
530  }
531  const MF& bcdata = (a_levelbcdata == nullptr) ? zero : *a_levelbcdata;
532 
533  IntVect br_ref_ratio(-1);
534 
535  if (amrlev == 0)
536  {
537  if (this->needsCoarseDataForBC())
538  {
539  // AMREX_ALWAYS_ASSERT(!this->hasHiddenDimension());
540  if (this->hasHiddenDimension()) {
541  int hidden_dir = this->hiddenDirection();
542  AMREX_ALWAYS_ASSERT(this->m_coarse_data_crse_ratio[hidden_dir] == 1);
543  }
544  br_ref_ratio = this->m_coarse_data_crse_ratio.allGT(0) ? this->m_coarse_data_crse_ratio : IntVect(2);
545  if (m_crse_sol_br[amrlev] == nullptr && br_ref_ratio.allGT(0))
546  {
547  const int in_rad = 0;
548  const int out_rad = 1;
549  const int extent_rad = 2;
550  const IntVect crse_ratio = br_ref_ratio;
551  BoxArray cba = this->m_grids[amrlev][0];
552  cba.coarsen(crse_ratio);
553  m_crse_sol_br[amrlev] = std::make_unique<BndryRegisterT<MF>>
554  (cba, this->m_dmap[amrlev][0], in_rad, out_rad, extent_rad, ncomp);
555  }
556  if (this->m_coarse_data_for_bc != nullptr) {
558  const Box& cbx = amrex::coarsen(this->m_geom[0][0].Domain(), this->m_coarse_data_crse_ratio);
559  m_crse_sol_br[amrlev]->copyFrom(*this->m_coarse_data_for_bc, 0, 0, 0, ncomp,
560  this->m_geom[0][0].periodicity(cbx));
561  } else {
562  m_crse_sol_br[amrlev]->setVal(RT(0.0));
563  }
564  m_bndry_sol[amrlev]->setBndryValues(*m_crse_sol_br[amrlev], 0,
565  bcdata, 0, 0, ncomp, br_ref_ratio,
568  br_ref_ratio = this->m_coarse_data_crse_ratio;
569  }
570  else
571  {
572  m_bndry_sol[amrlev]->setPhysBndryValues(bcdata,0,0,ncomp);
573  br_ref_ratio = IntVect(1);
574  }
575  }
576  else
577  {
578  m_bndry_sol[amrlev]->setPhysBndryValues(bcdata,0,0,ncomp);
579  br_ref_ratio = IntVect(this->m_amr_ref_ratio[amrlev-1]);
580  }
581 
582  auto crse_fine_bc_type = (amrlev == 0) ? this->m_coarse_fine_bc_type : LinOpBCType::Dirichlet;
583  m_bndry_sol[amrlev]->setLOBndryConds(this->m_lobc, this->m_hibc, br_ref_ratio,
584  this->m_coarse_bc_loc, crse_fine_bc_type);
585 
586  const Real* dx = this->m_geom[amrlev][0].CellSize();
587  for (int mglev = 0; mglev < this->m_num_mg_levels[amrlev]; ++mglev)
588  {
589  m_bcondloc[amrlev][mglev]->setLOBndryConds(this->m_geom[amrlev][mglev], dx,
590  this->m_lobc, this->m_hibc,
591  br_ref_ratio, this->m_coarse_bc_loc,
592  this->m_domain_bloc_lo, this->m_domain_bloc_hi,
593  crse_fine_bc_type);
594  }
595 
596  if (this->hasRobinBC()) {
597  AMREX_ASSERT(robinbc_a != nullptr && robinbc_b != nullptr && robinbc_f != nullptr);
598  m_robin_bcval[amrlev] = std::make_unique<MF>(this->m_grids[amrlev][0],
599  this->m_dmap[amrlev][0],
600  ncomp*3, 1);
601  const Box& domain = this->m_geom[amrlev][0].Domain();
602  MFItInfo mfi_info;
603  if (Gpu::notInLaunchRegion()) { mfi_info.SetDynamic(true); }
604 #ifdef AMREX_USE_OMP
605 #pragma omp parallel if (Gpu::notInLaunchRegion())
606 #endif
607  for (MFIter mfi(*m_robin_bcval[amrlev], mfi_info); mfi.isValid(); ++mfi) {
608  Box const& vbx = mfi.validbox();
609  Array4<RT const> const& ra = robinbc_a->const_array(mfi);
610  Array4<RT const> const& rb = robinbc_b->const_array(mfi);
611  Array4<RT const> const& rf = robinbc_f->const_array(mfi);
612  for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
613  const Box& blo = amrex::adjCellLo(vbx, idim);
614  const Box& bhi = amrex::adjCellHi(vbx, idim);
615  bool outside_domain_lo = !(domain.contains(blo));
616  bool outside_domain_hi = !(domain.contains(bhi));
617  if ((!outside_domain_lo) && (!outside_domain_hi)) { continue; }
618  for (int icomp = 0; icomp < ncomp; ++icomp) {
619  Array4<RT> const& rbc = (*m_robin_bcval[amrlev])[mfi].array(icomp*3);
620  if (this->m_lobc_orig[icomp][idim] == LinOpBCType::Robin && outside_domain_lo)
621  {
622  AMREX_HOST_DEVICE_PARALLEL_FOR_3D(blo, i, j, k,
623  {
624  rbc(i,j,k,0) = ra(i,j,k,icomp);
625  rbc(i,j,k,1) = rb(i,j,k,icomp);
626  rbc(i,j,k,2) = rf(i,j,k,icomp);
627  });
628  }
629  if (this->m_hibc_orig[icomp][idim] == LinOpBCType::Robin && outside_domain_hi)
630  {
631  AMREX_HOST_DEVICE_PARALLEL_FOR_3D(bhi, i, j, k,
632  {
633  rbc(i,j,k,0) = ra(i,j,k,icomp);
634  rbc(i,j,k,1) = rb(i,j,k,icomp);
635  rbc(i,j,k,2) = rf(i,j,k,icomp);
636  });
637  }
638  }
639  }
640  }
641  }
642 }
643 
644 template <typename MF>
645 void
646 MLCellLinOpT<MF>::update ()
647 {
648  if (MLLinOpT<MF>::needsUpdate()) { MLLinOpT<MF>::update(); }
649 }
650 
651 template <typename MF>
652 void
653 MLCellLinOpT<MF>::updateSolBC (int amrlev, const MF& crse_bcdata) const
654 {
655  BL_PROFILE("MLCellLinOp::updateSolBC()");
656 
657  AMREX_ALWAYS_ASSERT(amrlev > 0);
658  const int ncomp = this->getNComp();
659  m_crse_sol_br[amrlev]->copyFrom(crse_bcdata, 0, 0, 0, ncomp,
660  this->m_geom[amrlev-1][0].periodicity());
661  m_bndry_sol[amrlev]->updateBndryValues(*m_crse_sol_br[amrlev], 0, 0, ncomp,
662  IntVect(this->m_amr_ref_ratio[amrlev-1]),
665 }
666 
667 template <typename MF>
668 void
669 MLCellLinOpT<MF>::updateCorBC (int amrlev, const MF& crse_bcdata) const
670 {
671  BL_PROFILE("MLCellLinOp::updateCorBC()");
672  AMREX_ALWAYS_ASSERT(amrlev > 0);
673  const int ncomp = this->getNComp();
674  m_crse_cor_br[amrlev]->copyFrom(crse_bcdata, 0, 0, 0, ncomp,
675  this->m_geom[amrlev-1][0].periodicity());
676  m_bndry_cor[amrlev]->updateBndryValues(*m_crse_cor_br[amrlev], 0, 0, ncomp,
677  IntVect(this->m_amr_ref_ratio[amrlev-1]),
680 }
681 
682 template <typename MF>
683 void
684 MLCellLinOpT<MF>::applyBC (int amrlev, int mglev, MF& in, BCMode bc_mode, StateMode,
685  const MLMGBndryT<MF>* bndry, bool skip_fillboundary) const
686 {
687  BL_PROFILE("MLCellLinOp::applyBC()");
688  // No coarsened boundary values, cannot apply inhomog at mglev>0.
689  BL_ASSERT(mglev == 0 || bc_mode == BCMode::Homogeneous);
690  BL_ASSERT(bndry != nullptr || bc_mode == BCMode::Homogeneous);
691 
692  const int ncomp = this->getNComp();
693  const int cross = isCrossStencil();
694  const int tensorop = isTensorOp();
695  if (!skip_fillboundary) {
696  in.FillBoundary(0, ncomp, this->m_geom[amrlev][mglev].periodicity(), cross);
697  }
698 
699  int flagbc = bc_mode == BCMode::Inhomogeneous;
700  const int imaxorder = this->maxorder;
701 
702  const Real* dxinv = this->m_geom[amrlev][mglev].InvCellSize();
703  const RT dxi = static_cast<RT>(dxinv[0]);
704  const RT dyi = (AMREX_SPACEDIM >= 2) ? static_cast<RT>(dxinv[1]) : RT(1.0);
705  const RT dzi = (AMREX_SPACEDIM == 3) ? static_cast<RT>(dxinv[2]) : RT(1.0);
706 
707  const auto& maskvals = m_maskvals[amrlev][mglev];
708  const auto& bcondloc = *m_bcondloc[amrlev][mglev];
709 
710  FAB foofab(Box::TheUnitBox(),ncomp);
711  const auto& foo = foofab.const_array();
712 
713  MFItInfo mfi_info;
714  if (Gpu::notInLaunchRegion()) { mfi_info.SetDynamic(true); }
715 
716  AMREX_ALWAYS_ASSERT_WITH_MESSAGE(cross || tensorop || Gpu::notInLaunchRegion(),
717  "non-cross stencil not support for gpu");
718 
719  const int hidden_direction = this->hiddenDirection();
720 
721 #ifdef AMREX_USE_GPU
722  if ((cross || tensorop) && Gpu::inLaunchRegion())
723  {
724  Vector<MLMGABCTag<RT>> tags;
725  tags.reserve(in.local_size()*AMREX_SPACEDIM*ncomp);
726 
727  for (MFIter mfi(in); mfi.isValid(); ++mfi) {
728  const Box& vbx = mfi.validbox();
729  const auto& iofab = in.array(mfi);
730  const auto & bdlv = bcondloc.bndryLocs(mfi);
731  const auto & bdcv = bcondloc.bndryConds(mfi);
732 
733  for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
734  if (idim != hidden_direction) {
735  const Orientation olo(idim,Orientation::low);
736  const Orientation ohi(idim,Orientation::high);
737  const auto& bvlo = (bndry != nullptr) ?
738  bndry->bndryValues(olo).const_array(mfi) : foo;
739  const auto& bvhi = (bndry != nullptr) ?
740  bndry->bndryValues(ohi).const_array(mfi) : foo;
741  for (int icomp = 0; icomp < ncomp; ++icomp) {
742  tags.emplace_back(MLMGABCTag<RT>{iofab, bvlo, bvhi,
743  maskvals[olo].const_array(mfi),
744  maskvals[ohi].const_array(mfi),
745  bdlv[icomp][olo], bdlv[icomp][ohi],
746  amrex::adjCell(vbx,olo),
747  bdcv[icomp][olo], bdcv[icomp][ohi],
748  vbx.length(idim), icomp, idim});
749  }
750  }
751  }
752  }
753 
754  ParallelFor(tags,
755  [=] AMREX_GPU_DEVICE (int i, int j, int k, MLMGABCTag<RT> const& tag) noexcept
756  {
757  if (tag.dir == 0)
758  {
759  mllinop_apply_bc_x(0, i, j, k, tag.blen, tag.fab,
760  tag.mask_lo, tag.bctype_lo, tag.bcloc_lo, tag.bcval_lo,
761  imaxorder, dxi, flagbc, tag.comp);
762  mllinop_apply_bc_x(1, i+tag.blen+1, j, k, tag.blen, tag.fab,
763  tag.mask_hi, tag.bctype_hi, tag.bcloc_hi, tag.bcval_hi,
764  imaxorder, dxi, flagbc, tag.comp);
765  }
766 #if (AMREX_SPACEDIM > 1)
767  else
768 #if (AMREX_SPACEDIM > 2)
769  if (tag.dir == 1)
770 #endif
771  {
772  mllinop_apply_bc_y(0, i, j, k, tag.blen, tag.fab,
773  tag.mask_lo, tag.bctype_lo, tag.bcloc_lo, tag.bcval_lo,
774  imaxorder, dyi, flagbc, tag.comp);
775  mllinop_apply_bc_y(1, i, j+tag.blen+1, k, tag.blen, tag.fab,
776  tag.mask_hi, tag.bctype_hi, tag.bcloc_hi, tag.bcval_hi,
777  imaxorder, dyi, flagbc, tag.comp);
778  }
779 #if (AMREX_SPACEDIM > 2)
780  else {
781  mllinop_apply_bc_z(0, i, j, k, tag.blen, tag.fab,
782  tag.mask_lo, tag.bctype_lo, tag.bcloc_lo, tag.bcval_lo,
783  imaxorder, dzi, flagbc, tag.comp);
784  mllinop_apply_bc_z(1, i, j, k+tag.blen+1, tag.blen, tag.fab,
785  tag.mask_hi, tag.bctype_hi, tag.bcloc_hi, tag.bcval_hi,
786  imaxorder, dzi, flagbc, tag.comp);
787  }
788 #endif
789 #endif
790  });
791  } else
792 #endif
793  if (cross || tensorop)
794  {
795 #ifdef AMREX_USE_OMP
796 #pragma omp parallel if (Gpu::notInLaunchRegion())
797 #endif
798  for (MFIter mfi(in, mfi_info); mfi.isValid(); ++mfi)
799  {
800  const Box& vbx = mfi.validbox();
801  const auto& iofab = in.array(mfi);
802 
803  const auto & bdlv = bcondloc.bndryLocs(mfi);
804  const auto & bdcv = bcondloc.bndryConds(mfi);
805 
806  for (int idim = 0; idim < AMREX_SPACEDIM; ++idim)
807  {
808  if (hidden_direction == idim) { continue; }
809  const Orientation olo(idim,Orientation::low);
810  const Orientation ohi(idim,Orientation::high);
811  const Box blo = amrex::adjCellLo(vbx, idim);
812  const Box bhi = amrex::adjCellHi(vbx, idim);
813  const int blen = vbx.length(idim);
814  const auto& mlo = maskvals[olo].array(mfi);
815  const auto& mhi = maskvals[ohi].array(mfi);
816  const auto& bvlo = (bndry != nullptr) ? bndry->bndryValues(olo).const_array(mfi) : foo;
817  const auto& bvhi = (bndry != nullptr) ? bndry->bndryValues(ohi).const_array(mfi) : foo;
818  for (int icomp = 0; icomp < ncomp; ++icomp) {
819  const BoundCond bctlo = bdcv[icomp][olo];
820  const BoundCond bcthi = bdcv[icomp][ohi];
821  const RT bcllo = bdlv[icomp][olo];
822  const RT bclhi = bdlv[icomp][ohi];
823  if (idim == 0) {
824  mllinop_apply_bc_x(0, blo, blen, iofab, mlo,
825  bctlo, bcllo, bvlo,
826  imaxorder, dxi, flagbc, icomp);
827  mllinop_apply_bc_x(1, bhi, blen, iofab, mhi,
828  bcthi, bclhi, bvhi,
829  imaxorder, dxi, flagbc, icomp);
830  } else if (idim == 1) {
831  mllinop_apply_bc_y(0, blo, blen, iofab, mlo,
832  bctlo, bcllo, bvlo,
833  imaxorder, dyi, flagbc, icomp);
834  mllinop_apply_bc_y(1, bhi, blen, iofab, mhi,
835  bcthi, bclhi, bvhi,
836  imaxorder, dyi, flagbc, icomp);
837  } else {
838  mllinop_apply_bc_z(0, blo, blen, iofab, mlo,
839  bctlo, bcllo, bvlo,
840  imaxorder, dzi, flagbc, icomp);
841  mllinop_apply_bc_z(1, bhi, blen, iofab, mhi,
842  bcthi, bclhi, bvhi,
843  imaxorder, dzi, flagbc, icomp);
844  }
845  }
846  }
847  }
848  }
849  else
850  {
851 #ifdef BL_NO_FORT
852  amrex::Abort("amrex_mllinop_apply_bc not available when BL_NO_FORT=TRUE");
853 #else
854  if constexpr (std::is_same_v<Real,RT>) {
855 #ifdef AMREX_USE_OMP
856 #pragma omp parallel
857 #endif
858  for (MFIter mfi(in, mfi_info); mfi.isValid(); ++mfi)
859  {
860  const Box& vbx = mfi.validbox();
861 
862  const auto & bdlv = bcondloc.bndryLocs(mfi);
863  const auto & bdcv = bcondloc.bndryConds(mfi);
864 
865  const RealTuple & bdl = bdlv[0];
866  const BCTuple & bdc = bdcv[0];
867 
868  for (OrientationIter oitr; oitr; ++oitr)
869  {
870  const Orientation ori = oitr();
871 
872  int cdr = ori;
873  RT bcl = bdl[ori];
874  int bct = bdc[ori];
875 
876  const auto& fsfab = (bndry != nullptr) ? bndry->bndryValues(ori)[mfi] : foofab;
877 
878  const Mask& m = maskvals[ori][mfi];
879 
880  amrex_mllinop_apply_bc(BL_TO_FORTRAN_BOX(vbx),
881  BL_TO_FORTRAN_ANYD(in[mfi]),
882  BL_TO_FORTRAN_ANYD(m),
883  cdr, bct, bcl,
884  BL_TO_FORTRAN_ANYD(fsfab),
885  imaxorder, dxinv, flagbc, ncomp, cross);
886  }
887  }
888  } else {
889  amrex::Abort("Not supported");
890  }
891 #endif
892  }
893 }
894 
895 template <typename MF>
896 BoxArray
897 MLCellLinOpT<MF>::makeNGrids (int grid_size) const
898 {
899  const Box& dombx = this->m_geom[0].back().Domain();
900 
901  const BoxArray& old_ba = this->m_grids[0].back();
902  const int N = old_ba.size();
903  Vector<Box> bv;
904  bv.reserve(N);
905  for (int i = 0; i < N; ++i)
906  {
907  Box b = old_ba[i];
908  b.coarsen(grid_size);
909  b.refine(grid_size);
910  IntVect sz = b.size();
911  const IntVect nblks {AMREX_D_DECL(sz[0]/grid_size, sz[1]/grid_size, sz[2]/grid_size)};
912 
913  IntVect big = b.smallEnd() + grid_size - 1;
914  b.setBig(big);
915 
916 #if (AMREX_SPACEDIM == 3)
917  for (int kk = 0; kk < nblks[2]; ++kk) {
918 #endif
919 #if (AMREX_SPACEDIM >= 2)
920  for (int jj = 0; jj < nblks[1]; ++jj) {
921 #endif
922  for (int ii = 0; ii < nblks[0]; ++ii)
923  {
924  IntVect shft{AMREX_D_DECL(ii*grid_size,jj*grid_size,kk*grid_size)};
925  Box bb = amrex::shift(b,shft);
926  bb &= dombx;
927  bv.push_back(bb);
928  }
929 #if (AMREX_SPACEDIM >= 2)
930  }
931 #endif
932 #if (AMREX_SPACEDIM == 3)
933  }
934 #endif
935  }
936 
937  std::sort(bv.begin(), bv.end());
938  bv.erase(std::unique(bv.begin(), bv.end()), bv.end());
939 
940  BoxList bl(std::move(bv));
941 
942  return BoxArray{std::move(bl)};
943 }
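 // Worked example (editorial sketch): with grid_size = 16, an input box
 // [8,23]x[0,31] is snapped outward to the 16-point lattice by the
 // coarsen+refine pair above (giving [0,31]x[0,31]) and emitted as four 16x16
 // boxes clipped to the domain; the sort/unique pass removes duplicates
 // produced by overlapping input boxes.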
944 
945 template <typename MF>
946 void
947 MLCellLinOpT<MF>::restriction (int amrlev, int cmglev, MF& crse, MF& fine) const
948 {
949  const int ncomp = this->getNComp();
950  IntVect ratio = (amrlev > 0) ? IntVect(2) : this->mg_coarsen_ratio_vec[cmglev-1];
951  amrex::average_down(fine, crse, 0, ncomp, ratio);
952 }
953 
954 template <typename MF>
955 void
956 MLCellLinOpT<MF>::interpolation (int amrlev, int fmglev, MF& fine, const MF& crse) const
957 {
958  const int ncomp = this->getNComp();
959 
960  Dim3 ratio3 = {2,2,2};
961  IntVect ratio = (amrlev > 0) ? IntVect(2) : this->mg_coarsen_ratio_vec[fmglev];
962  AMREX_D_TERM(ratio3.x = ratio[0];,
963  ratio3.y = ratio[1];,
964  ratio3.z = ratio[2];);
965 
966 #ifdef AMREX_USE_GPU
967  if (Gpu::inLaunchRegion() && fine.isFusingCandidate()) {
968  auto const& finema = fine.arrays();
969  auto const& crsema = crse.const_arrays();
970  ParallelFor(fine, IntVect(0), ncomp,
971  [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k, int n) noexcept
972  {
973  int ic = amrex::coarsen(i,ratio3.x);
974  int jc = amrex::coarsen(j,ratio3.y);
975  int kc = amrex::coarsen(k,ratio3.z);
976  finema[box_no](i,j,k,n) += crsema[box_no](ic,jc,kc,n);
977  });
978  Gpu::streamSynchronize();
979  } else
980 #endif
981  {
982 #ifdef AMREX_USE_OMP
983 #pragma omp parallel if (Gpu::notInLaunchRegion())
984 #endif
985  for (MFIter mfi(fine,TilingIfNotGPU()); mfi.isValid(); ++mfi)
986  {
987  const Box& bx = mfi.tilebox();
988  Array4<RT const> const& cfab = crse.const_array(mfi);
989  Array4<RT> const& ffab = fine.array(mfi);
990  AMREX_HOST_DEVICE_PARALLEL_FOR_4D ( bx, ncomp, i, j, k, n,
991  {
992  int ic = amrex::coarsen(i,ratio3.x);
993  int jc = amrex::coarsen(j,ratio3.y);
994  int kc = amrex::coarsen(k,ratio3.z);
995  ffab(i,j,k,n) += cfab(ic,jc,kc,n);
996  });
997  }
998  }
999 }
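 // Note: this is piecewise-constant prolongation of the MG correction; each
 // fine cell accumulates its coarse parent's value, i.e.
 //     fine(i,j,k,n) += crse(coarsen(i,rx), coarsen(j,ry), coarsen(k,rz), n);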
1000 
1001 template <typename MF>
1002 void
1003 MLCellLinOpT<MF>::interpAssign (int amrlev, int fmglev, MF& fine, MF& crse) const
1004 {
1005  const int ncomp = this->getNComp();
1006 
1007  const Geometry& crse_geom = this->Geom(amrlev,fmglev+1);
1008  const IntVect refratio = (amrlev > 0) ? IntVect(2) : this->mg_coarsen_ratio_vec[fmglev];
1009  const IntVect ng = crse.nGrowVect();
1010 
1011  MF cfine;
1012  const MF* cmf;
1013 
1015  {
1016  crse.FillBoundary(crse_geom.periodicity());
1017  cmf = &crse;
1018  }
1019  else
1020  {
1021  BoxArray cba = fine.boxArray();
1022  cba.coarsen(refratio);
1023  cfine.define(cba, fine.DistributionMap(), ncomp, ng);
1024  cfine.setVal(RT(0.0));
1025  cfine.ParallelCopy(crse, 0, 0, ncomp, IntVect(0), ng, crse_geom.periodicity());
1026  cmf = & cfine;
1027  }
1028 
1029  bool isEB = fine.hasEBFabFactory();
1030  ignore_unused(isEB);
1031 
1032 #ifdef AMREX_USE_EB
1033  const auto *factory = dynamic_cast<EBFArrayBoxFactory const*>(&(fine.Factory()));
1034  const FabArray<EBCellFlagFab>* flags = (factory) ? &(factory->getMultiEBCellFlagFab()) : nullptr;
1035 #endif
1036 
1037  MFItInfo mfi_info;
1038  if (Gpu::notInLaunchRegion()) { mfi_info.EnableTiling().SetDynamic(true); }
1039 #ifdef AMREX_USE_OMP
1040 #pragma omp parallel if (Gpu::notInLaunchRegion())
1041 #endif
1042  for (MFIter mfi(fine, mfi_info); mfi.isValid(); ++mfi)
1043  {
1044  const Box& bx = mfi.tilebox();
1045  const auto& ff = fine.array(mfi);
1046  const auto& cc = cmf->array(mfi);
1047 #ifdef AMREX_USE_EB
1048  bool call_lincc;
1049  if (isEB)
1050  {
1051  const auto& flag = (*flags)[mfi];
1052  if (flag.getType(amrex::grow(bx,1)) == FabType::regular) {
1053  call_lincc = true;
1054  } else {
1055  Array4<EBCellFlag const> const& flg = flag.const_array();
1056  AMREX_LAUNCH_HOST_DEVICE_LAMBDA (bx, tbx,
1057  {
1058  mlmg_eb_cc_interp_r<2>(tbx, ff, cc, flg, ncomp);
1059  });
1060 
1061  call_lincc = false;
1062  }
1063  }
1064  else
1065  {
1066  call_lincc = true;
1067  }
1068 #else
1069  const bool call_lincc = true;
1070 #endif
1071  if (call_lincc)
1072  {
1073 #if (AMREX_SPACEDIM == 3)
1074  if (this->hasHiddenDimension()) {
1075  Box const& bx_2d = this->compactify(bx);
1076  auto const& ff_2d = this->compactify(ff);
1077  auto const& cc_2d = this->compactify(cc);
1078  AMREX_LAUNCH_HOST_DEVICE_LAMBDA (bx_2d, tbx,
1079  {
1080  TwoD::mlmg_lin_cc_interp_r2(tbx, ff_2d, cc_2d, ncomp);
1081  });
1082  } else
1083 #endif
1084  {
1085  AMREX_LAUNCH_HOST_DEVICE_LAMBDA (bx, tbx,
1086  {
1087  mlmg_lin_cc_interp_r2(tbx, ff, cc, ncomp);
1088  });
1089  }
1090  }
1091  }
1092 }
1093 
1094 template <typename MF>
1095 void
1096 MLCellLinOpT<MF>::interpolationAmr (int famrlev, MF& fine, const MF& crse,
1097  IntVect const& /*nghost*/) const
1098 {
1099  const int ncomp = this->getNComp();
1100  const int refratio = this->AMRRefRatio(famrlev-1);
1101 
1102 #ifdef AMREX_USE_EB
1103  const auto *factory = dynamic_cast<EBFArrayBoxFactory const*>(this->Factory(famrlev));
1104  const FabArray<EBCellFlagFab>* flags = (factory) ? &(factory->getMultiEBCellFlagFab()) : nullptr;
1105 #endif
1106 
1107  MFItInfo mfi_info;
1108  if (Gpu::notInLaunchRegion()) { mfi_info.EnableTiling().SetDynamic(true); }
1109 #ifdef AMREX_USE_OMP
1110 #pragma omp parallel if (Gpu::notInLaunchRegion())
1111 #endif
1112  for (MFIter mfi(fine, mfi_info); mfi.isValid(); ++mfi)
1113  {
1114  const Box& bx = mfi.tilebox();
1115  auto const& ff = fine.array(mfi);
1116  auto const& cc = crse.const_array(mfi);
1117 #ifdef AMREX_USE_EB
1118  bool call_lincc;
1119  if (factory)
1120  {
1121  const auto& flag = (*flags)[mfi];
1122  if (flag.getType(amrex::grow(bx,1)) == FabType::regular) {
1123  call_lincc = true;
1124  } else {
1125  Array4<EBCellFlag const> const& flg = flag.const_array();
1126  switch(refratio) {
1127  case 2:
1128  {
1129  AMREX_LAUNCH_HOST_DEVICE_LAMBDA (bx, tbx,
1130  {
1131  mlmg_eb_cc_interp_r<2>(tbx, ff, cc, flg, ncomp);
1132  });
1133  break;
1134  }
1135  case 4:
1136  {
1137  AMREX_LAUNCH_HOST_DEVICE_LAMBDA (bx, tbx,
1138  {
1139  mlmg_eb_cc_interp_r<4>(tbx, ff, cc, flg, ncomp);
1140  });
1141  break;
1142  }
1143  default:
1144  amrex::Abort("mlmg_eb_cc_interp: only refratio 2 and 4 are supported");
1145  }
1146 
1147  call_lincc = false;
1148  }
1149  }
1150  else
1151  {
1152  call_lincc = true;
1153  }
1154 #else
1155  const bool call_lincc = true;
1156 #endif
1157  if (call_lincc)
1158  {
1159  switch(refratio) {
1160  case 2:
1161  {
1162  AMREX_LAUNCH_HOST_DEVICE_LAMBDA (bx, tbx,
1163  {
1164  mlmg_lin_cc_interp_r2(tbx, ff, cc, ncomp);
1165  });
1166  break;
1167  }
1168  case 4:
1169  {
1170  AMREX_LAUNCH_HOST_DEVICE_LAMBDA (bx, tbx,
1171  {
1172  mlmg_lin_cc_interp_r4(tbx, ff, cc, ncomp);
1173  });
1174  break;
1175  }
1176  default:
1177  amrex::Abort("mlmg_lin_cc_interp: only refratio 2 and 4 are supported");
1178  }
1179  }
1180  }
1181 }
1182 
1183 template <typename MF>
1184 void
1185 MLCellLinOpT<MF>::averageDownSolutionRHS (int camrlev, MF& crse_sol, MF& crse_rhs,
1186  const MF& fine_sol, const MF& fine_rhs)
1187 {
1188  const auto amrrr = this->AMRRefRatio(camrlev);
1189  const int ncomp = this->getNComp();
1190  amrex::average_down(fine_sol, crse_sol, 0, ncomp, amrrr);
1191  amrex::average_down(fine_rhs, crse_rhs, 0, ncomp, amrrr);
1192 }
1193 
1194 template <typename MF>
1195 void
1196 MLCellLinOpT<MF>::apply (int amrlev, int mglev, MF& out, MF& in, BCMode bc_mode,
1197  StateMode s_mode, const MLMGBndryT<MF>* bndry) const
1198 {
1199  BL_PROFILE("MLCellLinOp::apply()");
1200  applyBC(amrlev, mglev, in, bc_mode, s_mode, bndry);
1201  Fapply(amrlev, mglev, out, in);
1202 }
1203 
1204 template <typename MF>
1205 void
1206 MLCellLinOpT<MF>::smooth (int amrlev, int mglev, MF& sol, const MF& rhs,
1207  bool skip_fillboundary) const
1208 {
1209  BL_PROFILE("MLCellLinOp::smooth()");
1210  for (int redblack = 0; redblack < 2; ++redblack)
1211  {
1212  applyBC(amrlev, mglev, sol, BCMode::Homogeneous, StateMode::Solution,
1213  nullptr, skip_fillboundary);
1214  Fsmooth(amrlev, mglev, sol, rhs, redblack);
1215  skip_fillboundary = false;
1216  }
1217 }
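 // Note: one smooth() performs a full red-black Gauss-Seidel sweep as two
 // half-sweeps (redblack = 0 then 1), refreshing ghost cells with a
 // homogeneous applyBC before each Fsmooth. Pass skip_fillboundary = true
 // only when the caller has already filled the ghost cells for the first
 // half-sweep.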
1218 
1219 template <typename MF>
1220 void
1221 MLCellLinOpT<MF>::solutionResidual (int amrlev, MF& resid, MF& x, const MF& b,
1222  const MF* crse_bcdata)
1223 {
1224  BL_PROFILE("MLCellLinOp::solutionResidual()");
1225  const int ncomp = this->getNComp();
1226  if (crse_bcdata != nullptr) {
1227  updateSolBC(amrlev, *crse_bcdata);
1228  }
1229  const int mglev = 0;
1230  apply(amrlev, mglev, resid, x, BCMode::Inhomogeneous, StateMode::Solution,
1231  m_bndry_sol[amrlev].get());
1232 
1233  AMREX_ASSERT(resid.nComp() == b.nComp());
1234  MF::Xpay(resid, RT(-1.0), b, 0, 0, ncomp, IntVect(0));
1235 }
1236 
1237 template <typename MF>
1238 void
1239 MLCellLinOpT<MF>::prepareForFluxes (int amrlev, const MF* crse_bcdata)
1240 {
1241  if (crse_bcdata != nullptr) {
1242  updateSolBC(amrlev, *crse_bcdata);
1243  }
1244 }
1245 
1246 template <typename MF>
1247 void
1248 MLCellLinOpT<MF>::correctionResidual (int amrlev, int mglev, MF& resid, MF& x, const MF& b,
1249  BCMode bc_mode, const MF* crse_bcdata)
1250 {
1251  BL_PROFILE("MLCellLinOp::correctionResidual()");
1252  const int ncomp = this->getNComp();
1253  if (bc_mode == BCMode::Inhomogeneous)
1254  {
1255  if (crse_bcdata)
1256  {
1257  AMREX_ASSERT(mglev == 0 && amrlev > 0);
1258  updateCorBC(amrlev, *crse_bcdata);
1259  }
1260  apply(amrlev, mglev, resid, x, BCMode::Inhomogeneous, StateMode::Correction,
1261  m_bndry_cor[amrlev].get());
1262  }
1263  else
1264  {
1265  AMREX_ASSERT(crse_bcdata == nullptr);
1266  apply(amrlev, mglev, resid, x, BCMode::Homogeneous, StateMode::Correction, nullptr);
1267  }
1268 
1269  MF::Xpay(resid, Real(-1.0), b, 0, 0, ncomp, IntVect(0));
1270 }
1271 
1272 template <typename MF>
1273 void
1274 MLCellLinOpT<MF>::reflux (int crse_amrlev, MF& res, const MF& crse_sol, const MF&,
1275  MF&, MF& fine_sol, const MF&) const
1276 {
1277  BL_PROFILE("MLCellLinOp::reflux()");
1278 
1279  auto& fluxreg = m_fluxreg[crse_amrlev];
1280  fluxreg.reset();
1281 
1282  const int ncomp = this->getNComp();
1283 
1284  const int fine_amrlev = crse_amrlev+1;
1285 
1286  Real dt = Real(1.0);
1287  const Real* crse_dx = this->m_geom[crse_amrlev][0].CellSize();
1288  const Real* fine_dx = this->m_geom[fine_amrlev][0].CellSize();
1289 
1290  const int mglev = 0;
1291  applyBC(fine_amrlev, mglev, fine_sol, BCMode::Inhomogeneous, StateMode::Solution,
1292  m_bndry_sol[fine_amrlev].get());
1293 
1294  MFItInfo mfi_info;
1295  if (Gpu::notInLaunchRegion()) { mfi_info.EnableTiling().SetDynamic(true); }
1296 
1297 #ifdef AMREX_USE_OMP
1298 #pragma omp parallel if (Gpu::notInLaunchRegion())
1299 #endif
1300  {
1301  Array<FAB,AMREX_SPACEDIM> flux;
1302  Array<FAB*,AMREX_SPACEDIM> pflux {{ AMREX_D_DECL(flux.data(), flux.data()+1, flux.data()+2) }};
1303  Array<FAB const*,AMREX_SPACEDIM> cpflux {{ AMREX_D_DECL(flux.data(), flux.data()+1, flux.data()+2) }};
1304 
1305  for (MFIter mfi(crse_sol, mfi_info); mfi.isValid(); ++mfi)
1306  {
1307  if (fluxreg.CrseHasWork(mfi))
1308  {
1309  const Box& tbx = mfi.tilebox();
1310  AMREX_D_TERM(flux[0].resize(amrex::surroundingNodes(tbx,0),ncomp);,
1311  flux[1].resize(amrex::surroundingNodes(tbx,1),ncomp);,
1312  flux[2].resize(amrex::surroundingNodes(tbx,2),ncomp););
1313  AMREX_D_TERM(Elixir elifx = flux[0].elixir();,
1314  Elixir elify = flux[1].elixir();,
1315  Elixir elifz = flux[2].elixir(););
1316  FFlux(crse_amrlev, mfi, pflux, crse_sol[mfi], Location::FaceCentroid);
1317  fluxreg.CrseAdd(mfi, cpflux, crse_dx, dt, RunOn::Gpu);
1318  }
1319  }
1320 
1321 #ifdef AMREX_USE_OMP
1322 #pragma omp barrier
1323 #endif
1324 
1325  for (MFIter mfi(fine_sol, mfi_info); mfi.isValid(); ++mfi)
1326  {
1327  if (fluxreg.FineHasWork(mfi))
1328  {
1329  const Box& tbx = mfi.tilebox();
1330  const int face_only = true;
1331  AMREX_D_TERM(flux[0].resize(amrex::surroundingNodes(tbx,0),ncomp);,
1332  flux[1].resize(amrex::surroundingNodes(tbx,1),ncomp);,
1333  flux[2].resize(amrex::surroundingNodes(tbx,2),ncomp););
1334  AMREX_D_TERM(Elixir elifx = flux[0].elixir();,
1335  Elixir elify = flux[1].elixir();,
1336  Elixir elifz = flux[2].elixir(););
1337  FFlux(fine_amrlev, mfi, pflux, fine_sol[mfi], Location::FaceCentroid, face_only);
1338  fluxreg.FineAdd(mfi, cpflux, fine_dx, dt, RunOn::Gpu);
1339  }
1340  }
1341  }
1342 
1343  fluxreg.Reflux(res);
1344 }
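 // Note: reflux corrects the coarse-level residual along the coarse/fine
 // boundary. Coarse-side fluxes enter the register via CrseAdd, fine-side
 // fluxes via FineAdd, and YAFluxRegister::Reflux applies the resulting flux
 // mismatch to res in the coarse cells adjacent to the fine grids.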
1345 
1346 template <typename MF>
1347 void
1348 MLCellLinOpT<MF>::compFlux (int amrlev, const Array<MF*,AMREX_SPACEDIM>& fluxes,
1349  MF& sol, Location loc) const
1350 {
1351  BL_PROFILE("MLCellLinOp::compFlux()");
1352 
1353  const int mglev = 0;
1354  const int ncomp = this->getNComp();
1355  applyBC(amrlev, mglev, sol, BCMode::Inhomogeneous, StateMode::Solution,
1356  m_bndry_sol[amrlev].get());
1357 
1358  MFItInfo mfi_info;
1359  if (Gpu::notInLaunchRegion()) { mfi_info.EnableTiling().SetDynamic(true); }
1360 
1361 #ifdef AMREX_USE_OMP
1362 #pragma omp parallel if (Gpu::notInLaunchRegion())
1363 #endif
1364  {
1365  Array<FAB,AMREX_SPACEDIM> flux;
1366  Array<FAB*,AMREX_SPACEDIM> pflux {{ AMREX_D_DECL(flux.data(), flux.data()+1, flux.data()+2) }};
1367  for (MFIter mfi(sol, mfi_info); mfi.isValid(); ++mfi)
1368  {
1369  const Box& tbx = mfi.tilebox();
1370  AMREX_D_TERM(flux[0].resize(amrex::surroundingNodes(tbx,0),ncomp);,
1371  flux[1].resize(amrex::surroundingNodes(tbx,1),ncomp);,
1372  flux[2].resize(amrex::surroundingNodes(tbx,2),ncomp););
1373  AMREX_D_TERM(Elixir elifx = flux[0].elixir();,
1374  Elixir elify = flux[1].elixir();,
1375  Elixir elifz = flux[2].elixir(););
1376  FFlux(amrlev, mfi, pflux, sol[mfi], loc);
1377  for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
1378  const Box& nbx = mfi.nodaltilebox(idim);
1379  auto const& dst = fluxes[idim]->array(mfi);
1380  auto const& src = pflux[idim]->const_array();
1381  AMREX_HOST_DEVICE_PARALLEL_FOR_4D (nbx, ncomp, i, j, k, n,
1382  {
1383  dst(i,j,k,n) = src(i,j,k,n);
1384  });
1385  }
1386  }
1387  }
1388 }
1389 
1390 template <typename MF>
1391 void
1392 MLCellLinOpT<MF>::compGrad (int amrlev, const Array<MF*,AMREX_SPACEDIM>& grad,
1393  MF& sol, Location /*loc*/) const
1394 {
1395  BL_PROFILE("MLCellLinOp::compGrad()");
1396 
1397  if (sol.nComp() > 1) {
1398  amrex::Abort("MLCellLinOp::compGrad called, but only works for single-component solves");
1399  }
1400 
1401  const int mglev = 0;
1402  applyBC(amrlev, mglev, sol, BCMode::Inhomogeneous, StateMode::Solution,
1403  m_bndry_sol[amrlev].get());
1404 
1405  const int ncomp = this->getNComp();
1406 
1407  AMREX_D_TERM(const RT dxi = static_cast<RT>(this->m_geom[amrlev][mglev].InvCellSize(0));,
1408  const RT dyi = static_cast<RT>(this->m_geom[amrlev][mglev].InvCellSize(1));,
1409  const RT dzi = static_cast<RT>(this->m_geom[amrlev][mglev].InvCellSize(2)););
1410 #ifdef AMREX_USE_OMP
1411 #pragma omp parallel if (Gpu::notInLaunchRegion())
1412 #endif
1413  for (MFIter mfi(sol, TilingIfNotGPU()); mfi.isValid(); ++mfi)
1414  {
1415  AMREX_D_TERM(const Box& xbx = mfi.nodaltilebox(0);,
1416  const Box& ybx = mfi.nodaltilebox(1);,
1417  const Box& zbx = mfi.nodaltilebox(2););
1418  const auto& s = sol.array(mfi);
1419  AMREX_D_TERM(const auto& gx = grad[0]->array(mfi);,
1420  const auto& gy = grad[1]->array(mfi);,
1421  const auto& gz = grad[2]->array(mfi););
1422 
1423  AMREX_HOST_DEVICE_PARALLEL_FOR_4D ( xbx, ncomp, i, j, k, n,
1424  {
1425  gx(i,j,k,n) = dxi*(s(i,j,k,n) - s(i-1,j,k,n));
1426  });
1427 #if (AMREX_SPACEDIM >= 2)
1428  AMREX_HOST_DEVICE_PARALLEL_FOR_4D ( ybx, ncomp, i, j, k, n,
1429  {
1430  gy(i,j,k,n) = dyi*(s(i,j,k,n) - s(i,j-1,k,n));
1431  });
1432 #endif
1433 #if (AMREX_SPACEDIM == 3)
1434  AMREX_HOST_DEVICE_PARALLEL_FOR_4D ( zbx, ncomp, i, j, k, n,
1435  {
1436  gz(i,j,k,n) = dzi*(s(i,j,k,n) - s(i,j,k-1,n));
1437  });
1438 #endif
1439  }
1440 
1441  addInhomogNeumannFlux(amrlev, grad, sol, false);
1442 }
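 // Note: the gradient is a two-point difference across each face, e.g.
 //     gx(i,j,k,n) = (s(i,j,k,n) - s(i-1,j,k,n)) / dx
 // on face-centered boxes; applyBC has already filled the ghost cells so the
 // stencil is valid at domain boundaries, and inhomogeneous Neumann
 // contributions are added afterwards by addInhomogNeumannFlux.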
1443 
1444 template <typename MF>
1445 void
1446 MLCellLinOpT<MF>::applyMetricTerm (int amrlev, int mglev, MF& rhs) const
1447 {
1448  amrex::ignore_unused(amrlev,mglev,rhs);
1449 #if (AMREX_SPACEDIM != 3)
1450  if (!m_has_metric_term) { return; }
1451 
1452  const int ncomp = rhs.nComp();
1453 
1454  bool cc = rhs.ixType().cellCentered(0);
1455 
1456  const Geometry& geom = this->m_geom[amrlev][mglev];
1457  const RT dx = static_cast<RT>(geom.CellSize(0));
1458  const RT probxlo = static_cast<RT>(geom.ProbLo(0));
1459 
1460 #ifdef AMREX_USE_OMP
1461 #pragma omp parallel if (Gpu::notInLaunchRegion())
1462 #endif
1463  for (MFIter mfi(rhs,TilingIfNotGPU()); mfi.isValid(); ++mfi)
1464  {
1465  const Box& tbx = mfi.tilebox();
1466  auto const& rhsarr = rhs.array(mfi);
1467 #if (AMREX_SPACEDIM == 1)
1468  if (cc) {
1469  AMREX_HOST_DEVICE_PARALLEL_FOR_4D ( tbx, ncomp, i, j, k, n,
1470  {
1471  RT rc = probxlo + (i+RT(0.5))*dx;
1472  rhsarr(i,j,k,n) *= rc*rc;
1473  });
1474  } else {
1475  AMREX_HOST_DEVICE_PARALLEL_FOR_4D ( tbx, ncomp, i, j, k, n,
1476  {
1477  RT re = probxlo + i*dx;
1478  rhsarr(i,j,k,n) *= re*re;
1479  });
1480  }
1481 #elif (AMREX_SPACEDIM == 2)
1482  if (cc) {
1483  AMREX_HOST_DEVICE_PARALLEL_FOR_4D ( tbx, ncomp, i, j, k, n,
1484  {
1485  RT rc = probxlo + (i+RT(0.5))*dx;
1486  rhsarr(i,j,k,n) *= rc;
1487  });
1488  } else {
1489  AMREX_HOST_DEVICE_PARALLEL_FOR_4D ( tbx, ncomp, i, j, k, n,
1490  {
1491  RT re = probxlo + i*dx;
1492  rhsarr(i,j,k,n) *= re;
1493  });
1494  }
1495 #endif
1496  }
1497 #endif
1498 }
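 // Note: in non-Cartesian 1D/2D geometry the equations are scaled by the
 // radial metric, r in 2D cylindrical and r^2 in 1D spherical coordinates,
 // sampled at cell centers (probxlo + (i+0.5)*dx) for cell-centered data and
 // at edges (probxlo + i*dx) otherwise; unapplyMetricTerm below divides the
 // metric back out, with the r = 0 axis handled by a zero inverse.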
1499 
1500 template <typename MF>
1501 void
1502 MLCellLinOpT<MF>::unapplyMetricTerm (int amrlev, int mglev, MF& rhs) const
1503 {
1504  amrex::ignore_unused(amrlev,mglev,rhs);
1505 #if (AMREX_SPACEDIM != 3)
1506  if (!m_has_metric_term) { return; }
1507 
1508  const int ncomp = rhs.nComp();
1509 
1510  bool cc = rhs.ixType().cellCentered(0);
1511 
1512  const Geometry& geom = this->m_geom[amrlev][mglev];
1513  const RT dx = static_cast<RT>(geom.CellSize(0));
1514  const RT probxlo = static_cast<RT>(geom.ProbLo(0));
1515 
1516 #ifdef AMREX_USE_OMP
1517 #pragma omp parallel if (Gpu::notInLaunchRegion())
1518 #endif
1519  for (MFIter mfi(rhs,TilingIfNotGPU()); mfi.isValid(); ++mfi)
1520  {
1521  const Box& tbx = mfi.tilebox();
1522  auto const& rhsarr = rhs.array(mfi);
1523 #if (AMREX_SPACEDIM == 1)
1524  if (cc) {
1525  AMREX_HOST_DEVICE_PARALLEL_FOR_4D ( tbx, ncomp, i, j, k, n,
1526  {
1527  RT rcinv = RT(1.0)/(probxlo + (i+RT(0.5))*dx);
1528  rhsarr(i,j,k,n) *= rcinv*rcinv;
1529  });
1530  } else {
1531  AMREX_HOST_DEVICE_PARALLEL_FOR_4D ( tbx, ncomp, i, j, k, n,
1532  {
1533  RT re = probxlo + i*dx;
1534  RT reinv = (re==RT(0.0)) ? RT(0.0) : RT(1.)/re;
1535  rhsarr(i,j,k,n) *= reinv*reinv;
1536  });
1537  }
1538 #elif (AMREX_SPACEDIM == 2)
1539  if (cc) {
1540  AMREX_HOST_DEVICE_PARALLEL_FOR_4D ( tbx, ncomp, i, j, k, n,
1541  {
1542  RT rcinv = RT(1.0)/(probxlo + (i+RT(0.5))*dx);
1543  rhsarr(i,j,k,n) *= rcinv;
1544  });
1545  } else {
1546  AMREX_HOST_DEVICE_PARALLEL_FOR_4D ( tbx, ncomp, i, j, k, n,
1547  {
1548  RT re = probxlo + i*dx;
1549  RT reinv = (re==RT(0.0)) ? RT(0.0) : RT(1.)/re;
1550  rhsarr(i,j,k,n) *= reinv;
1551  });
1552  }
1553 #endif
1554  }
1555 #endif
1556 }
1557 
1558 template <typename MF>
1559 auto
1560 MLCellLinOpT<MF>::getSolvabilityOffset (int amrlev, int mglev, MF const& rhs) const
1561  -> Vector<RT>
1562 {
1563  computeVolInv();
1564 
1565  const int ncomp = this->getNComp();
1566  Vector<RT> offset(ncomp);
1567 
1568 #ifdef AMREX_USE_EB
1569  const auto *factory = dynamic_cast<EBFArrayBoxFactory const*>(this->Factory(amrlev,mglev));
1570  if (factory && !factory->isAllRegular())
1571  {
1572  if constexpr (std::is_same<MF,MultiFab>()) {
1573  const MultiFab& vfrac = factory->getVolFrac();
1574  for (int c = 0; c < ncomp; ++c) {
1575  offset[c] = amrex::Dot(rhs, c, vfrac, 0, 1, IntVect(0), true)
1576  * m_volinv[amrlev][mglev];
1577  }
1578  } else {
1579  amrex::Abort("TODO: MLMG with EB only works with MultiFab");
1580  }
1581  }
1582  else
1583 #endif
1584  {
1585  for (int c = 0; c < ncomp; ++c) {
1586  offset[c] = rhs.sum(c,IntVect(0),true) * m_volinv[amrlev][mglev];
1587  }
1588  }
1589 
1590  ParallelAllReduce::Sum(offset.data(), ncomp, ParallelContext::CommunicatorSub());
1591 
1592  return offset;
1593 }
1594 
1595 template <typename MF>
1596 void
1597 MLCellLinOpT<MF>::fixSolvabilityByOffset (int /*amrlev*/, int /*mglev*/, MF& rhs,
1598  Vector<RT> const& offset) const
1599 {
1600  const int ncomp = this->getNComp();
1601  for (int c = 0; c < ncomp; ++c) {
1602  rhs.plus(-offset[c], c, 1);
1603  }
1604 #ifdef AMREX_USE_EB
1605  if (!rhs.isAllRegular()) {
1606  if constexpr (std::is_same<MF,MultiFab>()) {
1607  amrex::EB_set_covered(rhs, 0, ncomp, 0, 0.0_rt);
1608  } else {
1609  amrex::Abort("amrex::EB_set_covered only works with MultiFab");
1610  }
1611  }
1612 #endif
1613 }
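 // Example (editorial sketch): for singular problems (e.g. all-Neumann or
 // fully periodic BCs) the constant field lies in the operator's null space,
 // so the RHS must have zero volume-weighted mean for a solution to exist:
 //
 //     auto offset = linop.getSolvabilityOffset(0, 0, rhs); // per-component means
 //     linop.fixSolvabilityByOffset(0, 0, rhs, offset);     // subtract them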
1614 
1615 template <typename MF>
1616 void
1617 MLCellLinOpT<MF>::prepareForSolve ()
1618 {
1619  BL_PROFILE("MLCellLinOp::prepareForSolve()");
1620 
1621  const int imaxorder = this->maxorder;
1622  const int ncomp = this->getNComp();
1623  const int hidden_direction = this->hiddenDirection();
1624  for (int amrlev = 0; amrlev < this->m_num_amr_levels; ++amrlev)
1625  {
1626  for (int mglev = 0; mglev < this->m_num_mg_levels[amrlev]; ++mglev)
1627  {
1628  const auto& bcondloc = *m_bcondloc[amrlev][mglev];
1629  const auto& maskvals = m_maskvals[amrlev][mglev];
1630 
1631  const RT dxi = static_cast<RT>(this->m_geom[amrlev][mglev].InvCellSize(0));
1632  const RT dyi = static_cast<RT>((AMREX_SPACEDIM >= 2) ? this->m_geom[amrlev][mglev].InvCellSize(1) : Real(1.0));
1633  const RT dzi = static_cast<RT>((AMREX_SPACEDIM == 3) ? this->m_geom[amrlev][mglev].InvCellSize(2) : Real(1.0));
1634 
1635  auto& undrrelxr = this->m_undrrelxr[amrlev][mglev];
1636  MF foo(this->m_grids[amrlev][mglev], this->m_dmap[amrlev][mglev], ncomp, 0, MFInfo().SetAlloc(false));
1637 
1638 #ifdef AMREX_USE_EB
1639  const auto *factory = dynamic_cast<EBFArrayBoxFactory const*>(this->m_factory[amrlev][mglev].get());
1640  const FabArray<EBCellFlagFab>* flags =
1641  (factory) ? &(factory->getMultiEBCellFlagFab()) : nullptr;
1642  auto area = (factory) ? factory->getAreaFrac()
1643  : Array<const MultiCutFab*,AMREX_SPACEDIM>{AMREX_D_DECL(nullptr,nullptr,nullptr)};
1644  amrex::ignore_unused(area);
1645 #endif
1646 
1647 #ifdef AMREX_USE_GPU
1648  if (Gpu::inLaunchRegion()) {
1649 #ifdef AMREX_USE_EB
1650  if (factory && !factory->isAllRegular()) {
1651  if constexpr (!std::is_same<MF,MultiFab>()) {
1652  amrex::Abort("MLCellLinOp with EB only works with MultiFab");
1653  } else {
1654  Vector<MLMGPSEBTag<RT>> tags;
1655  tags.reserve(foo.local_size()*AMREX_SPACEDIM*ncomp);
1656 
1657  for (MFIter mfi(foo); mfi.isValid(); ++mfi)
1658  {
1659  const Box& vbx = mfi.validbox();
1660 
1661  const auto & bdlv = bcondloc.bndryLocs(mfi);
1662  const auto & bdcv = bcondloc.bndryConds(mfi);
1663 
1664  auto fabtyp = (flags) ? (*flags)[mfi].getType(vbx) : FabType::regular;
1665 
1666  for (int idim = 0; idim < AMREX_SPACEDIM; ++idim)
1667  {
1668  if (idim != hidden_direction && fabtyp != FabType::covered) {
1669  const Orientation olo(idim,Orientation::low);
1670  const Orientation ohi(idim,Orientation::high);
1671  auto const& ap = (fabtyp == FabType::singlevalued)
1672  ? area[idim]->const_array(mfi) : Array4<Real const>{};
1673  for (int icomp = 0; icomp < ncomp; ++icomp) {
1674  tags.emplace_back(MLMGPSEBTag<RT>{undrrelxr[olo].array(mfi),
1675  undrrelxr[ohi].array(mfi),
1676  ap,
1677  maskvals[olo].const_array(mfi),
1678  maskvals[ohi].const_array(mfi),
1679  bdlv[icomp][olo], bdlv[icomp][ohi],
1680  amrex::adjCell(vbx,olo),
1681  bdcv[icomp][olo], bdcv[icomp][ohi],
1682  vbx.length(idim), icomp, idim});
1683  }
1684  }
1685  }
1686  }
1687 
1688  ParallelFor(tags,
1689  [=] AMREX_GPU_DEVICE (int i, int j, int k, MLMGPSEBTag<RT> const& tag) noexcept
1690  {
1691  if (tag.ap) {
1692  if (tag.dir == 0)
1693  {
1694  mllinop_comp_interp_coef0_x_eb
1695  (0, i , j, k, tag.blen, tag.flo, tag.mlo, tag.ap,
1696  tag.bctlo, tag.bcllo, imaxorder, dxi, tag.comp);
1697  mllinop_comp_interp_coef0_x_eb
1698  (1, i+tag.blen+1, j, k, tag.blen, tag.fhi, tag.mhi, tag.ap,
1699  tag.bcthi, tag.bclhi, imaxorder, dxi, tag.comp);
1700  }
1701 #if (AMREX_SPACEDIM > 1)
1702  else
1703 #if (AMREX_SPACEDIM > 2)
1704  if (tag.dir == 1)
1705 #endif
1706  {
1707  mllinop_comp_interp_coef0_y_eb
1708  (0, i, j , k, tag.blen, tag.flo, tag.mlo, tag.ap,
1709  tag.bctlo, tag.bcllo, imaxorder, dyi, tag.comp);
1710  mllinop_comp_interp_coef0_y_eb
1711  (1, i, j+tag.blen+1, k, tag.blen, tag.fhi, tag.mhi, tag.ap,
1712  tag.bcthi, tag.bclhi, imaxorder, dyi, tag.comp);
1713  }
1714 #if (AMREX_SPACEDIM > 2)
1715  else {
1716  mllinop_comp_interp_coef0_z_eb
1717  (0, i, j, k , tag.blen, tag.flo, tag.mlo, tag.ap,
1718  tag.bctlo, tag.bcllo, imaxorder, dzi, tag.comp);
1719  mllinop_comp_interp_coef0_z_eb
1720  (1, i, j, k+tag.blen+1, tag.blen, tag.fhi, tag.mhi, tag.ap,
1721  tag.bcthi, tag.bclhi, imaxorder, dzi, tag.comp);
1722  }
1723 #endif
1724 #endif
1725  } else {
1726  if (tag.dir == 0)
1727  {
1728  mllinop_comp_interp_coef0_x
1729  (0, i , j, k, tag.blen, tag.flo, tag.mlo,
1730  tag.bctlo, tag.bcllo, imaxorder, dxi, tag.comp);
1731  mllinop_comp_interp_coef0_x
1732  (1, i+tag.blen+1, j, k, tag.blen, tag.fhi, tag.mhi,
1733  tag.bcthi, tag.bclhi, imaxorder, dxi, tag.comp);
1734  }
1735 #if (AMREX_SPACEDIM > 1)
1736  else
1737 #if (AMREX_SPACEDIM > 2)
1738  if (tag.dir == 1)
1739 #endif
1740  {
1741  mllinop_comp_interp_coef0_y
1742  (0, i, j , k, tag.blen, tag.flo, tag.mlo,
1743  tag.bctlo, tag.bcllo, imaxorder, dyi, tag.comp);
1744  mllinop_comp_interp_coef0_y
1745  (1, i, j+tag.blen+1, k, tag.blen, tag.fhi, tag.mhi,
1746  tag.bcthi, tag.bclhi, imaxorder, dyi, tag.comp);
1747  }
1748 #if (AMREX_SPACEDIM > 2)
1749  else {
1750  mllinop_comp_interp_coef0_z
1751  (0, i, j, k , tag.blen, tag.flo, tag.mlo,
1752  tag.bctlo, tag.bcllo, imaxorder, dzi, tag.comp);
1753  mllinop_comp_interp_coef0_z
1754  (1, i, j, k+tag.blen+1, tag.blen, tag.fhi, tag.mhi,
1755  tag.bcthi, tag.bclhi, imaxorder, dzi, tag.comp);
1756  }
1757 #endif
1758 #endif
1759  }
1760  });
1761  }
1762  } else
1763 #endif
1764  {
1765  Vector<MLMGPSTag<RT>> tags;
1766  tags.reserve(foo.local_size()*AMREX_SPACEDIM*ncomp);
1767 
1768  for (MFIter mfi(foo); mfi.isValid(); ++mfi)
1769  {
1770  const Box& vbx = mfi.validbox();
1771 
1772  const auto & bdlv = bcondloc.bndryLocs(mfi);
1773  const auto & bdcv = bcondloc.bndryConds(mfi);
1774 
1775  for (int idim = 0; idim < AMREX_SPACEDIM; ++idim)
1776  {
1777  if (idim != hidden_direction) {
1778  const Orientation olo(idim,Orientation::low);
1779  const Orientation ohi(idim,Orientation::high);
1780  for (int icomp = 0; icomp < ncomp; ++icomp) {
1781  tags.emplace_back(MLMGPSTag<RT>{undrrelxr[olo].array(mfi),
1782  undrrelxr[ohi].array(mfi),
1783  maskvals[olo].const_array(mfi),
1784  maskvals[ohi].const_array(mfi),
1785  bdlv[icomp][olo], bdlv[icomp][ohi],
1786  amrex::adjCell(vbx,olo),
1787  bdcv[icomp][olo], bdcv[icomp][ohi],
1788  vbx.length(idim), icomp, idim});
1789  }
1790  }
1791  }
1792  }
1793 
1794  ParallelFor(tags,
1795  [=] AMREX_GPU_DEVICE (int i, int j, int k, MLMGPSTag<RT> const& tag) noexcept
1796  {
1797  if (tag.dir == 0)
1798  {
1799  mllinop_comp_interp_coef0_x
1800  (0, i , j, k, tag.blen, tag.flo, tag.mlo,
1801  tag.bctlo, tag.bcllo, imaxorder, dxi, tag.comp);
1802  mllinop_comp_interp_coef0_x
1803  (1, i+tag.blen+1, j, k, tag.blen, tag.fhi, tag.mhi,
1804  tag.bcthi, tag.bclhi, imaxorder, dxi, tag.comp);
1805  }
1806 #if (AMREX_SPACEDIM > 1)
1807  else
1808 #if (AMREX_SPACEDIM > 2)
1809  if (tag.dir == 1)
1810 #endif
1811  {
1812  mllinop_comp_interp_coef0_y
1813  (0, i, j , k, tag.blen, tag.flo, tag.mlo,
1814  tag.bctlo, tag.bcllo, imaxorder, dyi, tag.comp);
1815  mllinop_comp_interp_coef0_y
1816  (1, i, j+tag.blen+1, k, tag.blen, tag.fhi, tag.mhi,
1817  tag.bcthi, tag.bclhi, imaxorder, dyi, tag.comp);
1818  }
1819 #if (AMREX_SPACEDIM > 2)
1820  else {
1821  mllinop_comp_interp_coef0_z
1822  (0, i, j, k , tag.blen, tag.flo, tag.mlo,
1823  tag.bctlo, tag.bcllo, imaxorder, dzi, tag.comp);
1824  mllinop_comp_interp_coef0_z
1825  (1, i, j, k+tag.blen+1, tag.blen, tag.fhi, tag.mhi,
1826  tag.bcthi, tag.bclhi, imaxorder, dzi, tag.comp);
1827  }
1828 #endif
1829 #endif
1830  });
1831  }
1832  } else
1833 #endif
1834  {
1835 #ifdef AMREX_USE_OMP
1836 #pragma omp parallel
1837 #endif
1838  for (MFIter mfi(foo, MFItInfo{}.SetDynamic(true)); mfi.isValid(); ++mfi)
1839  {
1840  const Box& vbx = mfi.validbox();
1841 
1842  const auto & bdlv = bcondloc.bndryLocs(mfi);
1843  const auto & bdcv = bcondloc.bndryConds(mfi);
1844 
1845 #ifdef AMREX_USE_EB
1846  auto fabtyp = (flags) ? (*flags)[mfi].getType(vbx) : FabType::regular;
1847 #endif
1848  for (int idim = 0; idim < AMREX_SPACEDIM; ++idim)
1849  {
1850  if (idim == hidden_direction) { continue; }
1851  const Orientation olo(idim,Orientation::low);
1852  const Orientation ohi(idim,Orientation::high);
1853  const Box blo = amrex::adjCellLo(vbx, idim);
1854  const Box bhi = amrex::adjCellHi(vbx, idim);
1855  const int blen = vbx.length(idim);
1856  const auto& mlo = maskvals[olo].array(mfi);
1857  const auto& mhi = maskvals[ohi].array(mfi);
1858  const auto& flo = undrrelxr[olo].array(mfi);
1859  const auto& fhi = undrrelxr[ohi].array(mfi);
1860  for (int icomp = 0; icomp < ncomp; ++icomp) {
1861  const BoundCond bctlo = bdcv[icomp][olo];
1862  const BoundCond bcthi = bdcv[icomp][ohi];
1863  const auto bcllo = bdlv[icomp][olo];
1864  const auto bclhi = bdlv[icomp][ohi];
1865 #ifdef AMREX_USE_EB
1866  if (fabtyp == FabType::singlevalued) {
1867  if constexpr (!std::is_same<MF,MultiFab>()) {
1868  amrex::Abort("MLCellLinOp with EB only works with MultiFab");
1869  } else {
1870  auto const& ap = area[idim]->const_array(mfi);
1871  if (idim == 0) {
1872  mllinop_comp_interp_coef0_x_eb
1873  (0, blo, blen, flo, mlo, ap, bctlo, bcllo,
1874  imaxorder, dxi, icomp);
1875  mllinop_comp_interp_coef0_x_eb
1876  (1, bhi, blen, fhi, mhi, ap, bcthi, bclhi,
1877  imaxorder, dxi, icomp);
1878  } else if (idim == 1) {
1879  mllinop_comp_interp_coef0_y_eb
1880  (0, blo, blen, flo, mlo, ap, bctlo, bcllo,
1881  imaxorder, dyi, icomp);
1882  mllinop_comp_interp_coef0_y_eb
1883  (1, bhi, blen, fhi, mhi, ap, bcthi, bclhi,
1884  imaxorder, dyi, icomp);
1885  } else {
1886  mllinop_comp_interp_coef0_z_eb
1887  (0, blo, blen, flo, mlo, ap, bctlo, bcllo,
1888  imaxorder, dzi, icomp);
1889  mllinop_comp_interp_coef0_z_eb
1890  (1, bhi, blen, fhi, mhi, ap, bcthi, bclhi,
1891  imaxorder, dzi, icomp);
1892  }
1893  }
1894  } else if (fabtyp == FabType::regular)
1895 #endif
1896  {
1897  if (idim == 0) {
1898  mllinop_comp_interp_coef0_x
1899  (0, blo, blen, flo, mlo, bctlo, bcllo,
1900  imaxorder, dxi, icomp);
1901  mllinop_comp_interp_coef0_x
1902  (1, bhi, blen, fhi, mhi, bcthi, bclhi,
1903  imaxorder, dxi, icomp);
1904  } else if (idim == 1) {
1905  mllinop_comp_interp_coef0_y
1906  (0, blo, blen, flo, mlo, bctlo, bcllo,
1907  imaxorder, dyi, icomp);
1908  mllinop_comp_interp_coef0_y
1909  (1, bhi, blen, fhi, mhi, bcthi, bclhi,
1910  imaxorder, dyi, icomp);
1911  } else {
1912  mllinop_comp_interp_coef0_z
1913  (0, blo, blen, flo, mlo, bctlo, bcllo,
1914  imaxorder, dzi, icomp);
1915  mllinop_comp_interp_coef0_z
1916  (1, bhi, blen, fhi, mhi, bcthi, bclhi,
1917  imaxorder, dzi, icomp);
1918  }
1919  }
1920  }
1921  }
1922  }
1923  }
1924  }
1925  }
1926 }
1927 
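// xdoty: dot product used by the bottom solver. Only valid cells are
// included (zero ghost cells); unless 'local' is set, the per-rank partial
// sums are combined across the sub-communicator.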
1928 template <typename MF>
1929 auto
1930 MLCellLinOpT<MF>::xdoty (int /*amrlev*/, int /*mglev*/, const MF& x, const MF& y, bool local) const
1931  -> RT
1932 {
1933  const int ncomp = this->getNComp();
1934  const IntVect nghost(0);
1935  RT result = amrex::Dot(x,0,y,0,ncomp,nghost,true);
1936  if (!local) {
1937  ParallelAllReduce::Sum(result, ParallelContext::CommunicatorSub());
1938  }
1939  return result;
1940 }
1941 
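// computeVolInv caches the inverse of the (possibly EB-weighted) cell count
// for AMR level 0's top and bottom MG levels; the solvability-offset code
// uses it to turn global sums into averages.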
1942 template <typename MF>
1943 void
1944 MLCellLinOpT<MF>::computeVolInv () const
1945 {
1946  if (!m_volinv.empty()) { return; }
1947 
1948  m_volinv.resize(this->m_num_amr_levels);
1949  for (int amrlev = 0; amrlev < this->m_num_amr_levels; ++amrlev) {
1950  m_volinv[amrlev].resize(this->NMGLevels(amrlev));
1951  }
1952 
1953  // We don't need to compute this for every level.
1954 
1955  auto f = [&] (int amrlev, int mglev) {
1956 #ifdef AMREX_USE_EB
1957  const auto *factory = dynamic_cast<EBFArrayBoxFactory const*>(this->Factory(amrlev,mglev));
1958  if (factory && !factory->isAllRegular())
1959  {
1960  if constexpr (std::is_same<MF,MultiFab>()) {
1961  const auto& vfrac = factory->getVolFrac();
1962  m_volinv[amrlev][mglev] = vfrac.sum(0,true);
1963  } else {
1964  amrex::Abort("MLCellLinOp with EB only works with MultiFab");
1965  }
1966  }
1967  else
1968 #endif
1969  {
1970  if (this->m_coarse_fine_bc_type == LinOpBCType::Dirichlet) {
1971  m_volinv[amrlev][mglev]
1972  = RT(1.0 / this->compactify(this->Geom(amrlev,mglev).Domain()).d_numPts());
1973  } else {
1974  m_volinv[amrlev][mglev]
1975  = RT(1.0 / this->m_grids[amrlev][mglev].d_numPts());
1976  }
1977  }
1978  };
1979 
1980  // amrlev = 0, mglev = 0
1981  f(0,0);
1982 
1983  int mgbottom = this->NMGLevels(0)-1;
1984  f(0,mgbottom);
1985 
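 // With EB, the values computed above are local volume-fraction sums, so
 // reduce them across ranks first and only then invert to get 1/volume.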
1986 #ifdef AMREX_USE_EB
1987  RT temp1, temp2;
1988  const auto *factory = dynamic_cast<EBFArrayBoxFactory const*>(this->Factory(0,0));
1989  if (factory && !factory->isAllRegular())
1990  {
1991  ParallelAllReduce::Sum<RT>({m_volinv[0][0], m_volinv[0][mgbottom]},
1992  ParallelContext::CommunicatorSub());
1993  temp1 = RT(1.0)/m_volinv[0][0];
1994  temp2 = RT(1.0)/m_volinv[0][mgbottom];
1995  }
1996  else
1997  {
1998  temp1 = m_volinv[0][0];
1999  temp2 = m_volinv[0][mgbottom];
2000  }
2001  m_volinv[0][0] = temp1;
2002  m_volinv[0][mgbottom] = temp2;
2003 #endif
2004 }
2005 
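// normInf: max-norm of mf on one AMR level. With EB each value is weighted
// by the cell's volume fraction, and on levels below the finest, cells
// covered by finer grids are excluded via m_norm_fine_mask.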
2006 template <typename MF>
2007 auto
2008 MLCellLinOpT<MF>::normInf (int amrlev, MF const& mf, bool local) const -> RT
2009 {
2010  const int ncomp = this->getNComp();
2011  const int finest_level = this->NAMRLevels() - 1;
2012  RT norm = RT(0.0);
2013 #ifdef AMREX_USE_EB
2014  const auto *factory = dynamic_cast<EBFArrayBoxFactory const*>(this->Factory(amrlev));
2015  if (factory && !factory->isAllRegular()) {
2016  if constexpr (!std::is_same<MF,MultiFab>()) {
2017  amrex::Abort("MLCellLinOpT with EB only works with MultiFab");
2018  } else {
2019  const MultiFab& vfrac = factory->getVolFrac();
2020  if (amrlev == finest_level) {
2021 #ifdef AMREX_USE_GPU
2022  if (Gpu::inLaunchRegion()) {
2023  auto const& ma = mf.const_arrays();
2024  auto const& vfrac_ma = vfrac.const_arrays();
2025  norm = ParReduce(TypeList<ReduceOpMax>{}, TypeList<Real>{},
2026  mf, IntVect(0), ncomp,
2027  [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k, int n)
2028  -> GpuTuple<Real>
2029  {
2030  return std::abs(ma[box_no](i,j,k,n)
2031  *vfrac_ma[box_no](i,j,k));
2032  });
2033  } else
2034 #endif
2035  {
2036 #ifdef AMREX_USE_OMP
2037 #pragma omp parallel reduction(max:norm)
2038 #endif
2039  for (MFIter mfi(mf,true); mfi.isValid(); ++mfi) {
2040  Box const& bx = mfi.tilebox();
2041  auto const& fab = mf.const_array(mfi);
2042  auto const& v = vfrac.const_array(mfi);
2043  AMREX_LOOP_4D(bx, ncomp, i, j, k, n,
2044  {
2045  norm = std::max(norm, std::abs(fab(i,j,k,n)*v(i,j,k)));
2046  });
2047  }
2048  }
2049  } else {
2050 #ifdef AMREX_USE_GPU
2051  if (Gpu::inLaunchRegion()) {
2052  auto const& ma = mf.const_arrays();
2053  auto const& mask_ma = m_norm_fine_mask[amrlev]->const_arrays();
2054  auto const& vfrac_ma = vfrac.const_arrays();
2055  norm = ParReduce(TypeList<ReduceOpMax>{}, TypeList<Real>{},
2056  mf, IntVect(0), ncomp,
2057  [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k, int n)
2058  -> GpuTuple<Real>
2059  {
2060  if (mask_ma[box_no](i,j,k)) {
2061  return std::abs(ma[box_no](i,j,k,n)
2062  *vfrac_ma[box_no](i,j,k));
2063  } else {
2064  return Real(0.0);
2065  }
2066  });
2067  } else
2068 #endif
2069  {
2070 #ifdef AMREX_USE_OMP
2071 #pragma omp parallel reduction(max:norm)
2072 #endif
2073  for (MFIter mfi(mf,true); mfi.isValid(); ++mfi) {
2074  Box const& bx = mfi.tilebox();
2075  auto const& fab = mf.const_array(mfi);
2076  auto const& mask = m_norm_fine_mask[amrlev]->const_array(mfi);
2077  auto const& v = vfrac.const_array(mfi);
2078  AMREX_LOOP_4D(bx, ncomp, i, j, k, n,
2079  {
2080  if (mask(i,j,k)) {
2081  norm = std::max(norm, std::abs(fab(i,j,k,n)*v(i,j,k)));
2082  }
2083  });
2084  }
2085  }
2086  }
2087  }
2088  } else
2089 #endif
2090  {
2091  if (amrlev == finest_level) {
2092  norm = mf.norminf(0, ncomp, IntVect(0), true);
2093  } else {
2094  norm = mf.norminf(*m_norm_fine_mask[amrlev], 0, ncomp, IntVect(0), true);
2095  }
2096  }
2097 
2098  if (!local) { ParallelAllReduce::Max(norm, ParallelContext::CommunicatorSub()); }
2099  return norm;
2100 }
2101 
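// averageDownAndSync: replace coarse-level solution data under fine grids
// with the volume-averaged fine data, working from the finest level down.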
2102 template <typename MF>
2103 void
2104 MLCellLinOpT<MF>::averageDownAndSync (Vector<MF>& sol) const
2105 {
2106  int ncomp = this->getNComp();
2107  for (int falev = this->NAMRLevels()-1; falev > 0; --falev)
2108  {
2109 #ifdef AMREX_USE_EB
2110  if (!sol[falev].isAllRegular()) {
2111  if constexpr (std::is_same<MF,MultiFab>()) {
2112  amrex::EB_average_down(sol[falev], sol[falev-1], 0, ncomp, this->AMRRefRatio(falev-1));
2113  } else {
2114  amrex::Abort("EB_average_down only works with MultiFab");
2115  }
2116  } else
2117 #endif
2118  {
2119  amrex::average_down(sol[falev], sol[falev-1], 0, ncomp, this->AMRRefRatio(falev-1));
2120  }
2121  }
2122 }
2123 
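// avgDownResAmr: same averaging, applied to the residual between one pair
// of AMR levels (fine fres onto coarse cres).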
2124 template <typename MF>
2125 void
2126 MLCellLinOpT<MF>::avgDownResAmr (int clev, MF& cres, MF const& fres) const
2127 {
2128 #ifdef AMREX_USE_EB
2129  if (!fres.isAllRegular()) {
2130  if constexpr (std::is_same<MF,MultiFab>()) {
2131  amrex::EB_average_down(fres, cres, 0, this->getNComp(),
2132  this->AMRRefRatio(clev));
2133  } else {
2134  amrex::Abort("EB_average_down only works with MultiFab");
2135  }
2136  } else
2137 #endif
2138  {
2139  amrex::average_down(fres, cres, 0, this->getNComp(),
2140  this->AMRRefRatio(clev));
2141  }
2142 }
2143 
2144 extern template class MLCellLinOpT<MultiFab>;
2145 
2146 using MLCellLinOp = MLCellLinOpT<MultiFab>;
2147 
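 // Usage sketch (illustrative, not part of this header; assumes AMReX's
 // MLPoisson and MLMG from the LinearSolvers suite): concrete cell-centered
 // operators derived from MLCellLinOp are normally driven through MLMG, e.g.
 //
 //   MLPoisson mlpoisson(geom, grids, dmap);
 //   mlpoisson.setDomainBC(lobc, hibc);       // domain boundary types
 //   mlpoisson.setLevelBC(0, &solution);      // level boundary data
 //   MLMG mlmg(mlpoisson);
 //   mlmg.solve({&solution}, {&rhs}, reltol, abstol);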
2148 }
2149 
2150 #endif