Block-Structured AMR Software Framework
AMReX_MLCellABecLap.H
#ifndef AMREX_ML_CELL_ABECLAP_H_
#define AMREX_ML_CELL_ABECLAP_H_
#include <AMReX_Config.H>

#include <AMReX_MLCellLinOp.H>
#include <AMReX_MLCellABecLap_K.H> // presumed source of coarsen_overset_mask used below

namespace amrex {

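// Abstract cell-centered ABec-type linear operator. Concrete operators (e.g.
// MLABecLaplacian, referenced in the Robin-BC comment below) solve equations
// of the canonical form (A * a(x) - B * div(b(x) grad)) phi = rhs, where A
// and B are scalars, a is cell-centered, and b is face-centered. This class
// adds optional overset-mask support and the shared handling of inhomogeneous
// Neumann and Robin boundary conditions.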
template <typename MF>
class MLCellABecLapT // NOLINT(cppcoreguidelines-virtual-class-destructor)
    : public MLCellLinOpT<MF>
{
public:

    using FAB = typename MF::fab_type;
    using RT  = typename MF::value_type;

    using Location = typename MLLinOpT<MF>::Location;

    MLCellABecLapT () = default;
    ~MLCellABecLapT () override = default;

    MLCellABecLapT (const MLCellABecLapT<MF>&) = delete;
    MLCellABecLapT (MLCellABecLapT<MF>&&) = delete;
    MLCellABecLapT<MF>& operator= (const MLCellABecLapT<MF>&) = delete;
    MLCellABecLapT<MF>& operator= (MLCellABecLapT<MF>&&) = delete;

    void define (const Vector<Geometry>& a_geom,
                 const Vector<BoxArray>& a_grids,
                 const Vector<DistributionMapping>& a_dmap,
                 const LPInfo& a_info = LPInfo(),
                 const Vector<FabFactory<FAB> const*>& a_factory = {});

    void define (const Vector<Geometry>& a_geom,
                 const Vector<BoxArray>& a_grids,
                 const Vector<DistributionMapping>& a_dmap,
                 const Vector<iMultiFab const*>& a_overset_mask,
                 const LPInfo& a_info = LPInfo(),
                 const Vector<FabFactory<FAB> const*>& a_factory = {});

    [[nodiscard]] iMultiFab const* getOversetMask (int amrlev, int mglev) const {
        return m_overset_mask[amrlev][mglev].get();
    }

    [[nodiscard]] bool needsUpdate () const override {
        return MLCellLinOpT<MF>::needsUpdate();
    }
    void update () override;

    void prepareForSolve () override;

    void setDirichletNodesToZero (int amrlev, int mglev, MF& mf) const override;

    void getFluxes (const Vector<Array<MF*,AMREX_SPACEDIM> >& a_flux,
                    const Vector<MF*>& a_sol,
                    Location a_loc) const final;
    void getFluxes (const Vector<MF*>& /*a_flux*/,
                    const Vector<MF*>& /*a_sol*/) const final {
        amrex::Abort("MLCellABecLap::getFluxes: How did we get here?");
    }

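    // Scalars and coefficient fields of the ABec form; concrete operators must
    // provide these. getACoeffs/getBCoeffs may return null pointers when a
    // coefficient has not been set (the callers below check for this).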
    virtual RT getAScalar () const = 0;
    virtual RT getBScalar () const = 0;
    virtual MF const* getACoeffs (int amrlev, int mglev) const = 0;
    virtual Array<MF const*,AMREX_SPACEDIM> getBCoeffs (int amrlev, int mglev) const = 0;

    void applyInhomogNeumannTerm (int amrlev, MF& rhs) const final;

    void addInhomogNeumannFlux (
        int amrlev, const Array<MF*,AMREX_SPACEDIM>& grad,
        MF const& sol, bool mult_bcoef) const final;

    void applyOverset (int amrlev, MF& rhs) const override;

#if defined(AMREX_USE_HYPRE) && (AMREX_SPACEDIM > 1)
    [[nodiscard]] std::unique_ptr<Hypre> makeHypre (Hypre::Interface hypre_interface) const override;
#endif

#ifdef AMREX_USE_PETSC
    [[nodiscard]] std::unique_ptr<PETScABecLap> makePETSc () const override;
#endif

protected:
    Vector<Vector<std::unique_ptr<iMultiFab>>> m_overset_mask;

    LPInfo m_lpinfo_arg;

    [[nodiscard]] bool supportInhomogNeumannBC () const noexcept override { return true; }
};

template <typename MF>
void
MLCellABecLapT<MF>::define (const Vector<Geometry>& a_geom,
                            const Vector<BoxArray>& a_grids,
                            const Vector<DistributionMapping>& a_dmap,
                            const LPInfo& a_info,
                            const Vector<FabFactory<FAB> const*>& a_factory)
{
    MLCellLinOpT<MF>::define(a_geom, a_grids, a_dmap, a_info, a_factory);

    this->m_overset_mask.resize(this->m_num_amr_levels);
    for (int amrlev = 0; amrlev < this->m_num_amr_levels; ++amrlev) {
        this->m_overset_mask[amrlev].resize(this->m_num_mg_levels[amrlev]);
    }
}

template <typename MF>
void
MLCellABecLapT<MF>::define (const Vector<Geometry>& a_geom,
                            const Vector<BoxArray>& a_grids,
                            const Vector<DistributionMapping>& a_dmap,
                            const Vector<iMultiFab const*>& a_overset_mask,
                            const LPInfo& a_info,
                            const Vector<FabFactory<FAB> const*>& a_factory)
{
    BL_PROFILE("MLCellABecLap::define(overset)");

    AMREX_ALWAYS_ASSERT(!this->hasHiddenDimension());

    this->m_lpinfo_arg = a_info;

    auto namrlevs = static_cast<int>(a_geom.size());
    this->m_overset_mask.resize(namrlevs);
    for (int amrlev = 0; amrlev < namrlevs; ++amrlev)
    {
        this->m_overset_mask[amrlev].push_back(std::make_unique<iMultiFab>(a_grids[amrlev],
                                                                           a_dmap[amrlev], 1, 1));
        iMultiFab::Copy(*(this->m_overset_mask[amrlev][0]), *a_overset_mask[amrlev], 0, 0, 1, 0);
        if (amrlev > 1) {
            AMREX_ALWAYS_ASSERT(amrex::refine(a_geom[amrlev-1].Domain(),2)
                                == a_geom[amrlev].Domain());
        }
    }

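    // Coarsen the level-0 overset mask down the MG hierarchy by factors of 2.
    // The sum reduction collects the return values of coarsen_overset_mask,
    // which flag cells that cannot be coarsened consistently; the first
    // failure, or a non-coarsenable domain/BoxArray, ends the mask hierarchy.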
    int amrlev = 0;
    Box dom = a_geom[0].Domain();
    for (int mglev = 1; mglev <= a_info.max_coarsening_level; ++mglev)
    {
        AMREX_ALWAYS_ASSERT(this->mg_coarsen_ratio == 2);
        iMultiFab const& fine = *(this->m_overset_mask[amrlev][mglev-1]);
        if (dom.coarsenable(2) && fine.boxArray().coarsenable(2)) {
            dom.coarsen(2);
            auto crse = std::make_unique<iMultiFab>(amrex::coarsen(fine.boxArray(),2),
                                                    fine.DistributionMap(), 1, 1);
            ReduceOps<ReduceOpSum> reduce_op;
            ReduceData<int> reduce_data(reduce_op);
            using ReduceTuple = typename decltype(reduce_data)::Type;
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
            for (MFIter mfi(*crse, TilingIfNotGPU()); mfi.isValid(); ++mfi)
            {
                const Box& bx = mfi.tilebox();
                Array4<int const> const& fmsk = fine.const_array(mfi);
                Array4<int> const& cmsk = crse->array(mfi);
                reduce_op.eval(bx, reduce_data,
                [=] AMREX_GPU_HOST_DEVICE (Box const& b) -> ReduceTuple
                {
                    return { coarsen_overset_mask(b, cmsk, fmsk) };
                });
            }
            ReduceTuple hv = reduce_data.value(reduce_op);
            if (amrex::get<0>(hv) == 0) {
                this->m_overset_mask[amrlev].push_back(std::move(crse));
            } else {
                break;
            }
        } else {
            break;
        }
    }
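    // Different ranks may stop coarsening at different depths; take the global
    // minimum so every rank keeps the same number of mask levels.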
    int max_overset_mask_coarsening_level = this->m_overset_mask[amrlev].size()-1;
    ParallelAllReduce::Min(max_overset_mask_coarsening_level, ParallelContext::CommunicatorSub());
    this->m_overset_mask[amrlev].resize(max_overset_mask_coarsening_level+1);

    LPInfo linfo = a_info;
    linfo.max_coarsening_level = std::min(a_info.max_coarsening_level,
                                          max_overset_mask_coarsening_level);

    MLCellLinOpT<MF>::define(a_geom, a_grids, a_dmap, linfo, a_factory);

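    // MLCellLinOpT::define may have chosen different layouts for the MG levels
    // than the masks coarsened above; where an MFIter over both would be
    // unsafe, copy each mask onto the operator's BoxArray/DistributionMapping.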
    amrlev = 0;
    for (int mglev = 1; mglev < this->m_num_mg_levels[amrlev]; ++mglev) {
        MF foo(this->m_grids[amrlev][mglev], this->m_dmap[amrlev][mglev], 1, 0, MFInfo().SetAlloc(false));
        if (! amrex::isMFIterSafe(*(this->m_overset_mask[amrlev][mglev]), foo)) {
            auto osm = std::make_unique<iMultiFab>(this->m_grids[amrlev][mglev],
                                                   this->m_dmap[amrlev][mglev], 1, 1);
            osm->ParallelCopy(*(this->m_overset_mask[amrlev][mglev]));
            std::swap(osm, this->m_overset_mask[amrlev][mglev]);
        }
    }

    for (amrlev = 1; amrlev < this->m_num_amr_levels; ++amrlev) {
        for (int mglev = 1; mglev < this->m_num_mg_levels[amrlev]; ++mglev) { // for ref_ratio 4
            this->m_overset_mask[amrlev].push_back(std::make_unique<iMultiFab>(this->m_grids[amrlev][mglev],
                                                                               this->m_dmap[amrlev][mglev],
                                                                               1, 1));

#ifdef AMREX_USE_GPU
            if (Gpu::inLaunchRegion() && this->m_overset_mask[amrlev][mglev]->isFusingCandidate()) {
                auto const& crsema = this->m_overset_mask[amrlev][mglev]->arrays();
                auto const& finema = this->m_overset_mask[amrlev][mglev-1]->const_arrays();
                ParallelFor(*(this->m_overset_mask[amrlev][mglev]),
                [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k) noexcept
                {
                    coarsen_overset_mask(i,j,k, crsema[box_no], finema[box_no]);
                });
                Gpu::streamSynchronize();
            } else
#endif
            {
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
                for (MFIter mfi(*(this->m_overset_mask[amrlev][mglev]), TilingIfNotGPU()); mfi.isValid(); ++mfi)
                {
                    const Box& bx = mfi.tilebox();
                    Array4<int> const& cmsk = this->m_overset_mask[amrlev][mglev]->array(mfi);
                    Array4<int const> const fmsk = this->m_overset_mask[amrlev][mglev-1]->const_array(mfi);
                    AMREX_HOST_DEVICE_PARALLEL_FOR_3D(bx, i, j, k,
                    {
                        coarsen_overset_mask(i,j,k, cmsk, fmsk);
                    });
                }
            }
        }
    }

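    // Initialize all ghost cells of the masks to 1 (treated as regular cells),
    // then overwrite ghosts that overlap valid cells of neighboring boxes or
    // periodic images with the actual mask values.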
    for (amrlev = 0; amrlev < this->m_num_amr_levels; ++amrlev) {
        for (int mglev = 0; mglev < this->m_num_mg_levels[amrlev]; ++mglev) {
            this->m_overset_mask[amrlev][mglev]->setBndry(1);
            this->m_overset_mask[amrlev][mglev]->FillBoundary(this->m_geom[amrlev][mglev].periodicity());
        }
    }
}

template <typename MF>
void
MLCellABecLapT<MF>::update ()
{
    if (MLCellLinOpT<MF>::needsUpdate()) { MLCellLinOpT<MF>::update(); }
}

template <typename MF>
void
MLCellABecLapT<MF>::prepareForSolve ()
{
    MLCellLinOpT<MF>::prepareForSolve();
}

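// Overset cells with mask value 0 are treated as Dirichlet nodes whose values
// are imposed externally; clear them so they drop out of the solve.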
template <typename MF>
void
MLCellABecLapT<MF>::setDirichletNodesToZero (int amrlev, int mglev, MF& mf) const
{
    auto const* omask = this->getOversetMask(amrlev, mglev);
    if (omask) {
        const int ncomp = this->getNComp();
        auto const& mskma = omask->const_arrays();
        auto const& ma = mf.arrays();
        ParallelFor(mf, IntVect(0), ncomp,
        [=] AMREX_GPU_DEVICE (int bno, int i, int j, int k, int n)
        {
            if (mskma[bno](i,j,k) == 0) { ma[bno](i,j,k,n) = RT(0.0); }
        });
        Gpu::streamSynchronize();
    }
}

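// Recover per-level fluxes from a solved solution. Reading the code, compFlux
// appears to return beta-scaled (and, in non-Cartesian coordinates,
// metric-scaled) fluxes, so both scalings are removed here before boundary
// fluxes for inhomogeneous Neumann and Robin BCs are patched in.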
template <typename MF>
void
MLCellABecLapT<MF>::getFluxes (const Vector<Array<MF*,AMREX_SPACEDIM> >& a_flux,
                               const Vector<MF*>& a_sol,
                               Location a_loc) const
{
    BL_PROFILE("MLMG::getFluxes()");

    const int ncomp = this->getNComp();
    const RT betainv = RT(1.0) / getBScalar();
    const int nlevs = this->NAMRLevels();
    for (int alev = 0; alev < nlevs; ++alev) {
        this->compFlux(alev, a_flux[alev], *a_sol[alev], a_loc);
        for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
            this->unapplyMetricTerm(alev, 0, *a_flux[alev][idim]);
            if (betainv != RT(1.0)) {
                a_flux[alev][idim]->mult(betainv, 0, ncomp);
            }
        }
        this->addInhomogNeumannFlux(alev, a_flux[alev], *a_sol[alev], true);
    }
}

template <typename MF>
void
MLCellABecLapT<MF>::applyInhomogNeumannTerm (int amrlev, MF& rhs) const
{
    bool has_inhomog_neumann = this->hasInhomogNeumannBC();
    bool has_robin = this->hasRobinBC();

    if (!has_inhomog_neumann && !has_robin) { return; }

    int ncomp = this->getNComp();
    const int mglev = 0;

    const auto problo = this->m_geom[amrlev][mglev].ProbLoArray();
    const auto probhi = this->m_geom[amrlev][mglev].ProbHiArray();
    amrex::ignore_unused(probhi);
    const RT dxi = static_cast<RT>(this->m_geom[amrlev][mglev].InvCellSize(0));
    const RT dyi = static_cast<RT>((AMREX_SPACEDIM >= 2) ? this->m_geom[amrlev][mglev].InvCellSize(1) : Real(1.0));
    const RT dzi = static_cast<RT>((AMREX_SPACEDIM == 3) ? this->m_geom[amrlev][mglev].InvCellSize(2) : Real(1.0));
    const RT xlo = static_cast<RT>(problo[0]);
    const RT dx = static_cast<RT>(this->m_geom[amrlev][mglev].CellSize(0));
    const Box& domain = this->m_geom[amrlev][mglev].Domain();

    const RT beta = getBScalar();
    Array<MF const*, AMREX_SPACEDIM> const& bcoef = getBCoeffs(amrlev,mglev);
    FAB foo(Box(IntVect(0),IntVect(1)));
    bool has_bcoef = (bcoef[0] != nullptr);

    const auto& maskvals = this->m_maskvals[amrlev][mglev];
    const auto& bcondloc = *(this->m_bcondloc[amrlev][mglev]);
    const auto& bndry = *(this->m_bndry_sol[amrlev]);

    MFItInfo mfi_info;
    if (Gpu::notInLaunchRegion()) { mfi_info.SetDynamic(true); }

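    // Visit the one-cell-wide strips just outside each box's valid region in
    // every direction. Where a strip lies outside the physical domain, fold
    // the inhomogeneous Neumann or Robin boundary data into the RHS.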
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
    for (MFIter mfi(rhs, mfi_info); mfi.isValid(); ++mfi)
    {
        const Box& vbx = mfi.validbox();
        auto const& rhsfab = rhs.array(mfi);

        const auto & bdlv = bcondloc.bndryLocs(mfi);
        const auto & bdcv = bcondloc.bndryConds(mfi);

        for (int idim = 0; idim < AMREX_SPACEDIM; ++idim)
        {
            auto const bfab = (has_bcoef)
                ? bcoef[idim]->const_array(mfi) : foo.const_array();
            const Orientation olo(idim,Orientation::low);
            const Orientation ohi(idim,Orientation::high);
            const Box blo = amrex::adjCellLo(vbx, idim);
            const Box bhi = amrex::adjCellHi(vbx, idim);
            const auto& mlo = maskvals[olo].array(mfi);
            const auto& mhi = maskvals[ohi].array(mfi);
            const auto& bvlo = bndry.bndryValues(olo).array(mfi);
            const auto& bvhi = bndry.bndryValues(ohi).array(mfi);
            bool outside_domain_lo = !(domain.contains(blo));
            bool outside_domain_hi = !(domain.contains(bhi));
            if ((!outside_domain_lo) && (!outside_domain_hi)) { continue; }
            for (int icomp = 0; icomp < ncomp; ++icomp) {
                const BoundCond bctlo = bdcv[icomp][olo];
                const BoundCond bcthi = bdcv[icomp][ohi];
                const RT bcllo = bdlv[icomp][olo];
                const RT bclhi = bdlv[icomp][ohi];
                if (this->m_lobc_orig[icomp][idim] == LinOpBCType::inhomogNeumann && outside_domain_lo)
                {
                    if (idim == 0) {
                        RT fac = beta*dxi;
                        if (this->m_has_metric_term && !has_bcoef) {
#if (AMREX_SPACEDIM == 1)
                            fac *= static_cast<RT>(problo[0]*problo[0]);
#elif (AMREX_SPACEDIM == 2)
                            fac *= static_cast<RT>(problo[0]);
#endif
                        }
                        AMREX_HOST_DEVICE_FOR_3D(blo, i, j, k,
                        {
                            mllinop_apply_innu_xlo(i,j,k, rhsfab, mlo, bfab,
                                                   bctlo, bcllo, bvlo,
                                                   fac, has_bcoef, icomp);
                        });
                    } else if (idim == 1) {
                        RT fac = beta*dyi;
                        if (this->m_has_metric_term && !has_bcoef) {
                            AMREX_HOST_DEVICE_FOR_3D(blo, i, j, k,
                            {
                                mllinop_apply_innu_ylo_m(i,j,k, rhsfab, mlo,
                                                         bctlo, bcllo, bvlo,
                                                         fac, xlo, dx, icomp);
                            });
                        }
                        else {
                            AMREX_HOST_DEVICE_FOR_3D(blo, i, j, k,
                            {
                                mllinop_apply_innu_ylo(i,j,k, rhsfab, mlo, bfab,
                                                       bctlo, bcllo, bvlo,
                                                       fac, has_bcoef, icomp);
                            });
                        }
                    } else {
                        RT fac = beta*dzi;
                        AMREX_HOST_DEVICE_FOR_3D(blo, i, j, k,
                        {
                            mllinop_apply_innu_zlo(i,j,k, rhsfab, mlo, bfab,
                                                   bctlo, bcllo, bvlo,
                                                   fac, has_bcoef, icomp);
                        });
                    }
                }
                if (this->m_hibc_orig[icomp][idim] == LinOpBCType::inhomogNeumann && outside_domain_hi)
                {
                    if (idim == 0) {
                        RT fac = beta*dxi;
                        if (this->m_has_metric_term && !has_bcoef) {
#if (AMREX_SPACEDIM == 1)
                            fac *= static_cast<RT>(probhi[0]*probhi[0]);
#elif (AMREX_SPACEDIM == 2)
                            fac *= static_cast<RT>(probhi[0]);
#endif
                        }
                        AMREX_HOST_DEVICE_FOR_3D(bhi, i, j, k,
                        {
                            mllinop_apply_innu_xhi(i,j,k, rhsfab, mhi, bfab,
                                                   bcthi, bclhi, bvhi,
                                                   fac, has_bcoef, icomp);
                        });
                    } else if (idim == 1) {
                        RT fac = beta*dyi;
                        if (this->m_has_metric_term && !has_bcoef) {
                            AMREX_HOST_DEVICE_FOR_3D(bhi, i, j, k,
                            {
                                mllinop_apply_innu_yhi_m(i,j,k, rhsfab, mhi,
                                                         bcthi, bclhi, bvhi,
                                                         fac, xlo, dx, icomp);
                            });
                        } else {
                            AMREX_HOST_DEVICE_FOR_3D(bhi, i, j, k,
                            {
                                mllinop_apply_innu_yhi(i,j,k, rhsfab, mhi, bfab,
                                                       bcthi, bclhi, bvhi,
                                                       fac, has_bcoef, icomp);
                            });
                        }
                    } else {
                        RT fac = beta*dzi;
                        AMREX_HOST_DEVICE_FOR_3D(bhi, i, j, k,
                        {
                            mllinop_apply_innu_zhi(i,j,k, rhsfab, mhi, bfab,
                                                   bcthi, bclhi, bvhi,
                                                   fac, has_bcoef, icomp);
                        });
                    }
                }

                if (has_robin) {
                    // For Robin BC, see comments in AMReX_MLABecLaplacian.cpp above
                    // function applyRobinBCTermsCoeffs.
                    auto const& rbc = (*this->m_robin_bcval[amrlev])[mfi].const_array(icomp*3);
                    if (this->m_lobc_orig[icomp][idim] == LinOpBCType::Robin && outside_domain_lo)
                    {
                        if (idim == 0) {
                            RT fac = beta*dxi*dxi;
                            AMREX_HOST_DEVICE_FOR_3D(blo, i, j, k,
                            {
                                RT A = rbc(i,j,k,2)
                                    / (rbc(i,j,k,1)*dxi + rbc(i,j,k,0)*RT(0.5));
                                rhsfab(i+1,j,k,icomp) += fac*bfab(i+1,j,k,icomp)*A;
                            });
                        } else if (idim == 1) {
                            RT fac = beta*dyi*dyi;
                            AMREX_HOST_DEVICE_FOR_3D(blo, i, j, k,
                            {
                                RT A = rbc(i,j,k,2)
                                    / (rbc(i,j,k,1)*dyi + rbc(i,j,k,0)*RT(0.5));
                                rhsfab(i,j+1,k,icomp) += fac*bfab(i,j+1,k,icomp)*A;
                            });
                        } else {
                            RT fac = beta*dzi*dzi;
                            AMREX_HOST_DEVICE_FOR_3D(blo, i, j, k,
                            {
                                RT A = rbc(i,j,k,2)
                                    / (rbc(i,j,k,1)*dzi + rbc(i,j,k,0)*RT(0.5));
                                rhsfab(i,j,k+1,icomp) += fac*bfab(i,j,k+1,icomp)*A;
                            });
                        }
                    }
                    if (this->m_hibc_orig[icomp][idim] == LinOpBCType::Robin && outside_domain_hi)
                    {
                        if (idim == 0) {
                            RT fac = beta*dxi*dxi;
                            AMREX_HOST_DEVICE_FOR_3D(bhi, i, j, k,
                            {
                                RT A = rbc(i,j,k,2)
                                    / (rbc(i,j,k,1)*dxi + rbc(i,j,k,0)*RT(0.5));
                                rhsfab(i-1,j,k,icomp) += fac*bfab(i,j,k,icomp)*A;
                            });
                        } else if (idim == 1) {
                            RT fac = beta*dyi*dyi;
                            AMREX_HOST_DEVICE_FOR_3D(bhi, i, j, k,
                            {
                                RT A = rbc(i,j,k,2)
                                    / (rbc(i,j,k,1)*dyi + rbc(i,j,k,0)*RT(0.5));
                                rhsfab(i,j-1,k,icomp) += fac*bfab(i,j,k,icomp)*A;
                            });
                        } else {
                            RT fac = beta*dzi*dzi;
                            AMREX_HOST_DEVICE_FOR_3D(bhi, i, j, k,
                            {
                                RT A = rbc(i,j,k,2)
                                    / (rbc(i,j,k,1)*dzi + rbc(i,j,k,0)*RT(0.5));
                                rhsfab(i,j,k-1,icomp) += fac*bfab(i,j,k,icomp)*A;
                            });
                        }
                    }
                }
            }
        }

    }
}

template <typename MF>
void
MLCellABecLapT<MF>::addInhomogNeumannFlux (
    int amrlev, const Array<MF*,AMREX_SPACEDIM>& grad, MF const& sol,
    bool mult_bcoef) const
{
    /*
     * if (mult_bcoef == true)
     *     grad is -bcoef*grad phi
     * else
     *     grad is grad phi
     */
    RT fac = mult_bcoef ? RT(-1.0) : RT(1.0);

    bool has_inhomog_neumann = this->hasInhomogNeumannBC();
    bool has_robin = this->hasRobinBC();

    if (!has_inhomog_neumann && !has_robin) { return; }

    int ncomp = this->getNComp();
    const int mglev = 0;

    const auto dxinv = this->m_geom[amrlev][mglev].InvCellSize();
    const Box domain = this->m_geom[amrlev][mglev].growPeriodicDomain(1);

    Array<MF const*, AMREX_SPACEDIM> bcoef = {AMREX_D_DECL(nullptr,nullptr,nullptr)};
    if (mult_bcoef) {
        bcoef = getBCoeffs(amrlev,mglev);
    }

    const auto& bndry = *this->m_bndry_sol[amrlev];

    MFItInfo mfi_info;
    if (Gpu::notInLaunchRegion()) { mfi_info.SetDynamic(true); }

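    // Only faces on the (periodically grown) domain boundary are touched:
    // overwrite the face value with one consistent with the inhomogeneous
    // Neumann or Robin boundary condition.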
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
    for (MFIter mfi(sol, mfi_info); mfi.isValid(); ++mfi)
    {
        Box const& vbx = mfi.validbox();
        for (OrientationIter orit; orit.isValid(); ++orit) {
            const Orientation ori = orit();
            const int idim = ori.coordDir();
            const Box& ccb = amrex::adjCell(vbx, ori);
            const Dim3 os = IntVect::TheDimensionVector(idim).dim3();
            const RT dxi = static_cast<RT>(dxinv[idim]);
            if (! domain.contains(ccb)) {
                for (int icomp = 0; icomp < ncomp; ++icomp) {
                    auto const& phi = sol.const_array(mfi,icomp);
                    auto const& bv = bndry.bndryValues(ori).multiFab().const_array(mfi,icomp);
                    auto const& bc = bcoef[idim] ? bcoef[idim]->const_array(mfi,icomp)
                                                 : Array4<RT const>{};
                    auto const& f = grad[idim]->array(mfi,icomp);
                    if (ori.isLow()) {
                        if (this->m_lobc_orig[icomp][idim] ==
                            LinOpBCType::inhomogNeumann) {
                            AMREX_HOST_DEVICE_FOR_3D(ccb, i, j, k,
                            {
                                int ii = i+os.x;
                                int jj = j+os.y;
                                int kk = k+os.z;
                                RT b = bc ? bc(ii,jj,kk) : RT(1.0);
                                f(ii,jj,kk) = fac*b*bv(i,j,k);
                            });
                        } else if (this->m_lobc_orig[icomp][idim] ==
                                   LinOpBCType::Robin) {
                            auto const& rbc = (*this->m_robin_bcval[amrlev])[mfi].const_array(icomp*3);
                            AMREX_HOST_DEVICE_FOR_3D(ccb, i, j, k,
                            {
                                int ii = i+os.x;
                                int jj = j+os.y;
                                int kk = k+os.z;
                                RT tmp = RT(1.0) /
                                    (rbc(i,j,k,1)*dxi + rbc(i,j,k,0)*RT(0.5));
                                RT RA = rbc(i,j,k,2) * tmp;
                                RT RB = (rbc(i,j,k,1)*dxi - rbc(i,j,k,0)*RT(0.5)) * tmp;
                                RT b = bc ? bc(ii,jj,kk) : RT(1.0);
                                f(ii,jj,kk) = fac*b*dxi*((RT(1.0)-RB)*phi(ii,jj,kk)-RA);
                            });
                        }
                    } else {
                        if (this->m_hibc_orig[icomp][idim] ==
                            LinOpBCType::inhomogNeumann) {
                            AMREX_HOST_DEVICE_FOR_3D(ccb, i, j, k,
                            {
                                RT b = bc ? bc(i,j,k) : RT(1.0);
                                f(i,j,k) = fac*b*bv(i,j,k);
                            });
                        } else if (this->m_hibc_orig[icomp][idim] ==
                                   LinOpBCType::Robin) {
                            auto const& rbc = (*this->m_robin_bcval[amrlev])[mfi].const_array(icomp*3);
                            AMREX_HOST_DEVICE_FOR_3D(ccb, i, j, k,
                            {
                                RT tmp = RT(1.0) /
                                    (rbc(i,j,k,1)*dxi + rbc(i,j,k,0)*RT(0.5));
                                RT RA = rbc(i,j,k,2) * tmp;
                                RT RB = (rbc(i,j,k,1)*dxi - rbc(i,j,k,0)*RT(0.5)) * tmp;
                                RT b = bc ? bc(i,j,k) : RT(1.0);
                                f(i,j,k) = fac*b*dxi*(RA+(RB-RT(1.0))*
                                                      phi(i-os.x,j-os.y,k-os.z));
                            });
                        }
                    }
                }
            }
        }
    }
}

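// For overset solvers only: zero the RHS wherever the mask is 0, since those
// cells carry externally imposed values rather than being solved for here.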
template <typename MF>
void
MLCellABecLapT<MF>::applyOverset (int amrlev, MF& rhs) const
{
    if (m_overset_mask[amrlev][0]) {
        const int ncomp = this->getNComp();
#ifdef AMREX_USE_GPU
        if (Gpu::inLaunchRegion() && m_overset_mask[amrlev][0]->isFusingCandidate()) {
            auto const& osma = m_overset_mask[amrlev][0]->const_arrays();
            auto const& rhsa = rhs.arrays();
            ParallelFor(*m_overset_mask[amrlev][0], IntVect(0), ncomp,
            [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k, int n) noexcept
            {
                if (osma[box_no](i,j,k) == 0) {
                    rhsa[box_no](i,j,k,n) = RT(0.0);
                }
            });
            Gpu::streamSynchronize();
        } else
#endif
        {
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
            for (MFIter mfi(*m_overset_mask[amrlev][0],TilingIfNotGPU()); mfi.isValid(); ++mfi)
            {
                const Box& bx = mfi.tilebox();
                auto const& rfab = rhs.array(mfi);
                auto const& osm = m_overset_mask[amrlev][0]->const_array(mfi);
                AMREX_HOST_DEVICE_PARALLEL_FOR_4D(bx, ncomp, i, j, k, n,
                {
                    if (osm(i,j,k) == 0) { rfab(i,j,k,n) = RT(0.0); }
                });
            }
        }
    }
}

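// Bottom-solver hooks: wrap the coarsest MG level in an external solver,
// passing along the overset mask and the operator's scalars and coefficients.
// Coefficients the derived class does not provide default to a = 0 and b = 1.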
#if defined(AMREX_USE_HYPRE) && (AMREX_SPACEDIM > 1)
template <typename MF>
std::unique_ptr<Hypre>
MLCellABecLapT<MF>::makeHypre (Hypre::Interface hypre_interface) const
{
    if constexpr (!std::is_same<MF,MultiFab>()) {
        amrex::Abort("MLCellABecLap Hypre interface only supports MultiFab");
    } else {
        const BoxArray& ba = this->m_grids[0].back();
        const DistributionMapping& dm = this->m_dmap[0].back();
        const Geometry& geom = this->m_geom[0].back();
        const auto& factory = *(this->m_factory[0].back());
        MPI_Comm comm = this->BottomCommunicator();

        const int mglev = this->NMGLevels(0)-1;

        auto om = getOversetMask(0, mglev);

        auto hypre_solver = amrex::makeHypre(ba, dm, geom, comm, hypre_interface, om);

        hypre_solver->setScalars(getAScalar(), getBScalar());

        auto ac = getACoeffs(0, mglev);
        if (ac)
        {
            hypre_solver->setACoeffs(*ac);
        }
        else
        {
            MultiFab alpha(ba,dm,1,0,MFInfo(),factory);
            alpha.setVal(0.0);
            hypre_solver->setACoeffs(alpha);
        }

        auto bc = getBCoeffs(0, mglev);
        if (bc[0])
        {
            hypre_solver->setBCoeffs(bc);
        }
        else
        {
            Array<MultiFab,AMREX_SPACEDIM> beta;
            for (int idim = 0; idim < AMREX_SPACEDIM; ++idim)
            {
                beta[idim].define(amrex::convert(ba,IntVect::TheDimensionVector(idim)),
                                  dm, 1, 0, MFInfo(), factory);
                beta[idim].setVal(1.0);
            }
            hypre_solver->setBCoeffs(amrex::GetArrOfConstPtrs(beta));
        }
        hypre_solver->setIsMatrixSingular(this->isBottomSingular());

        return hypre_solver;
    }
    return nullptr;
}
#endif

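// The PETSc path mirrors the Hypre one, except that makePetsc's interface
// takes no overset-mask argument.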
#ifdef AMREX_USE_PETSC
template <typename MF>
std::unique_ptr<PETScABecLap>
MLCellABecLapT<MF>::makePETSc () const
{
    if constexpr (!std::is_same<MF,MultiFab>()) {
        amrex::Abort("MLCellABecLap PETSc interface only supports MultiFab");
    } else {
        const BoxArray& ba = this->m_grids[0].back();
        const DistributionMapping& dm = this->m_dmap[0].back();
        const Geometry& geom = this->m_geom[0].back();
        const auto& factory = *(this->m_factory[0].back());
        MPI_Comm comm = this->BottomCommunicator();

        auto petsc_solver = makePetsc(ba, dm, geom, comm);

        petsc_solver->setScalars(getAScalar(), getBScalar());

        const int mglev = this->NMGLevels(0)-1;
        auto ac = getACoeffs(0, mglev);
        if (ac)
        {
            petsc_solver->setACoeffs(*ac);
        }
        else
        {
            MultiFab alpha(ba,dm,1,0,MFInfo(),factory);
            alpha.setVal(0.0);
            petsc_solver->setACoeffs(alpha);
        }

        auto bc = getBCoeffs(0, mglev);
        if (bc[0])
        {
            petsc_solver->setBCoeffs(bc);
        }
        else
        {
            Array<MultiFab,AMREX_SPACEDIM> beta;
            for (int idim = 0; idim < AMREX_SPACEDIM; ++idim)
            {
                beta[idim].define(amrex::convert(ba,IntVect::TheDimensionVector(idim)),
                                  dm, 1, 0, MFInfo(), factory);
                beta[idim].setVal(1.0);
            }
            petsc_solver->setBCoeffs(amrex::GetArrOfConstPtrs(beta));
        }
        return petsc_solver;
    }
    return nullptr;
}
#endif

extern template class MLCellABecLapT<MultiFab>;

using MLCellABecLap = MLCellABecLapT<MultiFab>;

}

#endif