AMReX_MLALaplacian.H
#ifndef AMREX_MLALAPLACIAN_H_
#define AMREX_MLALAPLACIAN_H_
#include <AMReX_Config.H>

#include <AMReX_MLCellABecLap.H>
#include <AMReX_MLALap_K.H>
#include <AMReX_MultiFabUtil.H>

namespace amrex {

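// Cell-centered "alpha-Laplacian" operator.  As the mlalap_adotx kernels used
// in Fapply below show, it applies
//     L(phi) = a_scalar * a_coeff(x) * phi - b_scalar * del^2(phi)
// with a spatially varying alpha coefficient (set via setACoeffs) and constant
// scalars a_scalar, b_scalar (set via setScalars).
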
template <typename MF>
class MLALaplacianT
    : public MLCellABecLapT<MF>
{
public:

    using FAB = typename MF::fab_type;
    using RT  = typename MF::value_type;

    using BCType = LinOpBCType;
    using Location = typename MLLinOpT<MF>::Location;

    MLALaplacianT () = default;
    MLALaplacianT (const Vector<Geometry>& a_geom,
                   const Vector<BoxArray>& a_grids,
                   const Vector<DistributionMapping>& a_dmap,
                   const LPInfo& a_info = LPInfo(),
                   const Vector<FabFactory<FAB> const*>& a_factory = {},
                   int a_ncomp = 1);
    ~MLALaplacianT () override;

    MLALaplacianT (const MLALaplacianT<MF>&) = delete;
    MLALaplacianT (MLALaplacianT<MF>&&) = delete;
    MLALaplacianT<MF>& operator= (const MLALaplacianT<MF>&) = delete;
    MLALaplacianT<MF>& operator= (MLALaplacianT<MF>&&) = delete;

    void define (const Vector<Geometry>& a_geom,
                 const Vector<BoxArray>& a_grids,
                 const Vector<DistributionMapping>& a_dmap,
                 const LPInfo& a_info = LPInfo(),
                 const Vector<FabFactory<FAB> const*>& a_factory = {});

    void setScalars (RT a, RT b) noexcept;
    void setACoeffs (int amrlev, const MF& alpha);

    //! Return number of components.
    [[nodiscard]] int getNComp () const override { return m_ncomp; }

    //! Does it need update if it's reused?
    [[nodiscard]] bool needsUpdate () const override {
        return (m_needs_update || MLCellABecLapT<MF>::needsUpdate());
    }
    //! Update for reuse.
    void update () override;

    void prepareForSolve () final;
    //! Is it singular on given AMR level?
    [[nodiscard]] bool isSingular (int amrlev) const final { return m_is_singular[amrlev]; }
    //! Is the bottom of MG singular?
    [[nodiscard]] bool isBottomSingular () const final { return m_is_singular[0]; }
    void Fapply (int amrlev, int mglev, MF& out, const MF& in) const final;
    void Fsmooth (int amrlev, int mglev, MF& sol, const MF& rhs, int redblack) const final;
    void FFlux (int amrlev, const MFIter& mfi,
                const Array<FAB*,AMREX_SPACEDIM>& flux,
                const FAB& sol, Location /* loc */,
                int face_only=0) const final;

    //! Divide mf by the diagonal component of the operator. Used by bicgstab.
    void normalize (int amrlev, int mglev, MF& mf) const final;

    [[nodiscard]] RT getAScalar () const final { return m_a_scalar; }
    [[nodiscard]] RT getBScalar () const final { return m_b_scalar; }
    [[nodiscard]] MF const* getACoeffs (int amrlev, int mglev) const final
        { return &(m_a_coeffs[amrlev][mglev]); }
    [[nodiscard]] Array<MF const*,AMREX_SPACEDIM> getBCoeffs (int /*amrlev*/, int /*mglev*/) const final
        { return {{ AMREX_D_DECL(nullptr,nullptr,nullptr)}}; }

    [[nodiscard]] std::unique_ptr<MLLinOpT<MF>> makeNLinOp (int /*grid_size*/) const final {
        amrex::Abort("MLALaplacian::makeNLinOp: Not implemented");
        return std::unique_ptr<MLLinOpT<MF>>{};
    }

    void averageDownCoeffsSameAmrLevel (int amrlev, Vector<MF>& a);
    void averageDownCoeffs ();
    void averageDownCoeffsToCoarseAmrLevel (int flev);

private:

    bool m_needs_update = true;

    RT m_a_scalar = std::numeric_limits<RT>::quiet_NaN();
    RT m_b_scalar = std::numeric_limits<RT>::quiet_NaN();
    Vector<Vector<MF> > m_a_coeffs;

    Vector<int> m_is_singular;

    int m_ncomp = 1;

    void updateSingularFlag ();
};

template <typename MF>
MLALaplacianT<MF>::MLALaplacianT (const Vector<Geometry>& a_geom,
                                  const Vector<BoxArray>& a_grids,
                                  const Vector<DistributionMapping>& a_dmap,
                                  const LPInfo& a_info,
                                  const Vector<FabFactory<FAB> const*>& a_factory,
                                  int a_ncomp)
    : m_ncomp(a_ncomp)
{
    define(a_geom, a_grids, a_dmap, a_info, a_factory);
}

template <typename MF>
void
MLALaplacianT<MF>::define (const Vector<Geometry>& a_geom,
                           const Vector<BoxArray>& a_grids,
                           const Vector<DistributionMapping>& a_dmap,
                           const LPInfo& a_info,
                           const Vector<FabFactory<FAB> const*>& a_factory)
{
    BL_PROFILE("MLALaplacian::define()");

    MLCellABecLapT<MF>::define(a_geom, a_grids, a_dmap, a_info, a_factory);

    const int ncomp = this->getNComp();

    m_a_coeffs.resize(this->m_num_amr_levels);
    for (int amrlev = 0; amrlev < this->m_num_amr_levels; ++amrlev)
    {
        m_a_coeffs[amrlev].resize(this->m_num_mg_levels[amrlev]);
        for (int mglev = 0; mglev < this->m_num_mg_levels[amrlev]; ++mglev)
        {
            m_a_coeffs[amrlev][mglev].define(this->m_grids[amrlev][mglev],
                                             this->m_dmap[amrlev][mglev], ncomp, 0);
        }
    }
}

template <typename MF>
MLALaplacianT<MF>::~MLALaplacianT () = default;

template <typename MF>
void
MLALaplacianT<MF>::setScalars (RT a, RT b) noexcept
{
    m_a_scalar = a;
    m_b_scalar = b;
    if (a == RT(0.0))
    {
        for (int amrlev = 0; amrlev < this->m_num_amr_levels; ++amrlev)
        {
            m_a_coeffs[amrlev][0].setVal(RT(0.0));
        }
    }
}

template <typename MF>
void
MLALaplacianT<MF>::setACoeffs (int amrlev, const MF& alpha)
{
    const int ncomp = this->getNComp();
    m_a_coeffs[amrlev][0].LocalCopy(alpha, 0, 0, ncomp, IntVect(0));
    m_needs_update = true;
}

template <typename MF>
void
MLALaplacianT<MF>::averageDownCoeffs ()
{
    BL_PROFILE("MLALaplacian::averageDownCoeffs()");

    for (int amrlev = this->m_num_amr_levels-1; amrlev > 0; --amrlev)
    {
        auto& fine_a_coeffs = m_a_coeffs[amrlev];

        averageDownCoeffsSameAmrLevel(amrlev, fine_a_coeffs);
        averageDownCoeffsToCoarseAmrLevel(amrlev);
    }

    averageDownCoeffsSameAmrLevel(0, m_a_coeffs[0]);
}

template <typename MF>
void
MLALaplacianT<MF>::averageDownCoeffsSameAmrLevel (int amrlev, Vector<MF>& a)
{
    const int ncomp = this->getNComp();
    const int nmglevs = a.size();
    for (int mglev = 1; mglev < nmglevs; ++mglev)
    {
        if (m_a_scalar == RT(0.0))
        {
            a[mglev].setVal(RT(0.0));
        }
        else
        {
            AMREX_ASSERT(amrlev == 0 || !this->hasHiddenDimension());
            IntVect ratio = (amrlev > 0) ? IntVect(this->mg_coarsen_ratio) : this->mg_coarsen_ratio_vec[mglev-1];
            amrex::average_down(a[mglev-1], a[mglev], 0, ncomp, ratio);
        }
    }
}

template <typename MF>
void
MLALaplacianT<MF>::averageDownCoeffsToCoarseAmrLevel (int flev)
{
    const int ncomp = this->getNComp();
    auto& fine_a_coeffs = m_a_coeffs[flev  ].back();
    auto& crse_a_coeffs = m_a_coeffs[flev-1].front();

    if (m_a_scalar != RT(0.0)) {
        // We coarsen from the back of flev to the front of flev-1,
        // so we use this->mg_coarsen_ratio.
        amrex::average_down(fine_a_coeffs, crse_a_coeffs, 0, ncomp, this->mg_coarsen_ratio);
    }
}

template <typename MF>
void
MLALaplacianT<MF>::updateSingularFlag ()
{
    m_is_singular.clear();
    m_is_singular.resize(this->m_num_amr_levels, false);
    auto itlo = std::find(this->m_lobc[0].begin(), this->m_lobc[0].end(), BCType::Dirichlet);
    auto ithi = std::find(this->m_hibc[0].begin(), this->m_hibc[0].end(), BCType::Dirichlet);
    if (itlo == this->m_lobc[0].end() && ithi == this->m_hibc[0].end())
    { // No Dirichlet
        for (int alev = 0; alev < this->m_num_amr_levels; ++alev)
        {
            if (this->m_domain_covered[alev])
            {
                if (m_a_scalar == RT(0.0))
                {
                    m_is_singular[alev] = true;
                }
                else
                {
                    // We are only testing component 0 here, assuming the others
                    // are similar.
                    RT asum = m_a_coeffs[alev].back().sum(0,IntVect(0));
                    RT amax = m_a_coeffs[alev].back().norminf(0,1,IntVect(0));
                    m_is_singular[alev] = (asum <= amax * RT(1.e-12));
                }
            }
        }
    }
}

template <typename MF>
void
MLALaplacianT<MF>::prepareForSolve ()
{
    BL_PROFILE("MLALaplacian::prepareForSolve()");
    MLCellABecLapT<MF>::prepareForSolve();
    averageDownCoeffs();
    updateSingularFlag();
    m_needs_update = false;
}

template <typename MF>
void
MLALaplacianT<MF>::update ()
{
    if (MLCellABecLapT<MF>::needsUpdate()) { MLCellABecLapT<MF>::update(); }
    averageDownCoeffs();
    updateSingularFlag();
    m_needs_update = false;
}

template <typename MF>
void
MLALaplacianT<MF>::Fapply (int amrlev, int mglev, MF& out, const MF& in) const
{
    BL_PROFILE("MLALaplacian::Fapply()");

    const int ncomp = this->getNComp();

    const MF& acoef = m_a_coeffs[amrlev][mglev];

    const GpuArray<RT,AMREX_SPACEDIM> dxinv
        {AMREX_D_DECL(RT(this->m_geom[amrlev][mglev].InvCellSize(0)),
                      RT(this->m_geom[amrlev][mglev].InvCellSize(1)),
                      RT(this->m_geom[amrlev][mglev].InvCellSize(2)))};
#if (AMREX_SPACEDIM < 3)
    const RT dx = RT(this->m_geom[amrlev][mglev].CellSize(0));
    const RT probxlo = RT(this->m_geom[amrlev][mglev].ProbLo(0));
#endif

#if (AMREX_SPACEDIM == 3)
    GpuArray<RT,2> dhinv {this->get_d0(dxinv[0], dxinv[1], dxinv[2]),
                          this->get_d1(dxinv[0], dxinv[1], dxinv[2])};
#endif

    const RT ascalar = m_a_scalar;
    const RT bscalar = m_b_scalar;

#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
    for (MFIter mfi(out, TilingIfNotGPU()); mfi.isValid(); ++mfi)
    {
        const Box& bx = mfi.tilebox();
        const auto& xfab = in.array(mfi);
        const auto& yfab = out.array(mfi);
        const auto& afab = acoef.array(mfi);

#if (AMREX_SPACEDIM != 3)
        if (this->m_has_metric_term) {
            AMREX_LAUNCH_HOST_DEVICE_LAMBDA ( bx, tbx,
            {
                mlalap_adotx_m(tbx, yfab, xfab, afab, dxinv, ascalar, bscalar, dx, probxlo, ncomp);
            });
        } else {
            AMREX_LAUNCH_HOST_DEVICE_LAMBDA ( bx, tbx,
            {
                mlalap_adotx(tbx, yfab, xfab, afab, dxinv, ascalar, bscalar, ncomp);
            });
        }
#else
        if (this->hasHiddenDimension()) {
            Box const& bx2d = this->compactify(bx);
            const auto& xfab2d = this->compactify(xfab);
            const auto& yfab2d = this->compactify(yfab);
            const auto& afab2d = this->compactify(afab);
            AMREX_LAUNCH_HOST_DEVICE_LAMBDA ( bx2d, tbx2d,
            {
                TwoD::mlalap_adotx(tbx2d, yfab2d, xfab2d, afab2d, dhinv, ascalar, bscalar, ncomp);
            });
        } else {
            AMREX_LAUNCH_HOST_DEVICE_LAMBDA ( bx, tbx,
            {
                mlalap_adotx(tbx, yfab, xfab, afab, dxinv, ascalar, bscalar, ncomp);
            });
        }
#endif
    }
}

template <typename MF>
void
MLALaplacianT<MF>::normalize (int amrlev, int mglev, MF& mf) const
{
    BL_PROFILE("MLALaplacian::normalize()");

    const int ncomp = this->getNComp();

    const MF& acoef = m_a_coeffs[amrlev][mglev];

    const GpuArray<RT,AMREX_SPACEDIM> dxinv
        {AMREX_D_DECL(RT(this->m_geom[amrlev][mglev].InvCellSize(0)),
                      RT(this->m_geom[amrlev][mglev].InvCellSize(1)),
                      RT(this->m_geom[amrlev][mglev].InvCellSize(2)))};
#if (AMREX_SPACEDIM < 3)
    const RT dx = RT(this->m_geom[amrlev][mglev].CellSize(0));
    const RT probxlo = RT(this->m_geom[amrlev][mglev].ProbLo(0));
#endif

#if (AMREX_SPACEDIM == 3)
    GpuArray<RT,2> dhinv {this->get_d0(dxinv[0], dxinv[1], dxinv[2]),
                          this->get_d1(dxinv[0], dxinv[1], dxinv[2])};
#endif

    const RT ascalar = m_a_scalar;
    const RT bscalar = m_b_scalar;

#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
    for (MFIter mfi(mf, TilingIfNotGPU()); mfi.isValid(); ++mfi)
    {
        const Box& bx = mfi.tilebox();
        const auto& fab = mf.array(mfi);
        const auto& afab = acoef.array(mfi);

#if (AMREX_SPACEDIM != 3)
        if (this->m_has_metric_term) {
            AMREX_LAUNCH_HOST_DEVICE_LAMBDA ( bx, tbx,
            {
                mlalap_normalize_m(tbx, fab, afab, dxinv, ascalar, bscalar, dx, probxlo, ncomp);
            });
        } else {
            AMREX_LAUNCH_HOST_DEVICE_LAMBDA ( bx, tbx,
            {
                mlalap_normalize(tbx, fab, afab, dxinv, ascalar, bscalar, ncomp);
            });
        }
#else
        if (this->hasHiddenDimension()) {
            Box const& bx2d = this->compactify(bx);
            const auto& fab2d = this->compactify(fab);
            const auto& afab2d = this->compactify(afab);
            AMREX_LAUNCH_HOST_DEVICE_LAMBDA ( bx2d, tbx2d,
            {
                TwoD::mlalap_normalize(tbx2d, fab2d, afab2d, dhinv, ascalar, bscalar, ncomp);
            });
        } else {
            AMREX_LAUNCH_HOST_DEVICE_LAMBDA ( bx, tbx,
            {
                mlalap_normalize(tbx, fab, afab, dxinv, ascalar, bscalar, ncomp);
            });
        }
#endif
    }
}

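// Fsmooth performs one red-black Gauss-Seidel relaxation pass on the given
// AMR/MG level.  The undrrelxr registers and boundary masks feed the boundary
// values that the mlalap_gsrb kernels use at box faces; redblack selects which
// color of cells is updated in this pass.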
template <typename MF>
void
MLALaplacianT<MF>::Fsmooth (int amrlev, int mglev, MF& sol, const MF& rhs, int redblack) const
{
    BL_PROFILE("MLALaplacian::Fsmooth()");

    const int ncomp = this->getNComp();

    const MF& acoef = m_a_coeffs[amrlev][mglev];
    const auto& undrrelxr = this->m_undrrelxr[amrlev][mglev];
    const auto& maskvals  = this->m_maskvals [amrlev][mglev];

    OrientationIter oitr;

    const auto& f0 = undrrelxr[oitr()]; ++oitr;
    const auto& f1 = undrrelxr[oitr()]; ++oitr;
#if (AMREX_SPACEDIM > 1)
    const auto& f2 = undrrelxr[oitr()]; ++oitr;
    const auto& f3 = undrrelxr[oitr()]; ++oitr;
#if (AMREX_SPACEDIM > 2)
    const auto& f4 = undrrelxr[oitr()]; ++oitr;
    const auto& f5 = undrrelxr[oitr()]; ++oitr;
#endif
#endif

    const MultiMask& mm0 = maskvals[0];
    const MultiMask& mm1 = maskvals[1];
#if (AMREX_SPACEDIM > 1)
    const MultiMask& mm2 = maskvals[2];
    const MultiMask& mm3 = maskvals[3];
#if (AMREX_SPACEDIM > 2)
    const MultiMask& mm4 = maskvals[4];
    const MultiMask& mm5 = maskvals[5];
#endif
#endif

    const Real* dxinv = this->m_geom[amrlev][mglev].InvCellSize();
    AMREX_D_TERM(const RT dhx = m_b_scalar*RT(dxinv[0]*dxinv[0]);,
                 const RT dhy = m_b_scalar*RT(dxinv[1]*dxinv[1]);,
                 const RT dhz = m_b_scalar*RT(dxinv[2]*dxinv[2]););

#if (AMREX_SPACEDIM == 3)
    RT dh0 = this->get_d0(dhx, dhy, dhz);
    RT dh1 = this->get_d1(dhx, dhy, dhz);
#endif

#if (AMREX_SPACEDIM < 3)
    const RT dx = RT(this->m_geom[amrlev][mglev].CellSize(0));
    const RT probxlo = RT(this->m_geom[amrlev][mglev].ProbLo(0));
#endif

    const RT alpha = m_a_scalar;

    MFItInfo mfi_info;
    if (Gpu::notInLaunchRegion()) { mfi_info.EnableTiling().SetDynamic(true); }

#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
    for (MFIter mfi(sol,mfi_info); mfi.isValid(); ++mfi)
    {
        const auto& m0 = mm0.array(mfi);
        const auto& m1 = mm1.array(mfi);
#if (AMREX_SPACEDIM > 1)
        const auto& m2 = mm2.array(mfi);
        const auto& m3 = mm3.array(mfi);
#if (AMREX_SPACEDIM > 2)
        const auto& m4 = mm4.array(mfi);
        const auto& m5 = mm5.array(mfi);
#endif
#endif

        const Box& tbx = mfi.tilebox();
        const Box& vbx = mfi.validbox();
        const auto& solnfab = sol.array(mfi);
        const auto& rhsfab  = rhs.array(mfi);
        const auto& afab    = acoef.array(mfi);

        const auto& f0fab = f0.array(mfi);
        const auto& f1fab = f1.array(mfi);
#if (AMREX_SPACEDIM > 1)
        const auto& f2fab = f2.array(mfi);
        const auto& f3fab = f3.array(mfi);
#if (AMREX_SPACEDIM > 2)
        const auto& f4fab = f4.array(mfi);
        const auto& f5fab = f5.array(mfi);
#endif
#endif

#if (AMREX_SPACEDIM == 1)
        if (this->m_has_metric_term) {
            AMREX_LAUNCH_HOST_DEVICE_LAMBDA ( tbx, thread_box,
            {
                mlalap_gsrb_m(thread_box, solnfab, rhsfab, alpha, dhx,
                              afab,
                              f0fab, m0,
                              f1fab, m1,
                              vbx, redblack,
                              dx, probxlo, ncomp);
            });
        } else {
            AMREX_LAUNCH_HOST_DEVICE_LAMBDA ( tbx, thread_box,
            {
                mlalap_gsrb(thread_box, solnfab, rhsfab, alpha, dhx,
                            afab,
                            f0fab, m0,
                            f1fab, m1,
                            vbx, redblack, ncomp);
            });
        }

#endif

#if (AMREX_SPACEDIM == 2)
        if (this->m_has_metric_term) {
            AMREX_LAUNCH_HOST_DEVICE_LAMBDA ( tbx, thread_box,
            {
                mlalap_gsrb_m(thread_box, solnfab, rhsfab, alpha, dhx, dhy,
                              afab,
                              f0fab, m0,
                              f1fab, m1,
                              f2fab, m2,
                              f3fab, m3,
                              vbx, redblack,
                              dx, probxlo, ncomp);
            });
        } else {
            AMREX_LAUNCH_HOST_DEVICE_LAMBDA ( tbx, thread_box,
            {
                mlalap_gsrb(thread_box, solnfab, rhsfab, alpha, dhx, dhy,
                            afab,
                            f0fab, m0,
                            f1fab, m1,
                            f2fab, m2,
                            f3fab, m3,
                            vbx, redblack, ncomp);
            });
        }
#endif

#if (AMREX_SPACEDIM == 3)
        if (this->hasHiddenDimension()) {
            Box const& tbx_2d = this->compactify(tbx);
            Box const& vbx_2d = this->compactify(vbx);
            const auto& solnfab_2d = this->compactify(solnfab);
            const auto& rhsfab_2d = this->compactify(rhsfab);
            const auto& afab_2d = this->compactify(afab);
            const auto& f0fab_2d = this->compactify(this->get_d0(f0fab,f1fab,f2fab));
            const auto& f1fab_2d = this->compactify(this->get_d1(f0fab,f1fab,f2fab));
            const auto& f2fab_2d = this->compactify(this->get_d0(f3fab,f4fab,f5fab));
            const auto& f3fab_2d = this->compactify(this->get_d1(f3fab,f4fab,f5fab));
            const auto& m0_2d = this->compactify(this->get_d0(m0,m1,m2));
            const auto& m1_2d = this->compactify(this->get_d1(m0,m1,m2));
            const auto& m2_2d = this->compactify(this->get_d0(m3,m4,m5));
            const auto& m3_2d = this->compactify(this->get_d1(m3,m4,m5));
            AMREX_LAUNCH_HOST_DEVICE_LAMBDA ( tbx_2d, thread_box,
            {
                TwoD::mlalap_gsrb(thread_box, solnfab_2d, rhsfab_2d, alpha, dh0, dh1,
                                  afab_2d,
                                  f0fab_2d, m0_2d,
                                  f1fab_2d, m1_2d,
                                  f2fab_2d, m2_2d,
                                  f3fab_2d, m3_2d,
                                  vbx_2d, redblack, ncomp);
            });
        } else {
            AMREX_LAUNCH_HOST_DEVICE_LAMBDA ( tbx, thread_box,
            {
                mlalap_gsrb(thread_box, solnfab, rhsfab, alpha, dhx, dhy, dhz,
                            afab,
                            f0fab, m0,
                            f1fab, m1,
                            f2fab, m2,
                            f3fab, m3,
                            f4fab, m4,
                            f5fab, m5,
                            vbx, redblack, ncomp);
            });
        }
#endif
    }
}

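// FFlux fills the face-centered flux arrays for this operator on the given
// tile, scaling the normal difference of sol by b_scalar/dx on each face.
// With face_only set, only the first and last face planes of the box in each
// direction are filled; the _m kernel variants add the metric terms used for
// non-Cartesian 1D/2D coordinates.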
template <typename MF>
void
MLALaplacianT<MF>::FFlux (int amrlev, const MFIter& mfi,
                          const Array<FAB*,AMREX_SPACEDIM>& flux,
                          const FAB& sol, Location, int face_only) const
{
    BL_PROFILE("MLALaplacian::FFlux()");

    const int ncomp = this->getNComp();
    const int mglev = 0;
    const Box& box = mfi.tilebox();
    const Real* dxinv = this->m_geom[amrlev][mglev].InvCellSize();

    AMREX_D_TERM(const auto& fxarr = flux[0]->array();,
                 const auto& fyarr = flux[1]->array();,
                 const auto& fzarr = flux[2]->array(););
    const auto& solarr = sol.array();

#if (AMREX_SPACEDIM != 3)
    const RT dx = RT(this->m_geom[amrlev][mglev].CellSize(0));
    const RT probxlo = RT(this->m_geom[amrlev][mglev].ProbLo(0));
#endif

#if (AMREX_SPACEDIM == 3)
    if (face_only) {
        if (this->hiddenDirection() != 0) {
            RT fac = m_b_scalar * RT(dxinv[0]);
            Box blo = amrex::bdryLo(box, 0);
            int blen = box.length(0);
            AMREX_LAUNCH_HOST_DEVICE_LAMBDA ( blo, tbox,
            {
                mlalap_flux_xface(tbox, fxarr, solarr, fac, blen, ncomp);
            });
        } else {
            flux[0]->template setVal<RunOn::Device>(RT(0.0));
        }
        if (this->hiddenDirection() != 1) {
            RT fac = m_b_scalar * RT(dxinv[1]);
            Box blo = amrex::bdryLo(box, 1);
            int blen = box.length(1);
            AMREX_LAUNCH_HOST_DEVICE_LAMBDA ( blo, tbox,
            {
                mlalap_flux_yface(tbox, fyarr, solarr, fac, blen, ncomp);
            });
        } else {
            flux[1]->template setVal<RunOn::Device>(RT(0.0));
        }
        if (this->hiddenDirection() != 2) {
            RT fac = m_b_scalar * RT(dxinv[2]);
            Box blo = amrex::bdryLo(box, 2);
            int blen = box.length(2);
            AMREX_LAUNCH_HOST_DEVICE_LAMBDA ( blo, tbox,
            {
                mlalap_flux_zface(tbox, fzarr, solarr, fac, blen, ncomp);
            });
        } else {
            flux[2]->template setVal<RunOn::Device>(RT(0.0));
        }
    } else {
        if (this->hiddenDirection() != 0) {
            RT fac = m_b_scalar * RT(dxinv[0]);
            Box bflux = amrex::surroundingNodes(box, 0);
            AMREX_LAUNCH_HOST_DEVICE_LAMBDA ( bflux, tbox,
            {
                mlalap_flux_x(tbox, fxarr, solarr, fac, ncomp);
            });
        } else {
            flux[0]->template setVal<RunOn::Device>(RT(0.0));
        }
        if (this->hiddenDirection() != 1) {
            RT fac = m_b_scalar * RT(dxinv[1]);
            Box bflux = amrex::surroundingNodes(box, 1);
            AMREX_LAUNCH_HOST_DEVICE_LAMBDA ( bflux, tbox,
            {
                mlalap_flux_y(tbox, fyarr, solarr, fac, ncomp);
            });
        } else {
            flux[1]->template setVal<RunOn::Device>(RT(0.0));
        }
        if (this->hiddenDirection() != 2) {
            RT fac = m_b_scalar * RT(dxinv[2]);
            Box bflux = amrex::surroundingNodes(box, 2);
            AMREX_LAUNCH_HOST_DEVICE_LAMBDA ( bflux, tbox,
            {
                mlalap_flux_z(tbox, fzarr, solarr, fac, ncomp);
            });
        } else {
            flux[2]->template setVal<RunOn::Device>(RT(0.0));
        }
    }
#elif (AMREX_SPACEDIM == 2)
    if (face_only) {
        if (this->hiddenDirection() != 0) {
            RT fac = m_b_scalar * RT(dxinv[0]);
            Box blo = amrex::bdryLo(box, 0);
            int blen = box.length(0);
            if (this->m_has_metric_term) {
                AMREX_LAUNCH_HOST_DEVICE_LAMBDA ( blo, tbox,
                {
                    mlalap_flux_xface_m(tbox, fxarr, solarr, fac, blen, dx, probxlo, ncomp);
                });
            } else {
                AMREX_LAUNCH_HOST_DEVICE_LAMBDA ( blo, tbox,
                {
                    mlalap_flux_xface(tbox, fxarr, solarr, fac, blen, ncomp);
                });
            }
        } else {
            flux[0]->template setVal<RunOn::Device>(RT(0.0));
        }
        if (this->hiddenDirection() != 1) {
            RT fac = m_b_scalar * RT(dxinv[1]);
            Box blo = amrex::bdryLo(box, 1);
            int blen = box.length(1);
            if (this->m_has_metric_term) {
                AMREX_LAUNCH_HOST_DEVICE_LAMBDA ( blo, tbox,
                {
                    mlalap_flux_yface_m(tbox, fyarr, solarr, fac, blen, dx, probxlo, ncomp);
                });
            } else {
                AMREX_LAUNCH_HOST_DEVICE_LAMBDA ( blo, tbox,
                {
                    mlalap_flux_yface(tbox, fyarr, solarr, fac, blen, ncomp);
                });
            }
        } else {
            flux[1]->template setVal<RunOn::Device>(RT(0.0));
        }
    } else {
        if (this->hiddenDirection() != 0) {
            RT fac = m_b_scalar * RT(dxinv[0]);
            Box bflux = amrex::surroundingNodes(box, 0);
            if (this->m_has_metric_term) {
                AMREX_LAUNCH_HOST_DEVICE_LAMBDA ( bflux, tbox,
                {
                    mlalap_flux_x_m(tbox, fxarr, solarr, fac, dx, probxlo, ncomp);
                });
            } else {
                AMREX_LAUNCH_HOST_DEVICE_LAMBDA ( bflux, tbox,
                {
                    mlalap_flux_x(tbox, fxarr, solarr, fac, ncomp);
                });
            }
        } else {
            flux[0]->template setVal<RunOn::Device>(RT(0.0));
        }
        if (this->hiddenDirection() != 1) {
            RT fac = m_b_scalar * RT(dxinv[1]);
            Box bflux = amrex::surroundingNodes(box, 1);
            if (this->m_has_metric_term) {
                AMREX_LAUNCH_HOST_DEVICE_LAMBDA ( bflux, tbox,
                {
                    mlalap_flux_y_m(tbox, fyarr, solarr, fac, dx, probxlo, ncomp);
                });
            } else {
                AMREX_LAUNCH_HOST_DEVICE_LAMBDA ( bflux, tbox,
                {
                    mlalap_flux_y(tbox, fyarr, solarr, fac, ncomp);
                });
            }
        } else {
            flux[1]->template setVal<RunOn::Device>(RT(0.0));
        }
    }
#else
    if (face_only) {
        RT fac = m_b_scalar * RT(dxinv[0]);
        Box blo = amrex::bdryLo(box, 0);
        int blen = box.length(0);
        if (this->m_has_metric_term) {
            AMREX_LAUNCH_HOST_DEVICE_LAMBDA ( blo, tbox,
            {
                mlalap_flux_xface_m(tbox, fxarr, solarr, fac, blen, dx, probxlo, ncomp);
            });
        } else {
            AMREX_LAUNCH_HOST_DEVICE_LAMBDA ( blo, tbox,
            {
                mlalap_flux_xface(tbox, fxarr, solarr, fac, blen, ncomp);
            });
        }
    } else {
        RT fac = m_b_scalar * RT(dxinv[0]);
        Box bflux = amrex::surroundingNodes(box, 0);
        if (this->m_has_metric_term) {
            AMREX_LAUNCH_HOST_DEVICE_LAMBDA ( bflux, tbox,
            {
                mlalap_flux_x_m(tbox, fxarr, solarr, fac, dx, probxlo, ncomp);
            });
        } else {
            AMREX_LAUNCH_HOST_DEVICE_LAMBDA ( bflux, tbox,
            {
                mlalap_flux_x(tbox, fxarr, solarr, fac, ncomp);
            });
        }
    }
#endif
}

extern template class MLALaplacianT<MultiFab>;

using MLALaplacian = MLALaplacianT<MultiFab>;

}

#endif
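For context, here is a minimal sketch of how this operator is typically driven through AMReX's MLMG solver. It is not part of the header above; the function name solve_alaplacian, the single-level setup (geom, grids, dmap), the pre-filled solution/rhs/acoef MultiFabs, the all-Dirichlet boundary choice, the scalar values, and the tolerances are illustrative assumptions.

#include <AMReX_MLALaplacian.H>
#include <AMReX_MLMG.H>

using namespace amrex;

// Hypothetical driver: solves (a_scalar * acoef - b_scalar * del^2) phi = rhs
// on a single level, with Dirichlet values taken from the ghost cells of
// `solution`.
void solve_alaplacian (const Geometry& geom, const BoxArray& grids,
                       const DistributionMapping& dmap,
                       MultiFab& solution, const MultiFab& rhs,
                       const MultiFab& acoef)
{
    MLALaplacian mlalap({geom}, {grids}, {dmap});

    // Domain boundary conditions (illustrative: Dirichlet on all sides).
    mlalap.setDomainBC({AMREX_D_DECL(LinOpBCType::Dirichlet,
                                     LinOpBCType::Dirichlet,
                                     LinOpBCType::Dirichlet)},
                       {AMREX_D_DECL(LinOpBCType::Dirichlet,
                                     LinOpBCType::Dirichlet,
                                     LinOpBCType::Dirichlet)});
    // Boundary values for level 0 come from the ghost cells of `solution`.
    mlalap.setLevelBC(0, &solution);

    mlalap.setScalars(1.0, 1.0);   // a_scalar, b_scalar
    mlalap.setACoeffs(0, acoef);   // alpha coefficient on AMR level 0

    MLMG mlmg(mlalap);
    mlmg.setVerbose(1);
    const Real tol_rel = 1.e-10;
    const Real tol_abs = 0.0;
    mlmg.solve({&solution}, {&rhs}, tol_rel, tol_abs);
}

Note that setScalars and setACoeffs must be called before the solve; with a_scalar == 0 the alpha coefficient is zeroed out and, with no Dirichlet boundaries, updateSingularFlag() marks the operator as singular.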