Block-Structured AMR Software Framework
AMReX_MLABecLaplacian.H
#ifndef AMREX_ML_ABECLAPLACIAN_H_
#define AMREX_ML_ABECLAPLACIAN_H_
#include <AMReX_Config.H>

#include <AMReX_MLCellABecLap.H>
#include <AMReX_MLABecLap_K.H>

namespace amrex {

// (alpha * a - beta * (del dot b grad)) phi
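//
// A minimal usage sketch (illustrative, not part of this header). It
// assumes the caller has already built `geom`, `grids`, and `dmap`,
// filled the cell-centered MultiFab `acoef` and the face-centered
// MultiFabs `face_bcoef`, prepared `phi` and `rhs`, chosen the
// LinOpBCType arrays `lobc`/`hibc`, and included <AMReX_MLMG.H>:
//
//     MLABecLaplacian mlabec({geom}, {grids}, {dmap});
//     mlabec.setDomainBC(lobc, hibc);
//     mlabec.setLevelBC(0, &phi);   // boundary values live in phi's ghost cells
//     mlabec.setScalars(1.0, 1.0);  // alpha and beta in the operator above
//     mlabec.setACoeffs(0, acoef);
//     mlabec.setBCoeffs(0, amrex::GetArrOfConstPtrs(face_bcoef));
//     MLMG mlmg(mlabec);
//     mlmg.solve({&phi}, {&rhs}, 1.e-10, 0.0);
//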
template <typename MF>
class MLABecLaplacianT
    : public MLCellABecLapT<MF>
{
public:

    using FAB = typename MF::fab_type;
    using RT = typename MF::value_type;

    using BCType = LinOpBCType;
    using Location = typename MLLinOpT<MF>::Location;

    MLABecLaplacianT () = default;
    MLABecLaplacianT (const Vector<Geometry>& a_geom,
                      const Vector<BoxArray>& a_grids,
                      const Vector<DistributionMapping>& a_dmap,
                      const LPInfo& a_info = LPInfo(),
                      const Vector<FabFactory<FAB> const*>& a_factory = {},
                      int a_ncomp = 1);

    MLABecLaplacianT (const Vector<Geometry>& a_geom,
                      const Vector<BoxArray>& a_grids,
                      const Vector<DistributionMapping>& a_dmap,
                      const Vector<iMultiFab const*>& a_overset_mask, // 1: unknown, 0: known
                      const LPInfo& a_info = LPInfo(),
                      const Vector<FabFactory<FAB> const*>& a_factory = {},
                      int a_ncomp = 1);

    ~MLABecLaplacianT () override;

    MLABecLaplacianT (const MLABecLaplacianT<MF>&) = delete;
    MLABecLaplacianT (MLABecLaplacianT<MF>&&) = delete;
    MLABecLaplacianT<MF>& operator= (const MLABecLaplacianT<MF>&) = delete;
    MLABecLaplacianT<MF>& operator= (MLABecLaplacianT<MF>&&) = delete;
    void define (const Vector<Geometry>& a_geom,
                 const Vector<BoxArray>& a_grids,
                 const Vector<DistributionMapping>& a_dmap,
                 const LPInfo& a_info = LPInfo(),
                 const Vector<FabFactory<FAB> const*>& a_factory = {},
                 int a_ncomp = 1);

    void define (const Vector<Geometry>& a_geom,
                 const Vector<BoxArray>& a_grids,
                 const Vector<DistributionMapping>& a_dmap,
                 const Vector<iMultiFab const*>& a_overset_mask,
                 const LPInfo& a_info = LPInfo(),
                 const Vector<FabFactory<FAB> const*>& a_factory = {},
                 int a_ncomp = 1);

    //! Set the scalars alpha and beta in (alpha * a - beta * (del dot b grad)) phi.
    //! If alpha is zero, the a coefficients are zeroed out and marked as set.
    template <typename T1, typename T2,
              std::enable_if_t<std::is_convertible_v<T1,typename MF::value_type> &&
                               std::is_convertible_v<T2,typename MF::value_type>,
                               int> = 0>
    void setScalars (T1 a, T2 b) noexcept;

    //! Set the cell-centered coefficient a on the given AMR level.
    template <typename AMF,
              std::enable_if_t<IsFabArray<AMF>::value &&
                               std::is_convertible_v<typename AMF::value_type,
                                                     typename MF::value_type>,
                               int> = 0>
    void setACoeffs (int amrlev, const AMF& alpha);

    //! Set the coefficient a to a constant on the given AMR level.
    template <typename T,
              std::enable_if_t<std::is_convertible_v<T,typename MF::value_type>,
                               int> = 0>
    void setACoeffs (int amrlev, T alpha);

    //! Set the face-centered coefficients b on the given AMR level.
    template <typename AMF,
              std::enable_if_t<IsFabArray<AMF>::value &&
                               std::is_convertible_v<typename AMF::value_type,
                                                     typename MF::value_type>,
                               int> = 0>
    void setBCoeffs (int amrlev, const Array<AMF const*,AMREX_SPACEDIM>& beta);

    //! Set the coefficients b to a constant on the given AMR level.
    template <typename T,
              std::enable_if_t<std::is_convertible_v<T,typename MF::value_type>,
                               int> = 0>
    void setBCoeffs (int amrlev, T beta);

    //! Set the coefficients b to a constant for each component on the given AMR level.
    template <typename T,
              std::enable_if_t<std::is_convertible_v<T,typename MF::value_type>,
                               int> = 0>
    void setBCoeffs (int amrlev, Vector<T> const& beta);

    //! Return number of components.
    [[nodiscard]] int getNComp () const override { return m_ncomp; }

    //! Does it need update if it's reused?
    [[nodiscard]] bool needsUpdate () const override {
        return (m_needs_update || MLCellABecLapT<MF>::needsUpdate());
    }
    //! Update for reuse.
    void update () override;

    void prepareForSolve () override;
    //! Is it singular on the given AMR level?
    [[nodiscard]] bool isSingular (int amrlev) const override { return m_is_singular[amrlev]; }
    //! Is the bottom of MG singular?
    [[nodiscard]] bool isBottomSingular () const override { return m_is_singular[0]; }
    void Fapply (int amrlev, int mglev, MF& out, const MF& in) const final;
    void Fsmooth (int amrlev, int mglev, MF& sol, const MF& rhs, int redblack) const final;
    void FFlux (int amrlev, const MFIter& mfi,
                const Array<FAB*,AMREX_SPACEDIM>& flux,
                const FAB& sol, Location /* loc */,
                int face_only=0) const final;

    //! Divide mf by the diagonal component of the operator. Used by bicgstab.
    void normalize (int amrlev, int mglev, MF& mf) const final;

    [[nodiscard]] RT getAScalar () const final { return m_a_scalar; }
    [[nodiscard]] RT getBScalar () const final { return m_b_scalar; }
    [[nodiscard]] MF const* getACoeffs (int amrlev, int mglev) const final
        { return &(m_a_coeffs[amrlev][mglev]); }
    [[nodiscard]] Array<MF const*,AMREX_SPACEDIM> getBCoeffs (int amrlev, int mglev) const final
        { return amrex::GetArrOfConstPtrs(m_b_coeffs[amrlev][mglev]); }

    [[nodiscard]] std::unique_ptr<MLLinOpT<MF>> makeNLinOp (int /*grid_size*/) const final;

    [[nodiscard]] bool supportNSolve () const final;

    void copyNSolveSolution (MF& dst, MF const& src) const final;

    void averageDownCoeffsSameAmrLevel (int amrlev, Vector<MF>& a,
                                        Vector<Array<MF,AMREX_SPACEDIM> >& b);
    void averageDownCoeffs ();
    void averageDownCoeffsToCoarseAmrLevel (int flev);

    void applyMetricTermsCoeffs ();

    void applyRobinBCTermsCoeffs ();

    static void FFlux (Box const& box, Real const* dxinv, RT bscalar,
                       Array<FAB const*, AMREX_SPACEDIM> const& bcoef,
                       Array<FAB*,AMREX_SPACEDIM> const& flux,
                       FAB const& sol, int face_only, int ncomp);

    RT m_a_scalar = std::numeric_limits<RT>::quiet_NaN();
    RT m_b_scalar = std::numeric_limits<RT>::quiet_NaN();
    Vector<Vector<MF> > m_a_coeffs;
    Vector<Vector<Array<MF,AMREX_SPACEDIM> > > m_b_coeffs;

    bool m_scalars_set = false;
    bool m_acoef_set = false;

protected:

    bool m_needs_update = true;

    Vector<int> m_is_singular;

    [[nodiscard]] bool supportRobinBC () const noexcept override { return true; }

private:

    int m_ncomp = 1;

    void define_ab_coeffs ();

    void update_singular_flags ();
};

template <typename MF>
MLABecLaplacianT<MF>::MLABecLaplacianT (const Vector<Geometry>& a_geom,
                                        const Vector<BoxArray>& a_grids,
                                        const Vector<DistributionMapping>& a_dmap,
                                        const LPInfo& a_info,
                                        const Vector<FabFactory<FAB> const*>& a_factory,
                                        int a_ncomp)
{
    define(a_geom, a_grids, a_dmap, a_info, a_factory, a_ncomp);
}

template <typename MF>
MLABecLaplacianT<MF>::MLABecLaplacianT (const Vector<Geometry>& a_geom,
                                        const Vector<BoxArray>& a_grids,
                                        const Vector<DistributionMapping>& a_dmap,
                                        const Vector<iMultiFab const*>& a_overset_mask,
                                        const LPInfo& a_info,
                                        const Vector<FabFactory<FAB> const*>& a_factory,
                                        int a_ncomp)
{
    define(a_geom, a_grids, a_dmap, a_overset_mask, a_info, a_factory, a_ncomp);
}

template <typename MF> MLABecLaplacianT<MF>::~MLABecLaplacianT () = default;

template <typename MF>
void
MLABecLaplacianT<MF>::define (const Vector<Geometry>& a_geom,
                              const Vector<BoxArray>& a_grids,
                              const Vector<DistributionMapping>& a_dmap,
                              const LPInfo& a_info,
                              const Vector<FabFactory<FAB> const*>& a_factory,
                              int a_ncomp)
{
    BL_PROFILE("MLABecLaplacian::define()");
    this->m_ncomp = a_ncomp;
    MLCellABecLapT<MF>::define(a_geom, a_grids, a_dmap, a_info, a_factory);
    define_ab_coeffs();
}

template <typename MF>
void
MLABecLaplacianT<MF>::define (const Vector<Geometry>& a_geom,
                              const Vector<BoxArray>& a_grids,
                              const Vector<DistributionMapping>& a_dmap,
                              const Vector<iMultiFab const*>& a_overset_mask,
                              const LPInfo& a_info,
                              const Vector<FabFactory<FAB> const*>& a_factory,
                              int a_ncomp)
{
    BL_PROFILE("MLABecLaplacian::define(overset)");
    this->m_ncomp = a_ncomp;
    MLCellABecLapT<MF>::define(a_geom, a_grids, a_dmap, a_overset_mask, a_info, a_factory);
    define_ab_coeffs();
}

template <typename MF>
void
MLABecLaplacianT<MF>::define_ab_coeffs ()
{
    m_a_coeffs.resize(this->m_num_amr_levels);
    m_b_coeffs.resize(this->m_num_amr_levels);
    for (int amrlev = 0; amrlev < this->m_num_amr_levels; ++amrlev)
    {
        m_a_coeffs[amrlev].resize(this->m_num_mg_levels[amrlev]);
        m_b_coeffs[amrlev].resize(this->m_num_mg_levels[amrlev]);
        for (int mglev = 0; mglev < this->m_num_mg_levels[amrlev]; ++mglev)
        {
            m_a_coeffs[amrlev][mglev].define
                (this->m_grids[amrlev][mglev], this->m_dmap[amrlev][mglev],
                 1, 0, MFInfo(), *(this->m_factory[amrlev][mglev]));
            for (int idim = 0; idim < AMREX_SPACEDIM; ++idim)
            {
                const BoxArray& ba = amrex::convert(this->m_grids[amrlev][mglev],
                                                    IntVect::TheDimensionVector(idim));
                m_b_coeffs[amrlev][mglev][idim].define
                    (ba, this->m_dmap[amrlev][mglev], m_ncomp, 0, MFInfo(),
                     *(this->m_factory[amrlev][mglev]));
            }
        }
    }
}

template <typename MF>
template <typename T1, typename T2,
          std::enable_if_t<std::is_convertible_v<T1,typename MF::value_type> &&
                           std::is_convertible_v<T2,typename MF::value_type>, int>>
void
MLABecLaplacianT<MF>::setScalars (T1 a, T2 b) noexcept
{
    m_a_scalar = RT(a);
    m_b_scalar = RT(b);
    if (m_a_scalar == RT(0.0)) {
        for (int amrlev = 0; amrlev < this->m_num_amr_levels; ++amrlev) {
            m_a_coeffs[amrlev][0].setVal(RT(0.0));
        }
        m_acoef_set = true;
    }
    m_scalars_set = true;
}

template <typename MF>
template <typename AMF,
          std::enable_if_t<IsFabArray<AMF>::value &&
                           std::is_convertible_v<typename AMF::value_type,
                                                 typename MF::value_type>, int>>
void
MLABecLaplacianT<MF>::setACoeffs (int amrlev, const AMF& alpha)
{
    AMREX_ASSERT_WITH_MESSAGE(alpha.nComp() == 1,
                              "MLABecLaplacian::setACoeffs: alpha is supposed to be single component.");
    m_a_coeffs[amrlev][0].LocalCopy(alpha, 0, 0, 1, IntVect(0));
    m_needs_update = true;
    m_acoef_set = true;
}

template <typename MF>
template <typename T,
          std::enable_if_t<std::is_convertible_v<T,typename MF::value_type>,int>>
void
MLABecLaplacianT<MF>::setACoeffs (int amrlev, T alpha)
{
    m_a_coeffs[amrlev][0].setVal(RT(alpha));
    m_needs_update = true;
    m_acoef_set = true;
}

template <typename MF>
template <typename AMF,
          std::enable_if_t<IsFabArray<AMF>::value &&
                           std::is_convertible_v<typename AMF::value_type,
                                                 typename MF::value_type>, int>>
void
MLABecLaplacianT<MF>::setBCoeffs (int amrlev,
                                  const Array<AMF const*,AMREX_SPACEDIM>& beta)
{
    const int ncomp = this->getNComp();
    AMREX_ASSERT(beta[0]->nComp() == 1 || beta[0]->nComp() == ncomp);
    if (beta[0]->nComp() == ncomp) {
        for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
            for (int icomp = 0; icomp < ncomp; ++icomp) {
                m_b_coeffs[amrlev][0][idim].LocalCopy(*beta[idim], icomp, icomp, 1, IntVect(0));
            }
        }
    } else {
        for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
            for (int icomp = 0; icomp < ncomp; ++icomp) {
                m_b_coeffs[amrlev][0][idim].LocalCopy(*beta[idim], 0, icomp, 1, IntVect(0));
            }
        }
    }
    m_needs_update = true;
}

template <typename MF>
template <typename T,
          std::enable_if_t<std::is_convertible_v<T,typename MF::value_type>,int>>
void
MLABecLaplacianT<MF>::setBCoeffs (int amrlev, T beta)
{
    for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
        m_b_coeffs[amrlev][0][idim].setVal(RT(beta));
    }
    m_needs_update = true;
}

template <typename MF>
template <typename T,
          std::enable_if_t<std::is_convertible_v<T,typename MF::value_type>,int>>
void
MLABecLaplacianT<MF>::setBCoeffs (int amrlev, Vector<T> const& beta)
{
    const int ncomp = this->getNComp();
    for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
        for (int icomp = 0; icomp < ncomp; ++icomp) {
            m_b_coeffs[amrlev][0][idim].setVal(RT(beta[icomp]));
        }
    }
    m_needs_update = true;
}

template <typename MF>
void
MLABecLaplacianT<MF>::update ()
{
    if (MLCellABecLapT<MF>::needsUpdate()) {
        MLCellABecLapT<MF>::update();
    }

#if (AMREX_SPACEDIM != 3)
    applyMetricTermsCoeffs();
#endif

    applyRobinBCTermsCoeffs();

    averageDownCoeffs();

    update_singular_flags();

    m_needs_update = false;
}

template <typename MF>
void
MLABecLaplacianT<MF>::prepareForSolve ()
{
    BL_PROFILE("MLABecLaplacian::prepareForSolve()");

    MLCellABecLapT<MF>::prepareForSolve();

#if (AMREX_SPACEDIM != 3)
    applyMetricTermsCoeffs();
#endif

    applyRobinBCTermsCoeffs();

    averageDownCoeffs();

    update_singular_flags();

    m_needs_update = false;
}

template <typename MF>
void
MLABecLaplacianT<MF>::applyMetricTermsCoeffs ()
{
#if (AMREX_SPACEDIM != 3)
    for (int alev = 0; alev < this->m_num_amr_levels; ++alev)
    {
        const int mglev = 0;
        this->applyMetricTerm(alev, mglev, m_a_coeffs[alev][mglev]);
        for (int idim = 0; idim < AMREX_SPACEDIM; ++idim)
        {
            this->applyMetricTerm(alev, mglev, m_b_coeffs[alev][mglev][idim]);
        }
    }
#endif
}

//
// Suppose we are solving `alpha u - del (beta grad u) = rhs` (scalar
// coefficients can easily be added back in at the end) and there is a Robin
// BC `a u + b du/dn = f` at the upper end of the x-direction. The 1D
// discretization at the last cell i is
//
//     alpha u_i + (beta_{i-1/2} (du/dx)_{i-1/2} - beta_{i+1/2} (du/dx)_{i+1/2}) / h = rhs_i
//
// where h is the cell size. At `i+1/2` (i.e., the boundary), we have
//
//     a (u_i + u_{i+1})/2 + b (u_{i+1}-u_i)/h = f,
//
// according to the Robin BC. This gives
//
//     u_{i+1} = A + B u_i,
//
// where `A = f/(b/h + a/2)` and `B = (b/h - a/2) / (b/h + a/2)`. We then
// use `u_i` and `u_{i+1}` to compute `(du/dx)_{i+1/2}`. The discretization
// at cell i then becomes
//
//     \tilde{alpha}_i u_i + (beta_{i-1/2} (du/dx)_{i-1/2} - 0) / h = \tilde{rhs}_i
//
// This is equivalent to having a homogeneous Neumann BC with modified alpha and rhs:
//
//     \tilde{alpha}_i = alpha_i + (1-B) beta_{i+1/2} / h^2
//     \tilde{rhs}_i = rhs_i + A beta_{i+1/2} / h^2
//
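//
// A quick sanity check of these formulas: with pure Neumann data, a = 0 and
// b = 1, so B = 1 and A = f h; then \tilde{alpha}_i = alpha_i (no
// modification) and \tilde{rhs}_i = rhs_i + f beta_{i+1/2} / h, the usual
// Neumann flux contribution. With Dirichlet data, a = 1 and b = 0, so
// B = -1 and A = 2 f, giving \tilde{alpha}_i = alpha_i + 2 beta_{i+1/2} / h^2
// and \tilde{rhs}_i = rhs_i + 2 f beta_{i+1/2} / h^2, the familiar
// ghost-cell Dirichlet stencil. Both limits fall out of the same Robin
// formulas above.
//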
namespace detail {
template <typename LP>
void applyRobinBCTermsCoeffs (LP& linop)
{
    using RT = typename LP::RT;

    const int ncomp = linop.getNComp();
    bool reset_alpha = false;
    if (linop.m_a_scalar == RT(0.0)) {
        linop.m_a_scalar = RT(1.0);
        reset_alpha = true;
    }
    const RT bovera = linop.m_b_scalar/linop.m_a_scalar;

    if (!reset_alpha) {
        AMREX_ALWAYS_ASSERT_WITH_MESSAGE(linop.m_scalars_set && linop.m_acoef_set,
            "To reuse the solver with Robin BC, one must re-call setScalars (and setACoeffs if the scalar is not zero)");
    }

    linop.m_scalars_set = false;
    linop.m_acoef_set = false;

    for (int amrlev = 0; amrlev < linop.NAMRLevels(); ++amrlev) {
        const int mglev = 0;
        const Box& domain = linop.Geom(amrlev,mglev).Domain();
        const RT dxi = static_cast<RT>(linop.Geom(amrlev,mglev).InvCellSize(0));
        const RT dyi = static_cast<RT>((AMREX_SPACEDIM >= 2) ? linop.Geom(amrlev,mglev).InvCellSize(1) : Real(1.0));
        const RT dzi = static_cast<RT>((AMREX_SPACEDIM == 3) ? linop.Geom(amrlev,mglev).InvCellSize(2) : Real(1.0));

        if (reset_alpha) {
            linop.m_a_coeffs[amrlev][mglev].setVal(RT(0.0));
        }

        MFItInfo mfi_info;
        if (Gpu::notInLaunchRegion()) { mfi_info.SetDynamic(true); }

#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
        for (MFIter mfi(linop.m_a_coeffs[amrlev][mglev], mfi_info); mfi.isValid(); ++mfi)
        {
            const Box& vbx = mfi.validbox();
            auto const& afab = linop.m_a_coeffs[amrlev][mglev].array(mfi);
            for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
                auto const& bfab = linop.m_b_coeffs[amrlev][mglev][idim].const_array(mfi);
                const Box& blo = amrex::adjCellLo(vbx,idim);
                const Box& bhi = amrex::adjCellHi(vbx,idim);
                bool outside_domain_lo = !(domain.contains(blo));
                bool outside_domain_hi = !(domain.contains(bhi));
                if ((!outside_domain_lo) && (!outside_domain_hi)) { continue; }
                for (int icomp = 0; icomp < ncomp; ++icomp) {
                    auto const& rbc = (*(linop.m_robin_bcval[amrlev]))[mfi].const_array(icomp*3);
                    if (linop.m_lobc_orig[icomp][idim] == LinOpBCType::Robin && outside_domain_lo)
                    {
                        if (idim == 0) {
                            RT fac = bovera*dxi*dxi;
                            AMREX_HOST_DEVICE_FOR_3D(blo, i, j, k,
                            {
                                RT B = (rbc(i,j,k,1)*dxi - rbc(i,j,k,0)*RT(0.5))
                                     / (rbc(i,j,k,1)*dxi + rbc(i,j,k,0)*RT(0.5));
                                afab(i+1,j,k,icomp) += fac*bfab(i+1,j,k,icomp)*(RT(1.0)-B);
                            });
                        } else if (idim == 1) {
                            RT fac = bovera*dyi*dyi;
                            AMREX_HOST_DEVICE_FOR_3D(blo, i, j, k,
                            {
                                RT B = (rbc(i,j,k,1)*dyi - rbc(i,j,k,0)*RT(0.5))
                                     / (rbc(i,j,k,1)*dyi + rbc(i,j,k,0)*RT(0.5));
                                afab(i,j+1,k,icomp) += fac*bfab(i,j+1,k,icomp)*(RT(1.0)-B);
                            });
                        } else {
                            RT fac = bovera*dzi*dzi;
                            AMREX_HOST_DEVICE_FOR_3D(blo, i, j, k,
                            {
                                RT B = (rbc(i,j,k,1)*dzi - rbc(i,j,k,0)*RT(0.5))
                                     / (rbc(i,j,k,1)*dzi + rbc(i,j,k,0)*RT(0.5));
                                afab(i,j,k+1,icomp) += fac*bfab(i,j,k+1,icomp)*(RT(1.0)-B);
                            });
                        }
                    }
                    if (linop.m_hibc_orig[icomp][idim] == LinOpBCType::Robin && outside_domain_hi)
                    {
                        if (idim == 0) {
                            RT fac = bovera*dxi*dxi;
                            AMREX_HOST_DEVICE_FOR_3D(bhi, i, j, k,
                            {
                                RT B = (rbc(i,j,k,1)*dxi - rbc(i,j,k,0)*RT(0.5))
                                     / (rbc(i,j,k,1)*dxi + rbc(i,j,k,0)*RT(0.5));
                                afab(i-1,j,k,icomp) += fac*bfab(i,j,k,icomp)*(RT(1.0)-B);
                            });
                        } else if (idim == 1) {
                            RT fac = bovera*dyi*dyi;
                            AMREX_HOST_DEVICE_FOR_3D(bhi, i, j, k,
                            {
                                RT B = (rbc(i,j,k,1)*dyi - rbc(i,j,k,0)*RT(0.5))
                                     / (rbc(i,j,k,1)*dyi + rbc(i,j,k,0)*RT(0.5));
                                afab(i,j-1,k,icomp) += fac*bfab(i,j,k,icomp)*(RT(1.0)-B);
                            });
                        } else {
                            RT fac = bovera*dzi*dzi;
                            AMREX_HOST_DEVICE_FOR_3D(bhi, i, j, k,
                            {
                                RT B = (rbc(i,j,k,1)*dzi - rbc(i,j,k,0)*RT(0.5))
                                     / (rbc(i,j,k,1)*dzi + rbc(i,j,k,0)*RT(0.5));
                                afab(i,j,k-1,icomp) += fac*bfab(i,j,k,icomp)*(RT(1.0)-B);
                            });
                        }
                    }
                }
            }
        }
    }
}
} // namespace detail

template <typename MF>
void
MLABecLaplacianT<MF>::applyRobinBCTermsCoeffs ()
{
    if (this->hasRobinBC()) {
        detail::applyRobinBCTermsCoeffs(*this);
    }
}

template <typename MF>
void
MLABecLaplacianT<MF>::averageDownCoeffs ()
{
    BL_PROFILE("MLABecLaplacian::averageDownCoeffs()");

    for (int amrlev = this->m_num_amr_levels-1; amrlev > 0; --amrlev)
    {
        auto& fine_a_coeffs = m_a_coeffs[amrlev];
        auto& fine_b_coeffs = m_b_coeffs[amrlev];

        averageDownCoeffsSameAmrLevel(amrlev, fine_a_coeffs, fine_b_coeffs);
        averageDownCoeffsToCoarseAmrLevel(amrlev);
    }

    averageDownCoeffsSameAmrLevel(0, m_a_coeffs[0], m_b_coeffs[0]);
}

template <typename MF>
void
MLABecLaplacianT<MF>::averageDownCoeffsSameAmrLevel (int amrlev, Vector<MF>& a,
                                                     Vector<Array<MF,AMREX_SPACEDIM> >& b)
{
    int nmglevs = a.size();
    for (int mglev = 1; mglev < nmglevs; ++mglev)
    {
        IntVect ratio = (amrlev > 0) ? IntVect(this->mg_coarsen_ratio) : this->mg_coarsen_ratio_vec[mglev-1];

        if (m_a_scalar == 0.0)
        {
            a[mglev].setVal(RT(0.0));
        }
        else
        {
            amrex::average_down(a[mglev-1], a[mglev], 0, 1, ratio);
        }

        Vector<const MF*> fine {AMREX_D_DECL(&(b[mglev-1][0]),
                                             &(b[mglev-1][1]),
                                             &(b[mglev-1][2]))};
        Vector<MF*> crse {AMREX_D_DECL(&(b[mglev][0]),
                                       &(b[mglev][1]),
                                       &(b[mglev][2]))};

        amrex::average_down_faces(fine, crse, ratio, 0);
    }

    for (int mglev = 1; mglev < nmglevs; ++mglev)
    {
        if (this->m_overset_mask[amrlev][mglev]) {
            const RT fac = static_cast<RT>(1 << mglev); // 2**mglev
            const RT osfac = RT(2.0)*fac/(fac+RT(1.0));
            const int ncomp = this->getNComp();
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
            for (MFIter mfi(a[mglev],TilingIfNotGPU()); mfi.isValid(); ++mfi)
            {
                AMREX_D_TERM(Box const& xbx = mfi.nodaltilebox(0);,
                             Box const& ybx = mfi.nodaltilebox(1);,
                             Box const& zbx = mfi.nodaltilebox(2));
                AMREX_D_TERM(auto const& bx = b[mglev][0].array(mfi);,
                             auto const& by = b[mglev][1].array(mfi);,
                             auto const& bz = b[mglev][2].array(mfi));
                auto const& osm = this->m_overset_mask[amrlev][mglev]->const_array(mfi);
                AMREX_LAUNCH_HOST_DEVICE_LAMBDA_DIM
                    (xbx, t_xbx,
                     {
                         overset_rescale_bcoef_x(t_xbx, bx, osm, ncomp, osfac);
                     },
                     ybx, t_ybx,
                     {
                         overset_rescale_bcoef_y(t_ybx, by, osm, ncomp, osfac);
                     },
                     zbx, t_zbx,
                     {
                         overset_rescale_bcoef_z(t_zbx, bz, osm, ncomp, osfac);
                     });
            }
        }
    }
}

template <typename MF>
void
MLABecLaplacianT<MF>::averageDownCoeffsToCoarseAmrLevel (int flev)
{
    auto& fine_a_coeffs = m_a_coeffs[flev  ].back();
    auto& fine_b_coeffs = m_b_coeffs[flev  ].back();
    auto& crse_a_coeffs = m_a_coeffs[flev-1].front();
    auto& crse_b_coeffs = m_b_coeffs[flev-1].front();

    if (m_a_scalar != 0.0) {
        // We coarsen from the back of flev to the front of flev-1.
        // So we use mg_coarsen_ratio.
        amrex::average_down(fine_a_coeffs, crse_a_coeffs, 0, 1, this->mg_coarsen_ratio);
    }

    amrex::average_down_faces(amrex::GetArrOfConstPtrs(fine_b_coeffs),
                              amrex::GetArrOfPtrs(crse_b_coeffs),
                              IntVect(this->mg_coarsen_ratio),
                              this->m_geom[flev-1][0]);
}

template <typename MF>
void
MLABecLaplacianT<MF>::update_singular_flags ()
{
    m_is_singular.clear();
    m_is_singular.resize(this->m_num_amr_levels, false);
    auto itlo = std::find(this->m_lobc[0].begin(), this->m_lobc[0].end(), BCType::Dirichlet);
    auto ithi = std::find(this->m_hibc[0].begin(), this->m_hibc[0].end(), BCType::Dirichlet);
    if (itlo == this->m_lobc[0].end() && ithi == this->m_hibc[0].end())
    { // No Dirichlet
        for (int alev = 0; alev < this->m_num_amr_levels; ++alev)
        {
            // For now this assumes that overset regions are treated as Dirichlet bc's
            if (this->m_domain_covered[alev] && !this->m_overset_mask[alev][0])
            {
                if (m_a_scalar == Real(0.0))
                {
                    m_is_singular[alev] = true;
                }
                else
                {
                    RT asum = m_a_coeffs[alev].back().sum(0,IntVect(0));
                    RT amax = m_a_coeffs[alev].back().norminf(0,1,IntVect(0));
                    m_is_singular[alev] = (std::abs(asum) <= amax * RT(1.e-12));
                }
            }
        }
    }

    if (!m_is_singular[0] && this->m_needs_coarse_data_for_bc &&
        this->m_coarse_fine_bc_type == LinOpBCType::Neumann)
    {
        AMREX_ASSERT(this->m_overset_mask[0][0] == nullptr);

        bool lev0_a_is_zero = false;
        if (m_a_scalar == Real(0.0)) {
            lev0_a_is_zero = true;
        } else {
            RT asum = m_a_coeffs[0].back().sum(0,IntVect(0));
            RT amax = m_a_coeffs[0].back().norminf(0,1,IntVect(0));
            bool a_is_almost_zero = std::abs(asum) <= amax * RT(1.e-12);
            if (a_is_almost_zero) { lev0_a_is_zero = true; }
        }

        if (lev0_a_is_zero) {
            auto bbox = this->m_grids[0][0].minimalBox();
            for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
                if (this->m_lobc[0][idim] == LinOpBCType::Dirichlet) {
                    bbox.growLo(idim,1);
                }
                if (this->m_hibc[0][idim] == LinOpBCType::Dirichlet) {
                    bbox.growHi(idim,1);
                }
            }
            if (this->m_geom[0][0].Domain().contains(bbox)) {
                m_is_singular[0] = true;
            }
        }
    }
}

template <typename MF>
void
MLABecLaplacianT<MF>::Fapply (int amrlev, int mglev, MF& out, const MF& in) const
{
    BL_PROFILE("MLABecLaplacian::Fapply()");

    const MF& acoef = m_a_coeffs[amrlev][mglev];
    AMREX_D_TERM(const MF& bxcoef = m_b_coeffs[amrlev][mglev][0];,
                 const MF& bycoef = m_b_coeffs[amrlev][mglev][1];,
                 const MF& bzcoef = m_b_coeffs[amrlev][mglev][2];);

    const GpuArray<RT,AMREX_SPACEDIM> dxinv
        {AMREX_D_DECL(static_cast<RT>(this->m_geom[amrlev][mglev].InvCellSize(0)),
                      static_cast<RT>(this->m_geom[amrlev][mglev].InvCellSize(1)),
                      static_cast<RT>(this->m_geom[amrlev][mglev].InvCellSize(2)))};

    const RT ascalar = m_a_scalar;
    const RT bscalar = m_b_scalar;

    const int ncomp = this->getNComp();

#ifdef AMREX_USE_GPU
    if (Gpu::inLaunchRegion()) {
        const auto& xma = in.const_arrays();
        const auto& yma = out.arrays();
        const auto& ama = acoef.arrays();
        AMREX_D_TERM(const auto& bxma = bxcoef.const_arrays();,
                     const auto& byma = bycoef.const_arrays();,
                     const auto& bzma = bzcoef.const_arrays(););
        if (this->m_overset_mask[amrlev][mglev]) {
            const auto& osmma = this->m_overset_mask[amrlev][mglev]->const_arrays();
            ParallelFor(out, IntVect(0), ncomp,
            [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k, int n) noexcept
            {
                mlabeclap_adotx_os(i,j,k,n, yma[box_no], xma[box_no], ama[box_no],
                                   AMREX_D_DECL(bxma[box_no],byma[box_no],bzma[box_no]),
                                   osmma[box_no], dxinv, ascalar, bscalar);
            });
        } else {
            ParallelFor(out, IntVect(0), ncomp,
            [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k, int n) noexcept
            {
                mlabeclap_adotx(i,j,k,n, yma[box_no], xma[box_no], ama[box_no],
                                AMREX_D_DECL(bxma[box_no],byma[box_no],bzma[box_no]),
                                dxinv, ascalar, bscalar);
            });
        }
        Gpu::streamSynchronize();
    } else
#endif
    {
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
        for (MFIter mfi(out, TilingIfNotGPU()); mfi.isValid(); ++mfi)
        {
            const Box& bx = mfi.tilebox();
            const auto& xfab = in.array(mfi);
            const auto& yfab = out.array(mfi);
            const auto& afab = acoef.array(mfi);
            AMREX_D_TERM(const auto& bxfab = bxcoef.array(mfi);,
                         const auto& byfab = bycoef.array(mfi);,
                         const auto& bzfab = bzcoef.array(mfi););
            if (this->m_overset_mask[amrlev][mglev]) {
                const auto& osm = this->m_overset_mask[amrlev][mglev]->const_array(mfi);
                AMREX_HOST_DEVICE_PARALLEL_FOR_4D(bx, ncomp, i, j, k, n,
                {
                    mlabeclap_adotx_os(i,j,k,n, yfab, xfab, afab, AMREX_D_DECL(bxfab,byfab,bzfab),
                                       osm, dxinv, ascalar, bscalar);
                });
            } else {
                AMREX_HOST_DEVICE_PARALLEL_FOR_4D(bx, ncomp, i, j, k, n,
                {
                    mlabeclap_adotx(i,j,k,n, yfab, xfab, afab, AMREX_D_DECL(bxfab,byfab,bzfab),
                                    dxinv, ascalar, bscalar);
                });
            }
        }
    }
}

template <typename MF>
void
MLABecLaplacianT<MF>::Fsmooth (int amrlev, int mglev, MF& sol, const MF& rhs, int redblack) const
{
    BL_PROFILE("MLABecLaplacian::Fsmooth()");

    bool regular_coarsening = true;
    if (amrlev == 0 && mglev > 0) {
        regular_coarsening = this->mg_coarsen_ratio_vec[mglev-1] == this->mg_coarsen_ratio;
    }

    MF Ax;
    if (! this->m_use_gauss_seidel && regular_coarsening) { // jacobi
        Ax.define(sol.boxArray(), sol.DistributionMap(), sol.nComp(), 0);
        Fapply(amrlev, mglev, Ax, sol);
    }

    const MF& acoef = m_a_coeffs[amrlev][mglev];
    AMREX_ALWAYS_ASSERT(acoef.nGrowVect() == 0);
    AMREX_D_TERM(const MF& bxcoef = m_b_coeffs[amrlev][mglev][0];,
                 const MF& bycoef = m_b_coeffs[amrlev][mglev][1];,
                 const MF& bzcoef = m_b_coeffs[amrlev][mglev][2];);
    const auto& undrrelxr = this->m_undrrelxr[amrlev][mglev];
    const auto& maskvals = this->m_maskvals[amrlev][mglev];

    OrientationIter oitr;

    const auto& f0 = undrrelxr[oitr()]; ++oitr;
    const auto& f1 = undrrelxr[oitr()]; ++oitr;
#if (AMREX_SPACEDIM > 1)
    const auto& f2 = undrrelxr[oitr()]; ++oitr;
    const auto& f3 = undrrelxr[oitr()]; ++oitr;
#if (AMREX_SPACEDIM > 2)
    const auto& f4 = undrrelxr[oitr()]; ++oitr;
    const auto& f5 = undrrelxr[oitr()]; ++oitr;
#endif
#endif

    const MultiMask& mm0 = maskvals[0];
    const MultiMask& mm1 = maskvals[1];
#if (AMREX_SPACEDIM > 1)
    const MultiMask& mm2 = maskvals[2];
    const MultiMask& mm3 = maskvals[3];
#if (AMREX_SPACEDIM > 2)
    const MultiMask& mm4 = maskvals[4];
    const MultiMask& mm5 = maskvals[5];
#endif
#endif

    const int nc = this->getNComp();
    const Real* h = this->m_geom[amrlev][mglev].CellSize();
    AMREX_D_TERM(const RT dhx = m_b_scalar/static_cast<RT>(h[0]*h[0]);,
                 const RT dhy = m_b_scalar/static_cast<RT>(h[1]*h[1]);,
                 const RT dhz = m_b_scalar/static_cast<RT>(h[2]*h[2]));
    const RT alpha = m_a_scalar;

#ifdef AMREX_USE_GPU
    if (Gpu::inLaunchRegion()
        && (this->m_overset_mask[amrlev][mglev] || regular_coarsening))
    {
        const auto& m0ma = mm0.const_arrays();
        const auto& m1ma = mm1.const_arrays();
#if (AMREX_SPACEDIM > 1)
        const auto& m2ma = mm2.const_arrays();
        const auto& m3ma = mm3.const_arrays();
#if (AMREX_SPACEDIM > 2)
        const auto& m4ma = mm4.const_arrays();
        const auto& m5ma = mm5.const_arrays();
#endif
#endif

        const auto& solnma = sol.arrays();
        const auto& rhsma = rhs.const_arrays();
        const auto& ama = acoef.const_arrays();

        AMREX_D_TERM(const auto& bxma = bxcoef.const_arrays();,
                     const auto& byma = bycoef.const_arrays();,
                     const auto& bzma = bzcoef.const_arrays(););

        const auto& f0ma = f0.const_arrays();
        const auto& f1ma = f1.const_arrays();
#if (AMREX_SPACEDIM > 1)
        const auto& f2ma = f2.const_arrays();
        const auto& f3ma = f3.const_arrays();
#if (AMREX_SPACEDIM > 2)
        const auto& f4ma = f4.const_arrays();
        const auto& f5ma = f5.const_arrays();
#endif
#endif

        if (this->m_overset_mask[amrlev][mglev]) {
            const auto& osmma = this->m_overset_mask[amrlev][mglev]->const_arrays();
            if (this->m_use_gauss_seidel) {
                ParallelFor(sol, IntVect(0), nc,
                [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k, int n) noexcept
                {
                    Box vbx(ama[box_no]);
                    abec_gsrb_os(i,j,k,n, solnma[box_no], rhsma[box_no], alpha, ama[box_no],
                                 AMREX_D_DECL(dhx, dhy, dhz),
                                 AMREX_D_DECL(bxma[box_no],byma[box_no],bzma[box_no]),
                                 AMREX_D_DECL(m0ma[box_no],m2ma[box_no],m4ma[box_no]),
                                 AMREX_D_DECL(m1ma[box_no],m3ma[box_no],m5ma[box_no]),
                                 AMREX_D_DECL(f0ma[box_no],f2ma[box_no],f4ma[box_no]),
                                 AMREX_D_DECL(f1ma[box_no],f3ma[box_no],f5ma[box_no]),
                                 osmma[box_no], vbx, redblack);
                });
            } else {
                const auto& axma = Ax.const_arrays();
                ParallelFor(sol, IntVect(0), nc,
                [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k, int n) noexcept
                {
                    Box vbx(ama[box_no]);
                    abec_jacobi_os(i,j,k,n, solnma[box_no], rhsma[box_no], axma[box_no],
                                   alpha, ama[box_no],
                                   AMREX_D_DECL(dhx, dhy, dhz),
                                   AMREX_D_DECL(bxma[box_no],byma[box_no],bzma[box_no]),
                                   AMREX_D_DECL(m0ma[box_no],m2ma[box_no],m4ma[box_no]),
                                   AMREX_D_DECL(m1ma[box_no],m3ma[box_no],m5ma[box_no]),
                                   AMREX_D_DECL(f0ma[box_no],f2ma[box_no],f4ma[box_no]),
                                   AMREX_D_DECL(f1ma[box_no],f3ma[box_no],f5ma[box_no]),
                                   osmma[box_no], vbx);
                });
            }
        } else if (regular_coarsening) {
            if (this->m_use_gauss_seidel) {
                ParallelFor(sol, IntVect(0), nc,
                [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k, int n) noexcept
                {
                    Box vbx(ama[box_no]);
                    abec_gsrb(i,j,k,n, solnma[box_no], rhsma[box_no], alpha, ama[box_no],
                              AMREX_D_DECL(dhx, dhy, dhz),
                              AMREX_D_DECL(bxma[box_no],byma[box_no],bzma[box_no]),
                              AMREX_D_DECL(m0ma[box_no],m2ma[box_no],m4ma[box_no]),
                              AMREX_D_DECL(m1ma[box_no],m3ma[box_no],m5ma[box_no]),
                              AMREX_D_DECL(f0ma[box_no],f2ma[box_no],f4ma[box_no]),
                              AMREX_D_DECL(f1ma[box_no],f3ma[box_no],f5ma[box_no]),
                              vbx, redblack);
                });
            } else {
                const auto& axma = Ax.const_arrays();
                ParallelFor(sol, IntVect(0), nc,
                [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k, int n) noexcept
                {
                    Box vbx(ama[box_no]);
                    abec_jacobi(i,j,k,n, solnma[box_no], rhsma[box_no], axma[box_no],
                                alpha, ama[box_no],
                                AMREX_D_DECL(dhx, dhy, dhz),
                                AMREX_D_DECL(bxma[box_no],byma[box_no],bzma[box_no]),
                                AMREX_D_DECL(m0ma[box_no],m2ma[box_no],m4ma[box_no]),
                                AMREX_D_DECL(m1ma[box_no],m3ma[box_no],m5ma[box_no]),
                                AMREX_D_DECL(f0ma[box_no],f2ma[box_no],f4ma[box_no]),
                                AMREX_D_DECL(f1ma[box_no],f3ma[box_no],f5ma[box_no]),
                                vbx);
                });
            }
        }
        Gpu::streamSynchronize();
    } else
#endif
    {
        MFItInfo mfi_info;
        mfi_info.EnableTiling().SetDynamic(true);

#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
        for (MFIter mfi(sol,mfi_info); mfi.isValid(); ++mfi)
        {
            const auto& m0 = mm0.array(mfi);
            const auto& m1 = mm1.array(mfi);
#if (AMREX_SPACEDIM > 1)
            const auto& m2 = mm2.array(mfi);
            const auto& m3 = mm3.array(mfi);
#if (AMREX_SPACEDIM > 2)
            const auto& m4 = mm4.array(mfi);
            const auto& m5 = mm5.array(mfi);
#endif
#endif

            const Box& tbx = mfi.tilebox();
            const Box& vbx = mfi.validbox();
            const auto& solnfab = sol.array(mfi);
            const auto& rhsfab = rhs.const_array(mfi);
            const auto& afab = acoef.const_array(mfi);

            AMREX_D_TERM(const auto& bxfab = bxcoef.const_array(mfi);,
                         const auto& byfab = bycoef.const_array(mfi);,
                         const auto& bzfab = bzcoef.const_array(mfi););

            const auto& f0fab = f0.const_array(mfi);
            const auto& f1fab = f1.const_array(mfi);
#if (AMREX_SPACEDIM > 1)
            const auto& f2fab = f2.const_array(mfi);
            const auto& f3fab = f3.const_array(mfi);
#if (AMREX_SPACEDIM > 2)
            const auto& f4fab = f4.const_array(mfi);
            const auto& f5fab = f5.const_array(mfi);
#endif
#endif

            if (this->m_overset_mask[amrlev][mglev]) {
                const auto& osm = this->m_overset_mask[amrlev][mglev]->const_array(mfi);
                if (this->m_use_gauss_seidel) {
                    AMREX_LOOP_4D(tbx, nc, i, j, k, n,
                    {
                        abec_gsrb_os(i,j,k,n, solnfab, rhsfab, alpha, afab,
                                     AMREX_D_DECL(dhx, dhy, dhz),
                                     AMREX_D_DECL(bxfab, byfab, bzfab),
                                     AMREX_D_DECL(m0,m2,m4),
                                     AMREX_D_DECL(m1,m3,m5),
                                     AMREX_D_DECL(f0fab,f2fab,f4fab),
                                     AMREX_D_DECL(f1fab,f3fab,f5fab),
                                     osm, vbx, redblack);
                    });
                } else {
                    const auto& axfab = Ax.const_array(mfi);
                    AMREX_LOOP_4D(tbx, nc, i, j, k, n,
                    {
                        abec_jacobi_os(i,j,k,n, solnfab, rhsfab, axfab,
                                       alpha, afab,
                                       AMREX_D_DECL(dhx, dhy, dhz),
                                       AMREX_D_DECL(bxfab, byfab, bzfab),
                                       AMREX_D_DECL(m0,m2,m4),
                                       AMREX_D_DECL(m1,m3,m5),
                                       AMREX_D_DECL(f0fab,f2fab,f4fab),
                                       AMREX_D_DECL(f1fab,f3fab,f5fab),
                                       osm, vbx);
                    });
                }
            } else if (regular_coarsening) {
                if (this->m_use_gauss_seidel) {
                    AMREX_LOOP_4D(tbx, nc, i, j, k, n,
                    {
                        abec_gsrb(i,j,k,n, solnfab, rhsfab, alpha, afab,
                                  AMREX_D_DECL(dhx, dhy, dhz),
                                  AMREX_D_DECL(bxfab, byfab, bzfab),
                                  AMREX_D_DECL(m0,m2,m4),
                                  AMREX_D_DECL(m1,m3,m5),
                                  AMREX_D_DECL(f0fab,f2fab,f4fab),
                                  AMREX_D_DECL(f1fab,f3fab,f5fab),
                                  vbx, redblack);
                    });
                } else {
                    const auto& axfab = Ax.const_array(mfi);
                    AMREX_LOOP_4D(tbx, nc, i, j, k, n,
                    {
                        abec_jacobi(i,j,k,n, solnfab, rhsfab, axfab,
                                    alpha, afab,
                                    AMREX_D_DECL(dhx, dhy, dhz),
                                    AMREX_D_DECL(bxfab, byfab, bzfab),
                                    AMREX_D_DECL(m0,m2,m4),
                                    AMREX_D_DECL(m1,m3,m5),
                                    AMREX_D_DECL(f0fab,f2fab,f4fab),
                                    AMREX_D_DECL(f1fab,f3fab,f5fab),
                                    vbx);
                    });
                }
            } else {
                // line solve does not work with GPU
                abec_gsrb_with_line_solve(tbx, solnfab, rhsfab, alpha, afab,
                                          AMREX_D_DECL(dhx, dhy, dhz),
                                          AMREX_D_DECL(bxfab, byfab, bzfab),
                                          AMREX_D_DECL(m0,m2,m4),
                                          AMREX_D_DECL(m1,m3,m5),
                                          AMREX_D_DECL(f0fab,f2fab,f4fab),
                                          AMREX_D_DECL(f1fab,f3fab,f5fab),
                                          vbx, redblack, nc);
            }
        }
    }
}

template <typename MF>
void
MLABecLaplacianT<MF>::FFlux (int amrlev, const MFIter& mfi,
                             const Array<FAB*,AMREX_SPACEDIM>& flux,
                             const FAB& sol, Location, int face_only) const
{
    BL_PROFILE("MLABecLaplacian::FFlux()");

    const int mglev = 0;
    const Box& box = mfi.tilebox();
    const Real* dxinv = this->m_geom[amrlev][mglev].InvCellSize();
    const int ncomp = this->getNComp();
    FFlux(box, dxinv, m_b_scalar,
          Array<FAB const*,AMREX_SPACEDIM>{{AMREX_D_DECL(&(m_b_coeffs[amrlev][mglev][0][mfi]),
                                                         &(m_b_coeffs[amrlev][mglev][1][mfi]),
                                                         &(m_b_coeffs[amrlev][mglev][2][mfi]))}},
          flux, sol, face_only, ncomp);
}

template <typename MF>
void
MLABecLaplacianT<MF>::FFlux (Box const& box, Real const* dxinv, RT bscalar,
                             Array<FAB const*, AMREX_SPACEDIM> const& bcoef,
                             Array<FAB*,AMREX_SPACEDIM> const& flux,
                             FAB const& sol, int face_only, int ncomp)
{
    AMREX_D_TERM(const auto bx = bcoef[0]->const_array();,
                 const auto by = bcoef[1]->const_array();,
                 const auto bz = bcoef[2]->const_array(););
    AMREX_D_TERM(const auto& fxarr = flux[0]->array();,
                 const auto& fyarr = flux[1]->array();,
                 const auto& fzarr = flux[2]->array(););
    const auto& solarr = sol.array();

    if (face_only)
    {
        RT fac = bscalar*static_cast<RT>(dxinv[0]);
        Box blo = amrex::bdryLo(box, 0);
        int blen = box.length(0);
        AMREX_LAUNCH_HOST_DEVICE_LAMBDA ( blo, tbox,
        {
            mlabeclap_flux_xface(tbox, fxarr, solarr, bx, fac, blen, ncomp);
        });
#if (AMREX_SPACEDIM >= 2)
        fac = bscalar*static_cast<RT>(dxinv[1]);
        blo = amrex::bdryLo(box, 1);
        blen = box.length(1);
        AMREX_LAUNCH_HOST_DEVICE_LAMBDA ( blo, tbox,
        {
            mlabeclap_flux_yface(tbox, fyarr, solarr, by, fac, blen, ncomp);
        });
#endif
#if (AMREX_SPACEDIM == 3)
        fac = bscalar*static_cast<RT>(dxinv[2]);
        blo = amrex::bdryLo(box, 2);
        blen = box.length(2);
        AMREX_LAUNCH_HOST_DEVICE_LAMBDA ( blo, tbox,
        {
            mlabeclap_flux_zface(tbox, fzarr, solarr, bz, fac, blen, ncomp);
        });
#endif
    }
    else
    {
        RT fac = bscalar*static_cast<RT>(dxinv[0]);
        Box bflux = amrex::surroundingNodes(box, 0);
        AMREX_LAUNCH_HOST_DEVICE_LAMBDA ( bflux, tbox,
        {
            mlabeclap_flux_x(tbox, fxarr, solarr, bx, fac, ncomp);
        });
#if (AMREX_SPACEDIM >= 2)
        fac = bscalar*static_cast<RT>(dxinv[1]);
        bflux = amrex::surroundingNodes(box, 1);
        AMREX_LAUNCH_HOST_DEVICE_LAMBDA ( bflux, tbox,
        {
            mlabeclap_flux_y(tbox, fyarr, solarr, by, fac, ncomp);
        });
#endif
#if (AMREX_SPACEDIM == 3)
        fac = bscalar*static_cast<RT>(dxinv[2]);
        bflux = amrex::surroundingNodes(box, 2);
        AMREX_LAUNCH_HOST_DEVICE_LAMBDA ( bflux, tbox,
        {
            mlabeclap_flux_z(tbox, fzarr, solarr, bz, fac, ncomp);
        });
#endif
    }
}

template <typename MF>
void
MLABecLaplacianT<MF>::normalize (int amrlev, int mglev, MF& mf) const
{
    BL_PROFILE("MLABecLaplacian::normalize()");

    const auto& acoef = m_a_coeffs[amrlev][mglev];
    AMREX_D_TERM(const auto& bxcoef = m_b_coeffs[amrlev][mglev][0];,
                 const auto& bycoef = m_b_coeffs[amrlev][mglev][1];,
                 const auto& bzcoef = m_b_coeffs[amrlev][mglev][2];);

    const GpuArray<RT,AMREX_SPACEDIM> dxinv
        {AMREX_D_DECL(static_cast<RT>(this->m_geom[amrlev][mglev].InvCellSize(0)),
                      static_cast<RT>(this->m_geom[amrlev][mglev].InvCellSize(1)),
                      static_cast<RT>(this->m_geom[amrlev][mglev].InvCellSize(2)))};

    const RT ascalar = m_a_scalar;
    const RT bscalar = m_b_scalar;

    const int ncomp = getNComp();

#ifdef AMREX_USE_GPU
    if (Gpu::inLaunchRegion()) {
        const auto& ma = mf.arrays();
        const auto& ama = acoef.const_arrays();
        AMREX_D_TERM(const auto& bxma = bxcoef.const_arrays();,
                     const auto& byma = bycoef.const_arrays();,
                     const auto& bzma = bzcoef.const_arrays(););
        ParallelFor(mf, IntVect(0), ncomp,
        [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k, int n) noexcept
        {
            mlabeclap_normalize(i,j,k,n, ma[box_no], ama[box_no],
                                AMREX_D_DECL(bxma[box_no],byma[box_no],bzma[box_no]),
                                dxinv, ascalar, bscalar);
        });
        Gpu::streamSynchronize();
    } else
#endif
    {
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
        for (MFIter mfi(mf, TilingIfNotGPU()); mfi.isValid(); ++mfi)
        {
            const Box& bx = mfi.tilebox();
            const auto& fab = mf.array(mfi);
            const auto& afab = acoef.array(mfi);
            AMREX_D_TERM(const auto& bxfab = bxcoef.array(mfi);,
                         const auto& byfab = bycoef.array(mfi);,
                         const auto& bzfab = bzcoef.array(mfi););

            AMREX_HOST_DEVICE_PARALLEL_FOR_4D(bx, ncomp, i, j, k, n,
            {
                mlabeclap_normalize(i,j,k,n, fab, afab, AMREX_D_DECL(bxfab,byfab,bzfab),
                                    dxinv, ascalar, bscalar);
            });
        }
    }
}

template <typename MF>
bool
MLABecLaplacianT<MF>::supportNSolve () const
{
    bool support = false;
    if (this->m_overset_mask[0][0]) {
        if (this->m_geom[0].back().Domain().coarsenable(MLLinOp::mg_coarsen_ratio,
                                                        this->mg_domain_min_width)
            && this->m_grids[0].back().coarsenable(MLLinOp::mg_coarsen_ratio, MLLinOp::mg_box_min_width))
        {
            support = true;
        }
    }
    return support;
}

template <typename MF>
std::unique_ptr<MLLinOpT<MF>>
MLABecLaplacianT<MF>::makeNLinOp (int /*grid_size*/) const
{
    if (this->m_overset_mask[0][0] == nullptr) { return nullptr; }

    const Geometry& geom = this->m_geom[0].back();
    const BoxArray& ba = this->m_grids[0].back();
    const DistributionMapping& dm = this->m_dmap[0].back();

    std::unique_ptr<MLLinOpT<MF>> r
        {new MLABecLaplacianT<MF>({geom}, {ba}, {dm}, this->m_lpinfo_arg)};

    auto nop = dynamic_cast<MLABecLaplacianT<MF>*>(r.get());
    if (!nop) {
        return nullptr;
    }

    nop->m_parent = this;

    nop->setMaxOrder(this->maxorder);
    nop->setVerbose(this->verbose);

    nop->setDomainBC(this->m_lobc, this->m_hibc);

    if (this->needsCoarseDataForBC())
    {
        const Real* dx0 = this->m_geom[0][0].CellSize();
        RealVect fac(this->m_coarse_data_crse_ratio);
        fac *= Real(0.5);
        RealVect cbloc {AMREX_D_DECL(dx0[0]*fac[0], dx0[1]*fac[1], dx0[2]*fac[2])};
        nop->setCoarseFineBCLocation(cbloc);
    }

    nop->setScalars(m_a_scalar, m_b_scalar);

    MF const& alpha_bottom = m_a_coeffs[0].back();
    iMultiFab const& osm_bottom = *(this->m_overset_mask[0].back());
    const int ncomp = alpha_bottom.nComp();
    MF alpha(ba, dm, ncomp, 0);

    RT a_max = alpha_bottom.norminf(0, ncomp, IntVect(0), true, true);
    const int ncomp_b = m_b_coeffs[0].back()[0].nComp();
    AMREX_D_TERM(RT bx_max = m_b_coeffs[0].back()[0].norminf(0,ncomp_b,IntVect(0),true,true);,
                 RT by_max = m_b_coeffs[0].back()[1].norminf(0,ncomp_b,IntVect(0),true,true);,
                 RT bz_max = m_b_coeffs[0].back()[2].norminf(0,ncomp_b,IntVect(0),true,true));
    const GpuArray<RT,AMREX_SPACEDIM> dxinv
        {AMREX_D_DECL(static_cast<RT>(geom.InvCellSize(0)),
                      static_cast<RT>(geom.InvCellSize(1)),
                      static_cast<RT>(geom.InvCellSize(2)))};
    RT huge_alpha = RT(1.e30) *
        amrex::max(a_max*std::abs(m_a_scalar),
                   AMREX_D_DECL(std::abs(m_b_scalar)*bx_max*dxinv[0]*dxinv[0],
                                std::abs(m_b_scalar)*by_max*dxinv[1]*dxinv[1],
                                std::abs(m_b_scalar)*bz_max*dxinv[2]*dxinv[2]));
    ParallelAllReduce::Max(huge_alpha, ParallelContext::CommunicatorSub());

#ifdef AMREX_USE_GPU
    if (Gpu::inLaunchRegion() && alpha.isFusingCandidate()) {
        auto const& ama = alpha.arrays();
        auto const& abotma = alpha_bottom.const_arrays();
        auto const& mma = osm_bottom.const_arrays();
        ParallelFor(alpha, IntVect(0), ncomp,
        [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k, int n) noexcept
        {
            if (mma[box_no](i,j,k)) {
                ama[box_no](i,j,k,n) = abotma[box_no](i,j,k,n);
            } else {
                ama[box_no](i,j,k,n) = huge_alpha;
            }
        });
        Gpu::streamSynchronize();
    } else
#endif
    {
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
        for (MFIter mfi(alpha,TilingIfNotGPU()); mfi.isValid(); ++mfi) {
            Box const& bx = mfi.tilebox();
            auto const& a = alpha.array(mfi);
            auto const& abot = alpha_bottom.const_array(mfi);
            auto const& m = osm_bottom.const_array(mfi);
            AMREX_HOST_DEVICE_PARALLEL_FOR_4D(bx, ncomp, i, j, k, n,
            {
                if (m(i,j,k)) {
                    a(i,j,k,n) = abot(i,j,k,n);
                } else {
                    a(i,j,k,n) = huge_alpha;
                }
            });
        }
    }

    nop->setACoeffs(0, alpha);
    nop->setBCoeffs(0, GetArrOfConstPtrs(m_b_coeffs[0].back()));

    return r;
}

template <typename MF>
void
MLABecLaplacianT<MF>::copyNSolveSolution (MF& dst, MF const& src) const
{
    if (this->m_overset_mask[0].back() == nullptr) { return; }

    const int ncomp = dst.nComp();

#ifdef AMREX_USE_GPU
    if (Gpu::inLaunchRegion() && dst.isFusingCandidate()) {
        auto const& dstma = dst.arrays();
        auto const& srcma = src.const_arrays();
        auto const& mma = this->m_overset_mask[0].back()->const_arrays();
        ParallelFor(dst, IntVect(0), ncomp,
        [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k, int n) noexcept
        {
            if (mma[box_no](i,j,k)) {
                dstma[box_no](i,j,k,n) = srcma[box_no](i,j,k,n);
            } else {
                dstma[box_no](i,j,k,n) = RT(0.0);
            }
        });
        Gpu::streamSynchronize();
    } else
#endif
    {
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
        for (MFIter mfi(dst,TilingIfNotGPU()); mfi.isValid(); ++mfi) {
            Box const& bx = mfi.tilebox();
            auto const& dfab = dst.array(mfi);
            auto const& sfab = src.const_array(mfi);
            auto const& m = this->m_overset_mask[0].back()->const_array(mfi);
            AMREX_HOST_DEVICE_PARALLEL_FOR_4D(bx, ncomp, i, j, k, n,
            {
                if (m(i,j,k)) {
                    dfab(i,j,k,n) = sfab(i,j,k,n);
                } else {
                    dfab(i,j,k,n) = RT(0.0);
                }
            });
        }
    }
}

extern template class MLABecLaplacianT<MultiFab>;

using MLABecLaplacian = MLABecLaplacianT<MultiFab>;

}

#endif
Definition: AMReX_MFIter.H:34