Block-Structured AMR Software Framework
Loading...
Searching...
No Matches
AMReX_MLABecLaplacian.H
Go to the documentation of this file.
1#ifndef AMREX_ML_ABECLAPLACIAN_H_
2#define AMREX_ML_ABECLAPLACIAN_H_
3#include <AMReX_Config.H>
4
6#include <AMReX_MLABecLap_K.H>
7
8namespace amrex {
9
12template <typename MF>
14 : public MLCellABecLapT<MF>
15{
16public:
17
18 using FAB = typename MF::fab_type;
19 using RT = typename MF::value_type;
20
23
24 MLABecLaplacianT () = default;
26 const Vector<BoxArray>& a_grids,
27 const Vector<DistributionMapping>& a_dmap,
28 const LPInfo& a_info = LPInfo(),
29 const Vector<FabFactory<FAB> const*>& a_factory = {},
30 int a_ncomp = 1);
31
33 const Vector<BoxArray>& a_grids,
34 const Vector<DistributionMapping>& a_dmap,
35 const Vector<iMultiFab const*>& a_overset_mask, // 1: unknown, 0: known
36 const LPInfo& a_info = LPInfo(),
37 const Vector<FabFactory<FAB> const*>& a_factory = {},
38 int a_ncomp = 1);
39
40 ~MLABecLaplacianT () override;
41
46
47 void define (const Vector<Geometry>& a_geom,
48 const Vector<BoxArray>& a_grids,
49 const Vector<DistributionMapping>& a_dmap,
50 const LPInfo& a_info = LPInfo(),
51 const Vector<FabFactory<FAB> const*>& a_factory = {},
52 int a_ncomp = 1);
53
54 void define (const Vector<Geometry>& a_geom,
55 const Vector<BoxArray>& a_grids,
56 const Vector<DistributionMapping>& a_dmap,
57 const Vector<iMultiFab const*>& a_overset_mask,
58 const LPInfo& a_info = LPInfo(),
59 const Vector<FabFactory<FAB> const*>& a_factory = {},
60 int a_ncomp = 1);
61
67 template <typename T1, typename T2,
68 std::enable_if_t<std::is_convertible_v<T1,typename MF::value_type> &&
69 std::is_convertible_v<T2,typename MF::value_type>,
70 int> = 0>
71 void setScalars (T1 a, T2 b) noexcept;
72
82 template <typename AMF,
83 std::enable_if_t<IsFabArray<AMF>::value &&
84 std::is_convertible_v<typename AMF::value_type,
85 typename MF::value_type>,
86 int> = 0>
87 void setACoeffs (int amrlev, const AMF& alpha);
88
98 template <typename T,
99 std::enable_if_t<std::is_convertible_v<T,typename MF::value_type>,
100 int> = 0>
101 void setACoeffs (int amrlev, T alpha);
102
112 template <typename AMF,
113 std::enable_if_t<IsFabArray<AMF>::value &&
114 std::is_convertible_v<typename AMF::value_type,
115 typename MF::value_type>,
116 int> = 0>
117 void setBCoeffs (int amrlev, const Array<AMF const*,AMREX_SPACEDIM>& beta);
118
128 template <typename T,
129 std::enable_if_t<std::is_convertible_v<T,typename MF::value_type>,
130 int> = 0>
131 void setBCoeffs (int amrlev, T beta);
132
142 template <typename T,
143 std::enable_if_t<std::is_convertible_v<T,typename MF::value_type>,
144 int> = 0>
145 void setBCoeffs (int amrlev, Vector<T> const& beta);
146
147 [[nodiscard]] int getNComp () const override { return m_ncomp; }
148
149 [[nodiscard]] bool needsUpdate () const override {
151 }
152 void update () override;
153
154 void prepareForSolve () override;
155 [[nodiscard]] bool isSingular (int amrlev) const override { return m_is_singular[amrlev]; }
156 [[nodiscard]] bool isBottomSingular () const override { return m_is_singular[0]; }
157 void Fapply (int amrlev, int mglev, MF& out, const MF& in) const override;
158 void Fsmooth (int amrlev, int mglev, MF& sol, const MF& rhs, int redblack) const override;
159 void FFlux (int amrlev, const MFIter& mfi,
160 const Array<FAB*,AMREX_SPACEDIM>& flux,
161 const FAB& sol, Location /* loc */,
162 int face_only=0) const override;
163
164 void normalize (int amrlev, int mglev, MF& mf) const override;
165
166 [[nodiscard]] RT getAScalar () const final { return m_a_scalar; }
167 [[nodiscard]] RT getBScalar () const final { return m_b_scalar; }
168 [[nodiscard]] MF const* getACoeffs (int amrlev, int mglev) const final
169 { return &(m_a_coeffs[amrlev][mglev]); }
170 [[nodiscard]] Array<MF const*,AMREX_SPACEDIM> getBCoeffs (int amrlev, int mglev) const final
171 { return amrex::GetArrOfConstPtrs(m_b_coeffs[amrlev][mglev]); }
172
173 [[nodiscard]] std::unique_ptr<MLLinOpT<MF>> makeNLinOp (int /*grid_size*/) const final;
174
175 [[nodiscard]] bool supportNSolve () const override;
176
177 void copyNSolveSolution (MF& dst, MF const& src) const final;
178
179 void averageDownCoeffsSameAmrLevel (int amrlev, Vector<MF>& a,
181 void averageDownCoeffs ();
182 void averageDownCoeffsToCoarseAmrLevel (int flev);
183
185
187
188 static void FFlux (Box const& box, Real const* dxinv, RT bscalar,
190 Array<FAB*,AMREX_SPACEDIM> const& flux,
191 FAB const& sol, int face_only, int ncomp);
192
193 RT m_a_scalar = std::numeric_limits<RT>::quiet_NaN();
194 RT m_b_scalar = std::numeric_limits<RT>::quiet_NaN();
197
198 bool m_scalars_set = false;
199 bool m_acoef_set = false;
200
201protected:
202
203 bool m_needs_update = true;
204
206
207 [[nodiscard]] bool supportRobinBC () const noexcept override { return true; }
208
209private:
210
211 int m_ncomp = 1;
212
213 void define_ab_coeffs ();
214
215 void update_singular_flags ();
216};
217
218template <typename MF>
220 const Vector<BoxArray>& a_grids,
221 const Vector<DistributionMapping>& a_dmap,
222 const LPInfo& a_info,
223 const Vector<FabFactory<FAB> const*>& a_factory,
224 int a_ncomp)
225{
226 define(a_geom, a_grids, a_dmap, a_info, a_factory, a_ncomp);
227}
228
229template <typename MF>
231 const Vector<BoxArray>& a_grids,
232 const Vector<DistributionMapping>& a_dmap,
233 const Vector<iMultiFab const*>& a_overset_mask,
234 const LPInfo& a_info,
235 const Vector<FabFactory<FAB> const*>& a_factory,
236 int a_ncomp)
237{
238 define(a_geom, a_grids, a_dmap, a_overset_mask, a_info, a_factory, a_ncomp);
239}
240
// Defaulted destructor, defined out of line after the class body.
template <typename MF> MLABecLaplacianT<MF>::~MLABecLaplacianT () = default;
242
243template <typename MF>
244void
246 const Vector<BoxArray>& a_grids,
247 const Vector<DistributionMapping>& a_dmap,
248 const LPInfo& a_info,
249 const Vector<FabFactory<FAB> const*>& a_factory,
250 int a_ncomp)
251{
252 BL_PROFILE("MLABecLaplacian::define()");
253 this->m_ncomp = a_ncomp;
254 MLCellABecLapT<MF>::define(a_geom, a_grids, a_dmap, a_info, a_factory);
255 define_ab_coeffs();
256}
257
258template <typename MF>
259void
261 const Vector<BoxArray>& a_grids,
262 const Vector<DistributionMapping>& a_dmap,
263 const Vector<iMultiFab const*>& a_overset_mask,
264 const LPInfo& a_info,
265 const Vector<FabFactory<FAB> const*>& a_factory,
266 int a_ncomp)
267{
268 BL_PROFILE("MLABecLaplacian::define(overset)");
269 this->m_ncomp = a_ncomp;
270 MLCellABecLapT<MF>::define(a_geom, a_grids, a_dmap, a_overset_mask, a_info, a_factory);
271 define_ab_coeffs();
272}
273
274template <typename MF>
275void
277{
278 m_a_coeffs.resize(this->m_num_amr_levels);
279 m_b_coeffs.resize(this->m_num_amr_levels);
280 for (int amrlev = 0; amrlev < this->m_num_amr_levels; ++amrlev)
281 {
282 m_a_coeffs[amrlev].resize(this->m_num_mg_levels[amrlev]);
283 m_b_coeffs[amrlev].resize(this->m_num_mg_levels[amrlev]);
284 for (int mglev = 0; mglev < this->m_num_mg_levels[amrlev]; ++mglev)
285 {
286 m_a_coeffs[amrlev][mglev].define
287 (this->m_grids[amrlev][mglev], this->m_dmap[amrlev][mglev],
288 1, 0, MFInfo(), *(this->m_factory[amrlev][mglev]));
289 for (int idim = 0; idim < AMREX_SPACEDIM; ++idim)
290 {
291 const BoxArray& ba = amrex::convert(this->m_grids[amrlev][mglev],
293 m_b_coeffs[amrlev][mglev][idim].define
294 (ba, this->m_dmap[amrlev][mglev], m_ncomp, 0, MFInfo(),
295 *(this->m_factory[amrlev][mglev]));
296 }
297 }
298 }
299}
300
301template <typename MF>
302template <typename T1, typename T2,
303 std::enable_if_t<std::is_convertible_v<T1,typename MF::value_type> &&
304 std::is_convertible_v<T2,typename MF::value_type>, int>>
305void
307{
308 m_a_scalar = RT(a);
309 m_b_scalar = RT(b);
310 if (m_a_scalar == RT(0.0)) {
311 for (int amrlev = 0; amrlev < this->m_num_amr_levels; ++amrlev) {
312 m_a_coeffs[amrlev][0].setVal(RT(0.0));
313 }
314 m_acoef_set = true;
315 }
316 m_scalars_set = true;
317}
318
319template <typename MF>
320template <typename AMF,
321 std::enable_if_t<IsFabArray<AMF>::value &&
322 std::is_convertible_v<typename AMF::value_type,
323 typename MF::value_type>, int>>
324void
325MLABecLaplacianT<MF>::setACoeffs (int amrlev, const AMF& alpha)
326{
327 AMREX_ASSERT_WITH_MESSAGE(alpha.nComp() == 1,
328 "MLABecLaplacian::setACoeffs: alpha is supposed to be single component.");
329 m_a_coeffs[amrlev][0].LocalCopy(alpha, 0, 0, 1, IntVect(0));
330 m_needs_update = true;
331 m_acoef_set = true;
332}
333
334template <typename MF>
335template <typename T,
336 std::enable_if_t<std::is_convertible_v<T,typename MF::value_type>,int>>
337void
339{
340 m_a_coeffs[amrlev][0].setVal(RT(alpha));
341 m_needs_update = true;
342 m_acoef_set = true;
343}
344
345
346template <typename MF>
347template <typename AMF,
348 std::enable_if_t<IsFabArray<AMF>::value &&
349 std::is_convertible_v<typename AMF::value_type,
350 typename MF::value_type>, int>>
351void
354{
355 const int ncomp = this->getNComp();
356 AMREX_ASSERT(beta[0]->nComp() == 1 || beta[0]->nComp() == ncomp);
357 if (beta[0]->nComp() == ncomp) {
358 for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
359 for (int icomp = 0; icomp < ncomp; ++icomp) {
360 m_b_coeffs[amrlev][0][idim].LocalCopy(*beta[idim], icomp, icomp, 1, IntVect(0));
361 }
362 }
363 } else {
364 for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
365 for (int icomp = 0; icomp < ncomp; ++icomp) {
366 m_b_coeffs[amrlev][0][idim].LocalCopy(*beta[idim], 0, icomp, 1, IntVect(0));
367 }
368 }
369 }
370 m_needs_update = true;
371}
372
373template <typename MF>
374template <typename T,
375 std::enable_if_t<std::is_convertible_v<T,typename MF::value_type>,int>>
376void
378{
379 for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
380 m_b_coeffs[amrlev][0][idim].setVal(RT(beta));
381 }
382 m_needs_update = true;
383}
384
385template <typename MF>
386template <typename T,
387 std::enable_if_t<std::is_convertible_v<T,typename MF::value_type>,int>>
388void
390{
391 const int ncomp = this->getNComp();
392 for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
393 for (int icomp = 0; icomp < ncomp; ++icomp) {
394 m_b_coeffs[amrlev][0][idim].setVal(RT(beta[icomp]));
395 }
396 }
397 m_needs_update = true;
398}
399
400template <typename MF>
401void
403{
406 }
407
408#if (AMREX_SPACEDIM != 3)
409 applyMetricTermsCoeffs();
410#endif
411
412 applyRobinBCTermsCoeffs();
413
414 averageDownCoeffs();
415
416 update_singular_flags();
417
418 m_needs_update = false;
419}
420
421template <typename MF>
422void
424{
425 BL_PROFILE("MLABecLaplacian::prepareForSolve()");
426
428
429#if (AMREX_SPACEDIM != 3)
430 applyMetricTermsCoeffs();
431#endif
432
433 applyRobinBCTermsCoeffs();
434
435 averageDownCoeffs();
436
437 update_singular_flags();
438
439 m_needs_update = false;
440}
441
442template <typename MF>
443void
445{
446#if (AMREX_SPACEDIM != 3)
447 for (int alev = 0; alev < this->m_num_amr_levels; ++alev)
448 {
449 const int mglev = 0;
450 this->applyMetricTerm(alev, mglev, m_a_coeffs[alev][mglev]);
451 for (int idim = 0; idim < AMREX_SPACEDIM; ++idim)
452 {
453 this->applyMetricTerm(alev, mglev, m_b_coeffs[alev][mglev][idim]);
454 }
455 }
456#endif
457}
458
459//
460// Suppose we are solving `alpha u - del (beta grad u) = rhs` (Scalar
461// coefficients can be easily added back in the end) and there is Robin BC
462// `a u + b du/dn = f` at the upper end of the x-direction. The 1D
463// discretization at the last cell i is
464//
465// alpha u_i + (beta_{i-1/2} (du/dx)_{i-1/2} - beta_{i+1/2} (du/dx)_{i+1/2}) / h = rhs_i
466//
467// where h is the cell size. At `i+1/2` (i.e., the boundary), we have
468//
469// a (u_i + u_{i+1})/2 + b (u_{i+1}-u_i)/h = f,
470//
471// according to the Robin BC. This gives
472//
473// u_{i+1} = A + B u_i,
474//
475// where `A = f/(b/h + a/2)` and `B = (b/h - a/2) / (b/h + a/2)`. We then
476// use `u_i` and `u_{i+1}` to compute `(du/dx)_{i+1/2}`. The discretization
477// at cell i then becomes
478//
479// \tilde{alpha}_i u_i + (beta_{i-1/2} (du/dx)_{i-1/2} - 0) / h = \tilde{rhs}_i
480//
481// This is equivalent to having homogeneous Neumann BC with modified alpha and rhs.
482//
483// \tilde{alpha}_i = alpha_i + (1-B) beta_{i+1/2} / h^2
484// \tilde{rhs}_i = rhs_i + A beta_{i+1/2} / h^2
485//
namespace detail {
// Fold Robin BC contributions into the operator's A coefficients.
//
// For each cell adjacent to a domain face with a Robin BC (a*u + b*du/dn = f),
// the boundary flux is eliminated and replaced by an equivalent modification
// of that cell's alpha coefficient:
//     alpha += (1-B) * beta_face / h^2,  B = (b/h - a/2) / (b/h + a/2)
// (see the derivation comment above this function in the file). Only the A
// coefficients are updated here; the rhs modification is handled elsewhere.
//
// Implemented as a free function template so any linear operator exposing the
// same members (m_a_scalar, m_a_coeffs, m_b_coeffs, m_robin_bcval, ...) can
// share it.
template <typename LP>
void applyRobinBCTermsCoeffs (LP& linop)
{
    using RT = typename LP::RT;

    const int ncomp = linop.getNComp();
    // If the scalar a is zero the stored A coefficients contribute nothing,
    // so treat a as 1 and rebuild the A coefficients from zero below.
    bool reset_alpha = false;
    if (linop.m_a_scalar == RT(0.0)) {
        linop.m_a_scalar = RT(1.0);
        reset_alpha = true;
    }
    const RT bovera = linop.m_b_scalar/linop.m_a_scalar;

    // When not resetting, the existing A coefficients are modified in place;
    // reusing the solver therefore requires freshly set scalars/coefficients.
    if (!reset_alpha) {
        AMREX_ALWAYS_ASSERT_WITH_MESSAGE(linop.m_scalars_set && linop.m_acoef_set,
                                         "To reuse solver With Robin BC, one must re-call setScalars (and setACoeffs if the scalar is not zero)");
    }

    // Mark the scalars/coefficients as consumed (enforced by the assert above
    // on the next call).
    linop.m_scalars_set = false;
    linop.m_acoef_set = false;

    for (int amrlev = 0; amrlev < linop.NAMRLevels(); ++amrlev) {
        const int mglev = 0; // only the finest MG level is modified here
        const Box& domain = linop.Geom(amrlev,mglev).Domain();
        const RT dxi = static_cast<RT>(linop.Geom(amrlev,mglev).InvCellSize(0));
        const RT dyi = static_cast<RT>((AMREX_SPACEDIM >= 2) ? linop.Geom(amrlev,mglev).InvCellSize(1) : Real(1.0));
        const RT dzi = static_cast<RT>((AMREX_SPACEDIM == 3) ? linop.Geom(amrlev,mglev).InvCellSize(2) : Real(1.0));

        if (reset_alpha) {
            linop.m_a_coeffs[amrlev][mglev].setVal(RT(0.0));
        }

        MFItInfo mfi_info;
        if (Gpu::notInLaunchRegion()) { mfi_info.SetDynamic(true); }

#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
        for (MFIter mfi(linop.m_a_coeffs[amrlev][mglev], mfi_info); mfi.isValid(); ++mfi)
        {
            const Box& vbx = mfi.validbox();
            auto const& afab = linop.m_a_coeffs[amrlev][mglev].array(mfi);
            for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
                auto const& bfab = linop.m_b_coeffs[amrlev][mglev][idim].const_array(mfi);
                // One layer of cells just outside the valid box on the low
                // and high sides in this direction.
                const Box& blo = amrex::adjCellLo(vbx,idim);
                const Box& bhi = amrex::adjCellHi(vbx,idim);
                bool outside_domain_lo = !(domain.contains(blo));
                bool outside_domain_hi = !(domain.contains(bhi));
                // Boxes that do not touch the physical domain boundary in
                // this direction need no Robin treatment.
                if ((!outside_domain_lo) && (!outside_domain_hi)) { continue; }
                for (int icomp = 0; icomp < ncomp; ++icomp) {
                    // Robin BC data is stored 3 components per solution
                    // component; only components 0 (a) and 1 (b) are read here.
                    auto const& rbc = (*(linop.m_robin_bcval[amrlev]))[mfi].const_array(icomp*3);
                    if (linop.m_lobc_orig[icomp][idim] == LinOpBCType::Robin && outside_domain_lo)
                    {
                        if (idim == 0) {
                            RT fac = bovera*dxi*dxi;
                            // (i,j,k) indexes the ghost cell; the adjacent
                            // interior cell (and its face b-coef) is at i+1.
                            AMREX_HOST_DEVICE_FOR_3D(blo, i, j, k,
                            {
                                RT B = (rbc(i,j,k,1)*dxi - rbc(i,j,k,0)*RT(0.5))
                                     / (rbc(i,j,k,1)*dxi + rbc(i,j,k,0)*RT(0.5));
                                afab(i+1,j,k,icomp) += fac*bfab(i+1,j,k,icomp)*(RT(1.0)-B);
                            });
                        } else if (idim == 1) {
                            RT fac = bovera*dyi*dyi;
                            AMREX_HOST_DEVICE_FOR_3D(blo, i, j, k,
                            {
                                RT B = (rbc(i,j,k,1)*dyi - rbc(i,j,k,0)*RT(0.5))
                                     / (rbc(i,j,k,1)*dyi + rbc(i,j,k,0)*RT(0.5));
                                afab(i,j+1,k,icomp) += fac*bfab(i,j+1,k,icomp)*(RT(1.0)-B);
                            });
                        } else {
                            RT fac = bovera*dzi*dzi;
                            AMREX_HOST_DEVICE_FOR_3D(blo, i, j, k,
                            {
                                RT B = (rbc(i,j,k,1)*dzi - rbc(i,j,k,0)*RT(0.5))
                                     / (rbc(i,j,k,1)*dzi + rbc(i,j,k,0)*RT(0.5));
                                afab(i,j,k+1,icomp) += fac*bfab(i,j,k+1,icomp)*(RT(1.0)-B);
                            });
                        }
                    }
                    if (linop.m_hibc_orig[icomp][idim] == LinOpBCType::Robin && outside_domain_hi)
                    {
                        if (idim == 0) {
                            RT fac = bovera*dxi*dxi;
                            // On the high side the face-centered b-coef shares
                            // the ghost cell's index (i), while the interior
                            // cell being updated is at i-1.
                            AMREX_HOST_DEVICE_FOR_3D(bhi, i, j, k,
                            {
                                RT B = (rbc(i,j,k,1)*dxi - rbc(i,j,k,0)*RT(0.5))
                                     / (rbc(i,j,k,1)*dxi + rbc(i,j,k,0)*RT(0.5));
                                afab(i-1,j,k,icomp) += fac*bfab(i,j,k,icomp)*(RT(1.0)-B);
                            });
                        } else if (idim == 1) {
                            RT fac = bovera*dyi*dyi;
                            AMREX_HOST_DEVICE_FOR_3D(bhi, i, j, k,
                            {
                                RT B = (rbc(i,j,k,1)*dyi - rbc(i,j,k,0)*RT(0.5))
                                     / (rbc(i,j,k,1)*dyi + rbc(i,j,k,0)*RT(0.5));
                                afab(i,j-1,k,icomp) += fac*bfab(i,j,k,icomp)*(RT(1.0)-B);
                            });
                        } else {
                            RT fac = bovera*dzi*dzi;
                            AMREX_HOST_DEVICE_FOR_3D(bhi, i, j, k,
                            {
                                RT B = (rbc(i,j,k,1)*dzi - rbc(i,j,k,0)*RT(0.5))
                                     / (rbc(i,j,k,1)*dzi + rbc(i,j,k,0)*RT(0.5));
                                afab(i,j,k-1,icomp) += fac*bfab(i,j,k,icomp)*(RT(1.0)-B);
                            });
                        }
                    }
                }
            }
        }
    }
}
} // namespace detail
602
603template <typename MF>
604void
606{
607 if (this->hasRobinBC()) {
608 detail::applyRobinBCTermsCoeffs(*this);
609 }
610}
611
612template <typename MF>
613void
615{
616 BL_PROFILE("MLABecLaplacian::averageDownCoeffs()");
617
618 for (int amrlev = this->m_num_amr_levels-1; amrlev > 0; --amrlev)
619 {
620 auto& fine_a_coeffs = m_a_coeffs[amrlev];
621 auto& fine_b_coeffs = m_b_coeffs[amrlev];
622
623 averageDownCoeffsSameAmrLevel(amrlev, fine_a_coeffs, fine_b_coeffs);
624 averageDownCoeffsToCoarseAmrLevel(amrlev);
625 }
626
627 averageDownCoeffsSameAmrLevel(0, m_a_coeffs[0], m_b_coeffs[0]);
628}
629
630template <typename MF>
631void
634{
635 int nmglevs = a.size();
636 for (int mglev = 1; mglev < nmglevs; ++mglev)
637 {
638 IntVect ratio = (amrlev > 0) ? IntVect(this->mg_coarsen_ratio) : this->mg_coarsen_ratio_vec[mglev-1];
639
640 if (m_a_scalar == 0.0)
641 {
642 a[mglev].setVal(RT(0.0));
643 }
644 else
645 {
646 amrex::average_down(a[mglev-1], a[mglev], 0, 1, ratio);
647 }
648
649 Vector<const MF*> fine {AMREX_D_DECL(&(b[mglev-1][0]),
650 &(b[mglev-1][1]),
651 &(b[mglev-1][2]))};
652 Vector<MF*> crse {AMREX_D_DECL(&(b[mglev][0]),
653 &(b[mglev][1]),
654 &(b[mglev][2]))};
655
657 }
658
659 for (int mglev = 1; mglev < nmglevs; ++mglev)
660 {
661 if (this->m_overset_mask[amrlev][mglev]) {
662 const RT fac = static_cast<RT>(1 << mglev); // 2**mglev
663 const RT osfac = RT(2.0)*fac/(fac+RT(1.0));
664 const int ncomp = this->getNComp();
665#ifdef AMREX_USE_OMP
666#pragma omp parallel if (Gpu::notInLaunchRegion())
667#endif
668 for (MFIter mfi(a[mglev],TilingIfNotGPU()); mfi.isValid(); ++mfi)
669 {
670 AMREX_D_TERM(Box const& xbx = mfi.nodaltilebox(0);,
671 Box const& ybx = mfi.nodaltilebox(1);,
672 Box const& zbx = mfi.nodaltilebox(2));
673 AMREX_D_TERM(auto const& bx = b[mglev][0].array(mfi);,
674 auto const& by = b[mglev][1].array(mfi);,
675 auto const& bz = b[mglev][2].array(mfi));
676 auto const& osm = this->m_overset_mask[amrlev][mglev]->const_array(mfi);
677#if defined(AMREX_USE_CUDA) && defined(_WIN32)
679 (xbx, t_xbx,
680 {
681 overset_rescale_bcoef_x(t_xbx, bx, osm, ncomp, osfac);
682 });
683#if (AMREX_SPACEDIM >= 2)
685 (ybx, t_ybx,
686 {
687 overset_rescale_bcoef_y(t_ybx, by, osm, ncomp, osfac);
688 });
689#endif
690#if (AMREX_SPACEDIM == 3)
692 (zbx, t_zbx,
693 {
694 overset_rescale_bcoef_z(t_zbx, bz, osm, ncomp, osfac);
695 });
696#endif
697#else
699 (xbx, t_xbx,
700 {
701 overset_rescale_bcoef_x(t_xbx, bx, osm, ncomp, osfac);
702 },
703 ybx, t_ybx,
704 {
705 overset_rescale_bcoef_y(t_ybx, by, osm, ncomp, osfac);
706 },
707 zbx, t_zbx,
708 {
709 overset_rescale_bcoef_z(t_zbx, bz, osm, ncomp, osfac);
710 });
711#endif
712 }
713 }
714 }
715}
716
717template <typename MF>
718void
720{
721 auto& fine_a_coeffs = m_a_coeffs[flev ].back();
722 auto& fine_b_coeffs = m_b_coeffs[flev ].back();
723 auto& crse_a_coeffs = m_a_coeffs[flev-1].front();
724 auto& crse_b_coeffs = m_b_coeffs[flev-1].front();
725
726 if (m_a_scalar != 0.0) {
727 // We coarsen from the back of flev to the front of flev-1.
728 // So we use mg_coarsen_ratio.
729 amrex::average_down(fine_a_coeffs, crse_a_coeffs, 0, 1, this->mg_coarsen_ratio);
730 }
731
733 amrex::GetArrOfPtrs(crse_b_coeffs),
734 IntVect(this->mg_coarsen_ratio),
735 this->m_geom[flev-1][0]);
736}
737
738template <typename MF>
739void
741{
742 m_is_singular.clear();
743 m_is_singular.resize(this->m_num_amr_levels, false);
744 auto itlo = std::find(this->m_lobc[0].begin(), this->m_lobc[0].end(), BCType::Dirichlet);
745 auto ithi = std::find(this->m_hibc[0].begin(), this->m_hibc[0].end(), BCType::Dirichlet);
746 if (itlo == this->m_lobc[0].end() && ithi == this->m_hibc[0].end())
747 { // No Dirichlet
748 for (int alev = 0; alev < this->m_num_amr_levels; ++alev)
749 {
750 // For now this assumes that overset regions are treated as Dirichlet bc's
751 if (this->m_domain_covered[alev] && !this->m_overset_mask[alev][0])
752 {
753 if (m_a_scalar == Real(0.0))
754 {
755 m_is_singular[alev] = true;
756 }
757 else
758 {
759 RT asum = m_a_coeffs[alev].back().sum(0,IntVect(0));
760 RT amax = m_a_coeffs[alev].back().norminf(0,1,IntVect(0));
761 m_is_singular[alev] = (std::abs(asum) <= amax * RT(1.e-12));
762 }
763 }
764 }
765 }
766
767 if (!m_is_singular[0] && this->m_needs_coarse_data_for_bc &&
768 this->m_coarse_fine_bc_type == LinOpBCType::Neumann)
769 {
770 AMREX_ASSERT(this->m_overset_mask[0][0] == nullptr);
771
772 bool lev0_a_is_zero = false;
773 if (m_a_scalar == Real(0.0)) {
774 lev0_a_is_zero = true;
775 } else {
776 RT asum = m_a_coeffs[0].back().sum(0,IntVect(0));
777 RT amax = m_a_coeffs[0].back().norminf(0,1,IntVect(0));
778 bool a_is_almost_zero = std::abs(asum) <= amax * RT(1.e-12);
779 if (a_is_almost_zero) { lev0_a_is_zero = true; }
780 }
781
782 if (lev0_a_is_zero) {
783 auto bbox = this->m_grids[0][0].minimalBox();
784 for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
785 if (this->m_lobc[0][idim] == LinOpBCType::Dirichlet) {
786 bbox.growLo(idim,1);
787 }
788 if (this->m_hibc[0][idim] == LinOpBCType::Dirichlet) {
789 bbox.growHi(idim,1);
790 }
791 }
792 if (this->m_geom[0][0].Domain().contains(bbox)) {
793 m_is_singular[0] = true;
794 }
795 }
796 }
797}
798
799template <typename MF>
800void
801MLABecLaplacianT<MF>::Fapply (int amrlev, int mglev, MF& out, const MF& in) const
802{
803 BL_PROFILE("MLABecLaplacian::Fapply()");
804
805 const MF& acoef = m_a_coeffs[amrlev][mglev];
806 AMREX_D_TERM(const MF& bxcoef = m_b_coeffs[amrlev][mglev][0];,
807 const MF& bycoef = m_b_coeffs[amrlev][mglev][1];,
808 const MF& bzcoef = m_b_coeffs[amrlev][mglev][2];);
809
811 {AMREX_D_DECL(static_cast<RT>(this->m_geom[amrlev][mglev].InvCellSize(0)),
812 static_cast<RT>(this->m_geom[amrlev][mglev].InvCellSize(1)),
813 static_cast<RT>(this->m_geom[amrlev][mglev].InvCellSize(2)))};
814
815 const RT ascalar = m_a_scalar;
816 const RT bscalar = m_b_scalar;
817
818 const int ncomp = this->getNComp();
819
820#ifdef AMREX_USE_GPU
821 if (Gpu::inLaunchRegion()) {
822 const auto& xma = in.const_arrays();
823 const auto& yma = out.arrays();
824 const auto& ama = acoef.arrays();
825 AMREX_D_TERM(const auto& bxma = bxcoef.const_arrays();,
826 const auto& byma = bycoef.const_arrays();,
827 const auto& bzma = bzcoef.const_arrays(););
828 if (this->m_overset_mask[amrlev][mglev]) {
829 const auto& osmma = this->m_overset_mask[amrlev][mglev]->const_arrays();
830 ParallelFor(out, IntVect(0), ncomp,
831 [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k, int n) noexcept
832 {
833 mlabeclap_adotx_os(i,j,k,n, yma[box_no], xma[box_no], ama[box_no],
834 AMREX_D_DECL(bxma[box_no],byma[box_no],bzma[box_no]),
835 osmma[box_no], dxinv, ascalar, bscalar);
836 });
837 } else {
838 ParallelFor(out, IntVect(0), ncomp,
839 [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k, int n) noexcept
840 {
841 mlabeclap_adotx(i,j,k,n, yma[box_no], xma[box_no], ama[box_no],
842 AMREX_D_DECL(bxma[box_no],byma[box_no],bzma[box_no]),
843 dxinv, ascalar, bscalar);
844 });
845 }
846 if (!Gpu::inNoSyncRegion()) {
848 }
849 } else
850#endif
851 {
852#ifdef AMREX_USE_OMP
853#pragma omp parallel if (Gpu::notInLaunchRegion())
854#endif
855 for (MFIter mfi(out, TilingIfNotGPU()); mfi.isValid(); ++mfi)
856 {
857 const Box& bx = mfi.tilebox();
858 const auto& xfab = in.array(mfi);
859 const auto& yfab = out.array(mfi);
860 const auto& afab = acoef.array(mfi);
861 AMREX_D_TERM(const auto& bxfab = bxcoef.array(mfi);,
862 const auto& byfab = bycoef.array(mfi);,
863 const auto& bzfab = bzcoef.array(mfi););
864 if (this->m_overset_mask[amrlev][mglev]) {
865 const auto& osm = this->m_overset_mask[amrlev][mglev]->const_array(mfi);
866 AMREX_HOST_DEVICE_PARALLEL_FOR_4D(bx, ncomp, i, j, k, n,
867 {
868 mlabeclap_adotx_os(i,j,k,n, yfab, xfab, afab, AMREX_D_DECL(bxfab,byfab,bzfab),
869 osm, dxinv, ascalar, bscalar);
870 });
871 } else {
872 AMREX_HOST_DEVICE_PARALLEL_FOR_4D(bx, ncomp, i, j, k, n,
873 {
874 mlabeclap_adotx(i,j,k,n, yfab, xfab, afab, AMREX_D_DECL(bxfab,byfab,bzfab),
875 dxinv, ascalar, bscalar);
876 });
877 }
878 }
879 }
880}
881
882template <typename MF>
883void
884MLABecLaplacianT<MF>::Fsmooth (int amrlev, int mglev, MF& sol, const MF& rhs, int redblack) const
885{
886 BL_PROFILE("MLABecLaplacian::Fsmooth()");
887
888 bool regular_coarsening = true;
889 if (amrlev == 0 && mglev > 0) {
890 regular_coarsening = this->mg_coarsen_ratio_vec[mglev-1] == this->mg_coarsen_ratio;
891 }
892
893 MF Ax;
894 if (! this->m_use_gauss_seidel && regular_coarsening) { // jacobi
895 Ax.define(sol.boxArray(), sol.DistributionMap(), sol.nComp(), 0);
896 Fapply(amrlev, mglev, Ax, sol);
897 }
898
899 const MF& acoef = m_a_coeffs[amrlev][mglev];
900 AMREX_ALWAYS_ASSERT(acoef.nGrowVect() == 0);
901 AMREX_D_TERM(const MF& bxcoef = m_b_coeffs[amrlev][mglev][0];,
902 const MF& bycoef = m_b_coeffs[amrlev][mglev][1];,
903 const MF& bzcoef = m_b_coeffs[amrlev][mglev][2];);
904 const auto& undrrelxr = this->m_undrrelxr[amrlev][mglev];
905 const auto& maskvals = this->m_maskvals [amrlev][mglev];
906
907 OrientationIter oitr;
908
909 const auto& f0 = undrrelxr[oitr()]; ++oitr;
910 const auto& f1 = undrrelxr[oitr()]; ++oitr;
911#if (AMREX_SPACEDIM > 1)
912 const auto& f2 = undrrelxr[oitr()]; ++oitr;
913 const auto& f3 = undrrelxr[oitr()]; ++oitr;
914#if (AMREX_SPACEDIM > 2)
915 const auto& f4 = undrrelxr[oitr()]; ++oitr;
916 const auto& f5 = undrrelxr[oitr()]; ++oitr;
917#endif
918#endif
919
920 const MultiMask& mm0 = maskvals[0];
921 const MultiMask& mm1 = maskvals[1];
922#if (AMREX_SPACEDIM > 1)
923 const MultiMask& mm2 = maskvals[2];
924 const MultiMask& mm3 = maskvals[3];
925#if (AMREX_SPACEDIM > 2)
926 const MultiMask& mm4 = maskvals[4];
927 const MultiMask& mm5 = maskvals[5];
928#endif
929#endif
930
931 const int nc = this->getNComp();
932 const Real* h = this->m_geom[amrlev][mglev].CellSize();
933 AMREX_D_TERM(const RT dhx = m_b_scalar/static_cast<RT>(h[0]*h[0]);,
934 const RT dhy = m_b_scalar/static_cast<RT>(h[1]*h[1]);,
935 const RT dhz = m_b_scalar/static_cast<RT>(h[2]*h[2]));
936 const RT alpha = m_a_scalar;
937
938#ifdef AMREX_USE_GPU
940 && (this->m_overset_mask[amrlev][mglev] || regular_coarsening))
941 {
942 const auto& m0ma = mm0.const_arrays();
943 const auto& m1ma = mm1.const_arrays();
944#if (AMREX_SPACEDIM > 1)
945 const auto& m2ma = mm2.const_arrays();
946 const auto& m3ma = mm3.const_arrays();
947#if (AMREX_SPACEDIM > 2)
948 const auto& m4ma = mm4.const_arrays();
949 const auto& m5ma = mm5.const_arrays();
950#endif
951#endif
952
953 const auto& solnma = sol.arrays();
954 const auto& rhsma = rhs.const_arrays();
955 const auto& ama = acoef.const_arrays();
956
957 AMREX_D_TERM(const auto& bxma = bxcoef.const_arrays();,
958 const auto& byma = bycoef.const_arrays();,
959 const auto& bzma = bzcoef.const_arrays(););
960
961 const auto& f0ma = f0.const_arrays();
962 const auto& f1ma = f1.const_arrays();
963#if (AMREX_SPACEDIM > 1)
964 const auto& f2ma = f2.const_arrays();
965 const auto& f3ma = f3.const_arrays();
966#if (AMREX_SPACEDIM > 2)
967 const auto& f4ma = f4.const_arrays();
968 const auto& f5ma = f5.const_arrays();
969#endif
970#endif
971
972 if (this->m_overset_mask[amrlev][mglev]) {
973 const auto& osmma = this->m_overset_mask[amrlev][mglev]->const_arrays();
974 if (this->m_use_gauss_seidel) {
975 ParallelFor(sol, IntVect(0), nc,
976 [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k, int n) noexcept
977 {
978 Box vbx(ama[box_no]);
979 abec_gsrb_os(i,j,k,n, solnma[box_no], rhsma[box_no], alpha, ama[box_no],
980 AMREX_D_DECL(dhx, dhy, dhz),
981 AMREX_D_DECL(bxma[box_no],byma[box_no],bzma[box_no]),
982 AMREX_D_DECL(m0ma[box_no],m2ma[box_no],m4ma[box_no]),
983 AMREX_D_DECL(m1ma[box_no],m3ma[box_no],m5ma[box_no]),
984 AMREX_D_DECL(f0ma[box_no],f2ma[box_no],f4ma[box_no]),
985 AMREX_D_DECL(f1ma[box_no],f3ma[box_no],f5ma[box_no]),
986 osmma[box_no], vbx, redblack);
987 });
988 } else {
989 const auto& axma = Ax.const_arrays();
990 ParallelFor(sol, IntVect(0), nc,
991 [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k, int n) noexcept
992 {
993 Box vbx(ama[box_no]);
994 abec_jacobi_os(i,j,k,n, solnma[box_no], rhsma[box_no], axma[box_no],
995 alpha, ama[box_no],
996 AMREX_D_DECL(dhx, dhy, dhz),
997 AMREX_D_DECL(bxma[box_no],byma[box_no],bzma[box_no]),
998 AMREX_D_DECL(m0ma[box_no],m2ma[box_no],m4ma[box_no]),
999 AMREX_D_DECL(m1ma[box_no],m3ma[box_no],m5ma[box_no]),
1000 AMREX_D_DECL(f0ma[box_no],f2ma[box_no],f4ma[box_no]),
1001 AMREX_D_DECL(f1ma[box_no],f3ma[box_no],f5ma[box_no]),
1002 osmma[box_no], vbx);
1003 });
1004 }
1005 } else if (regular_coarsening) {
1006 if (this->m_use_gauss_seidel) {
1007 ParallelFor(sol, IntVect(0), nc,
1008 [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k, int n) noexcept
1009 {
1010 Box vbx(ama[box_no]);
1011 abec_gsrb(i,j,k,n, solnma[box_no], rhsma[box_no], alpha, ama[box_no],
1012 AMREX_D_DECL(dhx, dhy, dhz),
1013 AMREX_D_DECL(bxma[box_no],byma[box_no],bzma[box_no]),
1014 AMREX_D_DECL(m0ma[box_no],m2ma[box_no],m4ma[box_no]),
1015 AMREX_D_DECL(m1ma[box_no],m3ma[box_no],m5ma[box_no]),
1016 AMREX_D_DECL(f0ma[box_no],f2ma[box_no],f4ma[box_no]),
1017 AMREX_D_DECL(f1ma[box_no],f3ma[box_no],f5ma[box_no]),
1018 vbx, redblack);
1019 });
1020 } else {
1021 const auto& axma = Ax.const_arrays();
1022 ParallelFor(sol, IntVect(0), nc,
1023 [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k, int n) noexcept
1024 {
1025 Box vbx(ama[box_no]);
1026 abec_jacobi(i,j,k,n, solnma[box_no], rhsma[box_no], axma[box_no],
1027 alpha, ama[box_no],
1028 AMREX_D_DECL(dhx, dhy, dhz),
1029 AMREX_D_DECL(bxma[box_no],byma[box_no],bzma[box_no]),
1030 AMREX_D_DECL(m0ma[box_no],m2ma[box_no],m4ma[box_no]),
1031 AMREX_D_DECL(m1ma[box_no],m3ma[box_no],m5ma[box_no]),
1032 AMREX_D_DECL(f0ma[box_no],f2ma[box_no],f4ma[box_no]),
1033 AMREX_D_DECL(f1ma[box_no],f3ma[box_no],f5ma[box_no]),
1034 vbx);
1035 });
1036 }
1037 }
1038 if (Ax.local_size() > 0 || !Gpu::inNoSyncRegion()) {
1040 }
1041 } else
1042#endif
1043 {
1044 MFItInfo mfi_info;
1045 mfi_info.EnableTiling().SetDynamic(true);
1046
1047#ifdef AMREX_USE_OMP
1048#pragma omp parallel if (Gpu::notInLaunchRegion())
1049#endif
1050 for (MFIter mfi(sol,mfi_info); mfi.isValid(); ++mfi)
1051 {
1052 const auto& m0 = mm0.array(mfi);
1053 const auto& m1 = mm1.array(mfi);
1054#if (AMREX_SPACEDIM > 1)
1055 const auto& m2 = mm2.array(mfi);
1056 const auto& m3 = mm3.array(mfi);
1057#if (AMREX_SPACEDIM > 2)
1058 const auto& m4 = mm4.array(mfi);
1059 const auto& m5 = mm5.array(mfi);
1060#endif
1061#endif
1062
1063 const Box& tbx = mfi.tilebox();
1064 const Box& vbx = mfi.validbox();
1065 const auto& solnfab = sol.array(mfi);
1066 const auto& rhsfab = rhs.const_array(mfi);
1067 const auto& afab = acoef.const_array(mfi);
1068
1069 AMREX_D_TERM(const auto& bxfab = bxcoef.const_array(mfi);,
1070 const auto& byfab = bycoef.const_array(mfi);,
1071 const auto& bzfab = bzcoef.const_array(mfi););
1072
1073 const auto& f0fab = f0.const_array(mfi);
1074 const auto& f1fab = f1.const_array(mfi);
1075#if (AMREX_SPACEDIM > 1)
1076 const auto& f2fab = f2.const_array(mfi);
1077 const auto& f3fab = f3.const_array(mfi);
1078#if (AMREX_SPACEDIM > 2)
1079 const auto& f4fab = f4.const_array(mfi);
1080 const auto& f5fab = f5.const_array(mfi);
1081#endif
1082#endif
1083
1084 if (this->m_overset_mask[amrlev][mglev]) {
1085 const auto& osm = this->m_overset_mask[amrlev][mglev]->const_array(mfi);
1086 if (this->m_use_gauss_seidel) {
1087 AMREX_LOOP_4D(tbx, nc, i, j, k, n,
1088 {
1089 abec_gsrb_os(i,j,k,n, solnfab, rhsfab, alpha, afab,
1090 AMREX_D_DECL(dhx, dhy, dhz),
1091 AMREX_D_DECL(bxfab, byfab, bzfab),
1092 AMREX_D_DECL(m0,m2,m4),
1093 AMREX_D_DECL(m1,m3,m5),
1094 AMREX_D_DECL(f0fab,f2fab,f4fab),
1095 AMREX_D_DECL(f1fab,f3fab,f5fab),
1096 osm, vbx, redblack);
1097 });
1098 } else {
1099 const auto& axfab = Ax.const_array(mfi);
1100 AMREX_LOOP_4D(tbx, nc, i, j, k, n,
1101 {
1102 abec_jacobi_os(i,j,k,n, solnfab, rhsfab, axfab,
1103 alpha, afab,
1104 AMREX_D_DECL(dhx, dhy, dhz),
1105 AMREX_D_DECL(bxfab, byfab, bzfab),
1106 AMREX_D_DECL(m0,m2,m4),
1107 AMREX_D_DECL(m1,m3,m5),
1108 AMREX_D_DECL(f0fab,f2fab,f4fab),
1109 AMREX_D_DECL(f1fab,f3fab,f5fab),
1110 osm, vbx);
1111 });
1112 }
1113 } else if (regular_coarsening) {
1114 if (this->m_use_gauss_seidel) {
1115 AMREX_LOOP_4D(tbx, nc, i, j, k, n,
1116 {
1117 abec_gsrb(i,j,k,n, solnfab, rhsfab, alpha, afab,
1118 AMREX_D_DECL(dhx, dhy, dhz),
1119 AMREX_D_DECL(bxfab, byfab, bzfab),
1120 AMREX_D_DECL(m0,m2,m4),
1121 AMREX_D_DECL(m1,m3,m5),
1122 AMREX_D_DECL(f0fab,f2fab,f4fab),
1123 AMREX_D_DECL(f1fab,f3fab,f5fab),
1124 vbx, redblack);
1125 });
1126 } else {
1127 const auto& axfab = Ax.const_array(mfi);
1128 AMREX_LOOP_4D(tbx, nc, i, j, k, n,
1129 {
1130 abec_jacobi(i,j,k,n, solnfab, rhsfab, axfab,
1131 alpha, afab,
1132 AMREX_D_DECL(dhx, dhy, dhz),
1133 AMREX_D_DECL(bxfab, byfab, bzfab),
1134 AMREX_D_DECL(m0,m2,m4),
1135 AMREX_D_DECL(m1,m3,m5),
1136 AMREX_D_DECL(f0fab,f2fab,f4fab),
1137 AMREX_D_DECL(f1fab,f3fab,f5fab),
1138 vbx);
1139 });
1140 }
1141 } else {
1142                // line solve does not work with GPU
1143 abec_gsrb_with_line_solve(tbx, solnfab, rhsfab, alpha, afab,
1144 AMREX_D_DECL(dhx, dhy, dhz),
1145 AMREX_D_DECL(bxfab, byfab, bzfab),
1146 AMREX_D_DECL(m0,m2,m4),
1147 AMREX_D_DECL(m1,m3,m5),
1148 AMREX_D_DECL(f0fab,f2fab,f4fab),
1149 AMREX_D_DECL(f1fab,f3fab,f5fab),
1150 vbx, redblack, nc);
1151 }
1152 }
1153 }
1154}
1155
// Per-box flux computation: fills `flux` with the beta-weighted face fluxes
// (-b * bscalar * grad(sol) up to the scaling applied inside the static
// overload) for the tile of `mfi` on AMR level `amrlev`. Always uses MG
// level 0 (the original, uncoarsened coefficients). The Location argument is
// accepted for interface compatibility but unused here.
1156template <typename MF>
1157void
1158MLABecLaplacianT<MF>::FFlux (int amrlev, const MFIter& mfi,
1159 const Array<FAB*,AMREX_SPACEDIM>& flux,
1160 const FAB& sol, Location, int face_only) const
1161{
1162 BL_PROFILE("MLABecLaplacian::FFlux()");
1163
// mglev = 0: fluxes are defined on the finest MG level of this AMR level.
1164 const int mglev = 0;
1165 const Box& box = mfi.tilebox();
1166 const Real* dxinv = this->m_geom[amrlev][mglev].InvCellSize();
1167 const int ncomp = this->getNComp();
// Delegate to the static overload, handing it this box's face-centered
// b-coefficients (one FAB per spatial direction via AMREX_D_DECL).
1168 FFlux(box, dxinv, m_b_scalar,
1169 Array<FAB const*,AMREX_SPACEDIM>{{AMREX_D_DECL(&(m_b_coeffs[amrlev][mglev][0][mfi]),
1170 &(m_b_coeffs[amrlev][mglev][1][mfi]),
1171 &(m_b_coeffs[amrlev][mglev][2][mfi]))}},
1172 flux, sol, face_only, ncomp);
1173}
1174
// Static flux kernel driver: computes b-weighted face fluxes of `sol` on `box`.
// fac = bscalar/dx[d] in each direction d. When `face_only` is nonzero only the
// two boundary faces of each direction are filled (bdryLo + an offset of
// blen = box.length(d) inside the *_xface/_yface/_zface kernels); otherwise all
// faces (surroundingNodes) are filled by the *_x/_y/_z kernels.
//
// NOTE(review): this extracted listing has dropped several hyperlinked lines:
// the parameter line 1178 (presumably `Array<FAB const*,AMREX_SPACEDIM> const& bcoef,`,
// which the body clearly uses) and the GPU/CPU launch lines 1195/1203/1212/1222/1229/1237
// (presumably `AMREX_LAUNCH_HOST_DEVICE_LAMBDA_DIM(blo, tbox,` resp. `(bflux, tbox,`)
// that open each `{ ... });` lambda below. Verify against upstream AMReX before use.
1175template <typename MF>
1176void
1177MLABecLaplacianT<MF>::FFlux (Box const& box, Real const* dxinv, RT bscalar,
1179 Array<FAB*,AMREX_SPACEDIM> const& flux,
1180 FAB const& sol, int face_only, int ncomp)
1181{
// Read-only views of the face b-coefficients and writable views of the fluxes,
// one per direction (AMREX_D_TERM compiles away unused dimensions).
1182 AMREX_D_TERM(const auto bx = bcoef[0]->const_array();,
1183 const auto by = bcoef[1]->const_array();,
1184 const auto bz = bcoef[2]->const_array(););
1185 AMREX_D_TERM(const auto& fxarr = flux[0]->array();,
1186 const auto& fyarr = flux[1]->array();,
1187 const auto& fzarr = flux[2]->array(););
1188 const auto& solarr = sol.array();
1189
1190 if (face_only)
1191 {
// Boundary faces only: launch over the low boundary face box; `blen` lets the
// kernel also address the matching high face.
1192 RT fac = bscalar*static_cast<RT>(dxinv[0]);
1193 Box blo = amrex::bdryLo(box, 0);
1194 int blen = box.length(0);
1196 {
1197 mlabeclap_flux_xface(tbox, fxarr, solarr, bx, fac, blen, ncomp);
1198 });
1199#if (AMREX_SPACEDIM >= 2)
1200 fac = bscalar*static_cast<RT>(dxinv[1]);
1201 blo = amrex::bdryLo(box, 1);
1202 blen = box.length(1);
1204 {
1205 mlabeclap_flux_yface(tbox, fyarr, solarr, by, fac, blen, ncomp);
1206 });
1207#endif
1208#if (AMREX_SPACEDIM == 3)
1209 fac = bscalar*static_cast<RT>(dxinv[2]);
1210 blo = amrex::bdryLo(box, 2);
1211 blen = box.length(2);
1213 {
1214 mlabeclap_flux_zface(tbox, fzarr, solarr, bz, fac, blen, ncomp);
1215 });
1216#endif
1217 }
1218 else
1219 {
// All faces: launch over the node-centered (in direction d) box enclosing `box`.
1220 RT fac = bscalar*static_cast<RT>(dxinv[0]);
1221 Box bflux = amrex::surroundingNodes(box, 0);
1223 {
1224 mlabeclap_flux_x(tbox, fxarr, solarr, bx, fac, ncomp);
1225 });
1226#if (AMREX_SPACEDIM >= 2)
1227 fac = bscalar*static_cast<RT>(dxinv[1]);
1228 bflux = amrex::surroundingNodes(box, 1);
1230 {
1231 mlabeclap_flux_y(tbox, fyarr, solarr, by, fac, ncomp);
1232 });
1233#endif
1234#if (AMREX_SPACEDIM == 3)
1235 fac = bscalar*static_cast<RT>(dxinv[2]);
1236 bflux = amrex::surroundingNodes(box, 2);
1238 {
1239 mlabeclap_flux_z(tbox, fzarr, solarr, bz, fac, ncomp);
1240 });
1241#endif
1242 }
1243}
1244
// Divide `mf` by the diagonal of the operator (alpha*a - beta*div(b grad))
// pointwise, via the mlabeclap_normalize kernel. Used by BiCGStab as a simple
// diagonal preconditioner/scaling. Two execution paths: a fused multi-box GPU
// ParallelFor when in a GPU launch region, else a (possibly OpenMP-threaded)
// tiled MFIter loop on the host.
1245template <typename MF>
1246void
1247MLABecLaplacianT<MF>::normalize (int amrlev, int mglev, MF& mf) const
1248{
1249 BL_PROFILE("MLABecLaplacian::normalize()");
1250
// Cell-centered a and face-centered b coefficients for this (amrlev, mglev).
1251 const auto& acoef = m_a_coeffs[amrlev][mglev];
1252 AMREX_D_TERM(const auto& bxcoef = m_b_coeffs[amrlev][mglev][0];,
1253 const auto& bycoef = m_b_coeffs[amrlev][mglev][1];,
1254 const auto& bzcoef = m_b_coeffs[amrlev][mglev][2];);
1255
// 1/dx per direction, cast to the matrix real type RT for the kernels.
1256 const GpuArray<RT,AMREX_SPACEDIM> dxinv
1257 {AMREX_D_DECL(static_cast<RT>(this->m_geom[amrlev][mglev].InvCellSize(0)),
1258 static_cast<RT>(this->m_geom[amrlev][mglev].InvCellSize(1)),
1259 static_cast<RT>(this->m_geom[amrlev][mglev].InvCellSize(2)))};
1260
// Local copies so the device lambda captures scalars by value, not `this`.
1261 const RT ascalar = m_a_scalar;
1262 const RT bscalar = m_b_scalar;
1263
1264 const int ncomp = getNComp();
1265
1266#ifdef AMREX_USE_GPU
1267 if (Gpu::inLaunchRegion()) {
1268 const auto& ma = mf.arrays();
1269 const auto& ama = acoef.const_arrays();
1270 AMREX_D_TERM(const auto& bxma = bxcoef.const_arrays();,
1271 const auto& byma = bycoef.const_arrays();,
1272 const auto& bzma = bzcoef.const_arrays(););
1273 ParallelFor(mf, IntVect(0), ncomp,
1274 [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k, int n) noexcept
1275 {
1276 mlabeclap_normalize(i,j,k,n, ma[box_no], ama[box_no],
1277 AMREX_D_DECL(bxma[box_no],byma[box_no],bzma[box_no]),
1278 dxinv, ascalar, bscalar);
1279 });
1280 if (!Gpu::inNoSyncRegion()) {
// NOTE(review): line 1281 is missing from this extracted listing — presumably
// `Gpu::streamSynchronize();` (it was a hyperlink in the generated page).
1282 }
1283 } else
1284#endif
1285 {
1286#ifdef AMREX_USE_OMP
1287#pragma omp parallel if (Gpu::notInLaunchRegion())
1288#endif
1289 for (MFIter mfi(mf, TilingIfNotGPU()); mfi.isValid(); ++mfi)
1290 {
1291 const Box& bx = mfi.tilebox();
1292 const auto& fab = mf.array(mfi);
1293 const auto& afab = acoef.array(mfi);
1294 AMREX_D_TERM(const auto& bxfab = bxcoef.array(mfi);,
1295 const auto& byfab = bycoef.array(mfi);,
1296 const auto& bzfab = bzcoef.array(mfi););
1297
1298 AMREX_HOST_DEVICE_PARALLEL_FOR_4D(bx, ncomp, i, j, k, n,
1299 {
1300 mlabeclap_normalize(i,j,k,n, fab, afab, AMREX_D_DECL(bxfab,byfab,bzfab),
1301 dxinv, ascalar, bscalar);
1302 });
1303 }
1304 }
1305}
1306
// Whether the N-Solve bottom solver can be used: only for overset-mask
// problems, and only when both the problem domain and the bottom grids are
// still coarsenable by mg_coarsen_ratio (subject to the minimum domain/box
// widths).
// NOTE(review): the signature line 1309 (presumably
// `MLABecLaplacianT<MF>::supportNSolve () const`) is missing from this
// extracted listing — it was a hyperlink in the generated page.
1307template <typename MF>
1308bool
1310{
1311 bool support = false;
// m_overset_mask[0][0] non-null <=> this operator was defined with an overset mask.
1312 if (this->m_overset_mask[0][0]) {
1313 if (this->m_geom[0].back().Domain().coarsenable(MLLinOp::mg_coarsen_ratio,
1314 this->mg_domain_min_width)
1315 && this->m_grids[0].back().coarsenable(MLLinOp::mg_coarsen_ratio, MLLinOp::mg_box_min_width))
1316 {
1317 support = true;
1318 }
1319 }
1320 return support;
1321}
1322
// Build the single-level "N-Solve" bottom operator for overset problems:
// a new MLABecLaplacianT on the bottom geometry/grids of AMR level 0, with the
// same scalars, BCs and b-coefficients, but with the a-coefficient replaced by
// `huge_alpha` outside the overset mask so that masked-out (known) cells are
// pinned to ~0 by the solve. Returns nullptr when there is no overset mask or
// the dynamic_cast sanity check fails.
1323template <typename MF>
1324std::unique_ptr<MLLinOpT<MF>>
1325MLABecLaplacianT<MF>::makeNLinOp (int /*grid_size*/) const
1326{
1327 if (this->m_overset_mask[0][0] == nullptr) { return nullptr; }
1328
// Bottom (coarsest) geometry, grids and distribution of AMR level 0.
1329 const Geometry& geom = this->m_geom[0].back();
1330 const BoxArray& ba = this->m_grids[0].back();
1331 const DistributionMapping& dm = this->m_dmap[0].back();
1332
1333 std::unique_ptr<MLLinOpT<MF>> r
1334 {new MLABecLaplacianT<MF>({geom}, {ba}, {dm}, this->m_lpinfo_arg)};
1335
1336 auto nop = dynamic_cast<MLABecLaplacianT<MF>*>(r.get());
1337 if (!nop) {
1338 return nullptr;
1339 }
1340
1341 nop->m_parent = this;
1342
// Propagate solver configuration to the child operator.
1343 nop->setMaxOrder(this->maxorder);
1344 nop->setVerbose(this->verbose);
1345
1346 nop->setDomainBC(this->m_lobc, this->m_hibc);
1347
1348 if (this->needsCoarseDataForBC())
1349 {
// Coarse/fine BC data lives at the crse cell centers: offset by half a
// coarse cell (crse_ratio * dx_fine / 2) from the fine origin.
1350 const Real* dx0 = this->m_geom[0][0].CellSize();
1351 RealVect fac(this->m_coarse_data_crse_ratio);
1352 fac *= Real(0.5);
1353 RealVect cbloc {AMREX_D_DECL(dx0[0]*fac[0], dx0[1]*fac[1], dx0[2]*fac[2])};
1354 nop->setCoarseFineBCLocation(cbloc);
1355 }
1356
1357 nop->setScalars(m_a_scalar, m_b_scalar);
1358
1359 MF const& alpha_bottom = m_a_coeffs[0].back();
1360 iMultiFab const& osm_bottom = *(this->m_overset_mask[0].back());
1361 const int ncomp = alpha_bottom.nComp();
1362 MF alpha(ba, dm, ncomp, 0);
1363
// Scale huge_alpha off the largest operator entries so it dominates the
// diagonal regardless of the problem's magnitude (norminf with local+ignore
// flags; reduced below).
1364 RT a_max = alpha_bottom.norminf(0, ncomp, IntVect(0), true, true);
1365 const int ncomp_b = m_b_coeffs[0].back()[0].nComp();
1366 AMREX_D_TERM(RT bx_max = m_b_coeffs[0].back()[0].norminf(0,ncomp_b,IntVect(0),true,true);,
1367 RT by_max = m_b_coeffs[0].back()[1].norminf(0,ncomp_b,IntVect(0),true,true);,
1368 RT bz_max = m_b_coeffs[0].back()[2].norminf(0,ncomp_b,IntVect(0),true,true));
1369 const GpuArray<RT,AMREX_SPACEDIM> dxinv
1370 {AMREX_D_DECL(static_cast<RT>(geom.InvCellSize(0)),
1371 static_cast<RT>(geom.InvCellSize(1)),
1372 static_cast<RT>(geom.InvCellSize(2)))};
1373 RT huge_alpha = RT(1.e30) *
1374 amrex::max(a_max*std::abs(m_a_scalar),
1375 AMREX_D_DECL(std::abs(m_b_scalar)*bx_max*dxinv[0]*dxinv[0],
1376 std::abs(m_b_scalar)*by_max*dxinv[1]*dxinv[1],
1377 std::abs(m_b_scalar)*bz_max*dxinv[2]*dxinv[2]));
// NOTE(review): line 1378 is missing from this extracted listing — presumably a
// parallel max reduction of huge_alpha over CommunicatorSub() (it was a
// hyperlink in the generated page; the index below references ParallelReduce
// Max and ParallelContext::CommunicatorSub). Verify against upstream AMReX.
1379
// Fill alpha: copy the real a-coefficient where the mask is set (unknown
// cells), huge_alpha elsewhere (known cells).
1380#ifdef AMREX_USE_GPU
1381 if (Gpu::inLaunchRegion() && alpha.isFusingCandidate()) {
1382 auto const& ama = alpha.arrays();
1383 auto const& abotma = alpha_bottom.const_arrays();
1384 auto const& mma = osm_bottom.const_arrays();
1385 ParallelFor(alpha, IntVect(0), ncomp,
1386 [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k, int n) noexcept
1387 {
1388 if (mma[box_no](i,j,k)) {
1389 ama[box_no](i,j,k,n) = abotma[box_no](i,j,k,n);
1390 } else {
1391 ama[box_no](i,j,k,n) = huge_alpha;
1392 }
1393 });
1394 if (alpha.local_size() > 0 || !Gpu::inNoSyncRegion()) {
// NOTE(review): line 1395 is missing from this extracted listing — presumably
// `Gpu::streamSynchronize();` (it was a hyperlink in the generated page).
1396 }
1397 } else
1398#endif
1399 {
1400#ifdef AMREX_USE_OMP
1401#pragma omp parallel if (Gpu::notInLaunchRegion())
1402#endif
1403 for (MFIter mfi(alpha,TilingIfNotGPU()); mfi.isValid(); ++mfi) {
1404 Box const& bx = mfi.tilebox();
1405 auto const& a = alpha.array(mfi);
1406 auto const& abot = alpha_bottom.const_array(mfi);
1407 auto const& m = osm_bottom.const_array(mfi);
1408 AMREX_HOST_DEVICE_PARALLEL_FOR_4D(bx, ncomp, i, j, k, n,
1409 {
1410 if (m(i,j,k)) {
1411 a(i,j,k,n) = abot(i,j,k,n);
1412 } else {
1413 a(i,j,k,n) = huge_alpha;
1414 }
1415 });
1416 }
1417 }
1418
// b-coefficients are shared unchanged with the child operator.
1419 nop->setACoeffs(0, alpha);
1420 nop->setBCoeffs(0, GetArrOfConstPtrs(m_b_coeffs[0].back()));
1421
1422 return r;
1423}
1424
// Copy the N-Solve bottom solution `src` into `dst`, but only where the
// overset mask is set (unknown cells); masked-out (known) cells of `dst` are
// zeroed. No-op when there is no overset mask at the bottom of level 0.
// Same GPU-fused / host-MFIter dual-path structure as normalize().
1425template <typename MF>
1426void
1427MLABecLaplacianT<MF>::copyNSolveSolution (MF& dst, MF const& src) const
1428{
1429 if (this->m_overset_mask[0].back() == nullptr) { return; }
1430
1431 const int ncomp = dst.nComp();
1432
1433#ifdef AMREX_USE_GPU
1434 if (Gpu::inLaunchRegion() && dst.isFusingCandidate()) {
1435 auto const& dstma = dst.arrays();
1436 auto const& srcma = src.const_arrays();
1437 auto const& mma = this->m_overset_mask[0].back()->const_arrays();
1438 ParallelFor(dst, IntVect(0), ncomp,
1439 [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k, int n) noexcept
1440 {
1441 if (mma[box_no](i,j,k)) {
1442 dstma[box_no](i,j,k,n) = srcma[box_no](i,j,k,n);
1443 } else {
1444 dstma[box_no](i,j,k,n) = RT(0.0);
1445 }
1446 });
1447 if (!Gpu::inNoSyncRegion()) {
// NOTE(review): line 1448 is missing from this extracted listing — presumably
// `Gpu::streamSynchronize();` (it was a hyperlink in the generated page).
1449 }
1450 } else
1451#endif
1452 {
1453#ifdef AMREX_USE_OMP
1454#pragma omp parallel if (Gpu::notInLaunchRegion())
1455#endif
1456 for (MFIter mfi(dst,TilingIfNotGPU()); mfi.isValid(); ++mfi) {
1457 Box const& bx = mfi.tilebox();
1458 auto const& dfab = dst.array(mfi);
1459 auto const& sfab = src.const_array(mfi);
1460 auto const& m = this->m_overset_mask[0].back()->const_array(mfi);
1461 AMREX_HOST_DEVICE_PARALLEL_FOR_4D(bx, ncomp, i, j, k, n,
1462 {
1463 if (m(i,j,k)) {
1464 dfab(i,j,k,n) = sfab(i,j,k,n);
1465 } else {
1466 dfab(i,j,k,n) = RT(0.0);
1467 }
1468 });
1469 }
1470 }
1471}
1472
1473extern template class MLABecLaplacianT<MultiFab>;
1474
1477
1478}
1479
1480#endif
#define BL_PROFILE(a)
Definition AMReX_BLProfiler.H:551
#define AMREX_ALWAYS_ASSERT_WITH_MESSAGE(EX, MSG)
Definition AMReX_BLassert.H:49
#define AMREX_ASSERT_WITH_MESSAGE(EX, MSG)
Definition AMReX_BLassert.H:37
#define AMREX_ASSERT(EX)
Definition AMReX_BLassert.H:38
#define AMREX_ALWAYS_ASSERT(EX)
Definition AMReX_BLassert.H:50
#define AMREX_GPU_LAUNCH_HOST_DEVICE_LAMBDA_RANGE(TN, TI, block)
Definition AMReX_GpuLaunchMacrosC.nolint.H:4
#define AMREX_HOST_DEVICE_FOR_3D(...)
Definition AMReX_GpuLaunchMacrosC.nolint.H:106
#define AMREX_HOST_DEVICE_PARALLEL_FOR_4D(...)
Definition AMReX_GpuLaunchMacrosC.nolint.H:111
#define AMREX_LAUNCH_HOST_DEVICE_LAMBDA_DIM(...)
Definition AMReX_GpuLaunch.nolint.H:37
#define AMREX_GPU_DEVICE
Definition AMReX_GpuQualifiers.H:18
Array4< Real > fine
Definition AMReX_InterpFaceRegister.cpp:90
Array4< Real const > crse
Definition AMReX_InterpFaceRegister.cpp:92
#define AMREX_LOOP_4D(bx, ncomp, i, j, k, n, block)
Definition AMReX_Loop.nolint.H:16
#define AMREX_D_TERM(a, b, c)
Definition AMReX_SPACE.H:172
#define AMREX_D_DECL(a, b, c)
Definition AMReX_SPACE.H:171
A collection of Boxes stored in an Array.
Definition AMReX_BoxArray.H:567
__host__ __device__ IntVectND< dim > length() const noexcept
Return the length of the BoxND.
Definition AMReX_Box.H:154
const Real * InvCellSize() const noexcept
Returns the inverse cellsize for each coordinate direction.
Definition AMReX_CoordSys.H:82
Calculates the distribution of FABs to MPI processes.
Definition AMReX_DistributionMapping.H:43
int nComp() const noexcept
Return number of variables (aka components) associated with each point.
Definition AMReX_FabArrayBase.H:83
Array4< typename FabArray< FAB >::value_type const > const_array(const MFIter &mfi) const noexcept
Definition AMReX_FabArray.H:587
MultiArray4< typename FabArray< FAB >::value_type const > const_arrays() const noexcept
Definition AMReX_FabArray.H:649
Definition AMReX_FabFactory.H:50
Rectangular problem domain geometry.
Definition AMReX_Geometry.H:74
__host__ static __device__ constexpr IntVectND< dim > TheDimensionVector(int d) noexcept
This static member function returns a reference to a constant IntVectND object, all of whose dim argu...
Definition AMReX_IntVect.H:698
Iterator for looping ever tiles and boxes of amrex::FabArray based containers.
Definition AMReX_MFIter.H:85
Box tilebox() const noexcept
Return the tile Box at the current index.
Definition AMReX_MFIter.cpp:389
bool isValid() const noexcept
Is the iterator valid i.e. is it associated with a FAB?
Definition AMReX_MFIter.H:169
Definition AMReX_MLABecLaplacian.H:15
void setACoeffs(int amrlev, const AMF &alpha)
Definition AMReX_MLABecLaplacian.H:325
RT getBScalar() const final
Definition AMReX_MLABecLaplacian.H:167
int getNComp() const override
Return number of components.
Definition AMReX_MLABecLaplacian.H:147
void FFlux(int amrlev, const MFIter &mfi, const Array< FAB *, 3 > &flux, const FAB &sol, Location, int face_only=0) const override
Definition AMReX_MLABecLaplacian.H:1158
bool isSingular(int amrlev) const override
Is it singular on given AMR level?
Definition AMReX_MLABecLaplacian.H:155
void applyRobinBCTermsCoeffs()
Definition AMReX_MLABecLaplacian.H:605
Vector< Vector< Array< MF, 3 > > > m_b_coeffs
Definition AMReX_MLABecLaplacian.H:196
typename MF::fab_type FAB
Definition AMReX_MLABecLaplacian.H:18
MLABecLaplacianT< MF > & operator=(const MLABecLaplacianT< MF > &)=delete
bool supportRobinBC() const noexcept override
Definition AMReX_MLABecLaplacian.H:207
RT m_b_scalar
Definition AMReX_MLABecLaplacian.H:194
typename MF::value_type RT
Definition AMReX_MLABecLaplacian.H:19
~MLABecLaplacianT() override
void prepareForSolve() override
Definition AMReX_MLABecLaplacian.H:423
void define(const Vector< Geometry > &a_geom, const Vector< BoxArray > &a_grids, const Vector< DistributionMapping > &a_dmap, const LPInfo &a_info=LPInfo(), const Vector< FabFactory< FAB > const * > &a_factory={}, int a_ncomp=1)
Definition AMReX_MLABecLaplacian.H:245
RT m_a_scalar
Definition AMReX_MLABecLaplacian.H:193
void averageDownCoeffsToCoarseAmrLevel(int flev)
Definition AMReX_MLABecLaplacian.H:719
void averageDownCoeffsSameAmrLevel(int amrlev, Vector< MF > &a, Vector< Array< MF, 3 > > &b)
Definition AMReX_MLABecLaplacian.H:632
bool m_scalars_set
Definition AMReX_MLABecLaplacian.H:198
Vector< int > m_is_singular
Definition AMReX_MLABecLaplacian.H:205
void setBCoeffs(int amrlev, const Array< AMF const *, 3 > &beta)
Definition AMReX_MLABecLaplacian.H:352
void averageDownCoeffs()
Definition AMReX_MLABecLaplacian.H:614
RT getAScalar() const final
Definition AMReX_MLABecLaplacian.H:166
void normalize(int amrlev, int mglev, MF &mf) const override
Divide mf by the diagonal component of the operator. Used by bicgstab.
Definition AMReX_MLABecLaplacian.H:1247
void Fsmooth(int amrlev, int mglev, MF &sol, const MF &rhs, int redblack) const override
Definition AMReX_MLABecLaplacian.H:884
bool m_needs_update
Definition AMReX_MLABecLaplacian.H:203
std::unique_ptr< MLLinOpT< MF > > makeNLinOp(int) const final
Definition AMReX_MLABecLaplacian.H:1325
bool m_acoef_set
Definition AMReX_MLABecLaplacian.H:199
Array< MF const *, 3 > getBCoeffs(int amrlev, int mglev) const final
Definition AMReX_MLABecLaplacian.H:170
MLABecLaplacianT(MLABecLaplacianT< MF > &&)=delete
Vector< Vector< MF > > m_a_coeffs
Definition AMReX_MLABecLaplacian.H:195
bool isBottomSingular() const override
Is the bottom of MG singular?
Definition AMReX_MLABecLaplacian.H:156
void applyMetricTermsCoeffs()
Definition AMReX_MLABecLaplacian.H:444
void copyNSolveSolution(MF &dst, MF const &src) const final
Definition AMReX_MLABecLaplacian.H:1427
bool supportNSolve() const override
Definition AMReX_MLABecLaplacian.H:1309
typename MLLinOpT< MF >::Location Location
Definition AMReX_MLABecLaplacian.H:22
MF const * getACoeffs(int amrlev, int mglev) const final
Definition AMReX_MLABecLaplacian.H:168
void update() override
Update for reuse.
Definition AMReX_MLABecLaplacian.H:402
bool needsUpdate() const override
Does it need update if it's reused?
Definition AMReX_MLABecLaplacian.H:149
void setScalars(T1 a, T2 b) noexcept
Definition AMReX_MLABecLaplacian.H:306
void Fapply(int amrlev, int mglev, MF &out, const MF &in) const override
Definition AMReX_MLABecLaplacian.H:801
MLABecLaplacianT(const MLABecLaplacianT< MF > &)=delete
Definition AMReX_MLCellABecLap.H:14
void define(const Vector< Geometry > &a_geom, const Vector< BoxArray > &a_grids, const Vector< DistributionMapping > &a_dmap, const LPInfo &a_info=LPInfo(), const Vector< FabFactory< FAB > const * > &a_factory={})
Definition AMReX_MLCellABecLap.H:95
void prepareForSolve() override
Definition AMReX_MLCellABecLap.H:250
void update() override
Update for reuse.
Definition AMReX_MLCellABecLap.H:243
static constexpr int mg_coarsen_ratio
Definition AMReX_MLLinOp.H:588
static constexpr int mg_box_min_width
Definition AMReX_MLLinOp.H:589
const MLLinOpT< MF > * m_parent
Definition AMReX_MLLinOp.H:604
Definition AMReX_MultiMask.H:18
MultiArray4< int const > const_arrays() const noexcept
Definition AMReX_MultiMask.H:48
Array4< int const > array(const MFIter &mfi) const noexcept
Definition AMReX_MultiMask.H:40
An Iterator over the Orientation of Faces of a Box.
Definition AMReX_Orientation.H:135
This class is a thin wrapper around std::vector. Unlike vector, Vector::operator[] provides bound che...
Definition AMReX_Vector.H:28
Long size() const noexcept
Definition AMReX_Vector.H:53
A Collection of IArrayBoxes.
Definition AMReX_iMultiFab.H:34
amrex_real Real
Floating Point Type for Fields.
Definition AMReX_REAL.H:79
std::array< T, N > Array
Definition AMReX_Array.H:26
void Max(KeyValuePair< K, V > &vi, MPI_Comm comm)
Definition AMReX_ParallelReduce.H:133
void streamSynchronize() noexcept
Definition AMReX_GpuDevice.H:263
bool inLaunchRegion() noexcept
Definition AMReX_GpuControl.H:92
bool notInLaunchRegion() noexcept
Definition AMReX_GpuControl.H:93
bool inNoSyncRegion() noexcept
Definition AMReX_GpuControl.H:152
MPI_Comm CommunicatorSub() noexcept
sub-communicator for current frame
Definition AMReX_ParallelContext.H:70
Definition AMReX_Amr.cpp:49
__host__ __device__ BoxND< dim > adjCellHi(const BoxND< dim > &b, int dir, int len=1) noexcept
Similar to adjCellLo but builds an adjacent BoxND on the high end.
Definition AMReX_Box.H:1735
MF::value_type norminf(MF const &mf, int scomp, int ncomp, IntVect const &nghost, bool local=false)
Definition AMReX_FabArrayUtility.H:2029
__host__ __device__ BoxND< dim > convert(const BoxND< dim > &b, const IntVectND< dim > &typ) noexcept
Return a BoxND with different type.
Definition AMReX_Box.H:1558
int nComp(FabArrayBase const &fa)
Definition AMReX_FabArrayBase.cpp:2854
std::array< T const *, 3 > GetArrOfConstPtrs(const std::array< T, 3 > &a) noexcept
Definition AMReX_Array.H:1017
std::enable_if_t< std::is_integral_v< T > > ParallelFor(TypeList< CTOs... > ctos, std::array< int, sizeof...(CTOs)> const &runtime_options, T N, F &&f)
Definition AMReX_CTOParallelForImpl.H:193
void average_down(const MultiFab &S_fine, MultiFab &S_crse, const Geometry &fgeom, const Geometry &cgeom, int scomp, int ncomp, int rr)
Definition AMReX_MultiFabUtil.cpp:336
__host__ __device__ BoxND< dim > adjCellLo(const BoxND< dim > &b, int dir, int len=1) noexcept
Return the cell centered BoxND of length len adjacent to b on the low end along the coordinate direct...
Definition AMReX_Box.H:1714
__host__ __device__ BoxND< dim > surroundingNodes(const BoxND< dim > &b, int dir) noexcept
Return a BoxND with NODE based coordinates in direction dir that encloses BoxND b....
Definition AMReX_Box.H:1522
void average_down_faces(const Vector< const MF * > &fine, const Vector< MF * > &crse, const IntVect &ratio, int ngcrse=0)
Average fine face-based FabArray onto crse face-based FabArray.
Definition AMReX_MultiFabUtil.H:989
__host__ __device__ BoxND< dim > bdryLo(const BoxND< dim > &b, int dir, int len=1) noexcept
Return the edge-centered BoxND (in direction dir) defining the low side of BoxND b.
Definition AMReX_Box.H:1625
BoxND< 3 > Box
Box is an alias for amrex::BoxND instantiated with AMREX_SPACEDIM.
Definition AMReX_BaseFwd.H:30
__host__ __device__ Dim3 begin(BoxND< dim > const &box) noexcept
Definition AMReX_Box.H:2006
std::array< T *, 3 > GetArrOfPtrs(std::array< T, 3 > &a) noexcept
Definition AMReX_Array.H:1005
LinOpBCType
Definition AMReX_LO_BCTYPES.H:22
IntVectND< 3 > IntVect
IntVect is an alias for amrex::IntVectND instantiated with AMREX_SPACEDIM.
Definition AMReX_BaseFwd.H:33
__host__ __device__ constexpr const T & max(const T &a, const T &b) noexcept
Definition AMReX_Algorithm.H:44
bool TilingIfNotGPU() noexcept
Definition AMReX_MFIter.H:12
__host__ __device__ Dim3 end(BoxND< dim > const &box) noexcept
Definition AMReX_Box.H:2015
Fixed-size array that can be used on GPU.
Definition AMReX_Array.H:41
Definition AMReX_MLLinOp.H:36
Location
Definition AMReX_MLLinOp.H:90
FabArray memory allocation information.
Definition AMReX_FabArray.H:66
Definition AMReX_MFIter.H:20
MFItInfo & SetDynamic(bool f) noexcept
Definition AMReX_MFIter.H:40
MFItInfo & EnableTiling(const IntVect &ts=FabArrayBase::mfiter_tile_size) noexcept
Definition AMReX_MFIter.H:31