Block-Structured AMR Software Framework (AMReX)
AMReX_MLCellLinOp.H — source listing extracted from the generated documentation.
Go to the documentation of this file.
1#ifndef AMREX_ML_CELL_LINOP_H_
2#define AMREX_ML_CELL_LINOP_H_
3#include <AMReX_Config.H>
4
5#include <AMReX_MLLinOp.H>
6#include <AMReX_iMultiFab.H>
9#include <AMReX_MLLinOp_K.H>
10#include <AMReX_MLMG_K.H>
11
12#ifndef BL_NO_FORT
13#include <AMReX_MLLinOp_F.H>
14#endif
15
16namespace amrex {
17
18template <typename T>
20
21template <typename MF>
22class MLCellLinOpT // NOLINT(cppcoreguidelines-virtual-class-destructor)
23 : public MLLinOpT<MF>
24{
25public:
26
29
31 using BCMode = typename MLLinOpT<MF>::BCMode;
34
35 MLCellLinOpT ();
36 ~MLCellLinOpT () override = default;
37
38 MLCellLinOpT (const MLCellLinOpT<MF>&) = delete;
42
43 void define (const Vector<Geometry>& a_geom,
44 const Vector<BoxArray>& a_grids,
45 const Vector<DistributionMapping>& a_dmap,
46 const LPInfo& a_info = LPInfo(),
47 const Vector<FabFactory<FAB> const*>& a_factory = {});
48
49 void setLevelBC (int amrlev, const MF* levelbcdata,
50 const MF* robinbc_a = nullptr,
51 const MF* robinbc_b = nullptr,
52 const MF* robinbc_f = nullptr) final;
53
54 template <typename AMF, std::enable_if_t<!std::is_same_v<MF,AMF> &&
55 IsMultiFabLike_v<AMF>, int> = 0>
56 void setLevelBC (int amrlev, const AMF* levelbcdata,
57 const AMF* robinbc_a = nullptr,
58 const AMF* robinbc_b = nullptr,
59 const AMF* robinbc_f = nullptr)
60 {
61 this->MLLinOpT<MF>::template setLevelBC<AMF>(amrlev, levelbcdata, robinbc_a, robinbc_b, robinbc_f);
62 }
63
64 bool needsUpdate () const override {
66 }
67 void update () override;
68
69 void setGaussSeidel (bool flag) noexcept { m_use_gauss_seidel = flag; }
70
71 virtual bool isCrossStencil () const { return true; }
72 virtual bool isTensorOp () const { return false; }
73
74 void updateSolBC (int amrlev, const MF& crse_bcdata) const;
75 void updateCorBC (int amrlev, const MF& crse_bcdata) const;
76
77 virtual void applyBC (int amrlev, int mglev, MF& in, BCMode bc_mode, StateMode s_mode,
78 const MLMGBndryT<MF>* bndry=nullptr, bool skip_fillboundary=false) const;
79
80 BoxArray makeNGrids (int grid_size) const;
81
82 void restriction (int, int, MF& crse, MF& fine) const override;
83
84 void interpolation (int amrlev, int fmglev, MF& fine, const MF& crse) const override;
85
86 void interpAssign (int amrlev, int fmglev, MF& fine, MF& crse) const override;
87
88 void interpolationAmr (int famrlev, MF& fine, const MF& crse,
89 IntVect const& nghost) const override;
90
91 void averageDownSolutionRHS (int camrlev, MF& crse_sol, MF& crse_rhs,
92 const MF& fine_sol, const MF& fine_rhs) override;
93
94 void apply (int amrlev, int mglev, MF& out, MF& in, BCMode bc_mode,
95 StateMode s_mode, const MLMGBndryT<MF>* bndry=nullptr) const override;
96 void smooth (int amrlev, int mglev, MF& sol, const MF& rhs,
97 bool skip_fillboundary, int niter) const final;
98
99 void solutionResidual (int amrlev, MF& resid, MF& x, const MF& b,
100 const MF* crse_bcdata=nullptr) override;
101
102 void prepareForFluxes (int amrlev, const MF* crse_bcdata = nullptr) override;
103
104 void correctionResidual (int amrlev, int mglev, MF& resid, MF& x, const MF& b,
105 BCMode bc_mode, const MF* crse_bcdata=nullptr) final;
106
107 // The assumption is crse_sol's boundary has been filled, but not fine_sol.
108 void reflux (int crse_amrlev,
109 MF& res, const MF& crse_sol, const MF&,
110 MF&, MF& fine_sol, const MF&) const final;
111 void compFlux (int amrlev, const Array<MF*,AMREX_SPACEDIM>& fluxes,
112 MF& sol, Location loc) const override;
113 void compGrad (int amrlev, const Array<MF*,AMREX_SPACEDIM>& grad,
114 MF& sol, Location loc) const override;
115
116 void applyMetricTerm (int amrlev, int mglev, MF& rhs) const final;
117 void unapplyMetricTerm (int amrlev, int mglev, MF& rhs) const final;
118 Vector<RT> getSolvabilityOffset (int amrlev, int mglev,
119 MF const& rhs) const override;
120 void fixSolvabilityByOffset (int amrlev, int mglev, MF& rhs,
121 Vector<RT> const& offset) const override;
122
123 void prepareForSolve () override;
124
125 RT xdoty (int amrlev, int mglev, const MF& x, const MF& y, bool local) const final;
126
127 RT dotProductPrecond (Vector<MF const*> const& x, Vector<MF const*> const& y) const final;
128
129 RT norm2Precond (Vector<MF const*> const& x) const final;
130
131 virtual void Fapply (int amrlev, int mglev, MF& out, const MF& in) const = 0;
132 virtual void Fsmooth (int amrlev, int mglev, MF& sol, const MF& rhs, int redblack) const = 0;
133 virtual void FFlux (int amrlev, const MFIter& mfi,
134 const Array<FAB*,AMREX_SPACEDIM>& flux,
135 const FAB& sol, Location loc, int face_only=0) const = 0;
136
137 virtual void addInhomogNeumannFlux (int /*amrlev*/,
138 const Array<MF*,AMREX_SPACEDIM>& /*grad*/,
139 MF const& /*sol*/,
140 bool /*mult_bcoef*/) const {}
141
142 RT normInf (int amrlev, MF const& mf, bool local) const override;
143
144 void averageDownAndSync (Vector<MF>& sol) const override;
145
146 void avgDownResAmr (int clev, MF& cres, MF const& fres) const override;
147
148 void beginPrecondBC () override;
149 void endPrecondBC () override;
150
152 struct BCTL {
153 BoundCond type;
154 RT location;
155 };
157
159
160 void setInterpBndryHalfWidth (int w) { m_interpbndry_halfwidth = w; }
161
162protected:
163
164 bool m_has_metric_term = false;
165
168
171
173
174 // In case of agglomeration, coarse MG grids on amr level 0 are
175 // not simply coarsened from fine MG grids. So we need to build
176 // bcond and bcloc for each MG level.
180 class BndryCondLoc
181 {
182 public:
183 BndryCondLoc (const BoxArray& ba, const DistributionMapping& dm, int ncomp);
184
185 void setLOBndryConds (const Geometry& geom, const Real* dx,
188 IntVect const& ratio, const RealVect& interior_bloc,
189 const Array<Real,AMREX_SPACEDIM>& domain_bloc_lo,
190 const Array<Real,AMREX_SPACEDIM>& domain_bloc_hi,
191 LinOpBCType crse_fine_bc_type);
192
193 const Vector<BCTuple>& bndryConds (const MFIter& mfi) const noexcept {
194 return bcond[mfi];
195 }
196 const Vector<RealTuple>& bndryLocs (const MFIter& mfi) const noexcept {
197 return bcloc[mfi];
198 }
199 const BCTuple& bndryConds (const MFIter& mfi, int icomp) const noexcept {
200 return bcond[mfi][icomp];
201 }
202 const RealTuple& bndryLocs (const MFIter& mfi, int icomp) const noexcept {
203 return bcloc[mfi][icomp];
204 }
205 GpuArray<BCTL,2*AMREX_SPACEDIM> const* getBCTLPtr (const MFIter& mfi) const noexcept {
206 return bctl[mfi];
207 }
208 private:
213 int m_ncomp;
214 };
217
218 // used to save interpolation coefficients of the first interior cells
220
221 // boundary cell flags for covered, not_covered, outside_domain
223
225
227
228 bool m_use_gauss_seidel = true; // use red-black Gauss-Seidel by default
229
230private:
231
232 void defineAuxData ();
233 void defineBC ();
234
235 void computeVolInv () const;
236 mutable Vector<Vector<RT> > m_volinv; // used by solvability fix
237
238 int m_interpbndry_halfwidth = 2;
239
240 mutable Vector<Vector<TagVector<MLMGABCTag<RT>>>> m_bc_tags;
241};
242
244
// One GPU work item for applyBC: describes a single boundary strip (one box
// face, one component) to be processed by ParallelFor over a TagVector.
// NOTE(review): Doxygen listing — the member at original line 247 (used as
// `tag.mask` in applyBC) is elided here.
245template <typename T>
246struct MLMGABCTag {
248 T bcloc;        // boundary location for this face/component
249 Box bx;         // the ghost-cell strip adjacent to the valid box face
250 BoundCond bctype;
251 int blen;       // valid-box length in the face's direction
252 int comp;       // component index
253 Orientation face;
254 int local_index; // MFIter local box index, used to select arrays
255
257 Box const& box() const noexcept { return bx; }
258};
259
// Work-tag struct bundling lo/hi face data (arrays, masks, BC type/location)
// for one box and one direction.  Not referenced in the visible code of this
// listing — presumably consumed by a ParallelFor elsewhere; TODO confirm.
260template <typename T>
261struct MLMGPSTag {
262 Array4<T> flo;
263 Array4<T> fhi;
264 Array4<int const> mlo;
265 Array4<int const> mhi;
266 T bcllo;
267 T bclhi;
268 Box bx;
269 BoundCond bctlo;
270 BoundCond bcthi;
271 int blen;
272 int comp;
273 int dir;
274
276 Box const& box() const noexcept { return bx; }
277};
278
279#ifdef AMREX_USE_EB
// Embedded-boundary variant of MLMGPSTag: adds the area-fraction array `ap`.
// Only compiled when AMREX_USE_EB is defined (see surrounding #ifdef).
280template <typename T>
281struct MLMGPSEBTag {
282 Array4<T> flo;
283 Array4<T> fhi;
284 Array4<T const> ap;   // EB face area fractions
285 Array4<int const> mlo;
286 Array4<int const> mhi;
287 T bcllo;
288 T bclhi;
289 Box bx;
290 BoundCond bctlo;
291 BoundCond bcthi;
292 int blen;
293 int comp;
294 int dir;
295
297 Box const& box() const noexcept { return bx; }
298};
299#endif
300
301template <typename MF>
302MLCellLinOpT<MF>::BndryCondLoc::BndryCondLoc (const BoxArray& ba,
303 const DistributionMapping& dm,
304 int ncomp)
305 : bcond(ba, dm),
306 bcloc(ba, dm),
307 bctl(ba, dm),
308 bctl_dv(bctl.local_size()*ncomp),
309 m_ncomp(ncomp)
310{
311 auto* dp = bctl_dv.data();
312 for (MFIter mfi(bcloc); mfi.isValid(); ++mfi) {
313 bcond[mfi].resize(ncomp);
314 bcloc[mfi].resize(ncomp);
315 bctl[mfi] = dp;
316 dp += ncomp;
317 }
318}
319
// Fill the per-box, per-component BC type/location tuples via
// MLMGBndryT::setBoxBC, then pack them into a pinned host buffer and
// asynchronously copy it into the device-side bctl_dv storage.
// NOTE(review): Doxygen listing — original line 365 (after copyAsync,
// presumably a stream synchronize) is elided here; TODO confirm.
320template <typename MF>
321void
322MLCellLinOpT<MF>::BndryCondLoc::
323setLOBndryConds (const Geometry& geom, const Real* dx,
324 const Vector<Array<BCType,AMREX_SPACEDIM> >& lobc,
325 const Vector<Array<BCType,AMREX_SPACEDIM> >& hibc,
326 IntVect const& ratio, const RealVect& interior_bloc,
327 const Array<Real,AMREX_SPACEDIM>& domain_bloc_lo,
328 const Array<Real,AMREX_SPACEDIM>& domain_bloc_hi,
329 LinOpBCType crse_fine_bc_type)
330{
331 const Box& domain = geom.Domain();
332
333#ifdef AMREX_USE_OMP
334#pragma omp parallel
335#endif
336 for (MFIter mfi(bcloc); mfi.isValid(); ++mfi)
337 {
338 const Box& bx = mfi.validbox();
339 for (int icomp = 0; icomp < m_ncomp; ++icomp) {
340 RealTuple & bloc = bcloc[mfi][icomp];
341 BCTuple & bctag = bcond[mfi][icomp];
342 MLMGBndryT<MF>::setBoxBC(bloc, bctag, bx, domain,
343 lobc[icomp], hibc[icomp],
344 dx, ratio, interior_bloc,
345 domain_bloc_lo, domain_bloc_hi,
346 geom.isPeriodicArray(),
347 crse_fine_bc_type);
348 }
349 }
350
// Repack the (type, location) pairs into GpuArrays and ship them to device.
351 Gpu::PinnedVector<GpuArray<BCTL,2*AMREX_SPACEDIM> > hv;
352 hv.reserve(bctl_dv.size());
353 for (MFIter mfi(bctl); mfi.isValid(); ++mfi)
354 {
355 for (int icomp = 0; icomp < m_ncomp; ++icomp) {
356 GpuArray<BCTL,2*AMREX_SPACEDIM> tmp;
357 for (int m = 0; m < 2*AMREX_SPACEDIM; ++m) {
358 tmp[m].type = bcond[mfi][icomp][m];
359 tmp[m].location = bcloc[mfi][icomp][m];
360 }
361 hv.push_back(std::move(tmp));
362 }
363 }
364 Gpu::copyAsync(Gpu::hostToDevice, hv.begin(), hv.end(), bctl_dv.begin());
366}
367
369
// Default constructor: mark this operator as cell-centered.
// NOTE(review): Doxygen listing — the signature line (original line 371,
// MLCellLinOpT<MF>::MLCellLinOpT ()) is elided here.
370template <typename MF>
372{
373 this->m_ixtype = IntVect::TheCellVector();
374}
375
// Define the operator on the hierarchy: delegate to MLLinOpT::define, then
// build the auxiliary data (registers/masks) and boundary-condition objects.
// NOTE(review): the first line of the signature (original line 378) is elided.
376template <typename MF>
377void
379 const Vector<BoxArray>& a_grids,
380 const Vector<DistributionMapping>& a_dmap,
381 const LPInfo& a_info,
382 const Vector<FabFactory<FAB> const*>& a_factory)
383{
384 MLLinOpT<MF>::define(a_geom, a_grids, a_dmap, a_info, a_factory);
385 defineAuxData();
386 defineBC();
387}
388
// Allocate per-level auxiliary data: under-relaxation registers, face masks,
// flux registers between AMR levels, fine-coverage masks for norms, and the
// (lazily filled) GPU BC tag vectors.  Also decides whether metric terms are
// needed (non-Cartesian geometry in < 3D).
// NOTE(review): signature line (original line 391) is elided in this listing.
389template <typename MF>
390void
392{
393 BL_PROFILE("MLCellLinOp::defineAuxData()");
394
395 m_undrrelxr.resize(this->m_num_amr_levels);
396 m_maskvals.resize(this->m_num_amr_levels);
397 m_fluxreg.resize(this->m_num_amr_levels-1);
398 m_norm_fine_mask.resize(this->m_num_amr_levels-1);
399 m_bc_tags.resize(this->m_num_amr_levels);
400
401 const int ncomp = this->getNComp();
402
403 for (int amrlev = 0; amrlev < this->m_num_amr_levels; ++amrlev)
404 {
405 m_undrrelxr[amrlev].resize(this->m_num_mg_levels[amrlev]);
406 m_bc_tags[amrlev].resize(this->m_num_mg_levels[amrlev]);
407 for (int mglev = 0; mglev < this->m_num_mg_levels[amrlev]; ++mglev)
408 {
409 m_undrrelxr[amrlev][mglev].define(this->m_grids[amrlev][mglev],
410 this->m_dmap[amrlev][mglev],
411 1, 0, 0, ncomp);
412 }
413 }
414
// One mask per face orientation per MG level; non-cross stencils extend the
// mask to corner cells.
415 for (int amrlev = 0; amrlev < this->m_num_amr_levels; ++amrlev)
416 {
417 m_maskvals[amrlev].resize(this->m_num_mg_levels[amrlev]);
418 for (int mglev = 0; mglev < this->m_num_mg_levels[amrlev]; ++mglev)
419 {
420 for (OrientationIter oitr; oitr; ++oitr)
421 {
422 const Orientation face = oitr();
423 const int ngrow = 1;
424 const int extent = this->isCrossStencil() ? 0 : 1; // extend to corners
425 m_maskvals[amrlev][mglev][face].define(this->m_grids[amrlev][mglev],
426 this->m_dmap[amrlev][mglev],
427 this->m_geom[amrlev][mglev],
428 face, 0, ngrow, extent, 1, true);
429 }
430 }
431 }
432
// Flux registers and fine masks live between consecutive AMR levels.
433 for (int amrlev = 0; amrlev < this->m_num_amr_levels-1; ++amrlev)
434 {
435 const IntVect ratio{this->AMRRefRatioVect(amrlev)};
436 m_fluxreg[amrlev].define(this->m_grids[amrlev+1][0],
437 this->m_grids[amrlev][0],
438 this->m_dmap[amrlev+1][0],
439 this->m_dmap[amrlev][0],
440 this->m_geom[amrlev+1][0],
441 this->m_geom[amrlev][0],
442 ratio, amrlev+1, ncomp);
443 m_fluxreg[amrlev].setDeterministic(this->info.deterministic);
444 m_norm_fine_mask[amrlev] = std::make_unique<iMultiFab>
445 (makeFineMask(this->m_grids[amrlev][0], this->m_dmap[amrlev][0],
446 this->m_grids[amrlev+1][0],
447 ratio, 1, 0));
448 }
449
450#if (AMREX_SPACEDIM != 3)
451 m_has_metric_term = !this->m_geom[0][0].IsCartesian() && this->info.has_metric_term;
452#endif
453}
454
// Build the boundary-condition machinery for every AMR level: solution and
// correction boundary objects (m_bndry_sol / m_bndry_cor), the coarse/fine
// boundary registers they interpolate from (m_crse_sol_br / m_crse_cor_br),
// and the per-MG-level BndryCondLoc tables.
// NOTE(review): original line 514 (an argument of setBndryValues) is elided.
455template <typename MF>
456void
457MLCellLinOpT<MF>::defineBC ()
458{
459 BL_PROFILE("MLCellLinOp::defineBC()");
460
461 const int ncomp = this->getNComp();
462
463 m_bndry_sol.resize(this->m_num_amr_levels);
464 m_crse_sol_br.resize(this->m_num_amr_levels);
465
466 m_bndry_cor.resize(this->m_num_amr_levels);
467 m_crse_cor_br.resize(this->m_num_amr_levels);
468
469 m_robin_bcval.resize(this->m_num_amr_levels);
470
471 for (int amrlev = 0; amrlev < this->m_num_amr_levels; ++amrlev)
472 {
473 m_bndry_sol[amrlev] = std::make_unique<MLMGBndryT<MF>>(this->m_grids[amrlev][0],
474 this->m_dmap[amrlev][0],
475 ncomp,
476 this->m_geom[amrlev][0]);
477 }
478
// Coarse/fine boundary registers on coarsened grids (levels > 0 only).
479 for (int amrlev = 1; amrlev < this->m_num_amr_levels; ++amrlev)
480 {
481 const int in_rad = 0;
482 const int out_rad = 1;
483 const int extent_rad = 2;
484 const IntVect crse_ratio = this->AMRRefRatioVect(amrlev-1);
485 BoxArray cba = this->m_grids[amrlev][0];
486 cba.coarsen(crse_ratio);
487 m_crse_sol_br[amrlev] = std::make_unique<BndryRegisterT<MF>>
488 (cba, this->m_dmap[amrlev][0], in_rad, out_rad, extent_rad, ncomp);
489 }
490
491 for (int amrlev = 1; amrlev < this->m_num_amr_levels; ++amrlev)
492 {
493 const int in_rad = 0;
494 const int out_rad = 1;
495 const int extent_rad = 2;
496 const IntVect crse_ratio = this->AMRRefRatioVect(amrlev-1);
497 BoxArray cba = this->m_grids[amrlev][0];
498 cba.coarsen(crse_ratio);
499 m_crse_cor_br[amrlev] = std::make_unique<BndryRegisterT<MF>>
500 (cba, this->m_dmap[amrlev][0], in_rad, out_rad, extent_rad, ncomp);
501 m_crse_cor_br[amrlev]->setVal(RT(0.0));
502 }
503
504 // This has to be done after m_crse_cor_br is defined.
505 for (int amrlev = 1; amrlev < this->m_num_amr_levels; ++amrlev)
506 {
507 m_bndry_cor[amrlev] = std::make_unique<MLMGBndryT<MF>>
508 (this->m_grids[amrlev][0], this->m_dmap[amrlev][0], ncomp, this->m_geom[amrlev][0]);
509 MF bc_data(this->m_grids[amrlev][0], this->m_dmap[amrlev][0], ncomp, 1);
510 bc_data.setVal(0.0);
511
512 m_bndry_cor[amrlev]->setBndryValues(*m_crse_cor_br[amrlev], 0, bc_data, 0, 0, ncomp,
513 this->AMRRefRatioVect(amrlev-1),
515 m_interpbndry_halfwidth);
516
// The correction at the coarse/fine boundary is always homogeneous Dirichlet.
517 Vector<Array<LinOpBCType,AMREX_SPACEDIM> > bclohi
518 (ncomp,Array<LinOpBCType,AMREX_SPACEDIM>{{AMREX_D_DECL(BCType::Dirichlet,
519 BCType::Dirichlet,
520 BCType::Dirichlet)}});
521 m_bndry_cor[amrlev]->setLOBndryConds(bclohi, bclohi, this->AMRRefRatioVect(amrlev-1), RealVect{});
522 }
523
524 m_bcondloc.resize(this->m_num_amr_levels);
525 for (int amrlev = 0; amrlev < this->m_num_amr_levels; ++amrlev)
526 {
527 m_bcondloc[amrlev].resize(this->m_num_mg_levels[amrlev]);
528 for (int mglev = 0; mglev < this->m_num_mg_levels[amrlev]; ++mglev)
529 {
530 m_bcondloc[amrlev][mglev] = std::make_unique<BndryCondLoc>(this->m_grids[amrlev][mglev],
531 this->m_dmap[amrlev][mglev],
532 ncomp);
533 }
534 }
535}
536
// Store the physical boundary data for one AMR level.  nullptr levelbcdata
// means homogeneous (zero) boundary values.  On level 0, coarse data for the
// boundary (if the solve is nested inside a coarser grid) is copied into the
// boundary register first.  When Robin BCs are active, the a/b/f coefficient
// fields are packed into m_robin_bcval (3 components per solution component).
// NOTE(review): the AMREX_HOST_DEVICE_FOR_3D launch headers at original lines
// 648 and 657 (and line 592) are elided in this listing.
537template <typename MF>
538void
539MLCellLinOpT<MF>::setLevelBC (int amrlev, const MF* a_levelbcdata, const MF* robinbc_a,
540 const MF* robinbc_b, const MF* robinbc_f)
541{
542 BL_PROFILE("MLCellLinOp::setLevelBC()");
543
544 AMREX_ALWAYS_ASSERT(amrlev >= 0 && amrlev < this->m_num_amr_levels);
545
546 const int ncomp = this->getNComp();
547
// Substitute a zero field when no boundary data is supplied.
548 MF zero;
549 IntVect ng(1);
550 if (this->hasHiddenDimension()) { ng[this->hiddenDirection()] = 0; }
551 if (a_levelbcdata == nullptr) {
552 zero.define(this->m_grids[amrlev][0], this->m_dmap[amrlev][0], ncomp, ng);
553 zero.setVal(RT(0.0));
554 } else {
555 AMREX_ALWAYS_ASSERT(a_levelbcdata->nGrowVect().allGE(ng));
556 }
557 const MF& bcdata = (a_levelbcdata == nullptr) ? zero : *a_levelbcdata;
558
559 IntVect br_ref_ratio(-1);
560
561 if (amrlev == 0)
562 {
563 if (this->needsCoarseDataForBC())
564 {
565 // AMREX_ALWAYS_ASSERT(!this->hasHiddenDimension());
566 if (this->hasHiddenDimension()) {
567 int hidden_dir = this->hiddenDirection();
568 AMREX_ALWAYS_ASSERT(this->m_coarse_data_crse_ratio[hidden_dir] == 1);
569 }
570 br_ref_ratio = this->m_coarse_data_crse_ratio.allGT(0) ? this->m_coarse_data_crse_ratio : IntVect(2);
// Lazily build the coarse boundary register the first time it is needed.
571 if (this->m_crse_sol_br[amrlev] == nullptr && br_ref_ratio.allGT(0))
572 {
573 const int in_rad = 0;
574 const int out_rad = 1;
575 const int extent_rad = 2;
576 const IntVect crse_ratio = br_ref_ratio;
577 BoxArray cba = this->m_grids[amrlev][0];
578 cba.coarsen(crse_ratio);
579 this->m_crse_sol_br[amrlev] = std::make_unique<BndryRegisterT<MF>>
580 (cba, this->m_dmap[amrlev][0], in_rad, out_rad, extent_rad, ncomp);
581 }
582 if (this->m_coarse_data_for_bc != nullptr) {
583 AMREX_ALWAYS_ASSERT(this->m_coarse_data_crse_ratio.allGT(0));
584 const Box& cbx = amrex::coarsen(this->m_geom[0][0].Domain(), this->m_coarse_data_crse_ratio);
585 this->m_crse_sol_br[amrlev]->copyFrom(*(this->m_coarse_data_for_bc), 0, 0, 0, ncomp,
586 this->m_geom[0][0].periodicity(cbx));
587 } else {
588 this->m_crse_sol_br[amrlev]->setVal(RT(0.0));
589 }
590 this->m_bndry_sol[amrlev]->setBndryValues(*(this->m_crse_sol_br[amrlev]), 0,
591 bcdata, 0, 0, ncomp, br_ref_ratio,
593 this->m_interpbndry_halfwidth);
594 br_ref_ratio = this->m_coarse_data_crse_ratio;
595 }
596 else
597 {
598 this->m_bndry_sol[amrlev]->setPhysBndryValues(bcdata,0,0,ncomp);
599 br_ref_ratio = IntVect(1);
600 }
601 }
602 else
603 {
604 this->m_bndry_sol[amrlev]->setPhysBndryValues(bcdata,0,0,ncomp);
605 br_ref_ratio = this->AMRRefRatioVect(amrlev-1);
606 }
607
608 auto crse_fine_bc_type = (amrlev == 0) ? this->m_coarse_fine_bc_type : LinOpBCType::Dirichlet;
609 this->m_bndry_sol[amrlev]->setLOBndryConds(this->m_lobc, this->m_hibc, br_ref_ratio,
610 this->m_coarse_bc_loc, crse_fine_bc_type);
611
// Refresh the per-MG-level BC tables with the new ratio/locations.
612 const Real* dx = this->m_geom[amrlev][0].CellSize();
613 for (int mglev = 0; mglev < this->m_num_mg_levels[amrlev]; ++mglev)
614 {
615 this->m_bcondloc[amrlev][mglev]->setLOBndryConds(this->m_geom[amrlev][mglev], dx,
616 this->m_lobc, this->m_hibc,
617 br_ref_ratio, this->m_coarse_bc_loc,
618 this->m_domain_bloc_lo, this->m_domain_bloc_hi,
619 crse_fine_bc_type);
620 }
621
// Pack Robin coefficients (a, b, f) into ghost cells outside the domain.
622 if (this->hasRobinBC()) {
623 AMREX_ASSERT(robinbc_a != nullptr && robinbc_b != nullptr && robinbc_f != nullptr);
624 this->m_robin_bcval[amrlev] = std::make_unique<MF>(this->m_grids[amrlev][0],
625 this->m_dmap[amrlev][0],
626 ncomp*3, 1);
627 const Box& domain = this->m_geom[amrlev][0].Domain();
628 MFItInfo mfi_info;
629 if (Gpu::notInLaunchRegion()) { mfi_info.SetDynamic(true); }
630#ifdef AMREX_USE_OMP
631#pragma omp parallel if (Gpu::notInLaunchRegion())
632#endif
633 for (MFIter mfi(*(this->m_robin_bcval[amrlev]), mfi_info); mfi.isValid(); ++mfi) {
634 Box const& vbx = mfi.validbox();
635 Array4<RT const> const& ra = robinbc_a->const_array(mfi);
636 Array4<RT const> const& rb = robinbc_b->const_array(mfi);
637 Array4<RT const> const& rf = robinbc_f->const_array(mfi);
638 for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
639 const Box& blo = amrex::adjCellLo(vbx, idim);
640 const Box& bhi = amrex::adjCellHi(vbx, idim);
641 bool outside_domain_lo = !(domain.contains(blo));
642 bool outside_domain_hi = !(domain.contains(bhi));
643 if ((!outside_domain_lo) && (!outside_domain_hi)) { continue; }
644 for (int icomp = 0; icomp < ncomp; ++icomp) {
645 Array4<RT> const& rbc = (*(this->m_robin_bcval[amrlev]))[mfi].array(icomp*3);
646 if (this->m_lobc_orig[icomp][idim] == LinOpBCType::Robin && outside_domain_lo)
647 {
649 {
650 rbc(i,j,k,0) = ra(i,j,k,icomp);
651 rbc(i,j,k,1) = rb(i,j,k,icomp);
652 rbc(i,j,k,2) = rf(i,j,k,icomp);
653 });
654 }
655 if (this->m_hibc_orig[icomp][idim] == LinOpBCType::Robin && outside_domain_hi)
656 {
658 {
659 rbc(i,j,k,0) = ra(i,j,k,icomp);
660 rbc(i,j,k,1) = rb(i,j,k,icomp);
661 rbc(i,j,k,2) = rf(i,j,k,icomp);
662 });
663 }
664 }
665 }
666 }
667 }
668}
669
670template <typename MF>
671void
676
// Copy new coarse-level solution boundary data into the coarse boundary
// register, then re-interpolate the solution boundary values on this level.
// NOTE(review): original line 689 (an argument between the ratio and the
// halfwidth) is elided in this listing.
677template <typename MF>
678void
679MLCellLinOpT<MF>::updateSolBC (int amrlev, const MF& crse_bcdata) const
680{
681 BL_PROFILE("MLCellLinOp::updateSolBC()");
682
683 AMREX_ALWAYS_ASSERT(amrlev > 0);
684 const int ncomp = this->getNComp();
685 m_crse_sol_br[amrlev]->copyFrom(crse_bcdata, 0, 0, 0, ncomp,
686 this->m_geom[amrlev-1][0].periodicity());
687 m_bndry_sol[amrlev]->updateBndryValues(*m_crse_sol_br[amrlev], 0, 0, ncomp,
688 this->AMRRefRatioVect(amrlev-1),
690 m_interpbndry_halfwidth);
691}
692
// Same as updateSolBC but for the correction boundary data (m_bndry_cor).
// NOTE(review): original line 704 is elided in this listing.
693template <typename MF>
694void
695MLCellLinOpT<MF>::updateCorBC (int amrlev, const MF& crse_bcdata) const
696{
697 BL_PROFILE("MLCellLinOp::updateCorBC()");
698 AMREX_ALWAYS_ASSERT(amrlev > 0);
699 const int ncomp = this->getNComp();
700 m_crse_cor_br[amrlev]->copyFrom(crse_bcdata, 0, 0, 0, ncomp,
701 this->m_geom[amrlev-1][0].periodicity());
702 m_bndry_cor[amrlev]->updateBndryValues(*m_crse_cor_br[amrlev], 0, 0, ncomp,
703 this->AMRRefRatioVect(amrlev-1),
705 m_interpbndry_halfwidth);
706}
707
// Fill ghost cells of `in` according to the stored BC types/locations:
// periodic ghost cells via FillBoundary, then domain/coarse-fine faces via
// the mllinop_apply_bc_{x,y,z} kernels.  Three paths: (1) GPU fused launch
// over cached MLMGABCTag work items, (2) CPU MFIter loop over faces, and
// (3) a Fortran fallback for non-cross, non-tensor stencils.
// NOTE(review): Doxygen listing — elided lines include the assert text at
// original line 742, the tag-vector declaration at 751, the foo_ma setup at
// 786, and the Fortran call header at 913/915.
708template <typename MF>
709void
710MLCellLinOpT<MF>::applyBC (int amrlev, int mglev, MF& in, BCMode bc_mode, StateMode,
711 const MLMGBndryT<MF>* bndry, bool skip_fillboundary) const
712{
713 BL_PROFILE("MLCellLinOp::applyBC()");
714 // No coarsened boundary values, cannot apply inhomog at mglev>0.
715 BL_ASSERT(mglev == 0 || bc_mode == BCMode::Homogeneous);
716 BL_ASSERT(bndry != nullptr || bc_mode == BCMode::Homogeneous);
717
718 const int ncomp = this->getNComp();
719 const int cross = isCrossStencil();
720 const int tensorop = isTensorOp();
721 if (!skip_fillboundary) {
722 in.FillBoundary(0, ncomp, this->m_geom[amrlev][mglev].periodicity(), cross);
723 }
724
725 int flagbc = bc_mode == BCMode::Inhomogeneous;
726 const int imaxorder = this->maxorder;
727
728 const Real* dxinv = this->m_geom[amrlev][mglev].InvCellSize();
729 const RT dxi = static_cast<RT>(dxinv[0]);
730 const RT dyi = (AMREX_SPACEDIM >= 2) ? static_cast<RT>(dxinv[1]) : RT(1.0);
731 const RT dzi = (AMREX_SPACEDIM == 3) ? static_cast<RT>(dxinv[2]) : RT(1.0);
732
733 const auto& maskvals = m_maskvals[amrlev][mglev];
734 const auto& bcondloc = *m_bcondloc[amrlev][mglev];
735
// Dummy 1-cell fab used as a stand-in when no boundary values are supplied.
736 FAB foofab(Box::TheUnitBox(),ncomp);
737 const auto& foo = foofab.const_array();
738
739 MFItInfo mfi_info;
740 if (Gpu::notInLaunchRegion()) { mfi_info.SetDynamic(true); }
741
743 "non-cross stencil not support for gpu");
744
745 const int hidden_direction = this->hiddenDirection();
746
747#ifdef AMREX_USE_GPU
748 if ((cross || tensorop) && Gpu::inLaunchRegion())
749 {
// Build the per-(box,face,component) work-item list once and cache it.
750 if (! m_bc_tags[amrlev][mglev].is_defined()) {
752 tags.reserve(in.local_size()*2*AMREX_SPACEDIM*ncomp);
753 for (MFIter mfi(in); mfi.isValid(); ++mfi) {
754 const Box& vbx = mfi.validbox();
755 const auto & bdlv = bcondloc.bndryLocs(mfi);
756 const auto & bdcv = bcondloc.bndryConds(mfi);
757
758 const int local_index = mfi.LocalIndex();
759
760 for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
761 if (idim != hidden_direction) {
762 const Orientation olo(idim,Orientation::low);
763 const Orientation ohi(idim,Orientation::high);
764 for (int icomp = 0; icomp < ncomp; ++icomp) {
765 tags.emplace_back(MLMGABCTag<RT>{
766 maskvals[olo].const_array(mfi),
767 bdlv[icomp][olo],
768 amrex::adjCell(vbx,olo),
769 bdcv[icomp][olo], vbx.length(idim),
770 icomp, olo, local_index
771 });
772 tags.emplace_back(MLMGABCTag<RT>{
773 maskvals[ohi].const_array(mfi),
774 bdlv[icomp][ohi],
775 amrex::adjCell(vbx,ohi),
776 bdcv[icomp][ohi], vbx.length(idim),
777 icomp, ohi, local_index
778 });
779 }
780 }
781 }
782 }
783 m_bc_tags[amrlev][mglev].define(tags);
784 }
785
787 Array<MultiArray4<RT const>, 2*AMREX_SPACEDIM> bndry_arrays;
788 for (OrientationIter oit; oit; ++oit) {
789 const Orientation ori = oit();
790 bndry_arrays[ori] = (bndry != nullptr) ?
791 bndry->bndryValues(ori).arrays() : foo_ma;
792 }
793
// One fused launch over all tags; dispatch on the tag's coordinate direction.
794 auto inma = in.arrays();
795 ParallelFor(m_bc_tags[amrlev][mglev],
796 [=] AMREX_GPU_DEVICE (int i, int j, int k, MLMGABCTag<RT> const& tag) noexcept
797 {
798 const auto& bcval = bndry_arrays[tag.face][tag.local_index];
799 const int side = tag.face.faceDir();
800 if (tag.face.coordDir() == 0) {
801 mllinop_apply_bc_x(side, i, j, k, tag.blen, inma[tag.local_index],
802 tag.mask, tag.bctype, tag.bcloc, bcval,
803 imaxorder, dxi, flagbc, tag.comp);
804 }
805#if (AMREX_SPACEDIM > 1)
806 else
807#if (AMREX_SPACEDIM > 2)
808 if (tag.face.coordDir() == 1)
809#endif
810 {
811 mllinop_apply_bc_y(side, i, j, k, tag.blen, inma[tag.local_index],
812 tag.mask, tag.bctype, tag.bcloc, bcval,
813 imaxorder, dyi, flagbc, tag.comp);
814 }
815#if (AMREX_SPACEDIM > 2)
816 else {
817 mllinop_apply_bc_z(side, i, j, k, tag.blen, inma[tag.local_index],
818 tag.mask, tag.bctype, tag.bcloc, bcval,
819 imaxorder, dzi, flagbc, tag.comp);
820 }
821#endif
822#endif
823 });
824 } else
825#endif
// CPU path: loop boxes and apply lo/hi kernels face by face.
826 if (cross || tensorop)
827 {
828#ifdef AMREX_USE_OMP
829#pragma omp parallel if (Gpu::notInLaunchRegion())
830#endif
831 for (MFIter mfi(in, mfi_info); mfi.isValid(); ++mfi)
832 {
833 const Box& vbx = mfi.validbox();
834 const auto& iofab = in.array(mfi);
835
836 const auto & bdlv = bcondloc.bndryLocs(mfi);
837 const auto & bdcv = bcondloc.bndryConds(mfi);
838
839 for (int idim = 0; idim < AMREX_SPACEDIM; ++idim)
840 {
841 if (hidden_direction == idim) { continue; }
842 const Orientation olo(idim,Orientation::low);
843 const Orientation ohi(idim,Orientation::high);
844 const Box blo = amrex::adjCellLo(vbx, idim);
845 const Box bhi = amrex::adjCellHi(vbx, idim);
846 const int blen = vbx.length(idim);
847 const auto& mlo = maskvals[olo].array(mfi);
848 const auto& mhi = maskvals[ohi].array(mfi);
849 const auto& bvlo = (bndry != nullptr) ? bndry->bndryValues(olo).const_array(mfi) : foo;
850 const auto& bvhi = (bndry != nullptr) ? bndry->bndryValues(ohi).const_array(mfi) : foo;
851 for (int icomp = 0; icomp < ncomp; ++icomp) {
852 const BoundCond bctlo = bdcv[icomp][olo];
853 const BoundCond bcthi = bdcv[icomp][ohi];
854 const RT bcllo = bdlv[icomp][olo];
855 const RT bclhi = bdlv[icomp][ohi];
856 if (idim == 0) {
857 mllinop_apply_bc_x(0, blo, blen, iofab, mlo,
858 bctlo, bcllo, bvlo,
859 imaxorder, dxi, flagbc, icomp);
860 mllinop_apply_bc_x(1, bhi, blen, iofab, mhi,
861 bcthi, bclhi, bvhi,
862 imaxorder, dxi, flagbc, icomp);
863 } else if (idim == 1) {
864 mllinop_apply_bc_y(0, blo, blen, iofab, mlo,
865 bctlo, bcllo, bvlo,
866 imaxorder, dyi, flagbc, icomp);
867 mllinop_apply_bc_y(1, bhi, blen, iofab, mhi,
868 bcthi, bclhi, bvhi,
869 imaxorder, dyi, flagbc, icomp);
870 } else {
871 mllinop_apply_bc_z(0, blo, blen, iofab, mlo,
872 bctlo, bcllo, bvlo,
873 imaxorder, dzi, flagbc, icomp);
874 mllinop_apply_bc_z(1, bhi, blen, iofab, mhi,
875 bcthi, bclhi, bvhi,
876 imaxorder, dzi, flagbc, icomp);
877 }
878 }
879 }
880 }
881 }
882 else
883 {
// Fortran fallback for non-cross/non-tensor stencils (Real-typed only).
884#ifdef BL_NO_FORT
885 amrex::Abort("amrex_mllinop_apply_bc not available when BL_NO_FORT=TRUE");
886#else
887 if constexpr (std::is_same_v<Real,RT>) {
888#ifdef AMREX_USE_OMP
889#pragma omp parallel
890#endif
891 for (MFIter mfi(in, mfi_info); mfi.isValid(); ++mfi)
892 {
893 const Box& vbx = mfi.validbox();
894
895 const auto & bdlv = bcondloc.bndryLocs(mfi);
896 const auto & bdcv = bcondloc.bndryConds(mfi);
897
898 const RealTuple & bdl = bdlv[0];
899 const BCTuple & bdc = bdcv[0];
900
901 for (OrientationIter oitr; oitr; ++oitr)
902 {
903 const Orientation ori = oitr();
904
905 int cdr = ori;
906 RT bcl = bdl[ori];
907 int bct = bdc[ori];
908
909 const auto& fsfab = (bndry != nullptr) ? bndry->bndryValues(ori)[mfi] : foofab;
910
911 const Mask& m = maskvals[ori][mfi];
912
914 BL_TO_FORTRAN_ANYD(in[mfi]),
916 cdr, bct, bcl,
917 BL_TO_FORTRAN_ANYD(fsfab),
918 imaxorder, dxinv, flagbc, ncomp, cross);
919 }
920 }
921 } else {
922 amrex::Abort("Not supported");
923 }
924#endif
925 }
926}
927
// Build a BoxArray that tiles the coarsest-level domain with blocks of size
// grid_size^DIM: each existing box is rounded out to grid_size alignment,
// split into grid_size blocks, clipped to the domain, then duplicates are
// removed.  (Declared in the class as: BoxArray makeNGrids(int) const.)
// NOTE(review): the return-type line (original line 929) is elided here.
928template <typename MF>
930MLCellLinOpT<MF>::makeNGrids (int grid_size) const
931{
932 const Box& dombx = this->m_geom[0].back().Domain();
933
934 const BoxArray& old_ba = this->m_grids[0].back();
935 const int N = old_ba.size();
936 Vector<Box> bv;
937 bv.reserve(N);
938 for (int i = 0; i < N; ++i)
939 {
940 Box b = old_ba[i];
// coarsen+refine rounds the box out to grid_size alignment.
941 b.coarsen(grid_size);
942 b.refine(grid_size);
943 IntVect sz = b.size();
944 const IntVect nblks {AMREX_D_DECL(sz[0]/grid_size, sz[1]/grid_size, sz[2]/grid_size)};
945
// Shrink b to the first grid_size block; the loops below shift it around.
946 IntVect big = b.smallEnd() + grid_size - 1;
947 b.setBig(big);
948
949#if (AMREX_SPACEDIM == 3)
950 for (int kk = 0; kk < nblks[2]; ++kk) {
951#endif
952#if (AMREX_SPACEDIM >= 2)
953 for (int jj = 0; jj < nblks[1]; ++jj) {
954#endif
955 for (int ii = 0; ii < nblks[0]; ++ii)
956 {
957 IntVect shft{AMREX_D_DECL(ii*grid_size,jj*grid_size,kk*grid_size)};
958 Box bb = amrex::shift(b,shft);
959 bb &= dombx;
960 bv.push_back(bb);
961 }
962#if (AMREX_SPACEDIM >= 2)
963 }
964#endif
965#if (AMREX_SPACEDIM == 3)
966 }
967#endif
968 }
969
// Overlapping source boxes can generate identical blocks; deduplicate.
970 std::sort(bv.begin(), bv.end());
971 bv.erase(std::unique(bv.begin(), bv.end()), bv.end());
972
973 BoxList bl(std::move(bv));
974
975 return BoxArray{std::move(bl)};
976}
977
978template <typename MF>
979void
980MLCellLinOpT<MF>::restriction (int amrlev, int cmglev, MF& crse, MF& fine) const
981{
982 const int ncomp = this->getNComp();
983 IntVect ratio = (amrlev > 0) ? IntVect(2) : this->mg_coarsen_ratio_vec[cmglev-1];
984 amrex::average_down(fine, crse, 0, ncomp, ratio);
985}
986
// Prolongation: add the piecewise-constant coarse correction to the fine
// level (fine += crse at the coarsened index).  GPU path uses a fused
// multi-box launch; CPU path tiles with OpenMP.
// NOTE(review): original line 1012 (inside the !inNoSyncRegion branch,
// presumably a stream synchronize) is elided in this listing.
987template <typename MF>
988void
989MLCellLinOpT<MF>::interpolation (int amrlev, int fmglev, MF& fine, const MF& crse) const
990{
991 const int ncomp = this->getNComp();
992
993 Dim3 ratio3 = {2,2,2};
994 IntVect ratio = (amrlev > 0) ? IntVect(2) : this->mg_coarsen_ratio_vec[fmglev];
995 AMREX_D_TERM(ratio3.x = ratio[0];,
996 ratio3.y = ratio[1];,
997 ratio3.z = ratio[2];);
998
999#ifdef AMREX_USE_GPU
1000 if (Gpu::inLaunchRegion() && fine.isFusingCandidate()) {
1001 auto const& finema = fine.arrays();
1002 auto const& crsema = crse.const_arrays();
1003 ParallelFor(fine, IntVect(0), ncomp,
1004 [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k, int n) noexcept
1005 {
1006 int ic = amrex::coarsen(i,ratio3.x);
1007 int jc = amrex::coarsen(j,ratio3.y);
1008 int kc = amrex::coarsen(k,ratio3.z);
1009 finema[box_no](i,j,k,n) += crsema[box_no](ic,jc,kc,n);
1010 });
1011 if (!Gpu::inNoSyncRegion()) {
1013 }
1014 } else
1015#endif
1016 {
1017#ifdef AMREX_USE_OMP
1018#pragma omp parallel if (Gpu::notInLaunchRegion())
1019#endif
1020 for (MFIter mfi(fine,TilingIfNotGPU()); mfi.isValid(); ++mfi)
1021 {
1022 const Box& bx = mfi.tilebox();
1023 Array4<RT const> const& cfab = crse.const_array(mfi);
1024 Array4<RT> const& ffab = fine.array(mfi);
1025 AMREX_HOST_DEVICE_PARALLEL_FOR_4D ( bx, ncomp, i, j, k, n,
1026 {
1027 int ic = amrex::coarsen(i,ratio3.x);
1028 int jc = amrex::coarsen(j,ratio3.y);
1029 int kc = amrex::coarsen(k,ratio3.z);
1030 ffab(i,j,k,n) += cfab(ic,jc,kc,n);
1031 });
1032 }
1033 }
1034}
1035
// Interpolate (assign, not add) the coarse solution onto the fine level with
// linear cell-centered interpolation (ratio 2).  If the coarse data is not
// already on a compatible layout it is first parallel-copied onto a coarsened
// copy of the fine BoxArray.  EB and hidden-dimension cases use specialized
// kernels.
// NOTE(review): Doxygen listing — elided lines include the layout-test
// condition at original line 1049 and the kernel launch headers at
// 1091/1113/1120.
1036template <typename MF>
1037void
1038MLCellLinOpT<MF>::interpAssign (int amrlev, int fmglev, MF& fine, MF& crse) const
1039{
1040 const int ncomp = this->getNComp();
1041
1042 const Geometry& crse_geom = this->Geom(amrlev,fmglev+1);
1043 const IntVect refratio = (amrlev > 0) ? IntVect(2) : this->mg_coarsen_ratio_vec[fmglev];
1044 const IntVect ng = crse.nGrowVect();
1045
1046 MF cfine;
1047 const MF* cmf;
1048
1050 {
1051 crse.FillBoundary(crse_geom.periodicity());
1052 cmf = &crse;
1053 }
1054 else
1055 {
// Redistribute coarse data onto the coarsened fine layout.
1056 BoxArray cba = fine.boxArray();
1057 cba.coarsen(refratio);
1058 cfine.define(cba, fine.DistributionMap(), ncomp, ng);
1059 cfine.setVal(RT(0.0));
1060 cfine.ParallelCopy(crse, 0, 0, ncomp, IntVect(0), ng, crse_geom.periodicity());
1061 cmf = & cfine;
1062 }
1063
1064 bool isEB = fine.hasEBFabFactory();
1065 ignore_unused(isEB);
1066
1067#ifdef AMREX_USE_EB
1068 const auto *factory = dynamic_cast<EBFArrayBoxFactory const*>(&(fine.Factory()));
1069 const FabArray<EBCellFlagFab>* flags = (factory) ? &(factory->getMultiEBCellFlagFab()) : nullptr;
1070#endif
1071
1072 MFItInfo mfi_info;
1073 if (Gpu::notInLaunchRegion()) { mfi_info.EnableTiling().SetDynamic(true); }
1074#ifdef AMREX_USE_OMP
1075#pragma omp parallel if (Gpu::notInLaunchRegion())
1076#endif
1077 for (MFIter mfi(fine, mfi_info); mfi.isValid(); ++mfi)
1078 {
1079 const Box& bx = mfi.tilebox();
1080 const auto& ff = fine.array(mfi);
1081 const auto& cc = cmf->array(mfi);
1082#ifdef AMREX_USE_EB
1083 bool call_lincc;
1084 if (isEB)
1085 {
1086 const auto& flag = (*flags)[mfi];
// Fully regular tiles can use the ordinary linear interpolation kernel.
1087 if (flag.getType(amrex::grow(bx,1)) == FabType::regular) {
1088 call_lincc = true;
1089 } else {
1090 Array4<EBCellFlag const> const& flg = flag.const_array();
1092 {
1093 mlmg_eb_cc_interp_r<2>(tbx, ff, cc, flg, ncomp);
1094 });
1095
1096 call_lincc = false;
1097 }
1098 }
1099 else
1100 {
1101 call_lincc = true;
1102 }
1103#else
1104 const bool call_lincc = true;
1105#endif
1106 if (call_lincc)
1107 {
1108#if (AMREX_SPACEDIM == 3)
1109 if (this->hasHiddenDimension()) {
1110 Box const& bx_2d = this->compactify(bx);
1111 auto const& ff_2d = this->compactify(ff);
1112 auto const& cc_2d = this->compactify(cc);
1114 {
1115 TwoD::mlmg_lin_cc_interp_r2(tbx, ff_2d, cc_2d, ncomp);
1116 });
1117 } else
1118#endif
1119 {
1121 {
1122 mlmg_lin_cc_interp_r2(tbx, ff, cc, ncomp);
1123 });
1124 }
1125 }
1126 }
1127}
1128
template <typename MF>
void
MLCellLinOpT<MF>::interpolationAmr (int famrlev, MF& fine, const MF& crse,
                                    IntVect const& /*nghost*/) const
{
    // Prolong coarse AMR-level data onto fine AMR level `famrlev` with
    // piecewise-linear cell-centered interpolation; supports ratio 2 or 4.
    const int ncomp = this->getNComp();
    const IntVect refratioV = this->AMRRefRatioVect(famrlev-1);
    // AMRRefRatioVect sets hidden direction to 1 and active directions to the
    // scalar ratio; max() cleanly recovers the active ratio in all cases.
    const int refratio = refratioV.max();

#ifdef AMREX_USE_EB
    const auto *factory = dynamic_cast<EBFArrayBoxFactory const*>(this->Factory(famrlev));
    const FabArray<EBCellFlagFab>* flags = (factory) ? &(factory->getMultiEBCellFlagFab()) : nullptr;
#endif

    MFItInfo mfi_info;
    if (Gpu::notInLaunchRegion()) { mfi_info.EnableTiling().SetDynamic(true); }
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
    for (MFIter mfi(fine, mfi_info); mfi.isValid(); ++mfi)
    {
        const Box& bx = mfi.tilebox();
        auto const& ff = fine.array(mfi);
        auto const& cc = crse.const_array(mfi);
#ifdef AMREX_USE_EB
        bool call_lincc;
        if (factory)
        {
            const auto& flag = (*flags)[mfi];
            if (flag.getType(amrex::grow(bx,1)) == FabType::regular) {
                call_lincc = true;
            } else {
                // Cut cells present: use the EB-aware interpolation kernels.
                Array4<EBCellFlag const> const& flg = flag.const_array();
                switch(refratio) {
                case 2:
                {
                    // NOTE(review): launch-macro header line missing in this copy.
                    {
                        mlmg_eb_cc_interp_r<2>(tbx, ff, cc, flg, ncomp);
                    });
                    break;
                }
                case 4:
                {
                    // NOTE(review): launch-macro header line missing in this copy.
                    {
                        mlmg_eb_cc_interp_r<4>(tbx, ff, cc, flg, ncomp);
                    });
                    break;
                }
                default:
                    amrex::Abort("mlmg_eb_cc_interp: only refratio 2 and 4 are supported");
                }

                call_lincc = false;
            }
        }
        else
        {
            call_lincc = true;
        }
#else
        const bool call_lincc = true;
#endif
        if (call_lincc)
        {
#if (AMREX_SPACEDIM == 3)
            if (this->hasHiddenDimension()) {
                // Pseudo-2D solve: interpolate in the two active directions only.
                Box const& bx_2d = this->compactify(bx);
                auto const& ff_2d = this->compactify(ff);
                auto const& cc_2d = this->compactify(cc);
                switch(refratio) {
                case 2:
                {
                    // NOTE(review): launch-macro header line missing in this copy.
                    {
                        TwoD::mlmg_lin_cc_interp_r2(tbx, ff_2d, cc_2d, ncomp);
                    });
                    break;
                }
                case 4:
                {
                    // NOTE(review): launch-macro header line missing in this copy.
                    {
                        TwoD::mlmg_lin_cc_interp_r4(tbx, ff_2d, cc_2d, ncomp);
                    });
                    break;
                }
                default:
                    amrex::Abort("mlmg_lin_cc_interp: only refratio 2 and 4 are supported");
                }
            } else
#endif
            {
                switch(refratio) {
                case 2:
                {
                    // NOTE(review): launch-macro header line missing in this copy.
                    {
                        mlmg_lin_cc_interp_r2(tbx, ff, cc, ncomp);
                    });
                    break;
                }
                case 4:
                {
                    // NOTE(review): launch-macro header line missing in this copy.
                    {
                        mlmg_lin_cc_interp_r4(tbx, ff, cc, ncomp);
                    });
                    break;
                }
                default:
                    amrex::Abort("mlmg_lin_cc_interp: only refratio 2 and 4 are supported");
                }
            }
        }
    }
}
1249
1250template <typename MF>
1251void
1252MLCellLinOpT<MF>::averageDownSolutionRHS (int camrlev, MF& crse_sol, MF& crse_rhs,
1253 const MF& fine_sol, const MF& fine_rhs)
1254{
1255 const auto amrrr = this->AMRRefRatioVect(camrlev);
1256 const int ncomp = this->getNComp();
1257 amrex::average_down(fine_sol, crse_sol, 0, ncomp, amrrr);
1258 amrex::average_down(fine_rhs, crse_rhs, 0, ncomp, amrrr);
1259}
1260
template <typename MF>
void
MLCellLinOpT<MF>::apply (int amrlev, int mglev, MF& out, MF& in, BCMode bc_mode,
                         StateMode s_mode, const MLMGBndryT<MF>* bndry) const
{
    BL_PROFILE("MLCellLinOp::apply()");
    // Fill ghost cells of `in` according to the BC mode/state (this is why
    // `in` is non-const), then apply the operator stencil: out = L(in).
    applyBC(amrlev, mglev, in, bc_mode, s_mode, bndry);
    Fapply(amrlev, mglev, out, in);
}
1270
1271template <typename MF>
1272void
1273MLCellLinOpT<MF>::smooth (int amrlev, int mglev, MF& sol, const MF& rhs,
1274 bool skip_fillboundary, int niter) const
1275{
1276 BL_PROFILE("MLCellLinOp::smooth()");
1277 for (int i = 0; i < niter; ++i) {
1278 for (int redblack = 0; redblack < 2; ++redblack)
1279 {
1280 applyBC(amrlev, mglev, sol, BCMode::Homogeneous, StateMode::Solution,
1281 nullptr, skip_fillboundary);
1282 Fsmooth(amrlev, mglev, sol, rhs, redblack);
1283 skip_fillboundary = false;
1284 }
1285 }
1286}
1287
1288template <typename MF>
1289void
1290MLCellLinOpT<MF>::solutionResidual (int amrlev, MF& resid, MF& x, const MF& b,
1291 const MF* crse_bcdata)
1292{
1293 BL_PROFILE("MLCellLinOp::solutionResidual()");
1294 const int ncomp = this->getNComp();
1295 if (crse_bcdata != nullptr) {
1296 updateSolBC(amrlev, *crse_bcdata);
1297 }
1298 const int mglev = 0;
1299 apply(amrlev, mglev, resid, x, BCMode::Inhomogeneous, StateMode::Solution,
1300 m_bndry_sol[amrlev].get());
1301
1302 AMREX_ASSERT(resid.nComp() == b.nComp());
1303 MF::Xpay(resid, RT(-1.0), b, 0, 0, ncomp, IntVect(0));
1304}
1305
template <typename MF>
void
MLCellLinOpT<MF>::prepareForFluxes (int amrlev, const MF* crse_bcdata)
{
    // Refresh the coarse/fine boundary data (if provided) so that subsequent
    // flux computations see up-to-date Dirichlet values at the c/f interface.
    if (crse_bcdata != nullptr) {
        updateSolBC(amrlev, *crse_bcdata);
    }
}
1314
1315template <typename MF>
1316void
1317MLCellLinOpT<MF>::correctionResidual (int amrlev, int mglev, MF& resid, MF& x, const MF& b,
1318 BCMode bc_mode, const MF* crse_bcdata)
1319{
1320 BL_PROFILE("MLCellLinOp::correctionResidual()");
1321 const int ncomp = this->getNComp();
1322 if (bc_mode == BCMode::Inhomogeneous)
1323 {
1324 if (crse_bcdata)
1325 {
1326 AMREX_ASSERT(mglev == 0 && amrlev > 0);
1327 updateCorBC(amrlev, *crse_bcdata);
1328 }
1329 apply(amrlev, mglev, resid, x, BCMode::Inhomogeneous, StateMode::Correction,
1330 m_bndry_cor[amrlev].get());
1331 }
1332 else
1333 {
1334 AMREX_ASSERT(crse_bcdata == nullptr);
1335 apply(amrlev, mglev, resid, x, BCMode::Homogeneous, StateMode::Correction, nullptr);
1336 }
1337
1338 MF::Xpay(resid, Real(-1.0), b, 0, 0, ncomp, IntVect(0));
1339}
1340
template <typename MF>
void
MLCellLinOpT<MF>::reflux (int crse_amrlev, MF& res, const MF& crse_sol, const MF&,
                          MF&, MF& fine_sol, const MF&) const
{
    BL_PROFILE("MLCellLinOp::reflux()");

    // Correct the coarse residual at the coarse/fine interface by replacing
    // the coarse flux with the sum of fine fluxes via the flux register.
    auto& fluxreg = m_fluxreg[crse_amrlev];
    fluxreg.reset();

    const int ncomp = this->getNComp();

    const int fine_amrlev = crse_amrlev+1;

    Real dt = Real(1.0); // fluxes are unscaled in time for a linear solve
    const Real* crse_dx = this->m_geom[crse_amrlev][0].CellSize();
    const Real* fine_dx = this->m_geom[fine_amrlev][0].CellSize();

    const int mglev = 0;
    applyBC(fine_amrlev, mglev, fine_sol, BCMode::Inhomogeneous, StateMode::Solution,
            m_bndry_sol[fine_amrlev].get());

    MFItInfo mfi_info;
    if (Gpu::notInLaunchRegion()) { mfi_info.EnableTiling().SetDynamic(true); }

#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
    {
        // NOTE(review): the per-thread flux FAB declaration line is missing in
        // this copy (presumably Array<FAB,AMREX_SPACEDIM> flux) -- verify upstream.
        Array<FAB*,AMREX_SPACEDIM> pflux {{ AMREX_D_DECL(flux.data(), flux.data()+1, flux.data()+2) }};
        Array<FAB const*,AMREX_SPACEDIM> cpflux {{ AMREX_D_DECL(flux.data(), flux.data()+1, flux.data()+2) }};

        // Accumulate coarse-side fluxes at the c/f boundary.
        for (MFIter mfi(crse_sol, mfi_info); mfi.isValid(); ++mfi)
        {
            if (fluxreg.CrseHasWork(mfi))
            {
                const Box& tbx = mfi.tilebox();
                AMREX_D_TERM(flux[0].resize(amrex::surroundingNodes(tbx,0),ncomp,The_Async_Arena());,
                             flux[1].resize(amrex::surroundingNodes(tbx,1),ncomp,The_Async_Arena());,
                             flux[2].resize(amrex::surroundingNodes(tbx,2),ncomp,The_Async_Arena()););
                FFlux(crse_amrlev, mfi, pflux, crse_sol[mfi], Location::FaceCentroid);
                fluxreg.CrseAdd(mfi, cpflux, crse_dx, dt, RunOn::Gpu);
            }
        }

#ifdef AMREX_USE_OMP
#pragma omp barrier
#endif

        // Accumulate fine-side fluxes; face_only restricts work to c/f faces.
        for (MFIter mfi(fine_sol, mfi_info); mfi.isValid(); ++mfi)
        {
            if (fluxreg.FineHasWork(mfi))
            {
                const Box& tbx = mfi.tilebox();
                const int face_only = true;
                AMREX_D_TERM(flux[0].resize(amrex::surroundingNodes(tbx,0),ncomp,The_Async_Arena());,
                             flux[1].resize(amrex::surroundingNodes(tbx,1),ncomp,The_Async_Arena());,
                             flux[2].resize(amrex::surroundingNodes(tbx,2),ncomp,The_Async_Arena()););
                FFlux(fine_amrlev, mfi, pflux, fine_sol[mfi], Location::FaceCentroid, face_only);
                fluxreg.FineAdd(mfi, cpflux, fine_dx, dt, RunOn::Gpu);
            }
        }
    }

    fluxreg.Reflux(res);
}
1408
template <typename MF>
void
// NOTE(review): the signature line is missing in this copy.  From the body it
// is compFlux, presumably taking amrlev and the per-direction flux output
// containers (`fluxes`) in addition to the continuation below -- verify upstream.
                         MF& sol, Location loc) const
{
    BL_PROFILE("MLCellLinOp::compFlux()");

    // Compute operator fluxes from the solution on the finest MG level and
    // copy them into the caller-provided containers.
    const int mglev = 0;
    const int ncomp = this->getNComp();
    applyBC(amrlev, mglev, sol, BCMode::Inhomogeneous, StateMode::Solution,
            m_bndry_sol[amrlev].get());

    MFItInfo mfi_info;
    if (Gpu::notInLaunchRegion()) { mfi_info.EnableTiling().SetDynamic(true); }

#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
    {
        // NOTE(review): the per-thread flux FAB declaration line is missing in
        // this copy (presumably Array<FAB,AMREX_SPACEDIM> flux) -- verify upstream.
        Array<FAB*,AMREX_SPACEDIM> pflux {{ AMREX_D_DECL(flux.data(), flux.data()+1, flux.data()+2) }};
        for (MFIter mfi(sol, mfi_info); mfi.isValid(); ++mfi)
        {
            const Box& tbx = mfi.tilebox();
            AMREX_D_TERM(flux[0].resize(amrex::surroundingNodes(tbx,0),ncomp,The_Async_Arena());,
                         flux[1].resize(amrex::surroundingNodes(tbx,1),ncomp,The_Async_Arena());,
                         flux[2].resize(amrex::surroundingNodes(tbx,2),ncomp,The_Async_Arena()););
            FFlux(amrlev, mfi, pflux, sol[mfi], loc);
            // Copy the temporary face fluxes into the output containers.
            for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
                const Box& nbx = mfi.nodaltilebox(idim);
                auto const& dst = fluxes[idim]->array(mfi);
                auto const& src = pflux[idim]->const_array();
                AMREX_HOST_DEVICE_PARALLEL_FOR_4D (nbx, ncomp, i, j, k, n,
                {
                    dst(i,j,k,n) = src(i,j,k,n);
                });
            }
        }
    }
}
1449
template <typename MF>
void
// NOTE(review): the signature line is missing in this copy.  From the body it
// is compGrad, presumably taking amrlev and the per-direction gradient output
// containers (`grad`) in addition to the continuation below -- verify upstream.
                         MF& sol, Location /*loc*/) const
{
    BL_PROFILE("MLCellLinOp::compGrad()");

    if (sol.nComp() > 1) {
        amrex::Abort("MLCellLinOp::compGrad called, but only works for single-component solves");
    }

    // Fill ghost cells so the one-sided differences at box faces are valid.
    const int mglev = 0;
    applyBC(amrlev, mglev, sol, BCMode::Inhomogeneous, StateMode::Solution,
            m_bndry_sol[amrlev].get());

    const int ncomp = this->getNComp();

    AMREX_D_TERM(const RT dxi = static_cast<RT>(this->m_geom[amrlev][mglev].InvCellSize(0));,
                 const RT dyi = static_cast<RT>(this->m_geom[amrlev][mglev].InvCellSize(1));,
                 const RT dzi = static_cast<RT>(this->m_geom[amrlev][mglev].InvCellSize(2)););
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
    for (MFIter mfi(sol, TilingIfNotGPU()); mfi.isValid(); ++mfi)
    {
        AMREX_D_TERM(const Box& xbx = mfi.nodaltilebox(0);,
                     const Box& ybx = mfi.nodaltilebox(1);,
                     const Box& zbx = mfi.nodaltilebox(2););
        const auto& s = sol.array(mfi);
        AMREX_D_TERM(const auto& gx = grad[0]->array(mfi);,
                     const auto& gy = grad[1]->array(mfi);,
                     const auto& gz = grad[2]->array(mfi););

        // Face-centered two-point differences in each direction.
        AMREX_HOST_DEVICE_PARALLEL_FOR_4D ( xbx, ncomp, i, j, k, n,
        {
            gx(i,j,k,n) = dxi*(s(i,j,k,n) - s(i-1,j,k,n));
        });
#if (AMREX_SPACEDIM >= 2)
        AMREX_HOST_DEVICE_PARALLEL_FOR_4D ( ybx, ncomp, i, j, k, n,
        {
            gy(i,j,k,n) = dyi*(s(i,j,k,n) - s(i,j-1,k,n));
        });
#endif
#if (AMREX_SPACEDIM == 3)
        AMREX_HOST_DEVICE_PARALLEL_FOR_4D ( zbx, ncomp, i, j, k, n,
        {
            gz(i,j,k,n) = dzi*(s(i,j,k,n) - s(i,j,k-1,n));
        });
#endif
    }

    // Account for inhomogeneous Neumann boundary data in the gradient.
    addInhomogNeumannFlux(amrlev, grad, sol, false);
}
1503
template <typename MF>
void
MLCellLinOpT<MF>::applyMetricTerm (int amrlev, int mglev, MF& rhs) const
{
    amrex::ignore_unused(amrlev,mglev,rhs);
#if (AMREX_SPACEDIM != 3)
    // Multiply rhs by the coordinate metric factor: r^2 in 1D, r in 2D,
    // where r is measured from the problem's low x boundary.  No-op in 3D
    // or when the geometry has no metric term.
    if (!m_has_metric_term) { return; }

    const int ncomp = rhs.nComp();

    // Cell-centered data uses the cell-center radius; otherwise the edge radius.
    bool cc = rhs.ixType().cellCentered(0);

    const Geometry& geom = this->m_geom[amrlev][mglev];
    const RT dx = static_cast<RT>(geom.CellSize(0));
    const RT probxlo = static_cast<RT>(geom.ProbLo(0));

#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
    for (MFIter mfi(rhs,TilingIfNotGPU()); mfi.isValid(); ++mfi)
    {
        const Box& tbx = mfi.tilebox();
        auto const& rhsarr = rhs.array(mfi);
#if (AMREX_SPACEDIM == 1)
        if (cc) {
            AMREX_HOST_DEVICE_PARALLEL_FOR_4D ( tbx, ncomp, i, j, k, n,
            {
                RT rc = probxlo + (i+RT(0.5))*dx;
                rhsarr(i,j,k,n) *= rc*rc;
            });
        } else {
            AMREX_HOST_DEVICE_PARALLEL_FOR_4D ( tbx, ncomp, i, j, k, n,
            {
                RT re = probxlo + i*dx;
                rhsarr(i,j,k,n) *= re*re;
            });
        }
#elif (AMREX_SPACEDIM == 2)
        if (cc) {
            AMREX_HOST_DEVICE_PARALLEL_FOR_4D ( tbx, ncomp, i, j, k, n,
            {
                RT rc = probxlo + (i+RT(0.5))*dx;
                rhsarr(i,j,k,n) *= rc;
            });
        } else {
            AMREX_HOST_DEVICE_PARALLEL_FOR_4D ( tbx, ncomp, i, j, k, n,
            {
                RT re = probxlo + i*dx;
                rhsarr(i,j,k,n) *= re;
            });
        }
#endif
    }
#endif
}
1559
template <typename MF>
void
MLCellLinOpT<MF>::unapplyMetricTerm (int amrlev, int mglev, MF& rhs) const
{
    amrex::ignore_unused(amrlev,mglev,rhs);
#if (AMREX_SPACEDIM != 3)
    // Inverse of applyMetricTerm: divide rhs by r^2 (1D) or r (2D).  No-op in
    // 3D or when the geometry has no metric term.
    if (!m_has_metric_term) { return; }

    const int ncomp = rhs.nComp();

    bool cc = rhs.ixType().cellCentered(0);

    const Geometry& geom = this->m_geom[amrlev][mglev];
    const RT dx = static_cast<RT>(geom.CellSize(0));
    const RT probxlo = static_cast<RT>(geom.ProbLo(0));

#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
    for (MFIter mfi(rhs,TilingIfNotGPU()); mfi.isValid(); ++mfi)
    {
        const Box& tbx = mfi.tilebox();
        auto const& rhsarr = rhs.array(mfi);
#if (AMREX_SPACEDIM == 1)
        if (cc) {
            AMREX_HOST_DEVICE_PARALLEL_FOR_4D ( tbx, ncomp, i, j, k, n,
            {
                RT rcinv = RT(1.0)/(probxlo + (i+RT(0.5))*dx);
                rhsarr(i,j,k,n) *= rcinv*rcinv;
            });
        } else {
            AMREX_HOST_DEVICE_PARALLEL_FOR_4D ( tbx, ncomp, i, j, k, n,
            {
                // At r == 0 (the axis/origin) the inverse is taken as zero
                // rather than dividing by zero.
                RT re = probxlo + i*dx;
                RT reinv = (re==RT(0.0)) ? RT(0.0) : RT(1.)/re;
                rhsarr(i,j,k,n) *= reinv*reinv;
            });
        }
#elif (AMREX_SPACEDIM == 2)
        if (cc) {
            AMREX_HOST_DEVICE_PARALLEL_FOR_4D ( tbx, ncomp, i, j, k, n,
            {
                RT rcinv = RT(1.0)/(probxlo + (i+RT(0.5))*dx);
                rhsarr(i,j,k,n) *= rcinv;
            });
        } else {
            AMREX_HOST_DEVICE_PARALLEL_FOR_4D ( tbx, ncomp, i, j, k, n,
            {
                // See the 1D edge-centered case: zero inverse at r == 0.
                RT re = probxlo + i*dx;
                RT reinv = (re==RT(0.0)) ? RT(0.0) : RT(1.)/re;
                rhsarr(i,j,k,n) *= reinv;
            });
        }
#endif
    }
#endif
}
1617
template <typename MF>
auto
MLCellLinOpT<MF>::getSolvabilityOffset (int amrlev, int mglev, MF const& rhs) const
    -> Vector<RT>
{
    // For singular problems: return the per-component mean of rhs (volume-
    // fraction weighted with EB), which the caller subtracts so the system
    // satisfies the compatibility condition.
    computeVolInv();

    const int ncomp = this->getNComp();
    Vector<RT> offset(ncomp);

#ifdef AMREX_USE_EB
    const auto *factory = dynamic_cast<EBFArrayBoxFactory const*>(this->Factory(amrlev,mglev));
    if (factory && !factory->isAllRegular())
    {
        if constexpr (std::is_same<MF,MultiFab>()) {
            const MultiFab& vfrac = factory->getVolFrac();
            for (int c = 0; c < ncomp; ++c) {
                offset[c] = amrex::Dot(rhs, c, vfrac, 0, 1, IntVect(0), true)
                    * m_volinv[amrlev][mglev];
            }
        } else {
            amrex::Abort("TODO: MLMG with EB only works with MultiFab");
        }
    }
    else
#endif
    {
        for (int c = 0; c < ncomp; ++c) {
            offset[c] = rhs.sum(c,IntVect(0),true) * m_volinv[amrlev][mglev];
        }
    }

    // NOTE(review): the global reduction line (summing `offset` across MPI
    // ranks) is missing in this copy -- verify against upstream.

    return offset;
}
1654
1655template <typename MF>
1656void
1657MLCellLinOpT<MF>::fixSolvabilityByOffset (int /*amrlev*/, int /*mglev*/, MF& rhs,
1658 Vector<RT> const& offset) const
1659{
1660 const int ncomp = this->getNComp();
1661 for (int c = 0; c < ncomp; ++c) {
1662 rhs.plus(-offset[c], c, 1);
1663 }
1664#ifdef AMREX_USE_EB
1665 if (!rhs.isAllRegular()) {
1666 if constexpr (std::is_same<MF,MultiFab>()) {
1667 amrex::EB_set_covered(rhs, 0, ncomp, 0, 0.0_rt);
1668 } else {
1669 amrex::Abort("amrex::EB_set_covered only works with MultiFab");
1670 }
1671 }
1672#endif
1673}
1674
template <typename MF>
void
// NOTE(review): the signature line is missing in this copy (this is
// prepareForSolve, based on the BL_PROFILE tag below) -- verify upstream.
{
    BL_PROFILE("MLCellLinOp::prepareForSolve()");

    // For every level and every grid face, precompute the boundary
    // interpolation coefficients the smoother uses to fill the first ghost
    // cell (stored in m_undrrelxr).  Three code paths: GPU+EB, GPU regular,
    // and host.
    const int imaxorder = this->maxorder;
    const int ncomp = this->getNComp();
    const int hidden_direction = this->hiddenDirection();
    for (int amrlev = 0; amrlev < this->m_num_amr_levels; ++amrlev)
    {
        for (int mglev = 0; mglev < this->m_num_mg_levels[amrlev]; ++mglev)
        {
            const auto& bcondloc = *m_bcondloc[amrlev][mglev];
            const auto& maskvals = m_maskvals[amrlev][mglev];

            const RT dxi = static_cast<RT>(this->m_geom[amrlev][mglev].InvCellSize(0));
            const RT dyi = static_cast<RT>((AMREX_SPACEDIM >= 2) ? this->m_geom[amrlev][mglev].InvCellSize(1) : Real(1.0));
            const RT dzi = static_cast<RT>((AMREX_SPACEDIM == 3) ? this->m_geom[amrlev][mglev].InvCellSize(2) : Real(1.0));

            auto& undrrelxr = this->m_undrrelxr[amrlev][mglev];
            // `foo` allocates no data (SetAlloc(false)); it only drives the
            // MFIter loops below.
            MF foo(this->m_grids[amrlev][mglev], this->m_dmap[amrlev][mglev], ncomp, 0, MFInfo().SetAlloc(false));

#ifdef AMREX_USE_EB
            const auto *factory = dynamic_cast<EBFArrayBoxFactory const*>(this->m_factory[amrlev][mglev].get());
            const FabArray<EBCellFlagFab>* flags =
                (factory) ? &(factory->getMultiEBCellFlagFab()) : nullptr;
            auto area = (factory) ? factory->getAreaFrac()
                : Array<const MultiCutFab*,AMREX_SPACEDIM>{AMREX_D_DECL(nullptr,nullptr,nullptr)};
            // NOTE(review): one line appears to be missing here in this copy.
#endif

#ifdef AMREX_USE_GPU
            if (Gpu::inLaunchRegion()) {
#ifdef AMREX_USE_EB
                if (factory && !factory->isAllRegular()) {
#if defined(AMREX_USE_CUDA) && defined(_WIN32)
                    // Workaround: MSVC+CUDA cannot handle `if constexpr` here.
                    if (!std::is_same<MF,MultiFab>()) {
#else
                    if constexpr (!std::is_same<MF,MultiFab>()) {
#endif
                        amrex::Abort("MLCellLinOp with EB only works with MultiFab");
                    } else {
                        // Build one work tag per (box, direction, component)
                        // and launch a single fused kernel over all of them.
                        // NOTE(review): the tag-vector declaration line
                        // (presumably Vector<MLMGPSEBTag<RT>> tags;) is
                        // missing in this copy.
                        tags.reserve(foo.local_size()*AMREX_SPACEDIM*ncomp);

                        for (MFIter mfi(foo); mfi.isValid(); ++mfi)
                        {
                            const Box& vbx = mfi.validbox();

                            const auto & bdlv = bcondloc.bndryLocs(mfi);
                            const auto & bdcv = bcondloc.bndryConds(mfi);

                            auto fabtyp = (flags) ? (*flags)[mfi].getType(vbx) : FabType::regular;

                            for (int idim = 0; idim < AMREX_SPACEDIM; ++idim)
                            {
                                if (idim != hidden_direction && fabtyp != FabType::covered) {
                                    const Orientation olo(idim,Orientation::low);
                                    const Orientation ohi(idim,Orientation::high);
                                    // Area fractions are only needed for cut boxes.
                                    auto const& ap = (fabtyp == FabType::singlevalued)
                                        ? area[idim]->const_array(mfi) : Array4<Real const>{};
                                    for (int icomp = 0; icomp < ncomp; ++icomp) {
                                        tags.emplace_back(MLMGPSEBTag<RT>{undrrelxr[olo].array(mfi),
                                                                          undrrelxr[ohi].array(mfi),
                                                                          ap,
                                                                          maskvals[olo].const_array(mfi),
                                                                          maskvals[ohi].const_array(mfi),
                                                                          bdlv[icomp][olo], bdlv[icomp][ohi],
                                                                          amrex::adjCell(vbx,olo),
                                                                          bdcv[icomp][olo], bdcv[icomp][ohi],
                                                                          vbx.length(idim), icomp, idim});
                                    }
                                }
                            }
                        }

                        ParallelFor(tags,
                        [=] AMREX_GPU_DEVICE (int i, int j, int k, MLMGPSEBTag<RT> const& tag) noexcept
                        {
                            // EB path when an area-fraction array is attached;
                            // regular path otherwise.  Low and high faces are
                            // handled back-to-back for each tag.
                            if (tag.ap) {
                                if (tag.dir == 0)
                                {
                                    mllinop_comp_interp_coef0_x_eb
                                        (0, i             , j, k, tag.blen, tag.flo, tag.mlo, tag.ap,
                                         tag.bctlo, tag.bcllo, imaxorder, dxi, tag.comp);
                                    mllinop_comp_interp_coef0_x_eb
                                        (1, i+tag.blen+1, j, k, tag.blen, tag.fhi, tag.mhi, tag.ap,
                                         tag.bcthi, tag.bclhi, imaxorder, dxi, tag.comp);
                                }
#if (AMREX_SPACEDIM > 1)
                                else
#if (AMREX_SPACEDIM > 2)
                                if (tag.dir == 1)
#endif
                                {
                                    mllinop_comp_interp_coef0_y_eb
                                        (0, i, j             , k, tag.blen, tag.flo, tag.mlo, tag.ap,
                                         tag.bctlo, tag.bcllo, imaxorder, dyi, tag.comp);
                                    mllinop_comp_interp_coef0_y_eb
                                        (1, i, j+tag.blen+1, k, tag.blen, tag.fhi, tag.mhi, tag.ap,
                                         tag.bcthi, tag.bclhi, imaxorder, dyi, tag.comp);
                                }
#if (AMREX_SPACEDIM > 2)
                                else {
                                    mllinop_comp_interp_coef0_z_eb
                                        (0, i, j, k             , tag.blen, tag.flo, tag.mlo, tag.ap,
                                         tag.bctlo, tag.bcllo, imaxorder, dzi, tag.comp);
                                    mllinop_comp_interp_coef0_z_eb
                                        (1, i, j, k+tag.blen+1, tag.blen, tag.fhi, tag.mhi, tag.ap,
                                         tag.bcthi, tag.bclhi, imaxorder, dzi, tag.comp);
                                }
#endif
#endif
                            } else {
                                if (tag.dir == 0)
                                {
                                    mllinop_comp_interp_coef0_x
                                        (0, i             , j, k, tag.blen, tag.flo, tag.mlo,
                                         tag.bctlo, tag.bcllo, imaxorder, dxi, tag.comp);
                                    mllinop_comp_interp_coef0_x
                                        (1, i+tag.blen+1, j, k, tag.blen, tag.fhi, tag.mhi,
                                         tag.bcthi, tag.bclhi, imaxorder, dxi, tag.comp);
                                }
#if (AMREX_SPACEDIM > 1)
                                else
#if (AMREX_SPACEDIM > 2)
                                if (tag.dir == 1)
#endif
                                {
                                    mllinop_comp_interp_coef0_y
                                        (0, i, j             , k, tag.blen, tag.flo, tag.mlo,
                                         tag.bctlo, tag.bcllo, imaxorder, dyi, tag.comp);
                                    mllinop_comp_interp_coef0_y
                                        (1, i, j+tag.blen+1, k, tag.blen, tag.fhi, tag.mhi,
                                         tag.bcthi, tag.bclhi, imaxorder, dyi, tag.comp);
                                }
#if (AMREX_SPACEDIM > 2)
                                else {
                                    mllinop_comp_interp_coef0_z
                                        (0, i, j, k             , tag.blen, tag.flo, tag.mlo,
                                         tag.bctlo, tag.bcllo, imaxorder, dzi, tag.comp);
                                    mllinop_comp_interp_coef0_z
                                        (1, i, j, k+tag.blen+1, tag.blen, tag.fhi, tag.mhi,
                                         tag.bcthi, tag.bclhi, imaxorder, dzi, tag.comp);
                                }
#endif
#endif
                            }
                        });
                    }
                } else
#endif
                {
                    // GPU path, no EB: same fused-tag approach without area
                    // fractions.
                    // NOTE(review): the tag-vector declaration line (presumably
                    // Vector<MLMGPSTag<RT>> tags;) is missing in this copy.
                    tags.reserve(foo.local_size()*AMREX_SPACEDIM*ncomp);

                    for (MFIter mfi(foo); mfi.isValid(); ++mfi)
                    {
                        const Box& vbx = mfi.validbox();

                        const auto & bdlv = bcondloc.bndryLocs(mfi);
                        const auto & bdcv = bcondloc.bndryConds(mfi);

                        for (int idim = 0; idim < AMREX_SPACEDIM; ++idim)
                        {
                            if (idim != hidden_direction) {
                                const Orientation olo(idim,Orientation::low);
                                const Orientation ohi(idim,Orientation::high);
                                for (int icomp = 0; icomp < ncomp; ++icomp) {
                                    tags.emplace_back(MLMGPSTag<RT>{undrrelxr[olo].array(mfi),
                                                                    undrrelxr[ohi].array(mfi),
                                                                    maskvals[olo].const_array(mfi),
                                                                    maskvals[ohi].const_array(mfi),
                                                                    bdlv[icomp][olo], bdlv[icomp][ohi],
                                                                    amrex::adjCell(vbx,olo),
                                                                    bdcv[icomp][olo], bdcv[icomp][ohi],
                                                                    vbx.length(idim), icomp, idim});
                                }
                            }
                        }
                    }

                    ParallelFor(tags,
                    [=] AMREX_GPU_DEVICE (int i, int j, int k, MLMGPSTag<RT> const& tag) noexcept
                    {
                        if (tag.dir == 0)
                        {
                            mllinop_comp_interp_coef0_x
                                (0, i             , j, k, tag.blen, tag.flo, tag.mlo,
                                 tag.bctlo, tag.bcllo, imaxorder, dxi, tag.comp);
                            mllinop_comp_interp_coef0_x
                                (1, i+tag.blen+1, j, k, tag.blen, tag.fhi, tag.mhi,
                                 tag.bcthi, tag.bclhi, imaxorder, dxi, tag.comp);
                        }
#if (AMREX_SPACEDIM > 1)
                        else
#if (AMREX_SPACEDIM > 2)
                        if (tag.dir == 1)
#endif
                        {
                            mllinop_comp_interp_coef0_y
                                (0, i, j             , k, tag.blen, tag.flo, tag.mlo,
                                 tag.bctlo, tag.bcllo, imaxorder, dyi, tag.comp);
                            mllinop_comp_interp_coef0_y
                                (1, i, j+tag.blen+1, k, tag.blen, tag.fhi, tag.mhi,
                                 tag.bcthi, tag.bclhi, imaxorder, dyi, tag.comp);
                        }
#if (AMREX_SPACEDIM > 2)
                        else {
                            mllinop_comp_interp_coef0_z
                                (0, i, j, k             , tag.blen, tag.flo, tag.mlo,
                                 tag.bctlo, tag.bcllo, imaxorder, dzi, tag.comp);
                            mllinop_comp_interp_coef0_z
                                (1, i, j, k+tag.blen+1, tag.blen, tag.fhi, tag.mhi,
                                 tag.bcthi, tag.bclhi, imaxorder, dzi, tag.comp);
                        }
#endif
#endif
                    });
                }
            } else
#endif
            {
                // Host path: loop over boxes/faces/components directly.
#ifdef AMREX_USE_OMP
#pragma omp parallel
#endif
                for (MFIter mfi(foo, MFItInfo{}.SetDynamic(true)); mfi.isValid(); ++mfi)
                {
                    const Box& vbx = mfi.validbox();

                    const auto & bdlv = bcondloc.bndryLocs(mfi);
                    const auto & bdcv = bcondloc.bndryConds(mfi);

#ifdef AMREX_USE_EB
                    auto fabtyp = (flags) ? (*flags)[mfi].getType(vbx) : FabType::regular;
#endif
                    for (int idim = 0; idim < AMREX_SPACEDIM; ++idim)
                    {
                        if (idim == hidden_direction) { continue; }
                        const Orientation olo(idim,Orientation::low);
                        const Orientation ohi(idim,Orientation::high);
                        const Box blo = amrex::adjCellLo(vbx, idim);
                        const Box bhi = amrex::adjCellHi(vbx, idim);
                        const int blen = vbx.length(idim);
                        const auto& mlo = maskvals[olo].array(mfi);
                        const auto& mhi = maskvals[ohi].array(mfi);
                        const auto& flo = undrrelxr[olo].array(mfi);
                        const auto& fhi = undrrelxr[ohi].array(mfi);
                        for (int icomp = 0; icomp < ncomp; ++icomp) {
                            const BoundCond bctlo = bdcv[icomp][olo];
                            const BoundCond bcthi = bdcv[icomp][ohi];
                            const auto bcllo = bdlv[icomp][olo];
                            const auto bclhi = bdlv[icomp][ohi];
#ifdef AMREX_USE_EB
                            if (fabtyp == FabType::singlevalued) {
                                if constexpr (!std::is_same<MF,MultiFab>()) {
                                    amrex::Abort("MLCellLinOp with EB only works with MultiFab");
                                } else {
                                    auto const& ap = area[idim]->const_array(mfi);
                                    if (idim == 0) {
                                        mllinop_comp_interp_coef0_x_eb
                                            (0, blo, blen, flo, mlo, ap, bctlo, bcllo,
                                             imaxorder, dxi, icomp);
                                        mllinop_comp_interp_coef0_x_eb
                                            (1, bhi, blen, fhi, mhi, ap, bcthi, bclhi,
                                             imaxorder, dxi, icomp);
                                    } else if (idim == 1) {
                                        mllinop_comp_interp_coef0_y_eb
                                            (0, blo, blen, flo, mlo, ap, bctlo, bcllo,
                                             imaxorder, dyi, icomp);
                                        mllinop_comp_interp_coef0_y_eb
                                            (1, bhi, blen, fhi, mhi, ap, bcthi, bclhi,
                                             imaxorder, dyi, icomp);
                                    } else {
                                        mllinop_comp_interp_coef0_z_eb
                                            (0, blo, blen, flo, mlo, ap, bctlo, bcllo,
                                             imaxorder, dzi, icomp);
                                        mllinop_comp_interp_coef0_z_eb
                                            (1, bhi, blen, fhi, mhi, ap, bcthi, bclhi,
                                             imaxorder, dzi, icomp);
                                    }
                                }
                            } else if (fabtyp == FabType::regular)
#endif
                            {
                                if (idim == 0) {
                                    mllinop_comp_interp_coef0_x
                                        (0, blo, blen, flo, mlo, bctlo, bcllo,
                                         imaxorder, dxi, icomp);
                                    mllinop_comp_interp_coef0_x
                                        (1, bhi, blen, fhi, mhi, bcthi, bclhi,
                                         imaxorder, dxi, icomp);
                                } else if (idim == 1) {
                                    mllinop_comp_interp_coef0_y
                                        (0, blo, blen, flo, mlo, bctlo, bcllo,
                                         imaxorder, dyi, icomp);
                                    mllinop_comp_interp_coef0_y
                                        (1, bhi, blen, fhi, mhi, bcthi, bclhi,
                                         imaxorder, dyi, icomp);
                                } else {
                                    mllinop_comp_interp_coef0_z
                                        (0, blo, blen, flo, mlo, bctlo, bcllo,
                                         imaxorder, dzi, icomp);
                                    mllinop_comp_interp_coef0_z
                                        (1, bhi, blen, fhi, mhi, bcthi, bclhi,
                                         imaxorder, dzi, icomp);
                                }
                            }
                        }
                    }
                }
            }
        }
    }
}
1991
template <typename MF>
auto
MLCellLinOpT<MF>::xdoty (int /*amrlev*/, int /*mglev*/, const MF& x, const MF& y, bool local) const
    -> RT
{
    // Dot product of x and y over valid cells of all components; `local`
    // skips the cross-rank reduction.
    const int ncomp = this->getNComp();
    const IntVect nghost(0);
    RT result = amrex::Dot(x,0,y,0,ncomp,nghost,true);
    if (!local) {
        // NOTE(review): the parallel-reduction line is missing in this copy
        // (presumably a ParallelAllReduce::Sum of `result`) -- verify upstream.
    }
    return result;
}
2005
template <typename MF>
auto
// NOTE(review): the signature line is missing in this copy.  From the body it
// is a multi-level dot product taking vectors of per-AMR-level x and y --
// verify the name and signature against upstream.
{
    const int ncomp = this->getNComp();
    const IntVect nghost(0);
    RT result = 0;
    // On all but the finest level, mask out regions covered by finer levels
    // so cells are not counted twice.
    for (int ilev = 0; ilev < this->NAMRLevels()-1; ++ilev) {
        result += amrex::Dot(*m_norm_fine_mask[ilev], *x[ilev], 0, *y[ilev], 0, ncomp, nghost, true);
    }
    result += amrex::Dot(*x[this->NAMRLevels()-1], 0,
                         *y[this->NAMRLevels()-1], 0, ncomp, nghost, true);
    // NOTE(review): the global reduction line is missing in this copy.
    return result;
}
2021
template <typename MF>
auto
// NOTE(review): the signature line is missing in this copy.  From the body it
// is a multi-level 2-norm taking a vector of per-AMR-level data -- verify the
// name and signature against upstream.
{
    const int ncomp = this->getNComp();
    const IntVect nghost(0);
    RT result = 0;
    // Self dot product per level, masking cells covered by finer levels.
    for (int ilev = 0; ilev < this->NAMRLevels()-1; ++ilev) {
        result += amrex::Dot(*m_norm_fine_mask[ilev], *x[ilev], 0, ncomp, nghost, true);
    }
    result += amrex::Dot(*x[this->NAMRLevels()-1], 0, ncomp, nghost, true);
    // NOTE(review): the global reduction line is missing in this copy.
    return std::sqrt(result);
}
2036
template <typename MF>
void
// NOTE(review): the signature line is missing in this copy (computeVolInv,
// presumably a const member using mutable state) -- verify upstream.
{
    // Lazily cache 1/volume (or 1/#cells) for the levels used by the
    // solvability offset: AMR level 0 at MG top and MG bottom.
    if (!m_volinv.empty()) { return; }

    m_volinv.resize(this->m_num_amr_levels);
    for (int amrlev = 0; amrlev < this->m_num_amr_levels; ++amrlev) {
        m_volinv[amrlev].resize(this->NMGLevels(amrlev));
    }

    // We don't need to compute for every level

    auto f = [&] (int amrlev, int mglev) {
#ifdef AMREX_USE_EB
        const auto *factory = dynamic_cast<EBFArrayBoxFactory const*>(this->Factory(amrlev,mglev));
        if (factory && !factory->isAllRegular())
        {
            if constexpr (std::is_same<MF,MultiFab>()) {
                // With EB, store the (local) volume-fraction sum first; it is
                // reduced and inverted below.
                const auto& vfrac = factory->getVolFrac();
                m_volinv[amrlev][mglev] = vfrac.sum(0,true);
            } else {
                amrex::Abort("MLCellLinOp with EB only works with MultiFab");
            }
        }
        else
#endif
        {
            auto const npts = (this->m_coarse_fine_bc_type == LinOpBCType::Dirichlet)
                ? this->compactify(this->Geom(amrlev,mglev).Domain()).d_numPts()
                : this->m_grids[amrlev][mglev].d_numPts();
            AMREX_ASSERT(npts > 0.);
            m_volinv[amrlev][mglev] = RT(1.0 / npts);
        }
    };

    // amrlev = 0, mglev = 0
    f(0,0);

    int mgbottom = this->NMGLevels(0)-1;
    f(0,mgbottom);

#ifdef AMREX_USE_EB
    // Reduce the local EB volume sums across ranks and invert them.
    RT temp1, temp2;
    const auto *factory = dynamic_cast<EBFArrayBoxFactory const*>(this->Factory(0,0));
    if (factory && !factory->isAllRegular())
    {
        ParallelAllReduce::Sum<RT>({m_volinv[0][0], m_volinv[0][mgbottom]},
        // NOTE(review): the communicator-argument line of this call is missing
        // in this copy -- verify upstream.
        temp1 = RT(1.0)/m_volinv[0][0];
        temp2 = RT(1.0)/m_volinv[0][mgbottom];
    }
    else
    {
        temp1 = m_volinv[0][0];
        temp2 = m_volinv[0][mgbottom];
    }
    m_volinv[0][0] = temp1;
    m_volinv[0][mgbottom] = temp2;
#endif
}
2098
template <typename MF>
auto
MLCellLinOpT<MF>::normInf (int amrlev, MF const& mf, bool local) const -> RT
{
    // Max-norm over valid cells.  Below the finest AMR level, cells covered
    // by finer levels are excluded via m_norm_fine_mask; with EB the value is
    // weighted by the volume fraction.
    const int ncomp = this->getNComp();
    const int finest_level = this->NAMRLevels() - 1;
    RT norm = RT(0.0);
#ifdef AMREX_USE_EB
    const auto *factory = dynamic_cast<EBFArrayBoxFactory const*>(this->Factory(amrlev));
    if (factory && !factory->isAllRegular()) {
#if defined(AMREX_USE_CUDA) && defined(_WIN32)
        // Workaround: MSVC+CUDA cannot handle `if constexpr` here.
        if (!std::is_same<MF,MultiFab>()) {
#else
        if constexpr (!std::is_same<MF,MultiFab>()) {
#endif
            amrex::Abort("MLCellLinOpT with EB only works with MultiFab");
        } else {
            const MultiFab& vfrac = factory->getVolFrac();
            if (amrlev == finest_level) {
#ifdef AMREX_USE_GPU
                if (Gpu::inLaunchRegion()) {
                    auto const& ma = mf.const_arrays();
                    auto const& vfrac_ma = vfrac.const_arrays();
                    // NOTE(review): the reduction-call header line is missing
                    // in this copy (presumably a ParReduce with ReduceOpMax).
                        mf, IntVect(0), ncomp,
                    [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k, int n)
                    // NOTE(review): the trailing-return line is missing here.
                    {
                        return std::abs(ma[box_no](i,j,k,n)
                                        *vfrac_ma[box_no](i,j,k));
                    });
                } else
#endif
                {
#ifdef AMREX_USE_OMP
#pragma omp parallel reduction(max:norm)
#endif
                    for (MFIter mfi(mf,true); mfi.isValid(); ++mfi) {
                        Box const& bx = mfi.tilebox();
                        auto const& fab = mf.const_array(mfi);
                        auto const& v = vfrac.const_array(mfi);
                        AMREX_LOOP_4D(bx, ncomp, i, j, k, n,
                        {
                            norm = std::max(norm, std::abs(fab(i,j,k,n)*v(i,j,k)));
                        });
                    }
                }
            } else {
#ifdef AMREX_USE_GPU
                if (Gpu::inLaunchRegion()) {
                    auto const& ma = mf.const_arrays();
                    auto const& mask_ma = m_norm_fine_mask[amrlev]->const_arrays();
                    auto const& vfrac_ma = vfrac.const_arrays();
                    // NOTE(review): the reduction-call header line is missing
                    // in this copy (presumably a ParReduce with ReduceOpMax).
                        mf, IntVect(0), ncomp,
                    [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k, int n)
                    // NOTE(review): the trailing-return line is missing here.
                    {
                        if (mask_ma[box_no](i,j,k)) {
                            return std::abs(ma[box_no](i,j,k,n)
                                            *vfrac_ma[box_no](i,j,k));
                        } else {
                            return Real(0.0);
                        }
                    });
                } else
#endif
                {
#ifdef AMREX_USE_OMP
#pragma omp parallel reduction(max:norm)
#endif
                    for (MFIter mfi(mf,true); mfi.isValid(); ++mfi) {
                        Box const& bx = mfi.tilebox();
                        auto const& fab = mf.const_array(mfi);
                        auto const& mask = m_norm_fine_mask[amrlev]->const_array(mfi);
                        auto const& v = vfrac.const_array(mfi);
                        AMREX_LOOP_4D(bx, ncomp, i, j, k, n,
                        {
                            if (mask(i,j,k)) {
                                norm = std::max(norm, std::abs(fab(i,j,k,n)*v(i,j,k)));
                            }
                        });
                    }
                }
            }
        }
    } else
#endif
    {
        if (amrlev == finest_level) {
            norm = mf.norminf(0, ncomp, IntVect(0), true);
        } else {
            norm = mf.norminf(*m_norm_fine_mask[amrlev], 0, ncomp, IntVect(0), true);
        }
    }

    // NOTE(review): the final cross-rank Max reduction line (guarded by
    // `local`) is missing in this copy -- verify upstream.
    return norm;
}
2198
template <typename MF>
void
// NOTE(review): the signature line is missing in this copy.  From the body it
// takes the per-AMR-level solution vector `sol` -- verify upstream.
{
    // Average the solution down the AMR hierarchy, finest to coarsest, so
    // coarse levels agree with the overlying fine data.
    int ncomp = this->getNComp();
    for (int falev = this->NAMRLevels()-1; falev > 0; --falev)
    {
#ifdef AMREX_USE_EB
        // With cut cells, use volume-fraction-weighted averaging.
        if (!sol[falev].isAllRegular()) {
            if constexpr (std::is_same<MF,MultiFab>()) {
                amrex::EB_average_down(sol[falev], sol[falev-1], 0, ncomp, this->AMRRefRatioVect(falev-1));
            } else {
                amrex::Abort("EB_average_down only works with MultiFab");
            }
        } else
#endif
        {
            amrex::average_down(sol[falev], sol[falev-1], 0, ncomp, this->AMRRefRatioVect(falev-1));
        }
    }
}
2220
2221template <typename MF>
2222void
2223MLCellLinOpT<MF>::avgDownResAmr (int clev, MF& cres, MF const& fres) const
2224{
2225#ifdef AMREX_USE_EB
2226 if (!fres.isAllRegular()) {
2227 if constexpr (std::is_same<MF,MultiFab>()) {
2228 amrex::EB_average_down(fres, cres, 0, this->getNComp(),
2229 this->AMRRefRatioVect(clev));
2230 } else {
2231 amrex::Abort("EB_average_down only works with MultiFab");
2232 }
2233 } else
2234#endif
2235 {
2236 amrex::average_down(fres, cres, 0, this->getNComp(),
2237 this->AMRRefRatioVect(clev));
2238 }
2239}
2240
template <typename MF>
void
// NOTE(review): the signature line is missing in this copy (beginPrecondBC,
// based on the m_precond_mode toggle) -- verify upstream.
{
    this->m_precond_mode = true;

    // First call: build boundary objects holding homogeneous (zero) Dirichlet
    // data and swap them in for the duration of preconditioning.  Later calls
    // just swap the cached zero-BC set back in.
    if (m_bndry_sol_zero.empty()) {
        m_bndry_sol_zero.resize(m_bndry_sol.size());
        const int ncomp = this->getNComp();
        for (int amrlev = 0; amrlev < this->m_num_amr_levels; ++amrlev) {
            m_bndry_sol_zero[amrlev] = std::make_unique<MLMGBndryT<MF>>
                (this->m_grids[amrlev][0],
                 this->m_dmap[amrlev][0],
                 ncomp,
                 this->m_geom[amrlev][0]);
        }
        std::swap(m_bndry_sol, m_bndry_sol_zero);
        // Temporarily null the coarse BC data pointer so setLevelBC installs
        // homogeneous values into the swapped-in boundary set, then restore it.
        MF const* coarse_data_for_bc_save = this->m_coarse_data_for_bc;
        this->m_coarse_data_for_bc = nullptr;
        for (int amrlev = 0; amrlev < this->m_num_amr_levels; ++amrlev) {
            this->setLevelBC(amrlev, nullptr);
        }
        this->m_coarse_data_for_bc = coarse_data_for_bc_save;
    } else {
        std::swap(m_bndry_sol, m_bndry_sol_zero);
    }
}
2268
template <typename MF>
void
// NOTE(review): the signature line is missing in this copy (endPrecondBC,
// the counterpart of beginPrecondBC) -- verify upstream.
{
    // Leave preconditioner mode and swap the real boundary data back in.
    this->m_precond_mode = false;
    std::swap(m_bndry_sol, m_bndry_sol_zero);
}
2276
2277extern template class MLCellLinOpT<MultiFab>;
2278
2280
2281}
2282
2283#endif
#define BL_TO_FORTRAN_BOX(x)
Definition AMReX_ArrayLim.H:51
#define BL_TO_FORTRAN_ANYD(x)
Definition AMReX_ArrayLim.H:44
#define BL_PROFILE(a)
Definition AMReX_BLProfiler.H:551
#define AMREX_ALWAYS_ASSERT_WITH_MESSAGE(EX, MSG)
Definition AMReX_BLassert.H:49
#define BL_ASSERT(EX)
Definition AMReX_BLassert.H:39
#define AMREX_ASSERT(EX)
Definition AMReX_BLassert.H:38
#define AMREX_ALWAYS_ASSERT(EX)
Definition AMReX_BLassert.H:50
#define AMREX_FORCE_INLINE
Definition AMReX_Extension.H:119
#define AMREX_HOST_DEVICE_PARALLEL_FOR_3D(...)
Definition AMReX_GpuLaunchMacrosC.nolint.H:110
#define AMREX_GPU_LAUNCH_HOST_DEVICE_LAMBDA_RANGE(TN, TI, block)
Definition AMReX_GpuLaunchMacrosC.nolint.H:4
#define AMREX_HOST_DEVICE_PARALLEL_FOR_4D(...)
Definition AMReX_GpuLaunchMacrosC.nolint.H:111
#define AMREX_GPU_DEVICE
Definition AMReX_GpuQualifiers.H:18
#define AMREX_GPU_HOST_DEVICE
Definition AMReX_GpuQualifiers.H:20
Array4< int const > offset
Definition AMReX_HypreMLABecLap.cpp:1139
Box cbx
Definition AMReX_HypreMLABecLap.cpp:1141
Array4< Real > fine
Definition AMReX_InterpFaceRegister.cpp:90
Array4< int const > mask
Definition AMReX_InterpFaceRegister.cpp:93
Array4< Real const > crse
Definition AMReX_InterpFaceRegister.cpp:92
#define AMREX_LOOP_4D(bx, ncomp, i, j, k, n, block)
Definition AMReX_Loop.nolint.H:16
void amrex_mllinop_apply_bc(const int *lo, const int *hi, amrex_real *phi, const int *philo, const int *phihi, const int *mask, const int *mlo, const int *mhi, int cdir, int bct, amrex_real bcl, const amrex_real *bcval, const int *blo, const int *bhi, int maxorder, const amrex_real *dxinv, int inhomog, int nc, int cross)
#define AMREX_D_TERM(a, b, c)
Definition AMReX_SPACE.H:172
#define AMREX_D_DECL(a, b, c)
Definition AMReX_SPACE.H:171
Yet-another flux register for refluxing.
const FabSetT< MF > & bndryValues(Orientation face) const noexcept
Access the boundary values stored on orientation face.
Definition AMReX_BndryData.H:93
Maintain an identifier for boundary condition types.
Definition AMReX_BoundCond.H:25
A collection of Boxes stored in an Array.
Definition AMReX_BoxArray.H:568
void define(const Box &bx)
Initialize the BoxArray from a single box. It is an error if the BoxArray has already been initialize...
Definition AMReX_BoxArray.cpp:352
BoxArray & coarsen(int refinement_ratio)
Coarsen each Box in the BoxArray to the specified ratio.
Definition AMReX_BoxArray.cpp:672
Long size() const noexcept
Return the number of boxes in the BoxArray.
Definition AMReX_BoxArray.H:615
A class for managing a List of Boxes that share a common IndexType. This class implements operations ...
Definition AMReX_BoxList.H:52
__host__ __device__ BoxND & setBig(const IntVectND< dim > &bg) noexcept
Redefine the big end of the BoxND.
Definition AMReX_Box.H:487
__host__ __device__ IntVectND< dim > length() const noexcept
Return the length of the BoxND.
Definition AMReX_Box.H:154
__host__ __device__ bool contains(const IntVectND< dim > &p) const noexcept
Return true if argument is contained within BoxND.
Definition AMReX_Box.H:212
__host__ __device__ IntVectND< dim > size() const noexcept
Return the length of the BoxND.
Definition AMReX_Box.H:147
__host__ __device__ BoxND & coarsen(int ref_ratio) noexcept
Coarsen BoxND by given (positive) refinement ratio. NOTE: if type(dir) = CELL centered: lo <- lo/rati...
Definition AMReX_Box.H:722
__host__ __device__ BoxND & refine(int ref_ratio) noexcept
Refine BoxND by given (positive) refinement ratio. NOTE: if type(dir) = CELL centered: lo <- lo*ratio...
Definition AMReX_Box.H:698
__host__ static __device__ BoxND TheUnitBox() noexcept
This static member function returns a constant reference to an object of type BoxND representing the ...
Definition AMReX_Box.H:751
__host__ __device__ const IntVectND< dim > & smallEnd() const &noexcept
Return the inclusive lower bound of the box.
Definition AMReX_Box.H:111
const Real * CellSize() const noexcept
Returns the cellsize for each coordinate direction.
Definition AMReX_CoordSys.H:71
Calculates the distribution of FABs to MPI processes.
Definition AMReX_DistributionMapping.H:43
Definition AMReX_EBFabFactory.H:25
An Array of FortranArrayBox(FAB)-like Objects.
Definition AMReX_FabArray.H:349
Array4< typename FabArray< FAB >::value_type const > const_array(const MFIter &mfi) const noexcept
Definition AMReX_FabArray.H:589
MultiArray4< typename FabArray< FAB >::value_type const > const_arrays() const noexcept
Definition AMReX_FabArray.H:651
Definition AMReX_FabFactory.H:50
Rectangular problem domain geometry.
Definition AMReX_Geometry.H:74
Periodicity periodicity() const noexcept
Definition AMReX_Geometry.H:356
const Real * ProbLo() const noexcept
Returns the lo end of the problem domain in each dimension.
Definition AMReX_Geometry.H:179
GPU-compatible tuple.
Definition AMReX_Tuple.H:98
__host__ __device__ constexpr bool allGT(const IntVectND< dim > &rhs) const noexcept
Returns true if this is greater than argument for all components. NOTE: This is NOT a strict weak ord...
Definition AMReX_IntVect.H:425
__host__ __device__ IntVectND & setVal(int i, int val) noexcept
Set i'th coordinate of IntVectND to val.
Definition AMReX_IntVect.H:281
__host__ static __device__ constexpr IntVectND< dim > TheCellVector() noexcept
This static member function returns a reference to a constant IntVectND object, all of whose dim argu...
Definition AMReX_IntVect.H:718
__host__ __device__ constexpr int max() const noexcept
maximum (no absolute values) value
Definition AMReX_IntVect.H:221
An InterpBndryData object adds to a BndryData object the ability to manipulate and set the data store...
Definition AMReX_InterpBndryData.H:45
static constexpr int IBD_max_order_DEF
Definition AMReX_InterpBndryData.H:120
a one-thingy-per-box distributed object
Definition AMReX_LayoutData.H:13
Iterator for looping over tiles and boxes of amrex::FabArray based containers.
Definition AMReX_MFIter.H:85
bool isValid() const noexcept
Is the iterator valid i.e. is it associated with a FAB?
Definition AMReX_MFIter.H:169
Definition AMReX_MLCellLinOp.H:24
virtual void Fsmooth(int amrlev, int mglev, MF &sol, const MF &rhs, int redblack) const =0
Vector< RT > getSolvabilityOffset(int amrlev, int mglev, MF const &rhs) const override
get offset for fixing solvability
Definition AMReX_MLCellLinOp.H:1620
void averageDownSolutionRHS(int camrlev, MF &crse_sol, MF &crse_rhs, const MF &fine_sol, const MF &fine_rhs) override
Average-down data from fine AMR level to coarse AMR level.
Definition AMReX_MLCellLinOp.H:1252
void averageDownAndSync(Vector< MF > &sol) const override
Definition AMReX_MLCellLinOp.H:2201
BoxArray makeNGrids(int grid_size) const
Definition AMReX_MLCellLinOp.H:930
Vector< YAFluxRegisterT< MF > > m_fluxreg
Definition AMReX_MLCellLinOp.H:226
virtual void applyBC(int amrlev, int mglev, MF &in, BCMode bc_mode, StateMode s_mode, const MLMGBndryT< MF > *bndry=nullptr, bool skip_fillboundary=false) const
Definition AMReX_MLCellLinOp.H:710
void updateSolBC(int amrlev, const MF &crse_bcdata) const
Definition AMReX_MLCellLinOp.H:679
Vector< std::unique_ptr< MLMGBndryT< MF > > > m_bndry_sol
Definition AMReX_MLCellLinOp.H:166
MLCellLinOpT< MF > & operator=(const MLCellLinOpT< MF > &)=delete
void compGrad(int amrlev, const Array< MF *, 3 > &grad, MF &sol, Location loc) const override
Compute gradients of the solution.
Definition AMReX_MLCellLinOp.H:1452
void avgDownResAmr(int clev, MF &cres, MF const &fres) const override
Definition AMReX_MLCellLinOp.H:2223
void reflux(int crse_amrlev, MF &res, const MF &crse_sol, const MF &, MF &, MF &fine_sol, const MF &) const final
Reflux at AMR coarse/fine boundary.
Definition AMReX_MLCellLinOp.H:1343
RT dotProductPrecond(Vector< MF const * > const &x, Vector< MF const * > const &y) const final
Definition AMReX_MLCellLinOp.H:2008
virtual void Fapply(int amrlev, int mglev, MF &out, const MF &in) const =0
typename FabDataType< MF >::value_type RT
Definition AMReX_MLCellLinOp.H:28
void endPrecondBC() override
Definition AMReX_MLCellLinOp.H:2271
void update() override
Update for reuse.
Definition AMReX_MLCellLinOp.H:672
void correctionResidual(int amrlev, int mglev, MF &resid, MF &x, const MF &b, BCMode bc_mode, const MF *crse_bcdata=nullptr) final
Compute residual for the residual-correction form, resid = b - L(x)
Definition AMReX_MLCellLinOp.H:1317
MLCellLinOpT(const MLCellLinOpT< MF > &)=delete
typename MLLinOpT< MF >::BCMode BCMode
Definition AMReX_MLCellLinOp.H:31
void smooth(int amrlev, int mglev, MF &sol, const MF &rhs, bool skip_fillboundary, int niter) const final
Smooth.
Definition AMReX_MLCellLinOp.H:1273
RT normInf(int amrlev, MF const &mf, bool local) const override
Definition AMReX_MLCellLinOp.H:2101
virtual bool isCrossStencil() const
Definition AMReX_MLCellLinOp.H:71
void updateCorBC(int amrlev, const MF &crse_bcdata) const
Definition AMReX_MLCellLinOp.H:695
Array< RT, 2 *3 > RealTuple
Definition AMReX_MLCellLinOp.H:177
void interpolation(int amrlev, int fmglev, MF &fine, const MF &crse) const override
Add interpolated coarse MG level data to fine MG level data.
Definition AMReX_MLCellLinOp.H:989
RT xdoty(int amrlev, int mglev, const MF &x, const MF &y, bool local) const final
x dot y, used by the bottom solver
Definition AMReX_MLCellLinOp.H:1994
Array< BoundCond, 2 *3 > BCTuple
Definition AMReX_MLCellLinOp.H:178
void prepareForSolve() override
Definition AMReX_MLCellLinOp.H:1677
void unapplyMetricTerm(int amrlev, int mglev, MF &rhs) const final
unapply metric terms if there are any
Definition AMReX_MLCellLinOp.H:1562
void apply(int amrlev, int mglev, MF &out, MF &in, BCMode bc_mode, StateMode s_mode, const MLMGBndryT< MF > *bndry=nullptr) const override
Apply the linear operator, out = L(in)
Definition AMReX_MLCellLinOp.H:1263
void applyMetricTerm(int amrlev, int mglev, MF &rhs) const final
apply metric terms if there are any
Definition AMReX_MLCellLinOp.H:1506
Vector< std::unique_ptr< BndryRegisterT< MF > > > m_crse_cor_br
Definition AMReX_MLCellLinOp.H:170
RT norm2Precond(Vector< MF const * > const &x) const final
Definition AMReX_MLCellLinOp.H:2024
void beginPrecondBC() override
Definition AMReX_MLCellLinOp.H:2243
Vector< Vector< std::unique_ptr< BndryCondLoc > > > m_bcondloc
Definition AMReX_MLCellLinOp.H:216
void setGaussSeidel(bool flag) noexcept
Definition AMReX_MLCellLinOp.H:69
Vector< std::unique_ptr< MF > > m_robin_bcval
Definition AMReX_MLCellLinOp.H:158
void define(const Vector< Geometry > &a_geom, const Vector< BoxArray > &a_grids, const Vector< DistributionMapping > &a_dmap, const LPInfo &a_info=LPInfo(), const Vector< FabFactory< FAB > const * > &a_factory={})
Definition AMReX_MLCellLinOp.H:378
~MLCellLinOpT() override=default
virtual bool isTensorOp() const
Definition AMReX_MLCellLinOp.H:72
void setInterpBndryHalfWidth(int w)
Definition AMReX_MLCellLinOp.H:160
void restriction(int, int, MF &crse, MF &fine) const override
Restriction onto coarse MG level.
Definition AMReX_MLCellLinOp.H:980
Vector< Vector< BndryRegisterT< MF > > > m_undrrelxr
Definition AMReX_MLCellLinOp.H:219
MLCellLinOpT()
Definition AMReX_MLCellLinOp.H:371
MLCellLinOpT(MLCellLinOpT< MF > &&)=delete
typename FabDataType< MF >::fab_type FAB
Definition AMReX_MLCellLinOp.H:27
void interpAssign(int amrlev, int fmglev, MF &fine, MF &crse) const override
Overwrite fine MG level data with interpolated coarse data.
Definition AMReX_MLCellLinOp.H:1038
void fixSolvabilityByOffset(int amrlev, int mglev, MF &rhs, Vector< RT > const &offset) const override
fix solvability by subtracting offset from RHS
Definition AMReX_MLCellLinOp.H:1657
Vector< std::unique_ptr< MLMGBndryT< MF > > > m_bndry_cor
Definition AMReX_MLCellLinOp.H:169
void compFlux(int amrlev, const Array< MF *, 3 > &fluxes, MF &sol, Location loc) const override
Compute fluxes.
Definition AMReX_MLCellLinOp.H:1411
typename MLLinOpT< MF >::Location Location
Definition AMReX_MLCellLinOp.H:33
void prepareForFluxes(int amrlev, const MF *crse_bcdata=nullptr) override
Definition AMReX_MLCellLinOp.H:1308
void solutionResidual(int amrlev, MF &resid, MF &x, const MF &b, const MF *crse_bcdata=nullptr) override
Compute residual for solution.
Definition AMReX_MLCellLinOp.H:1290
bool m_has_metric_term
Definition AMReX_MLCellLinOp.H:164
bool m_use_gauss_seidel
Definition AMReX_MLCellLinOp.H:228
Vector< std::unique_ptr< MLMGBndryT< MF > > > m_bndry_sol_zero
Definition AMReX_MLCellLinOp.H:172
Vector< std::unique_ptr< BndryRegisterT< MF > > > m_crse_sol_br
Definition AMReX_MLCellLinOp.H:167
void interpolationAmr(int famrlev, MF &fine, const MF &crse, IntVect const &nghost) const override
Interpolation between AMR levels.
Definition AMReX_MLCellLinOp.H:1131
virtual void addInhomogNeumannFlux(int, const Array< MF *, 3 > &, MF const &, bool) const
Definition AMReX_MLCellLinOp.H:137
Vector< std::unique_ptr< iMultiFab > > m_norm_fine_mask
Definition AMReX_MLCellLinOp.H:224
typename MLLinOpT< MF >::StateMode StateMode
Definition AMReX_MLCellLinOp.H:32
void setLevelBC(int amrlev, const MF *levelbcdata, const MF *robinbc_a=nullptr, const MF *robinbc_b=nullptr, const MF *robinbc_f=nullptr) final
Set boundary conditions for given level. For cell-centered solves only.
Definition AMReX_MLCellLinOp.H:539
bool needsUpdate() const override
Does it need update if it's reused?
Definition AMReX_MLCellLinOp.H:64
Vector< Vector< Array< MultiMask, 2 *3 > > > m_maskvals
Definition AMReX_MLCellLinOp.H:222
virtual void FFlux(int amrlev, const MFIter &mfi, const Array< FAB *, 3 > &flux, const FAB &sol, Location loc, int face_only=0) const =0
Definition AMReX_MLLinOp.H:102
virtual bool needsUpdate() const
Does it need update if it's reused?
Definition AMReX_MLLinOp.H:271
virtual void update()
Update for reuse.
Definition AMReX_MLLinOp.H:273
void define(const Vector< Geometry > &a_geom, const Vector< BoxArray > &a_grids, const Vector< DistributionMapping > &a_dmap, const LPInfo &a_info, const Vector< FabFactory< FAB > const * > &a_factory, bool eb_limit_coarsening=true)
Definition AMReX_MLLinOp.H:785
Definition AMReX_MLMGBndry.H:12
static void setBoxBC(RealTuple &bloc, BCTuple &bctag, const Box &bx, const Box &domain, const Array< LinOpBCType, 3 > &lo, const Array< LinOpBCType, 3 > &hi, const Real *dx, IntVect const &ratio, const RealVect &interior_bloc, const Array< Real, 3 > &domain_bloc_lo, const Array< Real, 3 > &domain_bloc_hi, const GpuArray< int, 3 > &is_periodic, LinOpBCType a_crse_fine_bc_type)
Definition AMReX_MLMGBndry.H:107
Definition AMReX_Mask.H:33
A collection (stored as an array) of FArrayBox objects.
Definition AMReX_MultiFab.H:40
An Iterator over the Orientation of Faces of a Box.
Definition AMReX_Orientation.H:135
Encapsulation of the Orientation of the Faces of a Box.
Definition AMReX_Orientation.H:29
@ low
Definition AMReX_Orientation.H:34
@ high
Definition AMReX_Orientation.H:34
Dynamically allocated vector for trivially copyable data.
Definition AMReX_PODVector.H:308
This class is a thin wrapper around std::vector. Unlike vector, Vector::operator[] provides bound che...
Definition AMReX_Vector.H:28
amrex_real Real
Floating Point Type for Fields.
Definition AMReX_REAL.H:79
__host__ __device__ BoxND< dim > coarsen(const BoxND< dim > &b, int ref_ratio) noexcept
Coarsen BoxND by given (positive) coarsening ratio.
Definition AMReX_Box.H:1409
__host__ __device__ BoxND< dim > grow(const BoxND< dim > &b, int i) noexcept
Grow BoxND in all directions by given amount.
Definition AMReX_Box.H:1280
std::array< T, N > Array
Definition AMReX_Array.H:26
Arena * The_Async_Arena()
Definition AMReX_Arena.cpp:815
void Sum(T &v, MPI_Comm comm)
Definition AMReX_ParallelReduce.H:221
void Max(KeyValuePair< K, V > &vi, MPI_Comm comm)
Definition AMReX_ParallelReduce.H:133
void copyAsync(HostToDevice, InIter begin, InIter end, OutIter result) noexcept
A host-to-device copy routine. Note this is just a wrapper around memcpy, so it assumes contiguous st...
Definition AMReX_GpuContainers.H:228
static constexpr HostToDevice hostToDevice
Definition AMReX_GpuContainers.H:105
void streamSynchronize() noexcept
Definition AMReX_GpuDevice.H:310
bool inLaunchRegion() noexcept
Definition AMReX_GpuControl.H:92
bool notInLaunchRegion() noexcept
Definition AMReX_GpuControl.H:93
bool inNoSyncRegion() noexcept
Definition AMReX_GpuControl.H:152
MPI_Comm CommunicatorSub() noexcept
sub-communicator for current frame
Definition AMReX_ParallelContext.H:70
Definition AMReX_Amr.cpp:49
__host__ __device__ BoxND< dim > adjCellHi(const BoxND< dim > &b, int dir, int len=1) noexcept
Similar to adjCellLo but builds an adjacent BoxND on the high end.
Definition AMReX_Box.H:1735
__host__ __device__ void ignore_unused(const Ts &...)
This shuts up the compiler about unused variables.
Definition AMReX.H:139
std::enable_if_t< std::is_integral_v< T > > ParallelFor(TypeList< CTOs... > ctos, std::array< int, sizeof...(CTOs)> const &runtime_options, T N, F &&f)
Definition AMReX_CTOParallelForImpl.H:193
__host__ __device__ T norm(const GpuComplex< T > &a_z) noexcept
Return the norm (magnitude squared) of a complex number.
Definition AMReX_GpuComplex.H:349
void average_down(const MultiFab &S_fine, MultiFab &S_crse, const Geometry &fgeom, const Geometry &cgeom, int scomp, int ncomp, int rr)
Definition AMReX_MultiFabUtil.cpp:336
__host__ __device__ BoxND< dim > adjCellLo(const BoxND< dim > &b, int dir, int len=1) noexcept
Return the cell centered BoxND of length len adjacent to b on the low end along the coordinate direct...
Definition AMReX_Box.H:1714
__host__ __device__ BoxND< dim > surroundingNodes(const BoxND< dim > &b, int dir) noexcept
Return a BoxND with NODE based coordinates in direction dir that encloses BoxND b....
Definition AMReX_Box.H:1522
ReduceData< Ts... >::Type ParReduce(TypeList< Ops... > operation_list, TypeList< Ts... > type_list, FabArray< FAB > const &fa, IntVect const &nghost, F &&f)
Parallel reduce for MultiFab/FabArray. The reduce result is local and it's the user's responsibility ...
Definition AMReX_ParReduce.H:48
iMultiFab makeFineMask(const BoxArray &cba, const DistributionMapping &cdm, const BoxArray &fba, const IntVect &ratio, int crse_value, int fine_value, MFInfo const &info)
Definition AMReX_MultiFabUtil.cpp:629
void EB_set_covered(MultiFab &mf, Real val)
Definition AMReX_EBMultiFabUtil.cpp:21
constexpr bool IsMultiFabLike_v
Definition AMReX_TypeTraits.H:49
BoxND< 3 > Box
Box is an alias for amrex::BoxND instantiated with AMREX_SPACEDIM.
Definition AMReX_BaseFwd.H:30
__host__ __device__ BoxND< dim > adjCell(const BoxND< dim > &b, Orientation face, int len=1) noexcept
Similar to adjCellLo and adjCellHi; operates on given face.
Definition AMReX_Box.H:1757
bool isMFIterSafe(const FabArrayBase &x, const FabArrayBase &y)
Definition AMReX_MFIter.H:249
__host__ __device__ BoxND< dim > shift(const BoxND< dim > &b, int dir, int nzones) noexcept
Return a BoxND with indices shifted by nzones in dir direction.
Definition AMReX_Box.H:1495
LinOpBCType
Definition AMReX_LO_BCTYPES.H:27
void EB_average_down(const MultiFab &S_fine, MultiFab &S_crse, const MultiFab &vol_fine, const MultiFab &vfrac_fine, int scomp, int ncomp, const IntVect &ratio)
Definition AMReX_EBMultiFabUtil.cpp:336
IntVectND< 3 > IntVect
IntVect is an alias for amrex::IntVectND instantiated with AMREX_SPACEDIM.
Definition AMReX_BaseFwd.H:33
RealVectND< 3 > RealVect
Definition AMReX_ParmParse.H:35
bool TilingIfNotGPU() noexcept
Definition AMReX_MFIter.H:12
void Abort(const std::string &msg)
Print out message to cerr and exit via abort().
Definition AMReX.cpp:240
FAB::value_type Dot(FabArray< FAB > const &x, int xcomp, FabArray< FAB > const &y, int ycomp, int ncomp, IntVect const &nghost, bool local=false)
Compute dot products of two FabArrays.
Definition AMReX_FabArrayUtility.H:1673
__host__ __device__ constexpr int get(IntVectND< dim > const &iv) noexcept
Get I'th element of IntVectND<dim>
Definition AMReX_IntVect.H:1230
A multidimensional array accessor.
Definition AMReX_Array4.H:283
Definition AMReX_Dim3.H:12
int x
Definition AMReX_Dim3.H:12
int z
Definition AMReX_Dim3.H:12
int y
Definition AMReX_Dim3.H:12
Definition AMReX_FabDataType.H:9
Fixed-size array that can be used on GPU.
Definition AMReX_Array.H:43
Definition AMReX_MLLinOp.H:36
StateMode
Definition AMReX_MLLinOp.H:89
BCMode
Definition AMReX_MLLinOp.H:88
Location
Definition AMReX_MLLinOp.H:90
FabArray memory allocation information.
Definition AMReX_FabArray.H:66
Definition AMReX_MFIter.H:20
MFItInfo & SetDynamic(bool f) noexcept
Definition AMReX_MFIter.H:40
MFItInfo & EnableTiling(const IntVect &ts=FabArrayBase::mfiter_tile_size) noexcept
Definition AMReX_MFIter.H:31
Definition AMReX_MLCellLinOp.H:19
Definition AMReX_FabArray.H:153
Struct for holding types.
Definition AMReX_TypeList.H:13