AMReX_MLCellLinOp.H
#ifndef AMREX_ML_CELL_LINOP_H_
#define AMREX_ML_CELL_LINOP_H_
#include <AMReX_Config.H>

#include <AMReX_MLLinOp.H>
#include <AMReX_iMultiFab.H>
#include <AMReX_MultiFabUtil.H>
#include <AMReX_YAFluxRegister.H>
#include <AMReX_MLLinOp_K.H>
#include <AMReX_MLMG_K.H>

#ifndef BL_NO_FORT
#include <AMReX_MLLinOp_F.H>
#endif

namespace amrex {

template <typename MF>
class MLCellLinOpT // NOLINT(cppcoreguidelines-virtual-class-destructor)
    : public MLLinOpT<MF>
{
public:

    using FAB = typename MLLinOpT<MF>::FAB;
    using RT  = typename MLLinOpT<MF>::RT;

    using BCType = typename MLLinOpT<MF>::BCType;
    using BCMode = typename MLLinOpT<MF>::BCMode;
    using StateMode = typename MLLinOpT<MF>::StateMode;
    using Location = typename MLLinOpT<MF>::Location;

    MLCellLinOpT ();
    ~MLCellLinOpT () override = default;

    MLCellLinOpT (const MLCellLinOpT<MF>&) = delete;
    MLCellLinOpT (MLCellLinOpT<MF>&&) = delete;
    MLCellLinOpT<MF>& operator= (const MLCellLinOpT<MF>&) = delete;
    MLCellLinOpT<MF>& operator= (MLCellLinOpT<MF>&&) = delete;

    void define (const Vector<Geometry>& a_geom,
                 const Vector<BoxArray>& a_grids,
                 const Vector<DistributionMapping>& a_dmap,
                 const LPInfo& a_info = LPInfo(),
                 const Vector<FabFactory<FAB> const*>& a_factory = {});

    void setLevelBC (int amrlev, const MF* levelbcdata,
                     const MF* robinbc_a = nullptr,
                     const MF* robinbc_b = nullptr,
                     const MF* robinbc_f = nullptr) final;

    template <typename AMF, std::enable_if_t<!std::is_same_v<MF,AMF> &&
                                             IsMultiFabLike_v<AMF>, int> = 0>
    void setLevelBC (int amrlev, const AMF* levelbcdata,
                     const AMF* robinbc_a = nullptr,
                     const AMF* robinbc_b = nullptr,
                     const AMF* robinbc_f = nullptr)
    {
        this->MLLinOpT<MF>::template setLevelBC<AMF>(amrlev, levelbcdata, robinbc_a, robinbc_b, robinbc_f);
    }

    bool needsUpdate () const override {
        return MLLinOpT<MF>::needsUpdate();
    }
    void update () override;

    void setGaussSeidel (bool flag) noexcept { m_use_gauss_seidel = flag; }

    virtual bool isCrossStencil () const { return true; }
    virtual bool isTensorOp () const { return false; }

    void updateSolBC (int amrlev, const MF& crse_bcdata) const;
    void updateCorBC (int amrlev, const MF& crse_bcdata) const;

    virtual void applyBC (int amrlev, int mglev, MF& in, BCMode bc_mode, StateMode s_mode,
                          const MLMGBndryT<MF>* bndry=nullptr, bool skip_fillboundary=false) const;

    BoxArray makeNGrids (int grid_size) const;

    void restriction (int, int, MF& crse, MF& fine) const override;

    void interpolation (int amrlev, int fmglev, MF& fine, const MF& crse) const override;

    void interpAssign (int amrlev, int fmglev, MF& fine, MF& crse) const override;

    void interpolationAmr (int famrlev, MF& fine, const MF& crse,
                           IntVect const& nghost) const override;

    void averageDownSolutionRHS (int camrlev, MF& crse_sol, MF& crse_rhs,
                                 const MF& fine_sol, const MF& fine_rhs) override;

    void apply (int amrlev, int mglev, MF& out, MF& in, BCMode bc_mode,
                StateMode s_mode, const MLMGBndryT<MF>* bndry=nullptr) const override;
    void smooth (int amrlev, int mglev, MF& sol, const MF& rhs,
                 bool skip_fillboundary, int niter) const final;

    void solutionResidual (int amrlev, MF& resid, MF& x, const MF& b,
                           const MF* crse_bcdata=nullptr) override;

    void prepareForFluxes (int amrlev, const MF* crse_bcdata = nullptr) override;

    void correctionResidual (int amrlev, int mglev, MF& resid, MF& x, const MF& b,
                             BCMode bc_mode, const MF* crse_bcdata=nullptr) final;

    // The assumption is that crse_sol's boundary has been filled, but not fine_sol's.
    void reflux (int crse_amrlev,
                 MF& res, const MF& crse_sol, const MF&,
                 MF&, MF& fine_sol, const MF&) const final;
    void compFlux (int amrlev, const Array<MF*,AMREX_SPACEDIM>& fluxes,
                   MF& sol, Location loc) const override;
    void compGrad (int amrlev, const Array<MF*,AMREX_SPACEDIM>& grad,
                   MF& sol, Location loc) const override;

    void applyMetricTerm (int amrlev, int mglev, MF& rhs) const final;
    void unapplyMetricTerm (int amrlev, int mglev, MF& rhs) const final;
    Vector<RT> getSolvabilityOffset (int amrlev, int mglev,
                                     MF const& rhs) const override;
    void fixSolvabilityByOffset (int amrlev, int mglev, MF& rhs,
                                 Vector<RT> const& offset) const override;

    void prepareForSolve () override;

    RT xdoty (int amrlev, int mglev, const MF& x, const MF& y, bool local) const final;

    RT dotProductPrecond (Vector<MF const*> const& x, Vector<MF const*> const& y) const final;

    RT norm2Precond (Vector<MF const*> const& x) const final;

    virtual void Fapply (int amrlev, int mglev, MF& out, const MF& in) const = 0;
    virtual void Fsmooth (int amrlev, int mglev, MF& sol, const MF& rhs, int redblack) const = 0;
    virtual void FFlux (int amrlev, const MFIter& mfi,
                        const Array<FAB*,AMREX_SPACEDIM>& flux,
                        const FAB& sol, Location loc, int face_only=0) const = 0;

    virtual void addInhomogNeumannFlux (int /*amrlev*/,
                                        const Array<MF*,AMREX_SPACEDIM>& /*grad*/,
                                        MF const& /*sol*/,
                                        bool /*mult_bcoef*/) const {}

    RT normInf (int amrlev, MF const& mf, bool local) const override;

    void averageDownAndSync (Vector<MF>& sol) const override;

    void avgDownResAmr (int clev, MF& cres, MF const& fres) const override;

    void beginPrecondBC () override;
    void endPrecondBC () override;

    struct BCTL {
        BoundCond type;
        RT location;
    };

    using RealTuple = Array<RT,2*AMREX_SPACEDIM>;

    using BCTuple = Array<BoundCond,2*AMREX_SPACEDIM>;

protected:

    bool m_has_metric_term = false;

    Vector<std::unique_ptr<MLMGBndryT<MF> > > m_bndry_sol;
    Vector<std::unique_ptr<BndryRegisterT<MF> > > m_crse_sol_br;

    Vector<std::unique_ptr<MLMGBndryT<MF> > > m_bndry_cor;
    Vector<std::unique_ptr<BndryRegisterT<MF> > > m_crse_cor_br;

    Vector<std::unique_ptr<MF> > m_robin_bcval;

    // In case of agglomeration, coarse MG grids on AMR level 0 are
    // not simply coarsened from fine MG grids, so we need to build
    // bcond and bcloc for each MG level.
    class BndryCondLoc
    {
    public:
        BndryCondLoc (const BoxArray& ba, const DistributionMapping& dm, int ncomp);

        void setLOBndryConds (const Geometry& geom, const Real* dx,
                              const Vector<Array<BCType,AMREX_SPACEDIM> >& lobc,
                              const Vector<Array<BCType,AMREX_SPACEDIM> >& hibc,
                              IntVect const& ratio, const RealVect& interior_bloc,
                              const Array<Real,AMREX_SPACEDIM>& domain_bloc_lo,
                              const Array<Real,AMREX_SPACEDIM>& domain_bloc_hi,
                              LinOpBCType crse_fine_bc_type);

        const Vector<BCTuple>& bndryConds (const MFIter& mfi) const noexcept {
            return bcond[mfi];
        }
        const Vector<RealTuple>& bndryLocs (const MFIter& mfi) const noexcept {
            return bcloc[mfi];
        }
        const BCTuple& bndryConds (const MFIter& mfi, int icomp) const noexcept {
            return bcond[mfi][icomp];
        }
        const RealTuple& bndryLocs (const MFIter& mfi, int icomp) const noexcept {
            return bcloc[mfi][icomp];
        }
        GpuArray<BCTL,2*AMREX_SPACEDIM> const* getBCTLPtr (const MFIter& mfi) const noexcept {
            return bctl[mfi];
        }
    private:
        LayoutData<Vector<BCTuple> > bcond;
        LayoutData<Vector<RealTuple> > bcloc;
        LayoutData<GpuArray<BCTL,2*AMREX_SPACEDIM>*> bctl;
        Gpu::DeviceVector<GpuArray<BCTL,2*AMREX_SPACEDIM> > bctl_dv;
        int m_ncomp;
    };

    Vector<Vector<std::unique_ptr<BndryCondLoc> > > m_bcondloc;

    // used to save interpolation coefficients of the first interior cells
    mutable Vector<Vector<BndryRegisterT<MF> > > m_undrrelxr;

    // boundary cell flags for covered, not_covered, outside_domain
    Vector<Vector<Array<MultiMask,2*AMREX_SPACEDIM> > > m_maskvals;

    Vector<std::unique_ptr<iMultiFab> > m_norm_fine_mask;

    mutable Vector<YAFluxRegisterT<MF> > m_fluxreg;

    bool m_use_gauss_seidel = true; // use red-black Gauss-Seidel by default

private:

    void defineAuxData ();
    void defineBC ();

    void computeVolInv () const;
    mutable Vector<Vector<RT> > m_volinv; // used by solvability fix

    int m_interpbndry_halfwidth = 2;
};

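
// Example usage (illustrative sketch, not part of this header): concrete
// cell-centered operators such as MLABecLaplacian and MLPoisson derive from
// MLCellLinOpT and are normally driven through MLMG rather than called
// directly. The snippet below follows the AMReX MLMG tutorials and assumes
// <AMReX_MLABecLaplacian.H> and <AMReX_MLMG.H> are included and that
// geom/grids/dmap/acoef/bcoef/solution/rhs are set up by the caller.
#if 0
    MLABecLaplacian mlabec({geom}, {grids}, {dmap}, LPInfo());
    mlabec.setDomainBC({AMREX_D_DECL(LinOpBCType::Dirichlet,
                                     LinOpBCType::Dirichlet,
                                     LinOpBCType::Dirichlet)},
                       {AMREX_D_DECL(LinOpBCType::Neumann,
                                     LinOpBCType::Neumann,
                                     LinOpBCType::Neumann)});
    mlabec.setLevelBC(0, &solution); // ghost cells of solution hold the Dirichlet values
    mlabec.setScalars(ascalar, bscalar);
    mlabec.setACoeffs(0, acoef);
    mlabec.setBCoeffs(0, amrex::GetArrOfConstPtrs(bcoef));
    MLMG mlmg(mlabec);
    mlmg.solve({&solution}, {&rhs}, 1.e-10, 0.0);
#endif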

template <typename T>
struct MLMGABCTag {
    Array4<T> fab;
    Array4<T const> bcval_lo;
    Array4<T const> bcval_hi;
    Array4<int const> mask_lo;
    Array4<int const> mask_hi;
    T bcloc_lo;
    T bcloc_hi;
    Box bx;
    BoundCond bctype_lo;
    BoundCond bctype_hi;
    int blen;
    int comp;
    int dir;

    AMREX_GPU_HOST_DEVICE
    Box const& box() const noexcept { return bx; }
};

template <typename T>
struct MLMGPSTag {
    Array4<T> flo;
    Array4<T> fhi;
    Array4<int const> mlo;
    Array4<int const> mhi;
    T bcllo;
    T bclhi;
    Box bx;
    BoundCond bctlo;
    BoundCond bcthi;
    int blen;
    int comp;
    int dir;

    AMREX_GPU_HOST_DEVICE
    Box const& box() const noexcept { return bx; }
};

#ifdef AMREX_USE_EB
template <typename T>
struct MLMGPSEBTag {
    Array4<T> flo;
    Array4<T> fhi;
    Array4<T const> ap;
    Array4<int const> mlo;
    Array4<int const> mhi;
    T bcllo;
    T bclhi;
    Box bx;
    BoundCond bctlo;
    BoundCond bcthi;
    int blen;
    int comp;
    int dir;

    AMREX_GPU_HOST_DEVICE
    Box const& box() const noexcept { return bx; }
};
#endif
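
// The tag structs above exist to fuse many small boundary kernels into a
// single GPU launch: each tag carries its own Box (returned by box()) plus
// the per-box/per-component data one kernel instance needs, and the
// ParallelFor overload taking a Vector of tags sweeps them all at once.
// A minimal sketch of the same pattern with a made-up tag type:
#if 0
    struct MyTag {
        Array4<Real> fab;
        Box bx;
        AMREX_GPU_HOST_DEVICE Box const& box () const noexcept { return bx; }
    };

    Vector<MyTag> tags;
    for (MFIter mfi(mf); mfi.isValid(); ++mfi) {
        tags.push_back(MyTag{mf.array(mfi), mfi.validbox()});
    }
    ParallelFor(tags, // one fused launch covers every tagged box
    [=] AMREX_GPU_DEVICE (int i, int j, int k, MyTag const& t) noexcept
    {
        t.fab(i,j,k) = Real(0.0);
    });
#endif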

template <typename MF>
MLCellLinOpT<MF>::BndryCondLoc::BndryCondLoc (const BoxArray& ba,
                                              const DistributionMapping& dm,
                                              int ncomp)
    : bcond(ba, dm),
      bcloc(ba, dm),
      bctl(ba, dm),
      bctl_dv(bctl.local_size()*ncomp),
      m_ncomp(ncomp)
{
    auto* dp = bctl_dv.data();
    for (MFIter mfi(bcloc); mfi.isValid(); ++mfi) {
        bcond[mfi].resize(ncomp);
        bcloc[mfi].resize(ncomp);
        bctl[mfi] = dp;
        dp += ncomp;
    }
}

template <typename MF>
void
MLCellLinOpT<MF>::BndryCondLoc::
setLOBndryConds (const Geometry& geom, const Real* dx,
                 const Vector<Array<BCType,AMREX_SPACEDIM> >& lobc,
                 const Vector<Array<BCType,AMREX_SPACEDIM> >& hibc,
                 IntVect const& ratio, const RealVect& interior_bloc,
                 const Array<Real,AMREX_SPACEDIM>& domain_bloc_lo,
                 const Array<Real,AMREX_SPACEDIM>& domain_bloc_hi,
                 LinOpBCType crse_fine_bc_type)
{
    const Box& domain = geom.Domain();

#ifdef AMREX_USE_OMP
#pragma omp parallel
#endif
    for (MFIter mfi(bcloc); mfi.isValid(); ++mfi)
    {
        const Box& bx = mfi.validbox();
        for (int icomp = 0; icomp < m_ncomp; ++icomp) {
            RealTuple & bloc  = bcloc[mfi][icomp];
            BCTuple   & bctag = bcond[mfi][icomp];
            MLMGBndryT<MF>::setBoxBC(bloc, bctag, bx, domain,
                                     lobc[icomp], hibc[icomp],
                                     dx, ratio, interior_bloc,
                                     domain_bloc_lo, domain_bloc_hi,
                                     geom.isPeriodicArray(),
                                     crse_fine_bc_type);
        }
    }

    Gpu::PinnedVector<GpuArray<BCTL,2*AMREX_SPACEDIM> > hv;
    hv.reserve(bctl_dv.size());
    for (MFIter mfi(bctl); mfi.isValid(); ++mfi)
    {
        for (int icomp = 0; icomp < m_ncomp; ++icomp) {
            GpuArray<BCTL,2*AMREX_SPACEDIM> tmp;
            for (int m = 0; m < 2*AMREX_SPACEDIM; ++m) {
                tmp[m].type = bcond[mfi][icomp][m];
                tmp[m].location = bcloc[mfi][icomp][m];
            }
            hv.push_back(std::move(tmp));
        }
    }
    Gpu::copyAsync(Gpu::hostToDevice, hv.begin(), hv.end(), bctl_dv.begin());
    Gpu::streamSynchronize();
}

template <typename MF>
MLCellLinOpT<MF>::MLCellLinOpT ()
{
    this->m_ixtype = IntVect::TheCellVector();
}

template <typename MF>
void
MLCellLinOpT<MF>::define (const Vector<Geometry>& a_geom,
                          const Vector<BoxArray>& a_grids,
                          const Vector<DistributionMapping>& a_dmap,
                          const LPInfo& a_info,
                          const Vector<FabFactory<FAB> const*>& a_factory)
{
    MLLinOpT<MF>::define(a_geom, a_grids, a_dmap, a_info, a_factory);
    defineAuxData();
    defineBC();
}

template <typename MF>
void
MLCellLinOpT<MF>::defineAuxData ()
{
    BL_PROFILE("MLCellLinOp::defineAuxData()");

    m_undrrelxr.resize(this->m_num_amr_levels);
    m_maskvals.resize(this->m_num_amr_levels);
    m_fluxreg.resize(this->m_num_amr_levels-1);
    m_norm_fine_mask.resize(this->m_num_amr_levels-1);

    const int ncomp = this->getNComp();

    for (int amrlev = 0; amrlev < this->m_num_amr_levels; ++amrlev)
    {
        m_undrrelxr[amrlev].resize(this->m_num_mg_levels[amrlev]);
        for (int mglev = 0; mglev < this->m_num_mg_levels[amrlev]; ++mglev)
        {
            m_undrrelxr[amrlev][mglev].define(this->m_grids[amrlev][mglev],
                                              this->m_dmap[amrlev][mglev],
                                              1, 0, 0, ncomp);
        }
    }

    for (int amrlev = 0; amrlev < this->m_num_amr_levels; ++amrlev)
    {
        m_maskvals[amrlev].resize(this->m_num_mg_levels[amrlev]);
        for (int mglev = 0; mglev < this->m_num_mg_levels[amrlev]; ++mglev)
        {
            for (OrientationIter oitr; oitr; ++oitr)
            {
                const Orientation face = oitr();
                const int ngrow = 1;
                const int extent = this->isCrossStencil() ? 0 : 1; // extend to corners
                m_maskvals[amrlev][mglev][face].define(this->m_grids[amrlev][mglev],
                                                       this->m_dmap[amrlev][mglev],
                                                       this->m_geom[amrlev][mglev],
                                                       face, 0, ngrow, extent, 1, true);
            }
        }
    }

    for (int amrlev = 0; amrlev < this->m_num_amr_levels-1; ++amrlev)
    {
        const IntVect ratio{this->m_amr_ref_ratio[amrlev]};
        m_fluxreg[amrlev].define(this->m_grids[amrlev+1][0],
                                 this->m_grids[amrlev][0],
                                 this->m_dmap[amrlev+1][0],
                                 this->m_dmap[amrlev][0],
                                 this->m_geom[amrlev+1][0],
                                 this->m_geom[amrlev][0],
                                 ratio, amrlev+1, ncomp);
        m_fluxreg[amrlev].setDeterministic(this->info.deterministic);
        m_norm_fine_mask[amrlev] = std::make_unique<iMultiFab>
            (makeFineMask(this->m_grids[amrlev][0], this->m_dmap[amrlev][0],
                          this->m_grids[amrlev+1][0],
                          ratio, 1, 0));
    }

#if (AMREX_SPACEDIM != 3)
    m_has_metric_term = !this->m_geom[0][0].IsCartesian() && this->info.has_metric_term;
#endif
}

template <typename MF>
void
MLCellLinOpT<MF>::defineBC ()
{
    BL_PROFILE("MLCellLinOp::defineBC()");

    const int ncomp = this->getNComp();

    m_bndry_sol.resize(this->m_num_amr_levels);
    m_crse_sol_br.resize(this->m_num_amr_levels);

    m_bndry_cor.resize(this->m_num_amr_levels);
    m_crse_cor_br.resize(this->m_num_amr_levels);

    m_robin_bcval.resize(this->m_num_amr_levels);

    for (int amrlev = 0; amrlev < this->m_num_amr_levels; ++amrlev)
    {
        m_bndry_sol[amrlev] = std::make_unique<MLMGBndryT<MF>>(this->m_grids[amrlev][0],
                                                               this->m_dmap[amrlev][0],
                                                               ncomp,
                                                               this->m_geom[amrlev][0]);
    }

    for (int amrlev = 1; amrlev < this->m_num_amr_levels; ++amrlev)
    {
        const int in_rad = 0;
        const int out_rad = 1;
        const int extent_rad = 2;
        const int crse_ratio = this->m_amr_ref_ratio[amrlev-1];
        BoxArray cba = this->m_grids[amrlev][0];
        cba.coarsen(crse_ratio);
        m_crse_sol_br[amrlev] = std::make_unique<BndryRegisterT<MF>>
            (cba, this->m_dmap[amrlev][0], in_rad, out_rad, extent_rad, ncomp);
    }

    for (int amrlev = 1; amrlev < this->m_num_amr_levels; ++amrlev)
    {
        const int in_rad = 0;
        const int out_rad = 1;
        const int extent_rad = 2;
        const int crse_ratio = this->m_amr_ref_ratio[amrlev-1];
        BoxArray cba = this->m_grids[amrlev][0];
        cba.coarsen(crse_ratio);
        m_crse_cor_br[amrlev] = std::make_unique<BndryRegisterT<MF>>
            (cba, this->m_dmap[amrlev][0], in_rad, out_rad, extent_rad, ncomp);
        m_crse_cor_br[amrlev]->setVal(RT(0.0));
    }

    // This has to be done after m_crse_cor_br is defined.
    for (int amrlev = 1; amrlev < this->m_num_amr_levels; ++amrlev)
    {
        m_bndry_cor[amrlev] = std::make_unique<MLMGBndryT<MF>>
            (this->m_grids[amrlev][0], this->m_dmap[amrlev][0], ncomp, this->m_geom[amrlev][0]);
        MF bc_data(this->m_grids[amrlev][0], this->m_dmap[amrlev][0], ncomp, 1);
        bc_data.setVal(0.0);

        m_bndry_cor[amrlev]->setBndryValues(*m_crse_cor_br[amrlev], 0, bc_data, 0, 0, ncomp,
                                            IntVect(this->m_amr_ref_ratio[amrlev-1]),
                                            m_interpbndry_halfwidth);

        Vector<Array<LinOpBCType,AMREX_SPACEDIM> > bclohi
            (ncomp,Array<LinOpBCType,AMREX_SPACEDIM>{{AMREX_D_DECL(BCType::Dirichlet,
                                                                   BCType::Dirichlet,
                                                                   BCType::Dirichlet)}});
        m_bndry_cor[amrlev]->setLOBndryConds(bclohi, bclohi, this->m_amr_ref_ratio[amrlev-1], RealVect{});
    }

    m_bcondloc.resize(this->m_num_amr_levels);
    for (int amrlev = 0; amrlev < this->m_num_amr_levels; ++amrlev)
    {
        m_bcondloc[amrlev].resize(this->m_num_mg_levels[amrlev]);
        for (int mglev = 0; mglev < this->m_num_mg_levels[amrlev]; ++mglev)
        {
            m_bcondloc[amrlev][mglev] = std::make_unique<BndryCondLoc>(this->m_grids[amrlev][mglev],
                                                                       this->m_dmap[amrlev][mglev],
                                                                       ncomp);
        }
    }
}

template <typename MF>
void
MLCellLinOpT<MF>::setLevelBC (int amrlev, const MF* a_levelbcdata, const MF* robinbc_a,
                              const MF* robinbc_b, const MF* robinbc_f)
{
    BL_PROFILE("MLCellLinOp::setLevelBC()");

    AMREX_ALWAYS_ASSERT(amrlev >= 0 && amrlev < this->m_num_amr_levels);

    const int ncomp = this->getNComp();

    MF zero;
    IntVect ng(1);
    if (this->hasHiddenDimension()) { ng[this->hiddenDirection()] = 0; }
    if (a_levelbcdata == nullptr) {
        zero.define(this->m_grids[amrlev][0], this->m_dmap[amrlev][0], ncomp, ng);
        zero.setVal(RT(0.0));
    } else {
        AMREX_ALWAYS_ASSERT(a_levelbcdata->nGrowVect().allGE(ng));
    }
    const MF& bcdata = (a_levelbcdata == nullptr) ? zero : *a_levelbcdata;

    IntVect br_ref_ratio(-1);

    if (amrlev == 0)
    {
        if (this->needsCoarseDataForBC())
        {
            // AMREX_ALWAYS_ASSERT(!this->hasHiddenDimension());
            if (this->hasHiddenDimension()) {
                int hidden_dir = this->hiddenDirection();
                AMREX_ALWAYS_ASSERT(this->m_coarse_data_crse_ratio[hidden_dir] == 1);
            }
            br_ref_ratio = this->m_coarse_data_crse_ratio.allGT(0) ? this->m_coarse_data_crse_ratio : IntVect(2);
            if (this->m_crse_sol_br[amrlev] == nullptr && br_ref_ratio.allGT(0))
            {
                const int in_rad = 0;
                const int out_rad = 1;
                const int extent_rad = 2;
                const IntVect crse_ratio = br_ref_ratio;
                BoxArray cba = this->m_grids[amrlev][0];
                cba.coarsen(crse_ratio);
                this->m_crse_sol_br[amrlev] = std::make_unique<BndryRegisterT<MF>>
                    (cba, this->m_dmap[amrlev][0], in_rad, out_rad, extent_rad, ncomp);
            }
            if (this->m_coarse_data_for_bc != nullptr) {
                AMREX_ALWAYS_ASSERT(this->m_coarse_data_crse_ratio.allGT(0));
                const Box& cbx = amrex::coarsen(this->m_geom[0][0].Domain(), this->m_coarse_data_crse_ratio);
                this->m_crse_sol_br[amrlev]->copyFrom(*(this->m_coarse_data_for_bc), 0, 0, 0, ncomp,
                                                      this->m_geom[0][0].periodicity(cbx));
            } else {
                this->m_crse_sol_br[amrlev]->setVal(RT(0.0));
            }
            this->m_bndry_sol[amrlev]->setBndryValues(*(this->m_crse_sol_br[amrlev]), 0,
                                                      bcdata, 0, 0, ncomp, br_ref_ratio,
                                                      this->m_interpbndry_halfwidth);
            br_ref_ratio = this->m_coarse_data_crse_ratio;
        }
        else
        {
            this->m_bndry_sol[amrlev]->setPhysBndryValues(bcdata,0,0,ncomp);
            br_ref_ratio = IntVect(1);
        }
    }
    else
    {
        this->m_bndry_sol[amrlev]->setPhysBndryValues(bcdata,0,0,ncomp);
        br_ref_ratio = IntVect(this->m_amr_ref_ratio[amrlev-1]);
    }

    auto crse_fine_bc_type = (amrlev == 0) ? this->m_coarse_fine_bc_type : LinOpBCType::Dirichlet;
    this->m_bndry_sol[amrlev]->setLOBndryConds(this->m_lobc, this->m_hibc, br_ref_ratio,
                                               this->m_coarse_bc_loc, crse_fine_bc_type);

    const Real* dx = this->m_geom[amrlev][0].CellSize();
    for (int mglev = 0; mglev < this->m_num_mg_levels[amrlev]; ++mglev)
    {
        this->m_bcondloc[amrlev][mglev]->setLOBndryConds(this->m_geom[amrlev][mglev], dx,
                                                         this->m_lobc, this->m_hibc,
                                                         br_ref_ratio, this->m_coarse_bc_loc,
                                                         this->m_domain_bloc_lo, this->m_domain_bloc_hi,
                                                         crse_fine_bc_type);
    }

    if (this->hasRobinBC()) {
        AMREX_ASSERT(robinbc_a != nullptr && robinbc_b != nullptr && robinbc_f != nullptr);
        this->m_robin_bcval[amrlev] = std::make_unique<MF>(this->m_grids[amrlev][0],
                                                           this->m_dmap[amrlev][0],
                                                           ncomp*3, 1);
        const Box& domain = this->m_geom[amrlev][0].Domain();
        MFItInfo mfi_info;
        if (Gpu::notInLaunchRegion()) { mfi_info.SetDynamic(true); }
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
        for (MFIter mfi(*(this->m_robin_bcval[amrlev]), mfi_info); mfi.isValid(); ++mfi) {
            Box const& vbx = mfi.validbox();
            Array4<RT const> const& ra = robinbc_a->const_array(mfi);
            Array4<RT const> const& rb = robinbc_b->const_array(mfi);
            Array4<RT const> const& rf = robinbc_f->const_array(mfi);
            for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
                const Box& blo = amrex::adjCellLo(vbx, idim);
                const Box& bhi = amrex::adjCellHi(vbx, idim);
                bool outside_domain_lo = !(domain.contains(blo));
                bool outside_domain_hi = !(domain.contains(bhi));
                if ((!outside_domain_lo) && (!outside_domain_hi)) { continue; }
                for (int icomp = 0; icomp < ncomp; ++icomp) {
                    Array4<RT> const& rbc = (*(this->m_robin_bcval[amrlev]))[mfi].array(icomp*3);
                    if (this->m_lobc_orig[icomp][idim] == LinOpBCType::Robin && outside_domain_lo)
                    {
                        AMREX_HOST_DEVICE_PARALLEL_FOR_3D(blo, i, j, k,
                        {
                            rbc(i,j,k,0) = ra(i,j,k,icomp);
                            rbc(i,j,k,1) = rb(i,j,k,icomp);
                            rbc(i,j,k,2) = rf(i,j,k,icomp);
                        });
                    }
                    if (this->m_hibc_orig[icomp][idim] == LinOpBCType::Robin && outside_domain_hi)
                    {
                        AMREX_HOST_DEVICE_PARALLEL_FOR_3D(bhi, i, j, k,
                        {
                            rbc(i,j,k,0) = ra(i,j,k,icomp);
                            rbc(i,j,k,1) = rb(i,j,k,icomp);
                            rbc(i,j,k,2) = rf(i,j,k,icomp);
                        });
                    }
                }
            }
        }
    }
}
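
// Note on the Robin branch above: m_robin_bcval packs three components per
// solution component, so for component icomp the slots 3*icomp, 3*icomp+1,
// and 3*icomp+2 hold a, b, and f of the condition a*phi + b*dphi/dn = f in
// the ghost cells outside the domain. An illustrative caller-side setup
// (variable names are placeholders; requires the corresponding domain sides
// to be LinOpBCType::Robin):
#if 0
    MultiFab robin_a(grids, dmap, 1, 1);
    MultiFab robin_b(grids, dmap, 1, 1);
    MultiFab robin_f(grids, dmap, 1, 1);
    const Real dx = geom.CellSize(0);
    robin_a.setVal(1.0); // a = 1
    robin_b.setVal(dx);  // b = dx
    robin_f.setVal(0.0); // f = 0  =>  phi + dx * dphi/dn = 0
    linop.setLevelBC(0, &solution, &robin_a, &robin_b, &robin_f);
#endif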

template <typename MF>
void
MLCellLinOpT<MF>::update ()
{
    if (MLLinOpT<MF>::needsUpdate()) { MLLinOpT<MF>::update(); }
}

template <typename MF>
void
MLCellLinOpT<MF>::updateSolBC (int amrlev, const MF& crse_bcdata) const
{
    BL_PROFILE("MLCellLinOp::updateSolBC()");

    AMREX_ALWAYS_ASSERT(amrlev > 0);
    const int ncomp = this->getNComp();
    m_crse_sol_br[amrlev]->copyFrom(crse_bcdata, 0, 0, 0, ncomp,
                                    this->m_geom[amrlev-1][0].periodicity());
    m_bndry_sol[amrlev]->updateBndryValues(*m_crse_sol_br[amrlev], 0, 0, ncomp,
                                           IntVect(this->m_amr_ref_ratio[amrlev-1]),
                                           m_interpbndry_halfwidth);
}

template <typename MF>
void
MLCellLinOpT<MF>::updateCorBC (int amrlev, const MF& crse_bcdata) const
{
    BL_PROFILE("MLCellLinOp::updateCorBC()");
    AMREX_ALWAYS_ASSERT(amrlev > 0);
    const int ncomp = this->getNComp();
    m_crse_cor_br[amrlev]->copyFrom(crse_bcdata, 0, 0, 0, ncomp,
                                    this->m_geom[amrlev-1][0].periodicity());
    m_bndry_cor[amrlev]->updateBndryValues(*m_crse_cor_br[amrlev], 0, 0, ncomp,
                                           IntVect(this->m_amr_ref_ratio[amrlev-1]),
                                           m_interpbndry_halfwidth);
}

template <typename MF>
void
MLCellLinOpT<MF>::applyBC (int amrlev, int mglev, MF& in, BCMode bc_mode, StateMode,
                           const MLMGBndryT<MF>* bndry, bool skip_fillboundary) const
{
    BL_PROFILE("MLCellLinOp::applyBC()");
    // No coarsened boundary values; cannot apply inhomogeneous BC at mglev > 0.
    BL_ASSERT(mglev == 0 || bc_mode == BCMode::Homogeneous);
    BL_ASSERT(bndry != nullptr || bc_mode == BCMode::Homogeneous);

    const int ncomp = this->getNComp();
    const int cross = isCrossStencil();
    const int tensorop = isTensorOp();
    if (!skip_fillboundary) {
        in.FillBoundary(0, ncomp, this->m_geom[amrlev][mglev].periodicity(), cross);
    }

    int flagbc = bc_mode == BCMode::Inhomogeneous;
    const int imaxorder = this->maxorder;

    const Real* dxinv = this->m_geom[amrlev][mglev].InvCellSize();
    const RT dxi = static_cast<RT>(dxinv[0]);
    const RT dyi = (AMREX_SPACEDIM >= 2) ? static_cast<RT>(dxinv[1]) : RT(1.0);
    const RT dzi = (AMREX_SPACEDIM == 3) ? static_cast<RT>(dxinv[2]) : RT(1.0);

    const auto& maskvals = m_maskvals[amrlev][mglev];
    const auto& bcondloc = *m_bcondloc[amrlev][mglev];

    FAB foofab(Box::TheUnitBox(),ncomp);
    const auto& foo = foofab.const_array();

    MFItInfo mfi_info;
    if (Gpu::notInLaunchRegion()) { mfi_info.SetDynamic(true); }

    AMREX_ALWAYS_ASSERT_WITH_MESSAGE(cross || tensorop || Gpu::notInLaunchRegion(),
                                     "non-cross stencil not supported for gpu");

    const int hidden_direction = this->hiddenDirection();

#ifdef AMREX_USE_GPU
    if ((cross || tensorop) && Gpu::inLaunchRegion())
    {
        Vector<MLMGABCTag<RT> > tags;
        tags.reserve(in.local_size()*AMREX_SPACEDIM*ncomp);

        for (MFIter mfi(in); mfi.isValid(); ++mfi) {
            const Box& vbx = mfi.validbox();
            const auto& iofab = in.array(mfi);
            const auto & bdlv = bcondloc.bndryLocs(mfi);
            const auto & bdcv = bcondloc.bndryConds(mfi);

            for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
                if (idim != hidden_direction) {
                    const Orientation olo(idim,Orientation::low);
                    const Orientation ohi(idim,Orientation::high);
                    const auto& bvlo = (bndry != nullptr) ?
                        bndry->bndryValues(olo).const_array(mfi) : foo;
                    const auto& bvhi = (bndry != nullptr) ?
                        bndry->bndryValues(ohi).const_array(mfi) : foo;
                    for (int icomp = 0; icomp < ncomp; ++icomp) {
                        tags.emplace_back(MLMGABCTag<RT>{iofab, bvlo, bvhi,
                                                         maskvals[olo].const_array(mfi),
                                                         maskvals[ohi].const_array(mfi),
                                                         bdlv[icomp][olo], bdlv[icomp][ohi],
                                                         amrex::adjCell(vbx,olo),
                                                         bdcv[icomp][olo], bdcv[icomp][ohi],
                                                         vbx.length(idim), icomp, idim});
                    }
                }
            }
        }

        ParallelFor(tags,
        [=] AMREX_GPU_DEVICE (int i, int j, int k, MLMGABCTag<RT> const& tag) noexcept
        {
            if (tag.dir == 0)
            {
                mllinop_apply_bc_x(0, i, j, k, tag.blen, tag.fab,
                                   tag.mask_lo, tag.bctype_lo, tag.bcloc_lo, tag.bcval_lo,
                                   imaxorder, dxi, flagbc, tag.comp);
                mllinop_apply_bc_x(1, i+tag.blen+1, j, k, tag.blen, tag.fab,
                                   tag.mask_hi, tag.bctype_hi, tag.bcloc_hi, tag.bcval_hi,
                                   imaxorder, dxi, flagbc, tag.comp);
            }
#if (AMREX_SPACEDIM > 1)
            else
#if (AMREX_SPACEDIM > 2)
            if (tag.dir == 1)
#endif
            {
                mllinop_apply_bc_y(0, i, j, k, tag.blen, tag.fab,
                                   tag.mask_lo, tag.bctype_lo, tag.bcloc_lo, tag.bcval_lo,
                                   imaxorder, dyi, flagbc, tag.comp);
                mllinop_apply_bc_y(1, i, j+tag.blen+1, k, tag.blen, tag.fab,
                                   tag.mask_hi, tag.bctype_hi, tag.bcloc_hi, tag.bcval_hi,
                                   imaxorder, dyi, flagbc, tag.comp);
            }
#if (AMREX_SPACEDIM > 2)
            else {
                mllinop_apply_bc_z(0, i, j, k, tag.blen, tag.fab,
                                   tag.mask_lo, tag.bctype_lo, tag.bcloc_lo, tag.bcval_lo,
                                   imaxorder, dzi, flagbc, tag.comp);
                mllinop_apply_bc_z(1, i, j, k+tag.blen+1, tag.blen, tag.fab,
                                   tag.mask_hi, tag.bctype_hi, tag.bcloc_hi, tag.bcval_hi,
                                   imaxorder, dzi, flagbc, tag.comp);
            }
#endif
#endif
        });
    } else
#endif
    if (cross || tensorop)
    {
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
        for (MFIter mfi(in, mfi_info); mfi.isValid(); ++mfi)
        {
            const Box& vbx = mfi.validbox();
            const auto& iofab = in.array(mfi);

            const auto & bdlv = bcondloc.bndryLocs(mfi);
            const auto & bdcv = bcondloc.bndryConds(mfi);

            for (int idim = 0; idim < AMREX_SPACEDIM; ++idim)
            {
                if (hidden_direction == idim) { continue; }
                const Orientation olo(idim,Orientation::low);
                const Orientation ohi(idim,Orientation::high);
                const Box blo = amrex::adjCellLo(vbx, idim);
                const Box bhi = amrex::adjCellHi(vbx, idim);
                const int blen = vbx.length(idim);
                const auto& mlo = maskvals[olo].array(mfi);
                const auto& mhi = maskvals[ohi].array(mfi);
                const auto& bvlo = (bndry != nullptr) ? bndry->bndryValues(olo).const_array(mfi) : foo;
                const auto& bvhi = (bndry != nullptr) ? bndry->bndryValues(ohi).const_array(mfi) : foo;
                for (int icomp = 0; icomp < ncomp; ++icomp) {
                    const BoundCond bctlo = bdcv[icomp][olo];
                    const BoundCond bcthi = bdcv[icomp][ohi];
                    const RT bcllo = bdlv[icomp][olo];
                    const RT bclhi = bdlv[icomp][ohi];
                    if (idim == 0) {
                        mllinop_apply_bc_x(0, blo, blen, iofab, mlo,
                                           bctlo, bcllo, bvlo,
                                           imaxorder, dxi, flagbc, icomp);
                        mllinop_apply_bc_x(1, bhi, blen, iofab, mhi,
                                           bcthi, bclhi, bvhi,
                                           imaxorder, dxi, flagbc, icomp);
                    } else if (idim == 1) {
                        mllinop_apply_bc_y(0, blo, blen, iofab, mlo,
                                           bctlo, bcllo, bvlo,
                                           imaxorder, dyi, flagbc, icomp);
                        mllinop_apply_bc_y(1, bhi, blen, iofab, mhi,
                                           bcthi, bclhi, bvhi,
                                           imaxorder, dyi, flagbc, icomp);
                    } else {
                        mllinop_apply_bc_z(0, blo, blen, iofab, mlo,
                                           bctlo, bcllo, bvlo,
                                           imaxorder, dzi, flagbc, icomp);
                        mllinop_apply_bc_z(1, bhi, blen, iofab, mhi,
                                           bcthi, bclhi, bvhi,
                                           imaxorder, dzi, flagbc, icomp);
                    }
                }
            }
        }
    }
    else
    {
#ifdef BL_NO_FORT
        amrex::Abort("amrex_mllinop_apply_bc not available when BL_NO_FORT=TRUE");
#else
        if constexpr (std::is_same_v<Real,RT>) {
#ifdef AMREX_USE_OMP
#pragma omp parallel
#endif
            for (MFIter mfi(in, mfi_info); mfi.isValid(); ++mfi)
            {
                const Box& vbx = mfi.validbox();

                const auto & bdlv = bcondloc.bndryLocs(mfi);
                const auto & bdcv = bcondloc.bndryConds(mfi);

                const RealTuple & bdl = bdlv[0];
                const BCTuple & bdc = bdcv[0];

                for (OrientationIter oitr; oitr; ++oitr)
                {
                    const Orientation ori = oitr();

                    int cdr = ori;
                    RT bcl = bdl[ori];
                    int bct = bdc[ori];

                    const auto& fsfab = (bndry != nullptr) ? bndry->bndryValues(ori)[mfi] : foofab;

                    const Mask& m = maskvals[ori][mfi];

                    amrex_mllinop_apply_bc(BL_TO_FORTRAN_BOX(vbx),
                                           BL_TO_FORTRAN_ANYD(in[mfi]),
                                           BL_TO_FORTRAN_ANYD(m),
                                           cdr, bct, bcl,
                                           BL_TO_FORTRAN_ANYD(fsfab),
                                           imaxorder, dxinv, flagbc, ncomp, cross);
                }
            }
        } else {
            amrex::Abort("Not supported");
        }
#endif
    }
}
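
// What applyBC produces: the one ghost layer outside each valid box is
// filled so that the regular interior stencil automatically sees the
// boundary condition. With BCMode::Homogeneous the boundary value is taken
// to be zero (used while smoothing corrections); with BCMode::Inhomogeneous
// it comes from bndry. For example, for a Dirichlet value phi_b located on
// the domain face (half a cell beyond the last interior center), the
// maxorder = 2 fill in mllinop_apply_bc_x reduces to a linear extrapolation
// (illustrative):
#if 0
    phi(ilo-1,j,k) = Real(2.0)*phi_b - phi(ilo,j,k); // so (ghost+interior)/2 == phi_b
#endif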

template <typename MF>
BoxArray
MLCellLinOpT<MF>::makeNGrids (int grid_size) const
{
    const Box& dombx = this->m_geom[0].back().Domain();

    const BoxArray& old_ba = this->m_grids[0].back();
    const int N = old_ba.size();
    Vector<Box> bv;
    bv.reserve(N);
    for (int i = 0; i < N; ++i)
    {
        Box b = old_ba[i];
        b.coarsen(grid_size);
        b.refine(grid_size);
        IntVect sz = b.size();
        const IntVect nblks {AMREX_D_DECL(sz[0]/grid_size, sz[1]/grid_size, sz[2]/grid_size)};

        IntVect big = b.smallEnd() + grid_size - 1;
        b.setBig(big);

#if (AMREX_SPACEDIM == 3)
        for (int kk = 0; kk < nblks[2]; ++kk) {
#endif
#if (AMREX_SPACEDIM >= 2)
        for (int jj = 0; jj < nblks[1]; ++jj) {
#endif
        for (int ii = 0; ii < nblks[0]; ++ii)
        {
            IntVect shft{AMREX_D_DECL(ii*grid_size,jj*grid_size,kk*grid_size)};
            Box bb = amrex::shift(b,shft);
            bb &= dombx;
            bv.push_back(bb);
        }
#if (AMREX_SPACEDIM >= 2)
        }
#endif
#if (AMREX_SPACEDIM == 3)
        }
#endif
    }

    std::sort(bv.begin(), bv.end());
    bv.erase(std::unique(bv.begin(), bv.end()), bv.end());

    BoxList bl(std::move(bv));

    return BoxArray{std::move(bl)};
}

template <typename MF>
void
MLCellLinOpT<MF>::restriction (int amrlev, int cmglev, MF& crse, MF& fine) const
{
    const int ncomp = this->getNComp();
    IntVect ratio = (amrlev > 0) ? IntVect(2) : this->mg_coarsen_ratio_vec[cmglev-1];
    amrex::average_down(fine, crse, 0, ncomp, ratio);
}
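
// Restriction is plain cell averaging: each coarse cell gets the arithmetic
// mean of the fine cells it covers. For ratio 2 in 2D, average_down computes
// per coarse cell (illustrative):
#if 0
    crse(ic,jc,0) = Real(0.25)*( fine(2*ic  ,2*jc  ,0) + fine(2*ic+1,2*jc  ,0)
                               + fine(2*ic  ,2*jc+1,0) + fine(2*ic+1,2*jc+1,0) );
#endif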

template <typename MF>
void
MLCellLinOpT<MF>::interpolation (int amrlev, int fmglev, MF& fine, const MF& crse) const
{
    const int ncomp = this->getNComp();

    Dim3 ratio3 = {2,2,2};
    IntVect ratio = (amrlev > 0) ? IntVect(2) : this->mg_coarsen_ratio_vec[fmglev];
    AMREX_D_TERM(ratio3.x = ratio[0];,
                 ratio3.y = ratio[1];,
                 ratio3.z = ratio[2];);

#ifdef AMREX_USE_GPU
    if (Gpu::inLaunchRegion() && fine.isFusingCandidate()) {
        auto const& finema = fine.arrays();
        auto const& crsema = crse.const_arrays();
        ParallelFor(fine, IntVect(0), ncomp,
        [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k, int n) noexcept
        {
            int ic = amrex::coarsen(i,ratio3.x);
            int jc = amrex::coarsen(j,ratio3.y);
            int kc = amrex::coarsen(k,ratio3.z);
            finema[box_no](i,j,k,n) += crsema[box_no](ic,jc,kc,n);
        });
        Gpu::streamSynchronize();
    } else
#endif
    {
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
        for (MFIter mfi(fine,TilingIfNotGPU()); mfi.isValid(); ++mfi)
        {
            const Box& bx = mfi.tilebox();
            Array4<RT const> const& cfab = crse.const_array(mfi);
            Array4<RT> const& ffab = fine.array(mfi);
            AMREX_HOST_DEVICE_PARALLEL_FOR_4D ( bx, ncomp, i, j, k, n,
            {
                int ic = amrex::coarsen(i,ratio3.x);
                int jc = amrex::coarsen(j,ratio3.y);
                int kc = amrex::coarsen(k,ratio3.z);
                ffab(i,j,k,n) += cfab(ic,jc,kc,n);
            });
        }
    }
}

template <typename MF>
void
MLCellLinOpT<MF>::interpAssign (int amrlev, int fmglev, MF& fine, MF& crse) const
{
    const int ncomp = this->getNComp();

    const Geometry& crse_geom = this->Geom(amrlev,fmglev+1);
    const IntVect refratio = (amrlev > 0) ? IntVect(2) : this->mg_coarsen_ratio_vec[fmglev];
    const IntVect ng = crse.nGrowVect();

    MF cfine;
    const MF* cmf;

    if (amrex::isMFIterSafe(fine, crse))
    {
        crse.FillBoundary(crse_geom.periodicity());
        cmf = &crse;
    }
    else
    {
        BoxArray cba = fine.boxArray();
        cba.coarsen(refratio);
        cfine.define(cba, fine.DistributionMap(), ncomp, ng);
        cfine.setVal(RT(0.0));
        cfine.ParallelCopy(crse, 0, 0, ncomp, IntVect(0), ng, crse_geom.periodicity());
        cmf = &cfine;
    }

    bool isEB = fine.hasEBFabFactory();
    ignore_unused(isEB);

#ifdef AMREX_USE_EB
    const auto *factory = dynamic_cast<EBFArrayBoxFactory const*>(&(fine.Factory()));
    const FabArray<EBCellFlagFab>* flags = (factory) ? &(factory->getMultiEBCellFlagFab()) : nullptr;
#endif

    MFItInfo mfi_info;
    if (Gpu::notInLaunchRegion()) { mfi_info.EnableTiling().SetDynamic(true); }
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
    for (MFIter mfi(fine, mfi_info); mfi.isValid(); ++mfi)
    {
        const Box& bx = mfi.tilebox();
        const auto& ff = fine.array(mfi);
        const auto& cc = cmf->array(mfi);
#ifdef AMREX_USE_EB
        bool call_lincc;
        if (isEB)
        {
            const auto& flag = (*flags)[mfi];
            if (flag.getType(amrex::grow(bx,1)) == FabType::regular) {
                call_lincc = true;
            } else {
                Array4<EBCellFlag const> const& flg = flag.const_array();
                AMREX_LAUNCH_HOST_DEVICE_LAMBDA ( bx, tbx,
                {
                    mlmg_eb_cc_interp_r<2>(tbx, ff, cc, flg, ncomp);
                });

                call_lincc = false;
            }
        }
        else
        {
            call_lincc = true;
        }
#else
        const bool call_lincc = true;
#endif
        if (call_lincc)
        {
#if (AMREX_SPACEDIM == 3)
            if (this->hasHiddenDimension()) {
                Box const& bx_2d = this->compactify(bx);
                auto const& ff_2d = this->compactify(ff);
                auto const& cc_2d = this->compactify(cc);
                AMREX_LAUNCH_HOST_DEVICE_LAMBDA ( bx_2d, tbx,
                {
                    TwoD::mlmg_lin_cc_interp_r2(tbx, ff_2d, cc_2d, ncomp);
                });
            } else
#endif
            {
                AMREX_LAUNCH_HOST_DEVICE_LAMBDA ( bx, tbx,
                {
                    mlmg_lin_cc_interp_r2(tbx, ff, cc, ncomp);
                });
            }
        }
    }
}

template <typename MF>
void
MLCellLinOpT<MF>::interpolationAmr (int famrlev, MF& fine, const MF& crse,
                                    IntVect const& /*nghost*/) const
{
    const int ncomp = this->getNComp();
    const int refratio = this->AMRRefRatio(famrlev-1);

#ifdef AMREX_USE_EB
    const auto *factory = dynamic_cast<EBFArrayBoxFactory const*>(this->Factory(famrlev));
    const FabArray<EBCellFlagFab>* flags = (factory) ? &(factory->getMultiEBCellFlagFab()) : nullptr;
#endif

    MFItInfo mfi_info;
    if (Gpu::notInLaunchRegion()) { mfi_info.EnableTiling().SetDynamic(true); }
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
    for (MFIter mfi(fine, mfi_info); mfi.isValid(); ++mfi)
    {
        const Box& bx = mfi.tilebox();
        auto const& ff = fine.array(mfi);
        auto const& cc = crse.const_array(mfi);
#ifdef AMREX_USE_EB
        bool call_lincc;
        if (factory)
        {
            const auto& flag = (*flags)[mfi];
            if (flag.getType(amrex::grow(bx,1)) == FabType::regular) {
                call_lincc = true;
            } else {
                Array4<EBCellFlag const> const& flg = flag.const_array();
                switch(refratio) {
                case 2:
                {
                    AMREX_LAUNCH_HOST_DEVICE_LAMBDA ( bx, tbx,
                    {
                        mlmg_eb_cc_interp_r<2>(tbx, ff, cc, flg, ncomp);
                    });
                    break;
                }
                case 4:
                {
                    AMREX_LAUNCH_HOST_DEVICE_LAMBDA ( bx, tbx,
                    {
                        mlmg_eb_cc_interp_r<4>(tbx, ff, cc, flg, ncomp);
                    });
                    break;
                }
                default:
                    amrex::Abort("mlmg_eb_cc_interp: only refratio 2 and 4 are supported");
                }

                call_lincc = false;
            }
        }
        else
        {
            call_lincc = true;
        }
#else
        const bool call_lincc = true;
#endif
        if (call_lincc)
        {
            switch(refratio) {
            case 2:
            {
                AMREX_LAUNCH_HOST_DEVICE_LAMBDA ( bx, tbx,
                {
                    mlmg_lin_cc_interp_r2(tbx, ff, cc, ncomp);
                });
                break;
            }
            case 4:
            {
                AMREX_LAUNCH_HOST_DEVICE_LAMBDA ( bx, tbx,
                {
                    mlmg_lin_cc_interp_r4(tbx, ff, cc, ncomp);
                });
                break;
            }
            default:
                amrex::Abort("mlmg_lin_cc_interp: only refratio 2 and 4 are supported");
            }
        }
    }
}

template <typename MF>
void
MLCellLinOpT<MF>::averageDownSolutionRHS (int camrlev, MF& crse_sol, MF& crse_rhs,
                                          const MF& fine_sol, const MF& fine_rhs)
{
    const auto amrrr = this->AMRRefRatio(camrlev);
    const int ncomp = this->getNComp();
    amrex::average_down(fine_sol, crse_sol, 0, ncomp, amrrr);
    amrex::average_down(fine_rhs, crse_rhs, 0, ncomp, amrrr);
}

template <typename MF>
void
MLCellLinOpT<MF>::apply (int amrlev, int mglev, MF& out, MF& in, BCMode bc_mode,
                         StateMode s_mode, const MLMGBndryT<MF>* bndry) const
{
    BL_PROFILE("MLCellLinOp::apply()");
    applyBC(amrlev, mglev, in, bc_mode, s_mode, bndry);
    Fapply(amrlev, mglev, out, in);
}

template <typename MF>
void
MLCellLinOpT<MF>::smooth (int amrlev, int mglev, MF& sol, const MF& rhs,
                          bool skip_fillboundary, int niter) const
{
    BL_PROFILE("MLCellLinOp::smooth()");
    for (int i = 0; i < niter; ++i) {
        for (int redblack = 0; redblack < 2; ++redblack)
        {
            applyBC(amrlev, mglev, sol, BCMode::Homogeneous, StateMode::Solution,
                    nullptr, skip_fillboundary);
            Fsmooth(amrlev, mglev, sol, rhs, redblack);
            skip_fillboundary = false;
        }
    }
}
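
// One smoothing iteration is two half-sweeps: ghost cells are refilled with
// homogeneous boundary values before each half-sweep, and Fsmooth updates
// only cells of one parity, so each half-sweep reads fresh neighbor values
// of the other color. The parity gate a derived operator's Fsmooth typically
// applies looks like this (illustrative):
#if 0
    if ((i+j+k+redblack) % 2 == 0) {
        sol(i,j,k) = (rhs(i,j,k) - offdiag) / diag; // Gauss-Seidel update of one color
    }
#endif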

template <typename MF>
void
MLCellLinOpT<MF>::solutionResidual (int amrlev, MF& resid, MF& x, const MF& b,
                                    const MF* crse_bcdata)
{
    BL_PROFILE("MLCellLinOp::solutionResidual()");
    const int ncomp = this->getNComp();
    if (crse_bcdata != nullptr) {
        updateSolBC(amrlev, *crse_bcdata);
    }
    const int mglev = 0;
    apply(amrlev, mglev, resid, x, BCMode::Inhomogeneous, StateMode::Solution,
          m_bndry_sol[amrlev].get());

    AMREX_ASSERT(resid.nComp() == b.nComp());
    MF::Xpay(resid, RT(-1.0), b, 0, 0, ncomp, IntVect(0));
}

template <typename MF>
void
MLCellLinOpT<MF>::prepareForFluxes (int amrlev, const MF* crse_bcdata)
{
    if (crse_bcdata != nullptr) {
        updateSolBC(amrlev, *crse_bcdata);
    }
}

template <typename MF>
void
MLCellLinOpT<MF>::correctionResidual (int amrlev, int mglev, MF& resid, MF& x, const MF& b,
                                      BCMode bc_mode, const MF* crse_bcdata)
{
    BL_PROFILE("MLCellLinOp::correctionResidual()");
    const int ncomp = this->getNComp();
    if (bc_mode == BCMode::Inhomogeneous)
    {
        if (crse_bcdata)
        {
            AMREX_ASSERT(mglev == 0 && amrlev > 0);
            updateCorBC(amrlev, *crse_bcdata);
        }
        apply(amrlev, mglev, resid, x, BCMode::Inhomogeneous, StateMode::Correction,
              m_bndry_cor[amrlev].get());
    }
    else
    {
        AMREX_ASSERT(crse_bcdata == nullptr);
        apply(amrlev, mglev, resid, x, BCMode::Homogeneous, StateMode::Correction, nullptr);
    }

    MF::Xpay(resid, RT(-1.0), b, 0, 0, ncomp, IntVect(0));
}
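
// Net effect of the residual routines above: apply() leaves L(x) in resid,
// and MF::Xpay(resid, -1, b, ...) computes resid = b + (-1)*resid; composed,
// solutionResidual and correctionResidual return resid = b - L(x).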

template <typename MF>
void
MLCellLinOpT<MF>::reflux (int crse_amrlev, MF& res, const MF& crse_sol, const MF&,
                          MF&, MF& fine_sol, const MF&) const
{
    BL_PROFILE("MLCellLinOp::reflux()");

    auto& fluxreg = m_fluxreg[crse_amrlev];
    fluxreg.reset();

    const int ncomp = this->getNComp();

    const int fine_amrlev = crse_amrlev+1;

    Real dt = Real(1.0);
    const Real* crse_dx = this->m_geom[crse_amrlev][0].CellSize();
    const Real* fine_dx = this->m_geom[fine_amrlev][0].CellSize();

    const int mglev = 0;
    applyBC(fine_amrlev, mglev, fine_sol, BCMode::Inhomogeneous, StateMode::Solution,
            m_bndry_sol[fine_amrlev].get());

    MFItInfo mfi_info;
    if (Gpu::notInLaunchRegion()) { mfi_info.EnableTiling().SetDynamic(true); }

#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
    {
        Array<FAB,AMREX_SPACEDIM> flux;
        Array<FAB*,AMREX_SPACEDIM> pflux {{ AMREX_D_DECL(flux.data(), flux.data()+1, flux.data()+2) }};
        Array<FAB const*,AMREX_SPACEDIM> cpflux {{ AMREX_D_DECL(flux.data(), flux.data()+1, flux.data()+2) }};

        for (MFIter mfi(crse_sol, mfi_info); mfi.isValid(); ++mfi)
        {
            if (fluxreg.CrseHasWork(mfi))
            {
                const Box& tbx = mfi.tilebox();
                AMREX_D_TERM(flux[0].resize(amrex::surroundingNodes(tbx,0),ncomp,The_Async_Arena());,
                             flux[1].resize(amrex::surroundingNodes(tbx,1),ncomp,The_Async_Arena());,
                             flux[2].resize(amrex::surroundingNodes(tbx,2),ncomp,The_Async_Arena()););
                FFlux(crse_amrlev, mfi, pflux, crse_sol[mfi], Location::FaceCentroid);
                fluxreg.CrseAdd(mfi, cpflux, crse_dx, dt, RunOn::Gpu);
            }
        }

#ifdef AMREX_USE_OMP
#pragma omp barrier
#endif

        for (MFIter mfi(fine_sol, mfi_info); mfi.isValid(); ++mfi)
        {
            if (fluxreg.FineHasWork(mfi))
            {
                const Box& tbx = mfi.tilebox();
                const int face_only = true;
                AMREX_D_TERM(flux[0].resize(amrex::surroundingNodes(tbx,0),ncomp,The_Async_Arena());,
                             flux[1].resize(amrex::surroundingNodes(tbx,1),ncomp,The_Async_Arena());,
                             flux[2].resize(amrex::surroundingNodes(tbx,2),ncomp,The_Async_Arena()););
                FFlux(fine_amrlev, mfi, pflux, fine_sol[mfi], Location::FaceCentroid, face_only);
                fluxreg.FineAdd(mfi, cpflux, fine_dx, dt, RunOn::Gpu);
            }
        }
    }

    fluxreg.Reflux(res);
}
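
// Schematic of the reflux above: CrseAdd accumulates the coarse-side fluxes
// and FineAdd the fine-side fluxes along the coarse-fine interface; Reflux
// then corrects the composite residual so the coarse flux is replaced by the
// area-weighted sum of fine fluxes, roughly, per coarse interface cell,
// res += (sum_over_fine_faces(A_f * flux_f) - A_c * flux_c) / V_c.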

template <typename MF>
void
MLCellLinOpT<MF>::compFlux (int amrlev, const Array<MF*,AMREX_SPACEDIM>& fluxes,
                            MF& sol, Location loc) const
{
    BL_PROFILE("MLCellLinOp::compFlux()");

    const int mglev = 0;
    const int ncomp = this->getNComp();
    applyBC(amrlev, mglev, sol, BCMode::Inhomogeneous, StateMode::Solution,
            m_bndry_sol[amrlev].get());

    MFItInfo mfi_info;
    if (Gpu::notInLaunchRegion()) { mfi_info.EnableTiling().SetDynamic(true); }

#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
    {
        Array<FAB,AMREX_SPACEDIM> flux;
        Array<FAB*,AMREX_SPACEDIM> pflux {{ AMREX_D_DECL(flux.data(), flux.data()+1, flux.data()+2) }};
        for (MFIter mfi(sol, mfi_info); mfi.isValid(); ++mfi)
        {
            const Box& tbx = mfi.tilebox();
            AMREX_D_TERM(flux[0].resize(amrex::surroundingNodes(tbx,0),ncomp,The_Async_Arena());,
                         flux[1].resize(amrex::surroundingNodes(tbx,1),ncomp,The_Async_Arena());,
                         flux[2].resize(amrex::surroundingNodes(tbx,2),ncomp,The_Async_Arena()););
            FFlux(amrlev, mfi, pflux, sol[mfi], loc);
            for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
                const Box& nbx = mfi.nodaltilebox(idim);
                auto const& dst = fluxes[idim]->array(mfi);
                auto const& src = pflux[idim]->const_array();
                AMREX_HOST_DEVICE_PARALLEL_FOR_4D (nbx, ncomp, i, j, k, n,
                {
                    dst(i,j,k,n) = src(i,j,k,n);
                });
            }
        }
    }
}

template <typename MF>
void
MLCellLinOpT<MF>::compGrad (int amrlev, const Array<MF*,AMREX_SPACEDIM>& grad,
                            MF& sol, Location /*loc*/) const
{
    BL_PROFILE("MLCellLinOp::compGrad()");

    if (sol.nComp() > 1) {
        amrex::Abort("MLCellLinOp::compGrad called, but only works for single-component solves");
    }

    const int mglev = 0;
    applyBC(amrlev, mglev, sol, BCMode::Inhomogeneous, StateMode::Solution,
            m_bndry_sol[amrlev].get());

    const int ncomp = this->getNComp();

    AMREX_D_TERM(const RT dxi = static_cast<RT>(this->m_geom[amrlev][mglev].InvCellSize(0));,
                 const RT dyi = static_cast<RT>(this->m_geom[amrlev][mglev].InvCellSize(1));,
                 const RT dzi = static_cast<RT>(this->m_geom[amrlev][mglev].InvCellSize(2)););
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
    for (MFIter mfi(sol, TilingIfNotGPU()); mfi.isValid(); ++mfi)
    {
        AMREX_D_TERM(const Box& xbx = mfi.nodaltilebox(0);,
                     const Box& ybx = mfi.nodaltilebox(1);,
                     const Box& zbx = mfi.nodaltilebox(2););
        const auto& s = sol.array(mfi);
        AMREX_D_TERM(const auto& gx = grad[0]->array(mfi);,
                     const auto& gy = grad[1]->array(mfi);,
                     const auto& gz = grad[2]->array(mfi););

        AMREX_HOST_DEVICE_PARALLEL_FOR_4D ( xbx, ncomp, i, j, k, n,
        {
            gx(i,j,k,n) = dxi*(s(i,j,k,n) - s(i-1,j,k,n));
        });
#if (AMREX_SPACEDIM >= 2)
        AMREX_HOST_DEVICE_PARALLEL_FOR_4D ( ybx, ncomp, i, j, k, n,
        {
            gy(i,j,k,n) = dyi*(s(i,j,k,n) - s(i,j-1,k,n));
        });
#endif
#if (AMREX_SPACEDIM == 3)
        AMREX_HOST_DEVICE_PARALLEL_FOR_4D ( zbx, ncomp, i, j, k, n,
        {
            gz(i,j,k,n) = dzi*(s(i,j,k,n) - s(i,j,k-1,n));
        });
#endif
    }

    addInhomogNeumannFlux(amrlev, grad, sol, false);
}

template <typename MF>
void
MLCellLinOpT<MF>::applyMetricTerm (int amrlev, int mglev, MF& rhs) const
{
    amrex::ignore_unused(amrlev,mglev,rhs);
#if (AMREX_SPACEDIM != 3)
    if (!m_has_metric_term) { return; }

    const int ncomp = rhs.nComp();

    bool cc = rhs.ixType().cellCentered(0);

    const Geometry& geom = this->m_geom[amrlev][mglev];
    const RT dx = static_cast<RT>(geom.CellSize(0));
    const RT probxlo = static_cast<RT>(geom.ProbLo(0));

#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
    for (MFIter mfi(rhs,TilingIfNotGPU()); mfi.isValid(); ++mfi)
    {
        const Box& tbx = mfi.tilebox();
        auto const& rhsarr = rhs.array(mfi);
#if (AMREX_SPACEDIM == 1)
        if (cc) {
            AMREX_HOST_DEVICE_PARALLEL_FOR_4D ( tbx, ncomp, i, j, k, n,
            {
                RT rc = probxlo + (i+RT(0.5))*dx;
                rhsarr(i,j,k,n) *= rc*rc;
            });
        } else {
            AMREX_HOST_DEVICE_PARALLEL_FOR_4D ( tbx, ncomp, i, j, k, n,
            {
                RT re = probxlo + i*dx;
                rhsarr(i,j,k,n) *= re*re;
            });
        }
#elif (AMREX_SPACEDIM == 2)
        if (cc) {
            AMREX_HOST_DEVICE_PARALLEL_FOR_4D ( tbx, ncomp, i, j, k, n,
            {
                RT rc = probxlo + (i+RT(0.5))*dx;
                rhsarr(i,j,k,n) *= rc;
            });
        } else {
            AMREX_HOST_DEVICE_PARALLEL_FOR_4D ( tbx, ncomp, i, j, k, n,
            {
                RT re = probxlo + i*dx;
                rhsarr(i,j,k,n) *= re;
            });
        }
#endif
    }
#endif
}

template <typename MF>
void
MLCellLinOpT<MF>::unapplyMetricTerm (int amrlev, int mglev, MF& rhs) const
{
    amrex::ignore_unused(amrlev,mglev,rhs);
#if (AMREX_SPACEDIM != 3)
    if (!m_has_metric_term) { return; }

    const int ncomp = rhs.nComp();

    bool cc = rhs.ixType().cellCentered(0);

    const Geometry& geom = this->m_geom[amrlev][mglev];
    const RT dx = static_cast<RT>(geom.CellSize(0));
    const RT probxlo = static_cast<RT>(geom.ProbLo(0));

#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
    for (MFIter mfi(rhs,TilingIfNotGPU()); mfi.isValid(); ++mfi)
    {
        const Box& tbx = mfi.tilebox();
        auto const& rhsarr = rhs.array(mfi);
#if (AMREX_SPACEDIM == 1)
        if (cc) {
            AMREX_HOST_DEVICE_PARALLEL_FOR_4D ( tbx, ncomp, i, j, k, n,
            {
                RT rcinv = RT(1.0)/(probxlo + (i+RT(0.5))*dx);
                rhsarr(i,j,k,n) *= rcinv*rcinv;
            });
        } else {
            AMREX_HOST_DEVICE_PARALLEL_FOR_4D ( tbx, ncomp, i, j, k, n,
            {
                RT re = probxlo + i*dx;
                RT reinv = (re==RT(0.0)) ? RT(0.0) : RT(1.)/re;
                rhsarr(i,j,k,n) *= reinv*reinv;
            });
        }
#elif (AMREX_SPACEDIM == 2)
        if (cc) {
            AMREX_HOST_DEVICE_PARALLEL_FOR_4D ( tbx, ncomp, i, j, k, n,
            {
                RT rcinv = RT(1.0)/(probxlo + (i+RT(0.5))*dx);
                rhsarr(i,j,k,n) *= rcinv;
            });
        } else {
            AMREX_HOST_DEVICE_PARALLEL_FOR_4D ( tbx, ncomp, i, j, k, n,
            {
                RT re = probxlo + i*dx;
                RT reinv = (re==RT(0.0)) ? RT(0.0) : RT(1.)/re;
                rhsarr(i,j,k,n) *= reinv;
            });
        }
#endif
    }
#endif
}

template <typename MF>
auto
MLCellLinOpT<MF>::getSolvabilityOffset (int amrlev, int mglev, MF const& rhs) const
    -> Vector<RT>
{
    computeVolInv();

    const int ncomp = this->getNComp();
    Vector<RT> offset(ncomp);

#ifdef AMREX_USE_EB
    const auto *factory = dynamic_cast<EBFArrayBoxFactory const*>(this->Factory(amrlev,mglev));
    if (factory && !factory->isAllRegular())
    {
        if constexpr (std::is_same<MF,MultiFab>()) {
            const MultiFab& vfrac = factory->getVolFrac();
            for (int c = 0; c < ncomp; ++c) {
                offset[c] = amrex::Dot(rhs, c, vfrac, 0, 1, IntVect(0), true)
                    * m_volinv[amrlev][mglev];
            }
        } else {
            amrex::Abort("TODO: MLMG with EB only works with MultiFab");
        }
    }
    else
#endif
    {
        for (int c = 0; c < ncomp; ++c) {
            offset[c] = rhs.sum(c,IntVect(0),true) * m_volinv[amrlev][mglev];
        }
    }

    ParallelAllReduce::Sum(offset.data(), ncomp, ParallelContext::CommunicatorSub());

    return offset;
}
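
// For singular problems (e.g., Poisson with all-Neumann or periodic
// boundaries) the system is only solvable if the right-hand side has zero
// mean; getSolvabilityOffset returns offset[c] = sum(rhs[c]) / N (volume-
// fraction weighted with EB), and fixSolvabilityByOffset below subtracts it.
// Illustrative caller-side use:
#if 0
    auto offset = linop.getSolvabilityOffset(0, 0, rhs);
    linop.fixSolvabilityByOffset(0, 0, rhs, offset); // rhs now sums to zero
#endif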

template <typename MF>
void
MLCellLinOpT<MF>::fixSolvabilityByOffset (int /*amrlev*/, int /*mglev*/, MF& rhs,
                                          Vector<RT> const& offset) const
{
    const int ncomp = this->getNComp();
    for (int c = 0; c < ncomp; ++c) {
        rhs.plus(-offset[c], c, 1);
    }
#ifdef AMREX_USE_EB
    if (!rhs.isAllRegular()) {
        if constexpr (std::is_same<MF,MultiFab>()) {
            amrex::EB_set_covered(rhs, 0, ncomp, 0, 0.0_rt);
        } else {
            amrex::Abort("amrex::EB_set_covered only works with MultiFab");
        }
    }
#endif
}
1632template <typename MF>
1633void
1635{
1636 BL_PROFILE("MLCellLinOp::prepareForSolve()");
1637
1638 const int imaxorder = this->maxorder;
1639 const int ncomp = this->getNComp();
1640 const int hidden_direction = this->hiddenDirection();
1641 for (int amrlev = 0; amrlev < this->m_num_amr_levels; ++amrlev)
1642 {
1643 for (int mglev = 0; mglev < this->m_num_mg_levels[amrlev]; ++mglev)
1644 {
1645 const auto& bcondloc = *m_bcondloc[amrlev][mglev];
1646 const auto& maskvals = m_maskvals[amrlev][mglev];
1647
1648 const RT dxi = static_cast<RT>(this->m_geom[amrlev][mglev].InvCellSize(0));
1649 const RT dyi = static_cast<RT>((AMREX_SPACEDIM >= 2) ? this->m_geom[amrlev][mglev].InvCellSize(1) : Real(1.0));
1650 const RT dzi = static_cast<RT>((AMREX_SPACEDIM == 3) ? this->m_geom[amrlev][mglev].InvCellSize(2) : Real(1.0));
1651
1652 auto& undrrelxr = this->m_undrrelxr[amrlev][mglev];
1653 MF foo(this->m_grids[amrlev][mglev], this->m_dmap[amrlev][mglev], ncomp, 0, MFInfo().SetAlloc(false));
1654
1655#ifdef AMREX_USE_EB
1656 const auto *factory = dynamic_cast<EBFArrayBoxFactory const*>(this->m_factory[amrlev][mglev].get());
1657 const FabArray<EBCellFlagFab>* flags =
1658 (factory) ? &(factory->getMultiEBCellFlagFab()) : nullptr;
1659 auto area = (factory) ? factory->getAreaFrac()
1660 : Array<const MultiCutFab*,AMREX_SPACEDIM>{AMREX_D_DECL(nullptr,nullptr,nullptr)};
1662#endif
1663
1664#ifdef AMREX_USE_GPU
1665 if (Gpu::inLaunchRegion()) {
1666#ifdef AMREX_USE_EB
1667 if (factory && !factory->isAllRegular()) {
1668#if defined(AMREX_USE_CUDA) && defined(_WIN32)
1669 if (!std::is_same<MF,MultiFab>()) {
1670#else
1671 if constexpr (!std::is_same<MF,MultiFab>()) {
1672#endif
1673 amrex::Abort("MLCellLinOp with EB only works with MultiFab");
1674 } else {
1676 tags.reserve(foo.local_size()*AMREX_SPACEDIM*ncomp);
1677
1678 for (MFIter mfi(foo); mfi.isValid(); ++mfi)
1679 {
1680 const Box& vbx = mfi.validbox();
1681
1682 const auto & bdlv = bcondloc.bndryLocs(mfi);
1683 const auto & bdcv = bcondloc.bndryConds(mfi);
1684
1685 auto fabtyp = (flags) ? (*flags)[mfi].getType(vbx) : FabType::regular;
1686
1687 for (int idim = 0; idim < AMREX_SPACEDIM; ++idim)
1688 {
1689 if (idim != hidden_direction && fabtyp != FabType::covered) {
1690 const Orientation olo(idim,Orientation::low);
1691 const Orientation ohi(idim,Orientation::high);
1692 auto const& ap = (fabtyp == FabType::singlevalued)
1693 ? area[idim]->const_array(mfi) : Array4<Real const>{};
1694 for (int icomp = 0; icomp < ncomp; ++icomp) {
1695 tags.emplace_back(MLMGPSEBTag<RT>{undrrelxr[olo].array(mfi),
1696 undrrelxr[ohi].array(mfi),
1697 ap,
1698 maskvals[olo].const_array(mfi),
1699 maskvals[ohi].const_array(mfi),
1700 bdlv[icomp][olo], bdlv[icomp][ohi],
1701 amrex::adjCell(vbx,olo),
1702 bdcv[icomp][olo], bdcv[icomp][ohi],
1703 vbx.length(idim), icomp, idim});
1704 }
1705 }
1706 }
1707 }
1708
1709 ParallelFor(tags,
1710 [=] AMREX_GPU_DEVICE (int i, int j, int k, MLMGPSEBTag<RT> const& tag) noexcept
1711 {
1712 if (tag.ap) {
1713 if (tag.dir == 0)
1714 {
1715 mllinop_comp_interp_coef0_x_eb
1716 (0, i , j, k, tag.blen, tag.flo, tag.mlo, tag.ap,
1717 tag.bctlo, tag.bcllo, imaxorder, dxi, tag.comp);
1718 mllinop_comp_interp_coef0_x_eb
1719 (1, i+tag.blen+1, j, k, tag.blen, tag.fhi, tag.mhi, tag.ap,
1720 tag.bcthi, tag.bclhi, imaxorder, dxi, tag.comp);
1721 }
1722#if (AMREX_SPACEDIM > 1)
1723 else
1724#if (AMREX_SPACEDIM > 2)
1725 if (tag.dir == 1)
1726#endif
1727 {
1728 mllinop_comp_interp_coef0_y_eb
1729 (0, i, j , k, tag.blen, tag.flo, tag.mlo, tag.ap,
1730 tag.bctlo, tag.bcllo, imaxorder, dyi, tag.comp);
1731 mllinop_comp_interp_coef0_y_eb
1732 (1, i, j+tag.blen+1, k, tag.blen, tag.fhi, tag.mhi, tag.ap,
1733 tag.bcthi, tag.bclhi, imaxorder, dyi, tag.comp);
1734 }
1735#if (AMREX_SPACEDIM > 2)
1736 else {
1738 (0, i, j, k , tag.blen, tag.flo, tag.mlo, tag.ap,
1739 tag.bctlo, tag.bcllo, imaxorder, dzi, tag.comp);
1741 (1, i, j, k+tag.blen+1, tag.blen, tag.fhi, tag.mhi, tag.ap,
1742 tag.bcthi, tag.bclhi, imaxorder, dzi, tag.comp);
1743 }
1744#endif
1745#endif
1746 } else {
1747 if (tag.dir == 0)
1748 {
1750 (0, i , j, k, tag.blen, tag.flo, tag.mlo,
1751 tag.bctlo, tag.bcllo, imaxorder, dxi, tag.comp);
1753 (1, i+tag.blen+1, j, k, tag.blen, tag.fhi, tag.mhi,
1754 tag.bcthi, tag.bclhi, imaxorder, dxi, tag.comp);
1755 }
1756#if (AMREX_SPACEDIM > 1)
1757 else
1758#if (AMREX_SPACEDIM > 2)
1759 if (tag.dir == 1)
1760#endif
1761 {
1763 (0, i, j , k, tag.blen, tag.flo, tag.mlo,
1764 tag.bctlo, tag.bcllo, imaxorder, dyi, tag.comp);
1766 (1, i, j+tag.blen+1, k, tag.blen, tag.fhi, tag.mhi,
1767 tag.bcthi, tag.bclhi, imaxorder, dyi, tag.comp);
1768 }
1769#if (AMREX_SPACEDIM > 2)
1770 else {
1772 (0, i, j, k , tag.blen, tag.flo, tag.mlo,
1773 tag.bctlo, tag.bcllo, imaxorder, dzi, tag.comp);
1775 (1, i, j, k+tag.blen+1, tag.blen, tag.fhi, tag.mhi,
1776 tag.bcthi, tag.bclhi, imaxorder, dzi, tag.comp);
1777 }
1778#endif
1779#endif
1780 }
1781 });
1782 }
1783 } else
1784#endif
1785 {
1787 tags.reserve(foo.local_size()*AMREX_SPACEDIM*ncomp);
1788
1789 for (MFIter mfi(foo); mfi.isValid(); ++mfi)
1790 {
1791 const Box& vbx = mfi.validbox();
1792
1793 const auto & bdlv = bcondloc.bndryLocs(mfi);
1794 const auto & bdcv = bcondloc.bndryConds(mfi);
1795
1796 for (int idim = 0; idim < AMREX_SPACEDIM; ++idim)
1797 {
1798 if (idim != hidden_direction) {
1799 const Orientation olo(idim,Orientation::low);
1800 const Orientation ohi(idim,Orientation::high);
1801 for (int icomp = 0; icomp < ncomp; ++icomp) {
1802 tags.emplace_back(MLMGPSTag<RT>{undrrelxr[olo].array(mfi),
1803 undrrelxr[ohi].array(mfi),
1804 maskvals[olo].const_array(mfi),
1805 maskvals[ohi].const_array(mfi),
1806 bdlv[icomp][olo], bdlv[icomp][ohi],
1807 amrex::adjCell(vbx,olo),
1808 bdcv[icomp][olo], bdcv[icomp][ohi],
1809 vbx.length(idim), icomp, idim});
1810 }
1811 }
1812 }
1813 }
1814
1815 ParallelFor(tags,
1816 [=] AMREX_GPU_DEVICE (int i, int j, int k, MLMGPSTag<RT> const& tag) noexcept
1817 {
1818 if (tag.dir == 0)
1819 {
1820 mllinop_comp_interp_coef0_x
1821 (0, i , j, k, tag.blen, tag.flo, tag.mlo,
1822 tag.bctlo, tag.bcllo, imaxorder, dxi, tag.comp);
1823 mllinop_comp_interp_coef0_x
1824 (1, i+tag.blen+1, j, k, tag.blen, tag.fhi, tag.mhi,
1825 tag.bcthi, tag.bclhi, imaxorder, dxi, tag.comp);
1826 }
1827#if (AMREX_SPACEDIM > 1)
1828 else
1829#if (AMREX_SPACEDIM > 2)
1830 if (tag.dir == 1)
1831#endif
1832 {
1833 mllinop_comp_interp_coef0_y
1834 (0, i, j , k, tag.blen, tag.flo, tag.mlo,
1835 tag.bctlo, tag.bcllo, imaxorder, dyi, tag.comp);
1836 mllinop_comp_interp_coef0_y
1837 (1, i, j+tag.blen+1, k, tag.blen, tag.fhi, tag.mhi,
1838 tag.bcthi, tag.bclhi, imaxorder, dyi, tag.comp);
1839 }
1840#if (AMREX_SPACEDIM > 2)
1841 else {
1843 (0, i, j, k , tag.blen, tag.flo, tag.mlo,
1844 tag.bctlo, tag.bcllo, imaxorder, dzi, tag.comp);
1846 (1, i, j, k+tag.blen+1, tag.blen, tag.fhi, tag.mhi,
1847 tag.bcthi, tag.bclhi, imaxorder, dzi, tag.comp);
1848 }
1849#endif
1850#endif
1851 });
1852 }
1853 } else
1854#endif
1855 {
1856#ifdef AMREX_USE_OMP
1857#pragma omp parallel
1858#endif
1859 for (MFIter mfi(foo, MFItInfo{}.SetDynamic(true)); mfi.isValid(); ++mfi)
1860 {
1861 const Box& vbx = mfi.validbox();
1862
1863 const auto & bdlv = bcondloc.bndryLocs(mfi);
1864 const auto & bdcv = bcondloc.bndryConds(mfi);
1865
1866#ifdef AMREX_USE_EB
1867 auto fabtyp = (flags) ? (*flags)[mfi].getType(vbx) : FabType::regular;
1868#endif
1869 for (int idim = 0; idim < AMREX_SPACEDIM; ++idim)
1870 {
1871 if (idim == hidden_direction) { continue; }
1872 const Orientation olo(idim,Orientation::low);
1873 const Orientation ohi(idim,Orientation::high);
1874 const Box blo = amrex::adjCellLo(vbx, idim);
1875 const Box bhi = amrex::adjCellHi(vbx, idim);
1876 const int blen = vbx.length(idim);
1877 const auto& mlo = maskvals[olo].array(mfi);
1878 const auto& mhi = maskvals[ohi].array(mfi);
1879 const auto& flo = undrrelxr[olo].array(mfi);
1880 const auto& fhi = undrrelxr[ohi].array(mfi);
1881 for (int icomp = 0; icomp < ncomp; ++icomp) {
1882 const BoundCond bctlo = bdcv[icomp][olo];
1883 const BoundCond bcthi = bdcv[icomp][ohi];
1884 const auto bcllo = bdlv[icomp][olo];
1885 const auto bclhi = bdlv[icomp][ohi];
1886#ifdef AMREX_USE_EB
1887 if (fabtyp == FabType::singlevalued) {
1888 if constexpr (!std::is_same<MF,MultiFab>()) {
1889 amrex::Abort("MLCellLinOp with EB only works with MultiFab");
1890 } else {
1891 auto const& ap = area[idim]->const_array(mfi);
1892 if (idim == 0) {
1893 mllinop_comp_interp_coef0_x_eb
1894 (0, blo, blen, flo, mlo, ap, bctlo, bcllo,
1895 imaxorder, dxi, icomp);
1896 mllinop_comp_interp_coef0_x_eb
1897 (1, bhi, blen, fhi, mhi, ap, bcthi, bclhi,
1898 imaxorder, dxi, icomp);
1899 } else if (idim == 1) {
1900 mllinop_comp_interp_coef0_y_eb
1901 (0, blo, blen, flo, mlo, ap, bctlo, bcllo,
1902 imaxorder, dyi, icomp);
1903 mllinop_comp_interp_coef0_y_eb
1904 (1, bhi, blen, fhi, mhi, ap, bcthi, bclhi,
1905 imaxorder, dyi, icomp);
1906 } else {
1907 mllinop_comp_interp_coef0_z_eb
1908 (0, blo, blen, flo, mlo, ap, bctlo, bcllo,
1909 imaxorder, dzi, icomp);
1910 mllinop_comp_interp_coef0_z_eb
1911 (1, bhi, blen, fhi, mhi, ap, bcthi, bclhi,
1912 imaxorder, dzi, icomp);
1913 }
1914 }
1915 } else if (fabtyp == FabType::regular)
1916#endif
1917 {
1918 if (idim == 0) {
1919 mllinop_comp_interp_coef0_x
1920 (0, blo, blen, flo, mlo, bctlo, bcllo,
1921 imaxorder, dxi, icomp);
1922 mllinop_comp_interp_coef0_x
1923 (1, bhi, blen, fhi, mhi, bcthi, bclhi,
1924 imaxorder, dxi, icomp);
1925 } else if (idim == 1) {
1926 mllinop_comp_interp_coef0_y
1927 (0, blo, blen, flo, mlo, bctlo, bcllo,
1928 imaxorder, dyi, icomp);
1929 mllinop_comp_interp_coef0_y
1930 (1, bhi, blen, fhi, mhi, bcthi, bclhi,
1931 imaxorder, dyi, icomp);
1932 } else {
1933 mllinop_comp_interp_coef0_z
1934 (0, blo, blen, flo, mlo, bctlo, bcllo,
1935 imaxorder, dzi, icomp);
1936 mllinop_comp_interp_coef0_z
1937 (1, bhi, blen, fhi, mhi, bcthi, bclhi,
1938 imaxorder, dzi, icomp);
1939 }
1940 }
1941 }
1942 }
1943 }
1944 }
1945 }
1946 }
1947}
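 // The coefficients written into m_undrrelxr above are the one-sided
 // interpolation weights the derived operators use when applying boundary
 // conditions in their Fapply/Fsmooth kernels.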
1948
1949template <typename MF>
1950auto
1951MLCellLinOpT<MF>::xdoty (int /*amrlev*/, int /*mglev*/, const MF& x, const MF& y, bool local) const
1952 -> RT
1953{
1954 const int ncomp = this->getNComp();
1955 const IntVect nghost(0);
1956 RT result = amrex::Dot(x,0,y,0,ncomp,nghost,true);
1957 if (!local) {
1958 ParallelAllReduce::Sum(result, ParallelContext::CommunicatorSub());
1959 }
1960 return result;
1961}
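 // With local == true the caller receives the rank-local partial sum and is
 // responsible for the MPI reduction itself, which lets a bottom solver
 // combine several dot products into a single all-reduce.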
1962
1963template <typename MF>
1964auto
1966{
1967 const int ncomp = this->getNComp();
1968 const IntVect nghost(0);
1969 RT result = 0;
1970 for (int ilev = 0; ilev < this->NAMRLevels()-1; ++ilev) {
1971 result += amrex::Dot(*m_norm_fine_mask[ilev], *x[ilev], 0, *y[ilev], 0, ncomp, nghost, true);
1972 }
1973 result += amrex::Dot(*x[this->NAMRLevels()-1], 0,
1974 *y[this->NAMRLevels()-1], 0, ncomp, nghost, true);
1975 ParallelAllReduce::Sum(result, ParallelContext::CommunicatorSub());
1976 return result;
1977}
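 // On all but the finest AMR level the dot product is masked by
 // m_norm_fine_mask so that cells covered by a finer level are not counted
 // twice; the finest level contributes all of its cells.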
1978
1979template <typename MF>
1980auto
1982{
1983 const int ncomp = this->getNComp();
1984 const IntVect nghost(0);
1985 RT result = 0;
1986 for (int ilev = 0; ilev < this->NAMRLevels()-1; ++ilev) {
1987 result += amrex::Dot(*m_norm_fine_mask[ilev], *x[ilev], 0, ncomp, nghost, true);
1988 }
1989 result += amrex::Dot(*x[this->NAMRLevels()-1], 0, ncomp, nghost, true);
1990 ParallelAllReduce::Sum(result, ParallelContext::CommunicatorSub());
1991 return std::sqrt(result);
1992}
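 // Same masking convention as dotProductPrecond, applied to x with itself,
 // which yields the composite L2 norm over uncovered cells.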
1993
1994template <typename MF>
1995void
1997{
1998 if (!m_volinv.empty()) { return; }
1999
2000 m_volinv.resize(this->m_num_amr_levels);
2001 for (int amrlev = 0; amrlev < this->m_num_amr_levels; ++amrlev) {
2002 m_volinv[amrlev].resize(this->NMGLevels(amrlev));
2003 }
2004
2005 // We don't need to compute this for every level; only (0,0) and (0,bottom) are used below.
2006
2007 auto f = [&] (int amrlev, int mglev) {
2008#ifdef AMREX_USE_EB
2009 const auto *factory = dynamic_cast<EBFArrayBoxFactory const*>(this->Factory(amrlev,mglev));
2010 if (factory && !factory->isAllRegular())
2011 {
2012 if constexpr (std::is_same<MF,MultiFab>()) {
2013 const auto& vfrac = factory->getVolFrac();
2014 m_volinv[amrlev][mglev] = vfrac.sum(0,true);
2015 } else {
2016 amrex::Abort("MLCellLinOp with EB only works with MultiFab");
2017 }
2018 }
2019 else
2020#endif
2021 {
2022 auto const npts = (this->m_coarse_fine_bc_type == LinOpBCType::Dirichlet)
2023 ? this->compactify(this->Geom(amrlev,mglev).Domain()).d_numPts()
2024 : this->m_grids[amrlev][mglev].d_numPts();
2025 AMREX_ASSERT(npts > 0.);
2026 m_volinv[amrlev][mglev] = RT(1.0 / npts);
2027 }
2028 };
2029
2030 // amrlev = 0, mglev = 0
2031 f(0,0);
2032
2033 int mgbottom = this->NMGLevels(0)-1;
2034 f(0,mgbottom);
2035
2036#ifdef AMREX_USE_EB
2037 RT temp1, temp2;
2038 const auto *factory = dynamic_cast<EBFArrayBoxFactory const*>(this->Factory(0,0));
2039 if (factory && !factory->isAllRegular())
2040 {
2041 ParallelAllReduce::Sum<RT>({m_volinv[0][0], m_volinv[0][mgbottom]},
2042 ParallelContext::CommunicatorSub());
2043 temp1 = RT(1.0)/m_volinv[0][0];
2044 temp2 = RT(1.0)/m_volinv[0][mgbottom];
2045 }
2046 else
2047 {
2048 temp1 = m_volinv[0][0];
2049 temp2 = m_volinv[0][mgbottom];
2050 }
2051 m_volinv[0][0] = temp1;
2052 m_volinv[0][mgbottom] = temp2;
2053#endif
2054}
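 // In the EB case m_volinv first holds the rank-local sum of volume
 // fractions; after the all-reduce above it is inverted so that, as in the
 // regular case, it stores one over the (fractional) number of cells.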
2055
2056template <typename MF>
2057auto
2058MLCellLinOpT<MF>::normInf (int amrlev, MF const& mf, bool local) const -> RT
2059{
2060 const int ncomp = this->getNComp();
2061 const int finest_level = this->NAMRLevels() - 1;
2062 RT norm = RT(0.0);
2063#ifdef AMREX_USE_EB
2064 const auto *factory = dynamic_cast<EBFArrayBoxFactory const*>(this->Factory(amrlev));
2065 if (factory && !factory->isAllRegular()) {
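 // On CUDA + Windows a runtime if replaces if constexpr, presumably to work
 // around a compiler limitation; both branches compile there, so the
 // behavior is identical.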
2066#if defined(AMREX_USE_CUDA) && defined(_WIN32)
2067 if (!std::is_same<MF,MultiFab>()) {
2068#else
2069 if constexpr (!std::is_same<MF,MultiFab>()) {
2070#endif
2071 amrex::Abort("MLCellLinOpT with EB only works with MultiFab");
2072 } else {
2073 const MultiFab& vfrac = factory->getVolFrac();
2074 if (amrlev == finest_level) {
2075#ifdef AMREX_USE_GPU
2076 if (Gpu::inLaunchRegion()) {
2077 auto const& ma = mf.const_arrays();
2078 auto const& vfrac_ma = vfrac.const_arrays();
2079 norm = ParReduce(TypeList<ReduceOpMax>{}, TypeList<Real>{},
2080 mf, IntVect(0), ncomp,
2081 [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k, int n)
2082 -> GpuTuple<Real>
2083 {
2084 return std::abs(ma[box_no](i,j,k,n)
2085 *vfrac_ma[box_no](i,j,k));
2086 });
2087 } else
2088#endif
2089 {
2090#ifdef AMREX_USE_OMP
2091#pragma omp parallel reduction(max:norm)
2092#endif
2093 for (MFIter mfi(mf,true); mfi.isValid(); ++mfi) {
2094 Box const& bx = mfi.tilebox();
2095 auto const& fab = mf.const_array(mfi);
2096 auto const& v = vfrac.const_array(mfi);
2097 AMREX_LOOP_4D(bx, ncomp, i, j, k, n,
2098 {
2099 norm = std::max(norm, std::abs(fab(i,j,k,n)*v(i,j,k)));
2100 });
2101 }
2102 }
2103 } else {
2104#ifdef AMREX_USE_GPU
2105 if (Gpu::inLaunchRegion()) {
2106 auto const& ma = mf.const_arrays();
2107 auto const& mask_ma = m_norm_fine_mask[amrlev]->const_arrays();
2108 auto const& vfrac_ma = vfrac.const_arrays();
2109 norm = ParReduce(TypeList<ReduceOpMax>{}, TypeList<Real>{},
2110 mf, IntVect(0), ncomp,
2111 [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k, int n)
2112 -> GpuTuple<Real>
2113 {
2114 if (mask_ma[box_no](i,j,k)) {
2115 return std::abs(ma[box_no](i,j,k,n)
2116 *vfrac_ma[box_no](i,j,k));
2117 } else {
2118 return Real(0.0);
2119 }
2120 });
2121 } else
2122#endif
2123 {
2124#ifdef AMREX_USE_OMP
2125#pragma omp parallel reduction(max:norm)
2126#endif
2127 for (MFIter mfi(mf,true); mfi.isValid(); ++mfi) {
2128 Box const& bx = mfi.tilebox();
2129 auto const& fab = mf.const_array(mfi);
2130 auto const& mask = m_norm_fine_mask[amrlev]->const_array(mfi);
2131 auto const& v = vfrac.const_array(mfi);
2132 AMREX_LOOP_4D(bx, ncomp, i, j, k, n,
2133 {
2134 if (mask(i,j,k)) {
2135 norm = std::max(norm, std::abs(fab(i,j,k,n)*v(i,j,k)));
2136 }
2137 });
2138 }
2139 }
2140 }
2141 }
2142 } else
2143#endif
2144 {
2145 if (amrlev == finest_level) {
2146 norm = mf.norminf(0, ncomp, IntVect(0), true);
2147 } else {
2148 norm = mf.norminf(*m_norm_fine_mask[amrlev], 0, ncomp, IntVect(0), true);
2149 }
2150 }
2151
2152 if (!local) { ParallelAllReduce::Max(norm, ParallelContext::CommunicatorSub()); }
2153 return norm;
2154}
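 // In the EB branch above the field is weighted by the volume fraction
 // before the max is taken, so nearly covered cut cells contribute
 // proportionally less to the infinity norm.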
2155
2156template <typename MF>
2157void
2159{
2160 int ncomp = this->getNComp();
2161 for (int falev = this->NAMRLevels()-1; falev > 0; --falev)
2162 {
2163#ifdef AMREX_USE_EB
2164 if (!sol[falev].isAllRegular()) {
2165 if constexpr (std::is_same<MF,MultiFab>()) {
2166 amrex::EB_average_down(sol[falev], sol[falev-1], 0, ncomp, this->AMRRefRatio(falev-1));
2167 } else {
2168 amrex::Abort("EB_average_down only works with MultiFab");
2169 }
2170 } else
2171#endif
2172 {
2173 amrex::average_down(sol[falev], sol[falev-1], 0, ncomp, this->AMRRefRatio(falev-1));
2174 }
2175 }
2176}
2177
2178template <typename MF>
2179void
2180MLCellLinOpT<MF>::avgDownResAmr (int clev, MF& cres, MF const& fres) const
2181{
2182#ifdef AMREX_USE_EB
2183 if (!fres.isAllRegular()) {
2184 if constexpr (std::is_same<MF,MultiFab>()) {
2185 amrex::EB_average_down(fres, cres, 0, this->getNComp(),
2186 this->AMRRefRatio(clev));
2187 } else {
2188 amrex::Abort("EB_average_down only works with MultiFab");
2189 }
2190 } else
2191#endif
2192 {
2193 amrex::average_down(fres, cres, 0, this->getNComp(),
2194 this->AMRRefRatio(clev));
2195 }
2196}
2197
2198template <typename MF>
2199void
2201{
2202 this->m_precond_mode = true;
2203
2204 if (m_bndry_sol_zero.empty()) {
2205 m_bndry_sol_zero.resize(m_bndry_sol.size());
2206 const int ncomp = this->getNComp();
2207 for (int amrlev = 0; amrlev < this->m_num_amr_levels; ++amrlev) {
2208 m_bndry_sol_zero[amrlev] = std::make_unique<MLMGBndryT<MF>>
2209 (this->m_grids[amrlev][0],
2210 this->m_dmap[amrlev][0],
2211 ncomp,
2212 this->m_geom[amrlev][0]);
2213 }
2214 std::swap(m_bndry_sol, m_bndry_sol_zero);
2215 MF const* coarse_data_for_bc_save = this->m_coarse_data_for_bc;
2216 this->m_coarse_data_for_bc = nullptr;
2217 for (int amrlev = 0; amrlev < this->m_num_amr_levels; ++amrlev) {
2218 this->setLevelBC(amrlev, nullptr);
2219 }
2220 this->m_coarse_data_for_bc = coarse_data_for_bc_save;
2221 } else {
2222 std::swap(m_bndry_sol, m_bndry_sol_zero);
2223 }
2224}
2225
2226template <typename MF>
2227void
2229{
2230 this->m_precond_mode = false;
2231 std::swap(m_bndry_sol, m_bndry_sol_zero);
2232}
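 // A minimal sketch of the assumed call pattern around these two hooks,
 // with a hypothetical preconditioner apply; beginPrecondBC lazily builds
 // all-zero boundary objects on first use and swaps them into m_bndry_sol,
 // so the operator acts with homogeneous BCs while preconditioning:
 //
 //   linop.beginPrecondBC();   // boundary data is now all zero
 //   apply_mg_vcycle(z, r);    // hypothetical: z = M^{-1} r with zero BCs
 //   linop.endPrecondBC();     // original boundary data restored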
2233
2234extern template class MLCellLinOpT<MultiFab>;
2235
2236 using MLCellLinOp = MLCellLinOpT<MultiFab>;
2237
2238}
2239
2240#endif