Block-Structured AMR Software Framework
 
AMReX_YAFluxRegister.H
#ifndef AMREX_YAFLUXREGISTER_H_
#define AMREX_YAFLUXREGISTER_H_
#include <AMReX_Config.H>

#include <AMReX_MultiFab.H>
#include <AMReX_iMultiFab.H>
#include <AMReX_Geometry.H>
#include <AMReX_YAFluxRegister_K.H>

#ifdef AMREX_USE_OMP
#include <omp.h>
#endif

namespace amrex {
//! Yet Another Flux Register: it accumulates the coarse and fine fluxes
//! crossing the coarse/fine boundary so that Reflux can correct the coarse
//! state for the flux mismatch.
template <typename MF>
class YAFluxRegisterT
{
public:

    using T = typename MF::value_type;
    using FAB = typename MF::fab_type;

    YAFluxRegisterT () = default;

    YAFluxRegisterT (const BoxArray& fba, const BoxArray& cba,
                     const DistributionMapping& fdm, const DistributionMapping& cdm,
                     const Geometry& fgeom, const Geometry& cgeom,
                     const IntVect& ref_ratio, int fine_lev, int nvar);

    void define (const BoxArray& fba, const BoxArray& cba,
                 const DistributionMapping& fdm, const DistributionMapping& cdm,
                 const Geometry& fgeom, const Geometry& cgeom,
                 const IntVect& ref_ratio, int fine_lev, int nvar);

    void reset ();

    void CrseAdd (const MFIter& mfi,
                  const std::array<FAB const*, AMREX_SPACEDIM>& flux,
                  const Real* dx, Real dt, RunOn runon) noexcept;

    void CrseAdd (const MFIter& mfi,
                  const std::array<FAB const*, AMREX_SPACEDIM>& flux,
                  const Real* dx, Real dt, int srccomp, int destcomp,
                  int numcomp, RunOn runon) noexcept;

    void FineAdd (const MFIter& mfi,
                  const std::array<FAB const*, AMREX_SPACEDIM>& flux,
                  const Real* dx, Real dt, RunOn runon) noexcept;

    void FineAdd (const MFIter& mfi,
                  const std::array<FAB const*, AMREX_SPACEDIM>& a_flux,
                  const Real* dx, Real dt, int srccomp, int destcomp,
                  int numcomp, RunOn runon) noexcept;

    void Reflux (MF& state, int dc = 0);
    void Reflux (MF& state, int srccomp, int destcomp, int numcomp);

    bool CrseHasWork (const MFIter& mfi) const noexcept {
        return m_crse_fab_flag[mfi.LocalIndex()] != crse_cell;
    }

    bool FineHasWork (const MFIter& mfi) const noexcept {
        return !(m_cfp_fab[mfi.LocalIndex()].empty());
    }

    MF& getFineData ();

    MF& getCrseData ();

    enum CellType : int {
        // must be the same as in AMReX_YAFluxRegister_K.H
        crse_cell = 0, crse_fine_boundary_cell, fine_cell
    };

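    //! Optionally supply the coarse cell volumes. When set, CrseAdd and
    //! FineAdd accumulate dt*flux instead of dt*flux/dx, and Reflux divides
    //! the accumulated mismatch by the cell volume.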
    void setCrseVolume (MF const* cvol) { m_cvol = cvol; }

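    //! When set, the parallel communication in Reflux is made deterministic,
    //! so that refluxed results are reproducible from run to run.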
    void setDeterministic (bool flag) { m_deterministic = flag; }
    [[nodiscard]] bool getDeterministic () const { return m_deterministic; }

protected:

    MF m_crse_data;
    iMultiFab m_crse_flag;
    Vector<int> m_crse_fab_flag;
    //! This is built on crse/fine patches.
    MF m_cfpatch;
    MF m_cfp_mask;
    //! The size of this is (# of local fine grids (# of crse/fine patches for that grid)).
    Vector<Vector<FAB*> > m_cfp_fab;
    Vector<int> m_cfp_localindex;

    Geometry m_fine_geom;
    Geometry m_crse_geom;

    IntVect m_ratio;
    int m_fine_level;
    int m_ncomp;

    MF const* m_cvol = nullptr;
    bool m_deterministic = false;
};

template <typename MF>
YAFluxRegisterT<MF>::YAFluxRegisterT (const BoxArray& fba, const BoxArray& cba,
                                      const DistributionMapping& fdm, const DistributionMapping& cdm,
                                      const Geometry& fgeom, const Geometry& cgeom,
                                      const IntVect& ref_ratio, int fine_lev, int nvar)
{
    define(fba, cba, fdm, cdm, fgeom, cgeom, ref_ratio, fine_lev, nvar);
}

template <typename MF>
void
YAFluxRegisterT<MF>::define (const BoxArray& fba, const BoxArray& cba,
                             const DistributionMapping& fdm, const DistributionMapping& cdm,
                             const Geometry& fgeom, const Geometry& cgeom,
                             const IntVect& ref_ratio, int fine_lev, int nvar)
{
    m_fine_geom = fgeom;
    m_crse_geom = cgeom;
    m_ratio = ref_ratio;
    m_fine_level = fine_lev;
    m_ncomp = nvar;

    m_crse_data.define(cba, cdm, nvar, 0);

    m_crse_flag.define(cba, cdm, 1, 1);

    const auto& cperiod = m_crse_geom.periodicity();
    const std::vector<IntVect>& pshifts = cperiod.shiftIntVect();

    BoxArray cfba = fba;
    cfba.coarsen(ref_ratio);

    Box cdomain = m_crse_geom.Domain();
    for (int idim=0; idim < AMREX_SPACEDIM; ++idim) {
        if (m_crse_geom.isPeriodic(idim)) {
            cdomain.grow(idim,1);
        }
    }

    m_crse_fab_flag.resize(m_crse_flag.local_size(), crse_cell);

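    // Classify the coarse cells: every cell starts as crse_cell; cells within
    // one cell of a coarsened fine grid are marked crse_fine_boundary_cell,
    // and cells covered by fine grids are then marked fine_cell. Coarse FABs
    // that receive fine_cell marks are flagged in m_crse_fab_flag, letting
    // CrseAdd skip FABs far away from the coarse/fine boundary.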
    m_crse_flag.setVal(crse_cell);
    {
        iMultiFab foo(cfba, fdm, 1, 1, MFInfo().SetAlloc(false));
        const FabArrayBase::CPC& cpc1 = m_crse_flag.getCPC(IntVect(1), foo, IntVect(1), cperiod);
        m_crse_flag.setVal(crse_fine_boundary_cell, cpc1, 0, 1);
        const FabArrayBase::CPC& cpc0 = m_crse_flag.getCPC(IntVect(1), foo, IntVect(0), cperiod);
        m_crse_flag.setVal(fine_cell, cpc0, 0, 1);
        auto recv_layout_mask = m_crse_flag.RecvLayoutMask(cpc0);
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
        for (MFIter mfi(m_crse_flag); mfi.isValid(); ++mfi) {
            if (recv_layout_mask[mfi]) {
                m_crse_fab_flag[mfi.LocalIndex()] = fine_cell;
            }
        }
    }

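    // Build the coarse/fine boundary patches: for each coarsened fine grid,
    // take its one-cell halo (clipped to the domain, grown in periodic
    // directions) minus all coarsened fine grids. Each resulting box is
    // assigned to the owner of the corresponding fine grid, so FineAdd
    // involves no communication.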
    BoxList cfp_bl;
    Vector<int> cfp_procmap;
    int nlocal = 0;
    const int myproc = ParallelDescriptor::MyProc();
    const auto n_cfba = static_cast<int>(cfba.size());
    cfba.uniqify();

#ifdef AMREX_USE_OMP

    const int nthreads = omp_get_max_threads();
    Vector<BoxList> bl_priv(nthreads, BoxList());
    Vector<Vector<int> > procmap_priv(nthreads);
    Vector<Vector<int> > localindex_priv(nthreads);
#pragma omp parallel
    {
        BoxList bl_tmp;
        const int tid = omp_get_thread_num();
        BoxList& bl = bl_priv[tid];
        Vector<int>& pmp = procmap_priv[tid];
        Vector<int>& lid = localindex_priv[tid];
#pragma omp for
        for (int i = 0; i < n_cfba; ++i)
        {
            Box bx = amrex::grow(cfba[i], 1);
            bx &= cdomain;

            cfba.complementIn(bl_tmp, bx);
            const auto ntmp = static_cast<int>(bl_tmp.size());
            bl.join(bl_tmp);

            int proc = fdm[i];
            for (int j = 0; j < ntmp; ++j) {
                pmp.push_back(proc);
            }

            if (proc == myproc) {
                lid.push_back(ntmp);
            }
        }
    }

    for (auto const& bl : bl_priv) {
        cfp_bl.join(bl);
    }

    for (auto const& pmp : procmap_priv) {
        cfp_procmap.insert(std::end(cfp_procmap), std::begin(pmp), std::end(pmp));
    }

    for (auto& lid : localindex_priv) {
        for (int nl : lid) {
            for (int j = 0; j < nl; ++j) {
                m_cfp_localindex.push_back(nlocal);
            }
            ++nlocal;
        }
    }

#else

    BoxList bl_tmp;
    for (int i = 0; i < n_cfba; ++i)
    {
        Box bx = amrex::grow(cfba[i], 1);
        bx &= cdomain;

        cfba.complementIn(bl_tmp, bx);
        const auto ntmp = static_cast<int>(bl_tmp.size());
        cfp_bl.join(bl_tmp);

        int proc = fdm[i];
        for (int j = 0; j < ntmp; ++j) {
            cfp_procmap.push_back(proc);
        }

        if (proc == myproc) {
            for (int j = 0; j < ntmp; ++j) {
                // This array stores the local index in the fine ba/dm.
                // Its size is the local size of cfp.
                m_cfp_localindex.push_back(nlocal);
            }
            ++nlocal;
        }
    }

#endif

    // It's safe even if cfp_bl is empty.

    BoxArray cfp_ba(std::move(cfp_bl));
    DistributionMapping cfp_dm(std::move(cfp_procmap));
    m_cfpatch.define(cfp_ba, cfp_dm, nvar, 0);

    m_cfp_fab.resize(nlocal);
    for (MFIter mfi(m_cfpatch); mfi.isValid(); ++mfi)
    {
        const int li = mfi.LocalIndex();
        const int flgi = m_cfp_localindex[li];
        FAB& fab = m_cfpatch[mfi];
        m_cfp_fab[flgi].push_back(&fab);
    }

    bool is_periodic = m_fine_geom.isAnyPeriodic();
    if (is_periodic) {
        m_cfp_mask.define(cfp_ba, cfp_dm, 1, 0);
        m_cfp_mask.setVal(T(1.0));

        Vector<Array4BoxTag<T> > tags;

        bool run_on_gpu = Gpu::inLaunchRegion();
        amrex::ignore_unused(run_on_gpu, tags);

        const Box& domainbox = m_crse_geom.Domain();

#ifdef AMREX_USE_OMP
#pragma omp parallel if (!run_on_gpu)
#endif
        {
            std::vector< std::pair<int,Box> > isects;

            for (MFIter mfi(m_cfp_mask); mfi.isValid(); ++mfi)
            {
                const Box& bx = mfi.fabbox();
                if (!domainbox.contains(bx)) // part of the box is outside the periodic boundary
                {
                    FAB& fab = m_cfp_mask[mfi];
#ifdef AMREX_USE_GPU
                    auto const& arr = m_cfp_mask.array(mfi);
#endif
                    for (const auto& iv : pshifts)
                    {
                        if (iv != IntVect::TheZeroVector())
                        {
                            cfba.intersections(bx+iv, isects);
                            for (const auto& is : isects)
                            {
                                const Box& ibx = is.second - iv;
#ifdef AMREX_USE_GPU
                                if (run_on_gpu) {
                                    tags.push_back({arr,ibx});
                                } else
#endif
                                {
                                    fab.template setVal<RunOn::Host>(T(0.0), ibx);
                                }
                            }
                        }
                    }
                }
            }
        }

#ifdef AMREX_USE_GPU
        amrex::ParallelFor(tags, 1,
        [=] AMREX_GPU_DEVICE (int i, int j, int k, int n, Array4BoxTag<T> const& tag) noexcept
        {
            tag.dfab(i,j,k,n) = T(0);
        });
#endif
    }
}

template <typename MF>
void
YAFluxRegisterT<MF>::reset ()
{
    m_crse_data.setVal(T(0.0));
    m_cfpatch.setVal(T(0.0));
}

template <typename MF>
void
YAFluxRegisterT<MF>::CrseAdd (const MFIter& mfi,
                              const std::array<FAB const*, AMREX_SPACEDIM>& flux,
                              const Real* dx, Real dt, RunOn runon) noexcept
{
    BL_ASSERT(m_crse_data.nComp() == flux[0]->nComp());
    int srccomp = 0;
    int destcomp = 0;
    int numcomp = m_crse_data.nComp();
    CrseAdd(mfi, flux, dx, dt, srccomp, destcomp, numcomp, runon);
}

template <typename MF>
void
YAFluxRegisterT<MF>::CrseAdd (const MFIter& mfi,
                              const std::array<FAB const*, AMREX_SPACEDIM>& flux,
                              const Real* dx, Real dt, int srccomp, int destcomp,
                              int numcomp, RunOn runon) noexcept
{
    BL_ASSERT(m_crse_data.nComp() >= destcomp+numcomp &&
              flux[0]->nComp() >= srccomp+numcomp);

    //
    // We assume that the fluxes have been passed in starting at component srccomp.
    // "destcomp" refers to the indexing in the arrays internal to the flux register.
    //

    if (m_crse_fab_flag[mfi.LocalIndex()] == crse_cell) {
        return; // this coarse fab is not close to fine fabs.
    }

    const Box& bx = mfi.tilebox();
    AMREX_D_TERM(auto dtdx = static_cast<T>(dt/dx[0]);,
                 auto dtdy = static_cast<T>(dt/dx[1]);,
                 auto dtdz = static_cast<T>(dt/dx[2]););
    AMREX_D_TERM(FAB const* fx = flux[0];,
                 FAB const* fy = flux[1];,
                 FAB const* fz = flux[2];);

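    // With a coarse cell volume set (see setCrseVolume), accumulate dt*flux;
    // Reflux divides by the volume afterwards.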
    if (m_cvol) {
        AMREX_D_TERM(dtdx = T(dt);, dtdy = T(dt);, dtdz = T(dt););
    }

    auto dest_arr = m_crse_data.array(mfi,destcomp);
    auto const flag = m_crse_flag.const_array(mfi);

    AMREX_D_TERM(Array4<T const> fxarr = fx->const_array(srccomp);,
                 Array4<T const> fyarr = fy->const_array(srccomp);,
                 Array4<T const> fzarr = fz->const_array(srccomp););

    AMREX_LAUNCH_HOST_DEVICE_LAMBDA_FLAG(runon, bx, tbx,
    {
        yafluxreg_crseadd(tbx, dest_arr, flag, AMREX_D_DECL(fxarr,fyarr,fzarr),
                          AMREX_D_DECL(dtdx,dtdy,dtdz),numcomp);
    });
}

template <typename MF>
void
YAFluxRegisterT<MF>::FineAdd (const MFIter& mfi,
                              const std::array<FAB const*, AMREX_SPACEDIM>& flux,
                              const Real* dx, Real dt, RunOn runon) noexcept
{
    BL_ASSERT(m_crse_data.nComp() == flux[0]->nComp());
    int srccomp = 0;
    int destcomp = 0;
    int numcomp = m_crse_data.nComp();
    FineAdd(mfi, flux, dx, dt, srccomp, destcomp, numcomp, runon);
}

template <typename MF>
void
YAFluxRegisterT<MF>::FineAdd (const MFIter& mfi,
                              const std::array<FAB const*, AMREX_SPACEDIM>& a_flux,
                              const Real* dx, Real dt, int srccomp, int destcomp,
                              int numcomp, RunOn runon) noexcept
{
    BL_ASSERT(m_cfpatch.nComp() >= destcomp+numcomp &&
              a_flux[0]->nComp() >= srccomp+numcomp);

    //
    // We assume that the fluxes have been passed in starting at component srccomp.
    // "destcomp" refers to the indexing in the arrays internal to the flux register.
    //
    const int li = mfi.LocalIndex();
    Vector<FAB*>& cfp_fabs = m_cfp_fab[li];
    if (cfp_fabs.empty()) { return; }

    const Box& tbx = mfi.tilebox();
    const Box& bx = amrex::coarsen(tbx, m_ratio);
    const Box& fbx = amrex::refine(bx, m_ratio);

    const T ratio = static_cast<T>(AMREX_D_TERM(m_ratio[0],*m_ratio[1],*m_ratio[2]));
    std::array<T,AMREX_SPACEDIM> dtdx{{AMREX_D_DECL(static_cast<T>(dt/(dx[0]*ratio)),
                                                    static_cast<T>(dt/(dx[1]*ratio)),
                                                    static_cast<T>(dt/(dx[2]*ratio)))}};
    const Dim3 rr = m_ratio.dim3();

    if (m_cvol) {
        for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
            dtdx[idim] = T(dt);
        }
    }

    int fluxcomp = srccomp;
    std::array<FAB const*,AMREX_SPACEDIM> flux{{AMREX_D_DECL(a_flux[0],a_flux[1],a_flux[2])}};
    bool use_gpu = (runon == RunOn::Gpu) && Gpu::inLaunchRegion();
    amrex::ignore_unused(use_gpu);
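    // If tiling makes tbx smaller than fbx (the full refinement of the
    // coarsened tile box), stage the fluxes in zero-filled temporaries sized
    // to fbx; this staging path must run on the host.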
    std::array<FAB,AMREX_SPACEDIM> ftmp;
    if (fbx != tbx) {
        AMREX_ASSERT(!use_gpu);
        for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
            const Box& b = amrex::surroundingNodes(fbx,idim);
            ftmp[idim].resize(b,numcomp);
            ftmp[idim].template setVal<RunOn::Host>(T(0.0));
            ftmp[idim].template copy<RunOn::Host>(*a_flux[idim], srccomp, 0, numcomp);
            flux[idim] = &ftmp[idim];
            fluxcomp = 0;
        }
    }

    AMREX_ASSERT(bx.cellCentered());

    for (int idim=0; idim < AMREX_SPACEDIM; ++idim)
    {
        const Box& lobx = amrex::adjCellLo(bx, idim);
        const Box& hibx = amrex::adjCellHi(bx, idim);
        FAB const* f = flux[idim];
        for (FAB* cfp : cfp_fabs)
        {
            {
                const Box& lobx_is = lobx & cfp->box();
                const int side = 0;
                if (lobx_is.ok())
                {
                    auto d = cfp->array(destcomp);
                    auto dtdxs = dtdx[idim];
                    int dirside = idim*2+side;
                    Array4<T const> farr = f->const_array(fluxcomp);
                    AMREX_LAUNCH_HOST_DEVICE_LAMBDA_FLAG(runon, lobx_is, tmpbox,
                    {
                        yafluxreg_fineadd(tmpbox, d, farr, dtdxs, numcomp, dirside, rr);
                    });
                }
            }
            {
                const Box& hibx_is = hibx & cfp->box();
                const int side = 1;
                if (hibx_is.ok())
                {
                    auto d = cfp->array(destcomp);
                    auto dtdxs = dtdx[idim];
                    int dirside = idim*2+side;
                    Array4<T const> farr = f->const_array(fluxcomp);
                    AMREX_LAUNCH_HOST_DEVICE_LAMBDA_FLAG(runon, hibx_is, tmpbox,
                    {
                        yafluxreg_fineadd(tmpbox, d, farr, dtdxs, numcomp, dirside, rr);
                    });
                }
            }
        }
    }
}

template <typename MF>
void
YAFluxRegisterT<MF>::Reflux (MF& state, int dc)
{
    int srccomp = 0;
    int destcomp = dc;
    int numcomp = m_ncomp;
    Reflux(state, srccomp, destcomp, numcomp);
}

template <typename MF>
void
YAFluxRegisterT<MF>::Reflux (MF& state, int srccomp, int destcomp, int numcomp)
{
    //
    // Here "srccomp" refers to the indexing in the arrays internal to the flux
    // register, and "destcomp" refers to the indexing in the external arrays
    // being filled by refluxing.
    //
    if (!m_cfp_mask.empty())
    {
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
        for (MFIter mfi(m_cfpatch); mfi.isValid(); ++mfi)
        {
            const Box& bx = m_cfpatch[mfi].box();
            auto const maskfab = m_cfp_mask.array(mfi);
            auto cfptfab = m_cfpatch.array(mfi,srccomp);
            AMREX_HOST_DEVICE_PARALLEL_FOR_4D ( bx, numcomp, i, j, k, n,
            {
                cfptfab(i,j,k,n) *= maskfab(i,j,k);
            });
        }
    }

    m_crse_data.ParallelCopy(m_cfpatch, srccomp, srccomp, numcomp,
                             IntVect(0), IntVect(0), m_crse_geom.periodicity(),
                             FabArrayBase::ADD, nullptr, m_deterministic);

    BL_ASSERT(state.nComp() >= destcomp + numcomp);
    if (m_cvol) {
        auto const& dst = state.arrays();
        auto const& src = m_crse_data.const_arrays();
        auto const& vol = m_cvol->const_arrays();
        amrex::ParallelFor(state, IntVect(0), numcomp,
        [=] AMREX_GPU_DEVICE (int bno, int i, int j, int k, int n)
        {
            dst[bno](i,j,k,destcomp+n) += src[bno](i,j,k,srccomp+n) / vol[bno](i,j,k);
        });
    } else {
        amrex::Add(state, m_crse_data, srccomp, destcomp, numcomp, 0);
    }
}

template <typename MF>
MF&
YAFluxRegisterT<MF>::getFineData ()
{
    return m_cfpatch;
}

template <typename MF>
MF&
YAFluxRegisterT<MF>::getCrseData ()
{
    return m_crse_data;
}

using YAFluxRegister = YAFluxRegisterT<MultiFab>;

}

#endif
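Usage sketch (not part of this header). A minimal outline of the intended calling sequence under stated assumptions: the grids, distribution maps, geometries, states, time steps, and face-centered flux MultiFabs (cfx/cfy/cfz on the coarse level, ffx/ffy/ffz on the fine level) are all application-defined names, and a 3D build (AMREX_SPACEDIM == 3) is assumed. The register is defined for the fine level, reset at the start of each coarse step, fed by CrseAdd on the coarse level and FineAdd on the fine level, and finally applied with Reflux.

    // Sketch only: every variable below except the register itself is assumed
    // to be set up elsewhere by the application.
    YAFluxRegisterT<MultiFab> fr(fine_grids, crse_grids, fine_dmap, crse_dmap,
                                 fine_geom, crse_geom, IntVect(2),
                                 /*fine_lev*/ 1, ncomp);

    fr.reset(); // zero the accumulators at the start of a coarse step

    // Coarse level: accumulate coarse fluxes near the coarse/fine boundary.
    for (MFIter mfi(crse_state, TilingIfNotGPU()); mfi.isValid(); ++mfi) {
        if (fr.CrseHasWork(mfi)) {
            std::array<FArrayBox const*, AMREX_SPACEDIM> flux
                {{&cfx[mfi], &cfy[mfi], &cfz[mfi]}};
            fr.CrseAdd(mfi, flux, crse_geom.CellSize(), crse_dt, RunOn::Gpu);
        }
    }

    // Fine level, every substep: accumulate fine fluxes on the boundary patches.
    for (MFIter mfi(fine_state, TilingIfNotGPU()); mfi.isValid(); ++mfi) {
        if (fr.FineHasWork(mfi)) {
            std::array<FArrayBox const*, AMREX_SPACEDIM> flux
                {{&ffx[mfi], &ffy[mfi], &ffz[mfi]}};
            fr.FineAdd(mfi, flux, fine_geom.CellSize(), fine_dt, RunOn::Gpu);
        }
    }

    // After the fine substeps: add the flux mismatch to the coarse state.
    fr.Reflux(crse_state);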