Block-Structured AMR Software Framework
AMReX_YAFluxRegister.H
#ifndef AMREX_YAFLUXREGISTER_H_
#define AMREX_YAFLUXREGISTER_H_
#include <AMReX_Config.H>

#include <AMReX_MultiFab.H>
#include <AMReX_iMultiFab.H>
#include <AMReX_Geometry.H>
#include <AMReX_YAFluxRegister_K.H>

#ifdef AMREX_USE_OMP
#include <omp.h>
#endif

namespace amrex {

/**
 * YAFluxRegisterT is a flux register for refluxing at coarse/fine boundaries.
 * Coarse fluxes are accumulated with CrseAdd on the coarse cells bordering the
 * fine grids, fine fluxes are accumulated with FineAdd on coarse/fine boundary
 * patches, and Reflux applies the resulting correction to the coarse-level state.
 */
template <typename MF>
class YAFluxRegisterT
{
public:

    using T = typename MF::value_type;
    using FAB = typename MF::fab_type;

    YAFluxRegisterT () = default;

    YAFluxRegisterT (const BoxArray& fba, const BoxArray& cba,
                     const DistributionMapping& fdm, const DistributionMapping& cdm,
                     const Geometry& fgeom, const Geometry& cgeom,
                     const IntVect& ref_ratio, int fine_lev, int nvar);

    void define (const BoxArray& fba, const BoxArray& cba,
                 const DistributionMapping& fdm, const DistributionMapping& cdm,
                 const Geometry& fgeom, const Geometry& cgeom,
                 const IntVect& ref_ratio, int fine_lev, int nvar);

    void reset ();

    void CrseAdd (const MFIter& mfi,
                  const std::array<FAB const*, AMREX_SPACEDIM>& flux,
                  const Real* dx, Real dt, RunOn runon) noexcept;

    void CrseAdd (const MFIter& mfi,
                  const std::array<FAB const*, AMREX_SPACEDIM>& flux,
                  const Real* dx, Real dt, int srccomp, int destcomp,
                  int numcomp, RunOn runon) noexcept;

    void FineAdd (const MFIter& mfi,
                  const std::array<FAB const*, AMREX_SPACEDIM>& flux,
                  const Real* dx, Real dt, RunOn runon) noexcept;

    void FineAdd (const MFIter& mfi,
                  const std::array<FAB const*, AMREX_SPACEDIM>& a_flux,
                  const Real* dx, Real dt, int srccomp, int destcomp,
                  int numcomp, RunOn runon) noexcept;

    void Reflux (MF& state, int dc = 0);
    void Reflux (MF& state, int srccomp, int destcomp, int numcomp);

    bool CrseHasWork (const MFIter& mfi) const noexcept {
        return m_crse_fab_flag[mfi.LocalIndex()] != crse_cell;
    }

    bool FineHasWork (const MFIter& mfi) const noexcept {
        return !(m_cfp_fab[mfi.LocalIndex()].empty());
    }

    MF& getFineData ();

    MF& getCrseData ();

    enum CellType : int {
        // must be the same as in AMReX_YAFluxRegister_K.H
        crse_cell = 0, crse_fine_boundary_cell, fine_cell
    };

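    //! If a coarse cell volume is provided, CrseAdd/FineAdd scale the fluxes by
    //! dt only (instead of dt/dx), and Reflux divides the accumulated correction
    //! by this volume.  See CrseAdd, FineAdd and Reflux below.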
    void setCrseVolume (MF const* cvol) { m_cvol = cvol; }

protected:

    MF m_crse_data;
    iMultiFab m_crse_flag;
    Vector<int> m_crse_fab_flag;

    MF m_cfpatch; //!< This is built on crse/fine patches.
    MF m_cfp_mask;
    Vector<Vector<FAB*> > m_cfp_fab; //!< One entry per local fine grid; each entry holds the crse/fine patch FABs for that grid.
    Vector<int> m_cfp_localindex;

    Geometry m_fine_geom;
    Geometry m_crse_geom;

    IntVect m_ratio;
    int m_fine_level;
    int m_ncomp;

    MF const* m_cvol = nullptr;
};
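
/*
 * Rough usage sketch (illustrative only, not part of this header; fr,
 * crse_state, fine_state, crse_fluxes, fine_fluxes, dx_crse, dx_fine,
 * dt_crse and dt_fine are hypothetical names):
 *
 *     YAFluxRegisterT<MultiFab> fr(fine_grids, crse_grids, fine_dmap, crse_dmap,
 *                                  fine_geom, crse_geom, ref_ratio, fine_level, ncomp);
 *     fr.reset();                                   // at the start of a coarse step
 *     for (MFIter mfi(crse_state); mfi.isValid(); ++mfi) {
 *         if (fr.CrseHasWork(mfi)) {
 *             fr.CrseAdd(mfi, crse_fluxes, dx_crse, dt_crse, RunOn::Gpu);
 *         }
 *     }
 *     for (MFIter mfi(fine_state); mfi.isValid(); ++mfi) {   // during the fine-level advance
 *         if (fr.FineHasWork(mfi)) {
 *             fr.FineAdd(mfi, fine_fluxes, dx_fine, dt_fine, RunOn::Gpu);
 *         }
 *     }
 *     fr.Reflux(crse_state);                        // after the fine level has caught up
 */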

template <typename MF>
YAFluxRegisterT<MF>::YAFluxRegisterT (const BoxArray& fba, const BoxArray& cba,
                                      const DistributionMapping& fdm, const DistributionMapping& cdm,
                                      const Geometry& fgeom, const Geometry& cgeom,
                                      const IntVect& ref_ratio, int fine_lev, int nvar)
{
    define(fba, cba, fdm, cdm, fgeom, cgeom, ref_ratio, fine_lev, nvar);
}

template <typename MF>
void
YAFluxRegisterT<MF>::define (const BoxArray& fba, const BoxArray& cba,
                             const DistributionMapping& fdm, const DistributionMapping& cdm,
                             const Geometry& fgeom, const Geometry& cgeom,
                             const IntVect& ref_ratio, int fine_lev, int nvar)
{
    m_fine_geom = fgeom;
    m_crse_geom = cgeom;
    m_ratio = ref_ratio;
    m_fine_level = fine_lev;
    m_ncomp = nvar;

    m_crse_data.define(cba, cdm, nvar, 0);

    m_crse_flag.define(cba, cdm, 1, 1);

    const auto& cperiod = m_crse_geom.periodicity();
    const std::vector<IntVect>& pshifts = cperiod.shiftIntVect();

    BoxArray cfba = fba;
    cfba.coarsen(ref_ratio);

    Box cdomain = m_crse_geom.Domain();
    for (int idim=0; idim < AMREX_SPACEDIM; ++idim) {
        if (m_crse_geom.isPeriodic(idim)) {
            cdomain.grow(idim,1);
        }
    }

    m_crse_fab_flag.resize(m_crse_flag.local_size(), crse_cell);

    m_crse_flag.setVal(crse_cell);
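    // Mark the coarse cells: cells covered by the coarsened fine grids become
    // fine_cell, the one-cell-wide band of coarse cells just outside the fine
    // grids becomes crse_fine_boundary_cell, and everything else stays crse_cell.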
    {
        iMultiFab foo(cfba, fdm, 1, 1, MFInfo().SetAlloc(false));
        const FabArrayBase::CPC& cpc1 = m_crse_flag.getCPC(IntVect(1), foo, IntVect(1), cperiod);
        m_crse_flag.setVal(crse_fine_boundary_cell, cpc1, 0, 1);
        const FabArrayBase::CPC& cpc0 = m_crse_flag.getCPC(IntVect(1), foo, IntVect(0), cperiod);
        m_crse_flag.setVal(fine_cell, cpc0, 0, 1);
        auto recv_layout_mask = m_crse_flag.RecvLayoutMask(cpc0);
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
        for (MFIter mfi(m_crse_flag); mfi.isValid(); ++mfi) {
            if (recv_layout_mask[mfi]) {
                m_crse_fab_flag[mfi.LocalIndex()] = fine_cell;
            }
        }
    }

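    // Build the coarse/fine patches: for each coarsened fine grid, take the
    // coarse cells in its one-cell halo that are not covered by any fine grid,
    // and assign those patches to the same rank as the fine grid they border.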
    BoxList cfp_bl;
    Vector<int> cfp_procmap;
    int nlocal = 0;
    const int myproc = ParallelDescriptor::MyProc();
    const auto n_cfba = static_cast<int>(cfba.size());
    cfba.uniqify();

#ifdef AMREX_USE_OMP

    const int nthreads = omp_get_max_threads();
    Vector<BoxList> bl_priv(nthreads, BoxList());
    Vector<Vector<int> > procmap_priv(nthreads);
    Vector<Vector<int> > localindex_priv(nthreads);
#pragma omp parallel
    {
        BoxList bl_tmp;
        const int tid = omp_get_thread_num();
        BoxList& bl = bl_priv[tid];
        Vector<int>& pmp = procmap_priv[tid];
        Vector<int>& lid = localindex_priv[tid];
#pragma omp for
        for (int i = 0; i < n_cfba; ++i)
        {
            Box bx = amrex::grow(cfba[i], 1);
            bx &= cdomain;

            cfba.complementIn(bl_tmp, bx);
            const auto ntmp = static_cast<int>(bl_tmp.size());
            bl.join(bl_tmp);

            int proc = fdm[i];
            for (int j = 0; j < ntmp; ++j) {
                pmp.push_back(proc);
            }

            if (proc == myproc) {
                lid.push_back(ntmp);
            }
        }
    }

    for (auto const& bl : bl_priv) {
        cfp_bl.join(bl);
    }

    for (auto const& pmp : procmap_priv) {
        cfp_procmap.insert(std::end(cfp_procmap), std::begin(pmp), std::end(pmp));
    }

    for (auto& lid : localindex_priv) {
        for (int nl : lid) {
            for (int j = 0; j < nl; ++j) {
                m_cfp_localindex.push_back(nlocal);
            }
            ++nlocal;
        }
    }

#else

    BoxList bl_tmp;
    for (int i = 0; i < n_cfba; ++i)
    {
        Box bx = amrex::grow(cfba[i], 1);
        bx &= cdomain;

        cfba.complementIn(bl_tmp, bx);
        const auto ntmp = static_cast<int>(bl_tmp.size());
        cfp_bl.join(bl_tmp);

        int proc = fdm[i];
        for (int j = 0; j < ntmp; ++j) {
            cfp_procmap.push_back(proc);
        }

        if (proc == myproc) {
            for (int j = 0; j < ntmp; ++j) {
                m_cfp_localindex.push_back(nlocal); // For each local cfp FAB, this stores the local index of
            }                                       // the fine grid it borders; its size is the local size of cfp.
            ++nlocal;
        }
    }

#endif

    // It's safe even if cfp_bl is empty.

    BoxArray cfp_ba(std::move(cfp_bl));
    DistributionMapping cfp_dm(std::move(cfp_procmap));
    m_cfpatch.define(cfp_ba, cfp_dm, nvar, 0);

    m_cfp_fab.resize(nlocal);
    for (MFIter mfi(m_cfpatch); mfi.isValid(); ++mfi)
    {
        const int li = mfi.LocalIndex();
        const int flgi = m_cfp_localindex[li];
        FAB& fab = m_cfpatch[mfi];
        m_cfp_fab[flgi].push_back(&fab);
    }

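    // With periodic boundaries a crse/fine patch may extend outside the domain.
    // The mask built below is 1 everywhere except on out-of-domain cells whose
    // periodic image is covered by a fine grid; Reflux multiplies the patch data
    // by this mask so that such cells do not contribute.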
    bool is_periodic = m_fine_geom.isAnyPeriodic();
    if (is_periodic) {
        m_cfp_mask.define(cfp_ba, cfp_dm, 1, 0);
        m_cfp_mask.setVal(T(1.0));

        Vector<Array4BoxTag<T> > tags;

        bool run_on_gpu = Gpu::inLaunchRegion();
        amrex::ignore_unused(run_on_gpu, tags);

        const Box& domainbox = m_crse_geom.Domain();

#ifdef AMREX_USE_OMP
#pragma omp parallel if (!run_on_gpu)
#endif
        {
            std::vector< std::pair<int,Box> > isects;

            for (MFIter mfi(m_cfp_mask); mfi.isValid(); ++mfi)
            {
                const Box& bx = mfi.fabbox();
                if (!domainbox.contains(bx)) // part of the box is outside the periodic boundary
                {
                    FAB& fab = m_cfp_mask[mfi];
#ifdef AMREX_USE_GPU
                    auto const& arr = m_cfp_mask.array(mfi);
#endif
                    for (const auto& iv : pshifts)
                    {
                        if (iv != IntVect::TheZeroVector())
                        {
                            cfba.intersections(bx+iv, isects);
                            for (const auto& is : isects)
                            {
                                const Box& ibx = is.second - iv;
#ifdef AMREX_USE_GPU
                                if (run_on_gpu) {
                                    tags.push_back({arr,ibx});
                                } else
#endif
                                {
                                    fab.template setVal<RunOn::Host>(T(0.0), ibx);
                                }
                            }
                        }
                    }
                }
            }
        }

#ifdef AMREX_USE_GPU
        amrex::ParallelFor(tags, 1,
        [=] AMREX_GPU_DEVICE (int i, int j, int k, int n, Array4BoxTag<T> const& tag) noexcept
        {
            tag.dfab(i,j,k,n) = T(0);
        });
#endif
    }
}

template <typename MF>
void
YAFluxRegisterT<MF>::reset ()
{
    m_crse_data.setVal(T(0.0));
    m_cfpatch.setVal(T(0.0));
}

template <typename MF>
void
YAFluxRegisterT<MF>::CrseAdd (const MFIter& mfi,
                              const std::array<FAB const*, AMREX_SPACEDIM>& flux,
                              const Real* dx, Real dt, RunOn runon) noexcept
{
    BL_ASSERT(m_crse_data.nComp() == flux[0]->nComp());
    int srccomp = 0;
    int destcomp = 0;
    int numcomp = m_crse_data.nComp();
    CrseAdd(mfi, flux, dx, dt, srccomp, destcomp, numcomp, runon);
}

template <typename MF>
void
YAFluxRegisterT<MF>::CrseAdd (const MFIter& mfi,
                              const std::array<FAB const*, AMREX_SPACEDIM>& flux,
                              const Real* dx, Real dt, int srccomp, int destcomp,
                              int numcomp, RunOn runon) noexcept
{
    BL_ASSERT(m_crse_data.nComp() >= destcomp+numcomp &&
              flux[0]->nComp() >= srccomp+numcomp);

    //
    // We assume that the fluxes have been passed in starting at component srccomp.
    // "destcomp" refers to the indexing in the arrays internal to the flux register.
    //

    if (m_crse_fab_flag[mfi.LocalIndex()] == crse_cell) {
        return; // this coarse fab is not close to fine fabs.
    }

    const Box& bx = mfi.tilebox();
    AMREX_D_TERM(auto dtdx = static_cast<T>(dt/dx[0]);,
                 auto dtdy = static_cast<T>(dt/dx[1]);,
                 auto dtdz = static_cast<T>(dt/dx[2]););
    AMREX_D_TERM(FAB const* fx = flux[0];,
                 FAB const* fy = flux[1];,
                 FAB const* fz = flux[2];);

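    // If a coarse cell volume has been provided via setCrseVolume, only dt is
    // applied here; Reflux later divides the accumulated correction by the volume.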
    if (m_cvol) {
        AMREX_D_TERM(dtdx = T(dt);, dtdy = T(dt);, dtdz = T(dt););
    }

    auto dest_arr = m_crse_data.array(mfi,destcomp);
    auto const flag = m_crse_flag.const_array(mfi);

    AMREX_D_TERM(Array4<T const> fxarr = fx->const_array(srccomp);,
                 Array4<T const> fyarr = fy->const_array(srccomp);,
                 Array4<T const> fzarr = fz->const_array(srccomp););

    AMREX_LAUNCH_HOST_DEVICE_LAMBDA_FLAG ( runon, bx, tbx,
    {
        yafluxreg_crseadd(tbx, dest_arr, flag, AMREX_D_DECL(fxarr,fyarr,fzarr),
                          AMREX_D_DECL(dtdx,dtdy,dtdz),numcomp);
    });
}

template <typename MF>
void
YAFluxRegisterT<MF>::FineAdd (const MFIter& mfi,
                              const std::array<FAB const*, AMREX_SPACEDIM>& flux,
                              const Real* dx, Real dt, RunOn runon) noexcept
{
    BL_ASSERT(m_crse_data.nComp() == flux[0]->nComp());
    int srccomp = 0;
    int destcomp = 0;
    int numcomp = m_crse_data.nComp();
    FineAdd(mfi, flux, dx, dt, srccomp, destcomp, numcomp, runon);
}

template <typename MF>
void
YAFluxRegisterT<MF>::FineAdd (const MFIter& mfi,
                              const std::array<FAB const*, AMREX_SPACEDIM>& a_flux,
                              const Real* dx, Real dt, int srccomp, int destcomp,
                              int numcomp, RunOn runon) noexcept
{
    BL_ASSERT(m_cfpatch.nComp() >= destcomp+numcomp &&
              a_flux[0]->nComp() >= srccomp+numcomp);

    //
    // We assume that the fluxes have been passed in starting at component srccomp.
    // "destcomp" refers to the indexing in the arrays internal to the flux register.
    //
    const int li = mfi.LocalIndex();
    Vector<FAB*>& cfp_fabs = m_cfp_fab[li];
    if (cfp_fabs.empty()) { return; }

    const Box& tbx = mfi.tilebox();
    const Box& bx = amrex::coarsen(tbx, m_ratio);
    const Box& fbx = amrex::refine(bx, m_ratio);

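    // Fine fluxes are accumulated into the crse/fine patches with weight
    // dt/(dx*ratio), where ratio is the product of the refinement ratios over
    // all dimensions (just dt when a coarse cell volume has been provided).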
    const T ratio = static_cast<T>(AMREX_D_TERM(m_ratio[0],*m_ratio[1],*m_ratio[2]));
    std::array<T,AMREX_SPACEDIM> dtdx{{AMREX_D_DECL(static_cast<T>(dt/(dx[0]*ratio)),
                                                    static_cast<T>(dt/(dx[1]*ratio)),
                                                    static_cast<T>(dt/(dx[2]*ratio)))}};
    const Dim3 rr = m_ratio.dim3();

    if (m_cvol) {
        for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
            dtdx[idim] = T(dt);
        }
    }

    int fluxcomp = srccomp;
    std::array<FAB const*,AMREX_SPACEDIM> flux{{AMREX_D_DECL(a_flux[0],a_flux[1],a_flux[2])}};
    bool use_gpu = (runon == RunOn::Gpu) && Gpu::inLaunchRegion();
    amrex::ignore_unused(use_gpu);
    std::array<FAB,AMREX_SPACEDIM> ftmp;
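    // If the tile box does not map back onto itself under coarsen/refine (i.e.
    // tiling has cut through coarse cells), copy the fluxes into temporaries
    // covering the full refined box so complete coarse faces can be processed.
    // This fallback path runs on the host only.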
    if (fbx != tbx) {
        AMREX_ASSERT(!use_gpu);
        for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
            const Box& b = amrex::surroundingNodes(fbx,idim);
            ftmp[idim].resize(b,numcomp);
            ftmp[idim].template setVal<RunOn::Host>(T(0.0));
            ftmp[idim].template copy<RunOn::Host>(*a_flux[idim], srccomp, 0, numcomp);
            flux[idim] = &ftmp[idim];
            fluxcomp = 0;
        }
    }

    AMREX_ASSERT(bx.cellCentered());

    for (int idim=0; idim < AMREX_SPACEDIM; ++idim)
    {
        const Box& lobx = amrex::adjCellLo(bx, idim);
        const Box& hibx = amrex::adjCellHi(bx, idim);
        FAB const* f = flux[idim];
        for (FAB* cfp : cfp_fabs)
        {
            {
                const Box& lobx_is = lobx & cfp->box();
                const int side = 0;
                if (lobx_is.ok())
                {
                    auto d = cfp->array(destcomp);
                    auto dtdxs = dtdx[idim];
                    int dirside = idim*2+side;
                    Array4<T const> farr = f->const_array(fluxcomp);
                    AMREX_LAUNCH_HOST_DEVICE_LAMBDA_FLAG(runon, lobx_is, tmpbox,
                    {
                        yafluxreg_fineadd(tmpbox, d, farr, dtdxs, numcomp, dirside, rr);
                    });
                }
            }
            {
                const Box& hibx_is = hibx & cfp->box();
                const int side = 1;
                if (hibx_is.ok())
                {
                    auto d = cfp->array(destcomp);
                    auto dtdxs = dtdx[idim];
                    int dirside = idim*2+side;
                    Array4<T const> farr = f->const_array(fluxcomp);
                    AMREX_LAUNCH_HOST_DEVICE_LAMBDA_FLAG(runon, hibx_is, tmpbox,
                    {
                        yafluxreg_fineadd(tmpbox, d, farr, dtdxs, numcomp, dirside, rr);
                    });
                }
            }
        }
    }
}

template <typename MF>
void
YAFluxRegisterT<MF>::Reflux (MF& state, int dc)
{
    int srccomp = 0;
    int destcomp = dc;
    int numcomp = m_ncomp;
    Reflux(state, srccomp, destcomp, numcomp);
}

template <typename MF>
void
YAFluxRegisterT<MF>::Reflux (MF& state, int srccomp, int destcomp, int numcomp)
{
    //
    // Here "srccomp" refers to the indexing in the arrays internal to the flux register,
    // and "destcomp" refers to the indexing in the external arrays being filled by refluxing.
    //
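    // In periodic domains, first zero out the patch cells whose periodic image
    // is covered by a fine grid (see the mask construction in define()).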
    if (!m_cfp_mask.empty())
    {
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
        for (MFIter mfi(m_cfpatch); mfi.isValid(); ++mfi)
        {
            const Box& bx = m_cfpatch[mfi].box();
            auto const maskfab = m_cfp_mask.array(mfi);
            auto cfptfab = m_cfpatch.array(mfi,srccomp);
            AMREX_HOST_DEVICE_PARALLEL_FOR_4D ( bx, numcomp, i, j, k, n,
            {
                cfptfab(i,j,k,n) *= maskfab(i,j,k);
            });
        }
    }

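    // Add the fine contributions accumulated on the crse/fine patches into the
    // coarse-side data; the ParallelCopy handles periodic wrapping.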
    m_crse_data.ParallelCopy(m_cfpatch, srccomp, srccomp, numcomp, m_crse_geom.periodicity(), FabArrayBase::ADD);

    BL_ASSERT(state.nComp() >= destcomp + numcomp);
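    // If a coarse cell volume was provided, CrseAdd/FineAdd applied only dt, so
    // divide by the volume here; otherwise add the correction directly.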
    if (m_cvol) {
        auto const& dst = state.arrays();
        auto const& src = m_crse_data.const_arrays();
        auto const& vol = m_cvol->const_arrays();
        amrex::ParallelFor(state, IntVect(0), numcomp,
        [=] AMREX_GPU_DEVICE (int bno, int i, int j, int k, int n)
        {
            dst[bno](i,j,k,destcomp+n) += src[bno](i,j,k,srccomp+n) / vol[bno](i,j,k);
        });
    } else {
        amrex::Add(state, m_crse_data, srccomp, destcomp, numcomp, 0);
    }
}

template <typename MF>
MF&
YAFluxRegisterT<MF>::getFineData ()
{
    return m_cfpatch;
}

template <typename MF>
MF&
YAFluxRegisterT<MF>::getCrseData ()
{
    return m_crse_data;
}

using YAFluxRegister = YAFluxRegisterT<MultiFab>;

}

#endif