#ifndef AMREX_YAFLUXREGISTER_H_
#define AMREX_YAFLUXREGISTER_H_
#include <AMReX_Config.H>

#include <AMReX_MultiFab.H>
#include <AMReX_iMultiFab.H>
#include <AMReX_Geometry.H>
#include <AMReX_YAFluxRegister_K.H>

namespace amrex {

template <typename MF>
class YAFluxRegisterT
{
public:

    using T = typename MF::value_type;
    using FAB = typename MF::fab_type;

    YAFluxRegisterT () = default;

    YAFluxRegisterT (const BoxArray& fba, const BoxArray& cba,
                     const DistributionMapping& fdm, const DistributionMapping& cdm,
                     const Geometry& fgeom, const Geometry& cgeom,
                     const IntVect& ref_ratio, int fine_lev, int nvar);

    void define (const BoxArray& fba, const BoxArray& cba,
                 const DistributionMapping& fdm, const DistributionMapping& cdm,
                 const Geometry& fgeom, const Geometry& cgeom,
                 const IntVect& ref_ratio, int fine_lev, int nvar);

    void reset ();

    void CrseAdd (const MFIter& mfi,
                  const std::array<FAB const*, AMREX_SPACEDIM>& flux,
                  const Real* dx, Real dt, RunOn runon) noexcept;

    void CrseAdd (const MFIter& mfi,
                  const std::array<FAB const*, AMREX_SPACEDIM>& flux,
                  const Real* dx, Real dt, int srccomp, int destcomp,
                  int numcomp, RunOn runon) noexcept;

    void FineAdd (const MFIter& mfi,
                  const std::array<FAB const*, AMREX_SPACEDIM>& flux,
                  const Real* dx, Real dt, RunOn runon) noexcept;

    void FineAdd (const MFIter& mfi,
                  const std::array<FAB const*, AMREX_SPACEDIM>& a_flux,
                  const Real* dx, Real dt, int srccomp, int destcomp,
                  int numcomp, RunOn runon) noexcept;

    void Reflux (MF& state, int dc = 0);
    void Reflux (MF& state, int srccomp, int destcomp, int numcomp);

    bool CrseHasWork (const MFIter& mfi) const noexcept {
        return m_crse_fab_flag[mfi.LocalIndex()] != crse_cell;
    }

    bool FineHasWork (const MFIter& mfi) const noexcept {
        return !(m_cfp_fab[mfi.LocalIndex()].empty());
    }

    MF& getFineData ();
    MF& getCrseData ();

    enum CellType : int {
        crse_cell = 0, crse_fine_boundary_cell = 1, fine_cell = 2
    };

    void setCrseVolume (MF const* cvol) { m_cvol = cvol; }

protected:

    MF m_crse_data;
    iMultiFab m_crse_flag;
    Vector<int> m_crse_fab_flag;

    MF m_cfpatch; //!< This is built on crse/fine patches.
    MF m_cfp_mask;
    //! The size of this is (# of local fine grids (# of crse/fine patches for that grid))
    Vector<Vector<FAB*> > m_cfp_fab;
    Vector<int> m_cfp_localindex;

    Geometry m_fine_geom;
    Geometry m_crse_geom;

    IntVect m_ratio;
    int m_fine_level;
    int m_ncomp;

    MF const* m_cvol = nullptr;
};
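/* Typical life-cycle in an AMR time step (a minimal sketch; `fr`, the flux
 * MultiFabs `fx`/`fy`/`fz`, and the state/geometry names are hypothetical
 * caller-side data, not members of this class):
 *
 *     fr.reset();                                    // zero both registers
 *     // while advancing the coarse level:
 *     for (MFIter mfi(crse_state, TilingIfNotGPU()); mfi.isValid(); ++mfi) {
 *         fr.CrseAdd(mfi, {AMREX_D_DECL(&fx[mfi], &fy[mfi], &fz[mfi])},
 *                    crse_geom.CellSize(), crse_dt, RunOn::Gpu);
 *     }
 *     // while advancing the fine level (in each substep):
 *     for (MFIter mfi(fine_state, TilingIfNotGPU()); mfi.isValid(); ++mfi) {
 *         fr.FineAdd(mfi, {AMREX_D_DECL(&fx[mfi], &fy[mfi], &fz[mfi])},
 *                    fine_geom.CellSize(), fine_dt, RunOn::Gpu);
 *     }
 *     fr.Reflux(crse_state);                         // apply the correction
 */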
template <typename MF>
YAFluxRegisterT<MF>::YAFluxRegisterT (const BoxArray& fba, const BoxArray& cba,
                                      const DistributionMapping& fdm, const DistributionMapping& cdm,
                                      const Geometry& fgeom, const Geometry& cgeom,
                                      const IntVect& ref_ratio, int fine_lev, int nvar)
{
    define(fba, cba, fdm, cdm, fgeom, cgeom, ref_ratio, fine_lev, nvar);
}
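// Constructing a register between AMR levels lev-1 (coarse) and lev (fine);
// a sketch, where `grids`, `dmap`, `geom`, `ref_ratio`, and `ncomp` are
// hypothetical caller-side data in the usual AmrCore layout:
//
//     YAFluxRegisterT<MultiFab> fr(grids[lev], grids[lev-1],
//                                  dmap[lev], dmap[lev-1],
//                                  geom[lev], geom[lev-1],
//                                  ref_ratio[lev-1], lev, ncomp);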
template <typename MF>
void
YAFluxRegisterT<MF>::define (const BoxArray& fba, const BoxArray& cba,
                             const DistributionMapping& fdm, const DistributionMapping& cdm,
                             const Geometry& fgeom, const Geometry& cgeom,
                             const IntVect& ref_ratio, int fine_lev, int nvar)
{
    m_fine_geom = fgeom;
    m_crse_geom = cgeom;
    m_ratio = ref_ratio;
    m_fine_level = fine_lev;
    m_ncomp = nvar;

    m_crse_data.define(cba, cdm, nvar, 0);

    m_crse_flag.define(cba, cdm, 1, 1);

    const auto& cperiod = m_crse_geom.periodicity();
    const std::vector<IntVect>& pshifts = cperiod.shiftIntVect();

    BoxArray cfba = fba;
    cfba.coarsen(ref_ratio);

    // Grow the coarse domain in the periodic directions so that shifted
    // intersections across periodic boundaries are found below.
    Box cdomain = m_crse_geom.Domain();
    for (int idim=0; idim < AMREX_SPACEDIM; ++idim) {
        if (m_crse_geom.isPeriodic(idim)) {
            cdomain.grow(idim,1);
        }
    }

    // Flag each coarse cell as crse_cell, crse_fine_boundary_cell, or
    // fine_cell; cpc0 is the parallel-copy pattern between m_crse_flag and
    // the coarsened fine BoxArray.
    m_crse_fab_flag.resize(m_crse_flag.local_size(), crse_cell);
    // ...
    m_crse_flag.setVal(fine_cell, cpc0, 0, 1);
    auto recv_layout_mask = m_crse_flag.RecvLayoutMask(cpc0);
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
    for (MFIter mfi(m_crse_flag); mfi.isValid(); ++mfi) {
        if (recv_layout_mask[mfi]) {
            m_crse_fab_flag[mfi.LocalIndex()] = fine_cell;
        }
    }

    // Build the crse/fine patches: for each coarsened fine grid, the patches
    // are the parts of its one-cell halo that are not covered by any
    // coarsened fine grid.
    const auto n_cfba = static_cast<int>(cfba.size());
    BoxList cfp_bl;
    Vector<int> cfp_procmap;
    int nlocal = 0;
    const int myproc = ParallelDescriptor::MyProc();

#ifdef AMREX_USE_OMP
    const int nthreads = omp_get_max_threads();
    Vector<BoxList> bl_priv(nthreads, BoxList());
    Vector<Vector<int> > procmap_priv(nthreads);
    Vector<Vector<int> > localindex_priv(nthreads);
#pragma omp parallel
    {
        const int tid = omp_get_thread_num();
        BoxList& bl = bl_priv[tid];
        Vector<int>& pmp = procmap_priv[tid];
        Vector<int>& lid = localindex_priv[tid];
#pragma omp for
        for (int i = 0; i < n_cfba; ++i)
        {
            BoxList bl_tmp = cfba.complementIn(amrex::grow(cfba[i],1) & cdomain);
            const auto ntmp = static_cast<int>(bl_tmp.size());
            bl.join(bl_tmp);

            int proc = fdm[i];
            for (int j = 0; j < ntmp; ++j) {
                pmp.push_back(proc);
            }
            if (proc == myproc) {
                lid.push_back(ntmp);
            }
        }
    }

    // Merge the per-thread results in thread order to keep the box order
    // deterministic.
    for (auto const& bl : bl_priv) {
        cfp_bl.join(bl);
    }
    for (auto const& pmp : procmap_priv) {
        cfp_procmap.insert(std::end(cfp_procmap), std::begin(pmp), std::end(pmp));
    }
    for (auto& lid : localindex_priv) {
        for (int nl : lid) {
            // A fine grid may contribute several patches; they all share the
            // same local index.
            for (int j = 0; j < nl; ++j) {
                m_cfp_localindex.push_back(nlocal);
            }
            ++nlocal;
        }
    }
#else
    for (int i = 0; i < n_cfba; ++i)
    {
        BoxList bl_tmp = cfba.complementIn(amrex::grow(cfba[i],1) & cdomain);
        const auto ntmp = static_cast<int>(bl_tmp.size());
        cfp_bl.join(bl_tmp);

        int proc = fdm[i];
        for (int j = 0; j < ntmp; ++j) {
            cfp_procmap.push_back(proc);
        }
        if (proc == myproc) {
            for (int j = 0; j < ntmp; ++j) {
                m_cfp_localindex.push_back(nlocal);
            }
            ++nlocal;
        }
    }
#endif

    BoxArray cfp_ba(std::move(cfp_bl));
    DistributionMapping cfp_dm(std::move(cfp_procmap));
    m_cfpatch.define(cfp_ba, cfp_dm, nvar, 0);

    m_cfp_fab.resize(nlocal);
    for (MFIter mfi(m_cfpatch); mfi.isValid(); ++mfi)
    {
        const int li = mfi.LocalIndex();
        const int flgi = m_cfp_localindex[li];
        FAB& fab = m_cfpatch[mfi];
        m_cfp_fab[flgi].push_back(&fab);
    }

    bool is_periodic = m_fine_geom.isAnyPeriodic();
    if (is_periodic) {
        // Patches that are periodic images of one another would double count
        // fluxes; build a mask that zeroes out the duplicates.
        m_cfp_mask.define(cfp_ba, cfp_dm, 1, 0);
        m_cfp_mask.setVal(T(1.0));

        const Box& domainbox = m_crse_geom.Domain();

        bool run_on_gpu = Gpu::inLaunchRegion();
        Vector<Array4BoxTag<T> > tags;

#ifdef AMREX_USE_OMP
#pragma omp parallel if (!run_on_gpu)
#endif
        {
            std::vector< std::pair<int,Box> > isects;

            for (MFIter mfi(m_cfp_mask); mfi.isValid(); ++mfi)
            {
                const Box& bx = mfi.fabbox();
                if (!domainbox.contains(bx)) // part of the box is outside the domain
                {
                    FAB& fab = m_cfp_mask[mfi];
                    auto const& arr = m_cfp_mask.array(mfi);
                    for (const auto& iv : pshifts)
                    {
                        if (iv != IntVect::TheZeroVector())
                        {
                            cfba.intersections(bx+iv, isects);
                            for (const auto& is : isects)
                            {
                                const Box& ibx = is.second - iv;
                                if (run_on_gpu) {
                                    tags.push_back({arr,ibx});
                                } else {
                                    fab.template setVal<RunOn::Host>(T(0.0), ibx);
                                }
                            }
                        }
                    }
                }
            }
        }

#ifdef AMREX_USE_GPU
        amrex::ParallelFor(tags, 1,
        [=] AMREX_GPU_DEVICE (int i, int j, int k, int n, Array4BoxTag<T> const& tag) noexcept
        {
            tag.dfab(i,j,k,n) = T(0);
        });
#endif
    }
}
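// For a concrete picture of what define() builds: if a single fine grid
// coarsens to the box ((4,4),(7,7)), its crse/fine patches are the boxes of
//
//     grow(Box((4,4),(7,7)), 1)  minus  Box((4,4),(7,7))
//
// i.e. the one-cell-wide ring of coarse cells around it, clipped to the
// domain and to cells not covered by other coarsened fine grids.  Fluxes
// accumulated by FineAdd() live on this ring and are later added onto
// m_crse_data by Reflux().  (The box values here are illustrative only.)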
template <typename MF>
void
YAFluxRegisterT<MF>::reset ()
{
    m_crse_data.setVal(T(0.0));
    m_cfpatch.setVal(T(0.0));
}
template <typename MF>
void
YAFluxRegisterT<MF>::CrseAdd (const MFIter& mfi,
                              const std::array<FAB const*, AMREX_SPACEDIM>& flux,
                              const Real* dx, Real dt, RunOn runon) noexcept
{
    BL_ASSERT(m_crse_data.nComp() == flux[0]->nComp());
    int srccomp = 0;
    int destcomp = 0;
    int numcomp = m_crse_data.nComp();
    CrseAdd(mfi, flux, dx, dt, srccomp, destcomp, numcomp, runon);
}
template <typename MF>
void
YAFluxRegisterT<MF>::CrseAdd (const MFIter& mfi,
                              const std::array<FAB const*, AMREX_SPACEDIM>& flux,
                              const Real* dx, Real dt, int srccomp, int destcomp,
                              int numcomp, RunOn runon) noexcept
{
    BL_ASSERT(m_crse_data.nComp() >= destcomp+numcomp &&
              flux[0]->nComp() >= srccomp+numcomp);

    // Nothing to do unless this coarse FAB touches the coarse/fine boundary.
    if (m_crse_fab_flag[mfi.LocalIndex()] == crse_cell) {
        return;
    }

    const Box& bx = mfi.tilebox();
    AMREX_D_TERM(auto dtdx = static_cast<T>(dt/dx[0]);,
                 auto dtdy = static_cast<T>(dt/dx[1]);,
                 auto dtdz = static_cast<T>(dt/dx[2]););
    AMREX_D_TERM(FAB const* fx = flux[0];,
                 FAB const* fy = flux[1];,
                 FAB const* fz = flux[2];);

    auto dest_arr = m_crse_data.array(mfi,destcomp);
    auto const flag = m_crse_flag.const_array(mfi);

    AMREX_D_TERM(Array4<T const> fxarr = fx->const_array(srccomp);,
                 Array4<T const> fyarr = fy->const_array(srccomp);,
                 Array4<T const> fzarr = fz->const_array(srccomp););

    AMREX_LAUNCH_HOST_DEVICE_LAMBDA_FLAG(runon, bx, tbx,
    {
        yafluxreg_crseadd(tbx, dest_arr, flag, AMREX_D_DECL(fxarr,fyarr,fzarr),
                          AMREX_D_DECL(dtdx,dtdy,dtdz), numcomp);
    });
}
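// The 1D kernel (from AMReX_YAFluxRegister_1D_K.H) is essentially the
// following; a simplified sketch rather than the exact implementation.  At
// every coarse cell flagged as lying on the coarse/fine boundary, the coarse
// flux through each face shared with a fine cell is removed from the
// register, so that Reflux() ends up replacing it with the fine flux:
//
//     template <typename T>
//     void yafluxreg_crseadd (Box const& bx, Array4<T> const& d,
//                             Array4<int const> const& flag,
//                             Array4<T const> const& fx, T dtdx, int nc)
//     {
//         for (int i = bx.smallEnd(0); i <= bx.bigEnd(0); ++i) {
//             if (flag(i,0,0) == crse_fine_boundary_cell) {
//                 for (int n = 0; n < nc; ++n) {
//                     // low face shared with a fine cell: the coarse update
//                     // added +dtdx*fx(i), so subtract it here
//                     if (flag(i-1,0,0) == fine_cell) { d(i,0,0,n) -= dtdx*fx(i  ,0,0,n); }
//                     // high face shared with a fine cell: the coarse update
//                     // added -dtdx*fx(i+1), so add it back here
//                     if (flag(i+1,0,0) == fine_cell) { d(i,0,0,n) += dtdx*fx(i+1,0,0,n); }
//                 }
//             }
//         }
//     }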
template <typename MF>
void
YAFluxRegisterT<MF>::FineAdd (const MFIter& mfi,
                              const std::array<FAB const*, AMREX_SPACEDIM>& flux,
                              const Real* dx, Real dt, RunOn runon) noexcept
{
    BL_ASSERT(m_crse_data.nComp() == flux[0]->nComp());
    int srccomp = 0;
    int destcomp = 0;
    int numcomp = m_crse_data.nComp();
    FineAdd(mfi, flux, dx, dt, srccomp, destcomp, numcomp, runon);
}
template <typename MF>
void
YAFluxRegisterT<MF>::FineAdd (const MFIter& mfi,
                              const std::array<FAB const*, AMREX_SPACEDIM>& a_flux,
                              const Real* dx, Real dt, int srccomp, int destcomp,
                              int numcomp, RunOn runon) noexcept
{
    BL_ASSERT(m_cfpatch.nComp() >= destcomp+numcomp &&
              a_flux[0]->nComp() >= srccomp+numcomp);

    // Nothing to do unless this fine grid has crse/fine patches.
    const int li = mfi.LocalIndex();
    Vector<FAB*>& cfp_fabs = m_cfp_fab[li];
    if (cfp_fabs.empty()) { return; }

    const Box& tbx = mfi.tilebox();
    const Box& bx = amrex::coarsen(tbx, m_ratio);
    const Box& fbx = amrex::refine(bx, m_ratio);

    // Scale by dt/dx and by the number of fine faces per coarse face.
    const T ratio = static_cast<T>(AMREX_D_TERM(m_ratio[0],*m_ratio[1],*m_ratio[2]));
    std::array<T,AMREX_SPACEDIM> dtdx{{AMREX_D_DECL(static_cast<T>(dt/(dx[0]*ratio)),
                                                    static_cast<T>(dt/(dx[1]*ratio)),
                                                    static_cast<T>(dt/(dx[2]*ratio)))}};
    const Dim3 rr = m_ratio.dim3();

    AMREX_ASSERT(bx.cellCentered());
    for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
        // ...
    }

    int fluxcomp = srccomp;
    std::array<FAB const*,AMREX_SPACEDIM> flux{{AMREX_D_DECL(a_flux[0],a_flux[1],a_flux[2])}};
    std::array<FAB,AMREX_SPACEDIM> ftmp;
    if (fbx != tbx) {
        // The tile box does not cover whole coarse cells; copy the fluxes
        // into full-sized temporaries first.
        fluxcomp = 0;
        for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
            const Box& b = amrex::surroundingNodes(fbx,idim);
            ftmp[idim].resize(b,numcomp);
            ftmp[idim].template setVal<RunOn::Host>(T(0.0));
            ftmp[idim].template copy<RunOn::Host>(*a_flux[idim], srccomp, 0, numcomp);
            flux[idim] = &ftmp[idim];
        }
    }

    for (int idim=0; idim < AMREX_SPACEDIM; ++idim)
    {
        const Box& lobx = amrex::adjCellLo(bx, idim);
        const Box& hibx = amrex::adjCellHi(bx, idim);
        FAB const* f = flux[idim];
        for (FAB* cfp : cfp_fabs)
        {
            {
                const Box& lobx_is = lobx & cfp->box();
                const int side = 0;
                if (lobx_is.ok())
                {
                    auto d = cfp->array(destcomp);
                    auto dtdxs = dtdx[idim];
                    int dirside = idim*2+side;
                    Array4<T const> farr = f->const_array(fluxcomp);
                    AMREX_LAUNCH_HOST_DEVICE_LAMBDA_FLAG(runon, lobx_is, b,
                    {
                        yafluxreg_fineadd(b, d, farr, dtdxs, numcomp, dirside, rr);
                    });
                }
            }
            {
                const Box& hibx_is = hibx & cfp->box();
                const int side = 1;
                if (hibx_is.ok())
                {
                    auto d = cfp->array(destcomp);
                    auto dtdxs = dtdx[idim];
                    int dirside = idim*2+side;
                    Array4<T const> farr = f->const_array(fluxcomp);
                    AMREX_LAUNCH_HOST_DEVICE_LAMBDA_FLAG(runon, hibx_is, b,
                    {
                        yafluxreg_fineadd(b, d, farr, dtdxs, numcomp, dirside, rr);
                    });
                }
            }
        }
    }
}
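// The matching 1D kernel (from AMReX_YAFluxRegister_1D_K.H) accumulates, for
// each coarse cell of a patch, the fine flux through the face it shares with
// the fine grid, with a sign fixed by which side of the fine grid the patch
// sits on.  A simplified sketch (the average over the rr fine faces per
// coarse face is trivial in 1D):
//
//     template <typename T>
//     void yafluxreg_fineadd (Box const& bx, Array4<T> const& d,
//                             Array4<T const> const& f, T dtdx, int nc,
//                             int dirside, Dim3 const& rr)
//     {
//         for (int i = bx.smallEnd(0); i <= bx.bigEnd(0); ++i) {
//             // dirside == 0: patch is on the low side of the fine grid, so
//             // the shared face is this coarse cell's high face.
//             const int fi = (dirside == 0) ? (i+1)*rr.x : i*rr.x;
//             const T sign = (dirside == 0) ? T(-1.0) : T(1.0);
//             for (int n = 0; n < nc; ++n) {
//                 d(i,0,0,n) += sign*dtdx*f(fi,0,0,n);
//             }
//         }
//     }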
template <typename MF>
void
YAFluxRegisterT<MF>::Reflux (MF& state, int dc)
{
    int srccomp = 0;
    int destcomp = dc;
    int numcomp = m_ncomp;
    Reflux(state, srccomp, destcomp, numcomp);
}
template <typename MF>
void
YAFluxRegisterT<MF>::Reflux (MF& state, int srccomp, int destcomp, int numcomp)
{
    // Zero out the parts of the crse/fine patches that are periodic
    // duplicates, so that fluxes are not added twice.
    if (!m_cfp_mask.empty())
    {
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
        for (MFIter mfi(m_cfpatch); mfi.isValid(); ++mfi)
        {
            const Box& bx = m_cfpatch[mfi].box();
            auto const maskfab = m_cfp_mask.array(mfi);
            auto cfptfab = m_cfpatch.array(mfi,srccomp);
            AMREX_HOST_DEVICE_PARALLEL_FOR_4D(bx, numcomp, i, j, k, n,
            {
                cfptfab(i,j,k,n) *= maskfab(i,j,k);
            });
        }
    }

    // Add the fine contributions onto the coarse register.
    m_crse_data.ParallelCopy(m_cfpatch, srccomp, srccomp, numcomp, m_crse_geom.periodicity(),
                             FabArrayBase::ADD);

    BL_ASSERT(state.nComp() >= destcomp + numcomp);
    if (m_cvol) {
        // Divide by the coarse cell volume if one has been provided.
        auto const& dst = state.arrays();
        auto const& src = m_crse_data.const_arrays();
        auto const& vol = m_cvol->const_arrays();
        amrex::ParallelFor(state, IntVect(0), numcomp,
        [=] AMREX_GPU_DEVICE (int bno, int i, int j, int k, int n)
        {
            dst[bno](i,j,k,destcomp+n) += src[bno](i,j,k,srccomp+n) / vol[bno](i,j,k);
        });
        Gpu::streamSynchronize();
    } else {
        amrex::Add(state, m_crse_data, srccomp, destcomp, numcomp, 0);
    }
}
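// If the state is volume-weighted (e.g. with embedded boundaries or mapped
// coordinates), pass the coarse cell volumes before refluxing so that the
// accumulated correction is divided by the local cell volume, as in the
// m_cvol branch above.  A sketch, where `volume` is a hypothetical MultiFab
// of coarse cell volumes owned by the caller:
//
//     fr.setCrseVolume(&volume);
//     fr.Reflux(crse_state);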
template <typename MF>
MF&
YAFluxRegisterT<MF>::getFineData ()
{
    return m_cfpatch;
}

template <typename MF>
MF&
YAFluxRegisterT<MF>::getCrseData ()
{
    return m_crse_data;
}

using YAFluxRegister = YAFluxRegisterT<MultiFab>;

} // namespace amrex

#endif