|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | amrex_first_order_extrap_cpu (amrex::Box const &bx, int nComp, amrex::Array4< const int > const &mask, amrex::Array4< amrex::Real > const &data) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | amrex_first_order_extrap_gpu (int i, int j, int k, int n, amrex::Box const &bx, amrex::Array4< const int > const &mask, amrex::Array4< amrex::Real > const &data) noexcept |
|
std::ostream & | operator<< (std::ostream &os, AmrMesh const &amr_mesh) |
|
template<class PC , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0> |
void | ParticleToMesh (PC const &pc, const Vector< MultiFab * > &mf, int lev_min, int lev_max, F &&f, bool zero_out_input=true, bool vol_weight=true) |
|
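A usage sketch for ParticleToMesh (myPC, rho, finest_level, and DepositFunctor are all placeholder names): the caller supplies a device callable that deposits one particle's contribution into the destination data.

    // Zero out rho and deposit all particles on levels 0..finest_level.
    // DepositFunctor is a user-defined callable; in existing AMReX examples it
    // typically receives the particle and an Array4<Real> for the local FAB.
    amrex::ParticleToMesh(myPC, amrex::GetVecOfPtrs(rho), 0, finest_level,
                          DepositFunctor{});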
std::ostream & | operator<< (std::ostream &os, const ErrorList &elst) |
|
void | InterpCrseFineBndryEMfield (InterpEM_t interp_type, const Array< MultiFab, AMREX_SPACEDIM > &crse, Array< MultiFab, AMREX_SPACEDIM > &fine, const Geometry &cgeom, const Geometry &fgeom, int ref_ratio) |
|
void | InterpCrseFineBndryEMfield (InterpEM_t interp_type, const Array< MultiFab const *, AMREX_SPACEDIM > &crse, const Array< MultiFab *, AMREX_SPACEDIM > &fine, const Geometry &cgeom, const Geometry &fgeom, int ref_ratio) |
|
void | FillPatchInterp (MultiFab &mf_fine_patch, int fcomp, MultiFab const &mf_crse_patch, int ccomp, int ncomp, IntVect const &ng, const Geometry &cgeom, const Geometry &fgeom, Box const &dest_domain, const IntVect &ratio, MFInterpolater *mapper, const Vector< BCRec > &bcs, int bcscomp) |
|
template<typename Interp > |
bool | ProperlyNested (const IntVect &ratio, const IntVect &blocking_factor, int ngrow, const IndexType &boxType, Interp *mapper) |
| Test if AMR grids are properly nested. More...
|
|
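A sketch of how this check is typically used (ref_ratio, blocking_factor, and nghost are assumed to come from the calling AmrCore-style class): assert that ghost cells filled by FillPatchTwoLevels never reach past the coarse/fine boundary of the level below.

    // Check that nghost ghost cells of cell-centered data can be filled from
    // the next coarser level with the chosen interpolater.
    AMREX_ALWAYS_ASSERT(amrex::ProperlyNested(ref_ratio, blocking_factor, nghost,
                                              amrex::IndexType::TheCellType(),
                                              &amrex::cell_cons_interp));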
template<typename MF , typename BC > |
std::enable_if_t< IsFabArray< MF >::value > | FillPatchSingleLevel (MF &mf, IntVect const &nghost, Real time, const Vector< MF * > &smf, const Vector< Real > &stime, int scomp, int dcomp, int ncomp, const Geometry &geom, BC &physbcf, int bcfcomp) |
| FillPatch with data from the current level. More...
|
|
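A typical call (a sketch; phi_old/phi_new, t_old/t_new, geom, and the PhysBCFunct-style functor physbc are assumed to be supplied by the caller):

    // Fill mf (including ghost cells) at `time` by interpolating in time
    // between the old and new state of the same level, then apply physical BCs.
    amrex::Vector<amrex::MultiFab*> smf{&phi_old, &phi_new};
    amrex::Vector<amrex::Real>      stime{t_old, t_new};
    amrex::FillPatchSingleLevel(mf, time, smf, stime,
                                0, 0, mf.nComp(), geom, physbc, 0);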
template<typename MF , typename BC > |
std::enable_if_t< IsFabArray< MF >::value > | FillPatchSingleLevel (MF &mf, Real time, const Vector< MF * > &smf, const Vector< Real > &stime, int scomp, int dcomp, int ncomp, const Geometry &geom, BC &physbcf, int bcfcomp) |
| FillPatch with data from the current level. More...
|
|
template<typename MF , typename BC , typename Interp , typename PreInterpHook = NullInterpHook<typename MF::FABType::value_type>, typename PostInterpHook = NullInterpHook<typename MF::FABType::value_type>> |
std::enable_if_t< IsFabArray< MF >::value > | FillPatchTwoLevels (MF &mf, IntVect const &nghost, Real time, const Vector< MF * > &cmf, const Vector< Real > &ct, const Vector< MF * > &fmf, const Vector< Real > &ft, int scomp, int dcomp, int ncomp, const Geometry &cgeom, const Geometry &fgeom, BC &cbc, int cbccomp, BC &fbc, int fbccomp, const IntVect &ratio, Interp *mapper, const Vector< BCRec > &bcs, int bcscomp, const PreInterpHook &pre_interp={}, const PostInterpHook &post_interp={}) |
| FillPatch with data from the current level and the level below. More...
|
|
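A typical call (a sketch; the coarse/fine state vectors, Geometry objects, PhysBCFunct-style functors cbc/fbc, and the Vector<BCRec> bcs are assumed to come from the calling code):

    // Fill mf on the fine level: interior and fine-fine ghost cells come from
    // the fine data; remaining ghost cells are interpolated from the coarse level.
    amrex::Interpolater* mapper = &amrex::cell_cons_interp;
    amrex::FillPatchTwoLevels(mf, time, cmf, ctime, fmf, ftime,
                              0, 0, mf.nComp(), cgeom, fgeom,
                              cbc, 0, fbc, 0, ref_ratio,
                              mapper, bcs, 0);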
template<typename MF , typename BC , typename Interp , typename PreInterpHook = NullInterpHook<typename MF::FABType::value_type>, typename PostInterpHook = NullInterpHook<typename MF::FABType::value_type>> |
std::enable_if_t< IsFabArray< MF >::value > | FillPatchTwoLevels (MF &mf, Real time, const Vector< MF * > &cmf, const Vector< Real > &ct, const Vector< MF * > &fmf, const Vector< Real > &ft, int scomp, int dcomp, int ncomp, const Geometry &cgeom, const Geometry &fgeom, BC &cbc, int cbccomp, BC &fbc, int fbccomp, const IntVect &ratio, Interp *mapper, const Vector< BCRec > &bcs, int bcscomp, const PreInterpHook &pre_interp={}, const PostInterpHook &post_interp={}) |
| FillPatch with data from the current level and the level below. More...
|
|
template<typename MF , typename BC , typename Interp , typename PreInterpHook = NullInterpHook<typename MF::FABType::value_type>, typename PostInterpHook = NullInterpHook<typename MF::FABType::value_type>> |
std::enable_if_t< IsFabArray< MF >::value > | FillPatchTwoLevels (Array< MF *, AMREX_SPACEDIM > const &mf, IntVect const &nghost, Real time, const Vector< Array< MF *, AMREX_SPACEDIM > > &cmf, const Vector< Real > &ct, const Vector< Array< MF *, AMREX_SPACEDIM > > &fmf, const Vector< Real > &ft, int scomp, int dcomp, int ncomp, const Geometry &cgeom, const Geometry &fgeom, Array< BC, AMREX_SPACEDIM > &cbc, const Array< int, AMREX_SPACEDIM > &cbccomp, Array< BC, AMREX_SPACEDIM > &fbc, const Array< int, AMREX_SPACEDIM > &fbccomp, const IntVect &ratio, Interp *mapper, const Array< Vector< BCRec >, AMREX_SPACEDIM > &bcs, const Array< int, AMREX_SPACEDIM > &bcscomp, const PreInterpHook &pre_interp={}, const PostInterpHook &post_interp={}) |
| FillPatch for face variables with data from the current level and the level below. Sometimes, we need to fillpatch all AMREX_SPACEDIM face MultiFabs together to satisfy certain constraints such as divergence preservation. More...
|
|
template<typename MF , typename BC , typename Interp , typename PreInterpHook = NullInterpHook<typename MF::FABType::value_type>, typename PostInterpHook = NullInterpHook<typename MF::FABType::value_type>> |
std::enable_if_t< IsFabArray< MF >::value > | FillPatchTwoLevels (Array< MF *, AMREX_SPACEDIM > const &mf, IntVect const &nghost, Real time, const Vector< Array< MF *, AMREX_SPACEDIM > > &cmf, const Vector< Real > &ct, const Vector< Array< MF *, AMREX_SPACEDIM > > &fmf, const Vector< Real > &ft, int scomp, int dcomp, int ncomp, const Geometry &cgeom, const Geometry &fgeom, Array< BC, AMREX_SPACEDIM > &cbc, int cbccomp, Array< BC, AMREX_SPACEDIM > &fbc, int fbccomp, const IntVect &ratio, Interp *mapper, const Array< Vector< BCRec >, AMREX_SPACEDIM > &bcs, int bcscomp, const PreInterpHook &pre_interp={}, const PostInterpHook &post_interp={}) |
| FillPatch for face variables with data from the current level and the level below. Sometimes, we need to fillpatch all AMREX_SPACEDIM face MultiFabs together to satisfy certain constraints such as divergence preservation. More...
|
|
template<typename MF , typename BC , typename Interp , typename PreInterpHook = NullInterpHook<typename MF::FABType::value_type>, typename PostInterpHook = NullInterpHook<typename MF::FABType::value_type>> |
std::enable_if_t< IsFabArray< MF >::value > | FillPatchTwoLevels (Array< MF *, AMREX_SPACEDIM > const &mf, Real time, const Vector< Array< MF *, AMREX_SPACEDIM > > &cmf, const Vector< Real > &ct, const Vector< Array< MF *, AMREX_SPACEDIM > > &fmf, const Vector< Real > &ft, int scomp, int dcomp, int ncomp, const Geometry &cgeom, const Geometry &fgeom, Array< BC, AMREX_SPACEDIM > &cbc, int cbccomp, Array< BC, AMREX_SPACEDIM > &fbc, int fbccomp, const IntVect &ratio, Interp *mapper, const Array< Vector< BCRec >, AMREX_SPACEDIM > &bcs, int bcscomp, const PreInterpHook &pre_interp={}, const PostInterpHook &post_interp={}) |
| FillPatch for face variables with data from the current level and the level below. Sometimes, we need to fillpatch all AMREX_SPACEDIM face MultiFabs together to satisfy certain constraints such as divergence preservation. More...
|
|
template<typename MF , typename BC , typename Interp , typename PreInterpHook = NullInterpHook<typename MF::FABType::value_type>, typename PostInterpHook = NullInterpHook<typename MF::FABType::value_type>> |
std::enable_if_t< IsFabArray< MF >::value > | InterpFromCoarseLevel (MF &mf, Real time, const MF &cmf, int scomp, int dcomp, int ncomp, const Geometry &cgeom, const Geometry &fgeom, BC &cbc, int cbccomp, BC &fbc, int fbccomp, const IntVect &ratio, Interp *mapper, const Vector< BCRec > &bcs, int bcscomp, const PreInterpHook &pre_interp={}, const PostInterpHook &post_interp={}) |
| Fill with interpolation of coarse level data. More...
|
|
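A typical call for initializing a newly created fine level entirely from coarse data (a sketch; cbc/fbc and bcs are assumed from the caller):

    // Fill all of mf (valid + ghost cells) by spatial interpolation of the
    // coarse MultiFab cmf, then apply physical boundary conditions.
    amrex::InterpFromCoarseLevel(mf, time, cmf,
                                 0, 0, mf.nComp(), cgeom, fgeom,
                                 cbc, 0, fbc, 0, ref_ratio,
                                 &amrex::cell_cons_interp, bcs, 0);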
template<typename MF , typename BC , typename Interp , typename PreInterpHook = NullInterpHook<typename MF::FABType::value_type>, typename PostInterpHook = NullInterpHook<typename MF::FABType::value_type>> |
std::enable_if_t< IsFabArray< MF >::value > | InterpFromCoarseLevel (MF &mf, IntVect const &nghost, Real time, const MF &cmf, int scomp, int dcomp, int ncomp, const Geometry &cgeom, const Geometry &fgeom, BC &cbc, int cbccomp, BC &fbc, int fbccomp, const IntVect &ratio, Interp *mapper, const Vector< BCRec > &bcs, int bcscomp, const PreInterpHook &pre_interp={}, const PostInterpHook &post_interp={}) |
| Fill with interpolation of coarse level data. More...
|
|
template<typename MF , typename BC , typename Interp , typename PreInterpHook = NullInterpHook<typename MF::FABType::value_type>, typename PostInterpHook = NullInterpHook<typename MF::FABType::value_type>> |
std::enable_if_t< IsFabArray< MF >::value > | InterpFromCoarseLevel (Array< MF *, AMREX_SPACEDIM > const &mf, Real time, const Array< MF *, AMREX_SPACEDIM > &cmf, int scomp, int dcomp, int ncomp, const Geometry &cgeom, const Geometry &fgeom, Array< BC, AMREX_SPACEDIM > &cbc, int cbccomp, Array< BC, AMREX_SPACEDIM > &fbc, int fbccomp, const IntVect &ratio, Interp *mapper, const Array< Vector< BCRec >, AMREX_SPACEDIM > &bcs, int bcscomp, const PreInterpHook &pre_interp={}, const PostInterpHook &post_interp={}) |
| Fill face variables with data from the coarse level. Sometimes, we need to fillpatch all AMREX_SPACEDIM face MultiFabs together to satisfy certain constraints such as divergence preservation. More...
|
|
template<typename MF , typename BC , typename Interp , typename PreInterpHook = NullInterpHook<typename MF::FABType::value_type>, typename PostInterpHook = NullInterpHook<typename MF::FABType::value_type>> |
std::enable_if_t< IsFabArray< MF >::value > | InterpFromCoarseLevel (Array< MF *, AMREX_SPACEDIM > const &mf, IntVect const &nghost, Real time, const Array< MF *, AMREX_SPACEDIM > &cmf, int scomp, int dcomp, int ncomp, const Geometry &cgeom, const Geometry &fgeom, Array< BC, AMREX_SPACEDIM > &cbc, int cbccomp, Array< BC, AMREX_SPACEDIM > &fbc, int fbccomp, const IntVect &ratio, Interp *mapper, const Array< Vector< BCRec >, AMREX_SPACEDIM > &bcs, int bcscomp, const PreInterpHook &pre_interp={}, const PostInterpHook &post_interp={}) |
| Fill face variables with data from the coarse level. Sometimes, we need to fillpatch all AMREX_SPACEDIM face MultiFabs together to satisfy certain constraints such as divergence preservation. More...
|
|
template<typename MF , typename Interp > |
std::enable_if_t< IsFabArray< MF >::value > | InterpFromCoarseLevel (MF &mf, IntVect const &nghost, IntVect const &nghost_outside_domain, const MF &cmf, int scomp, int dcomp, int ncomp, const Geometry &cgeom, const Geometry &fgeom, const IntVect &ratio, Interp *mapper, const Vector< BCRec > &bcs, int bcscomp) |
| Fill with interpolation of coarse level data. More...
|
|
template<typename MF > |
std::enable_if_t< IsFabArray< MF >::value > | FillPatchSingleLevel (MF &mf, IntVect const &nghost, Real time, const Vector< MF * > &smf, IntVect const &snghost, const Vector< Real > &stime, int scomp, int dcomp, int ncomp, const Geometry &geom) |
| FillPatch with data from the current level. More...
|
|
template<typename MF , typename Interp > |
std::enable_if_t< IsFabArray< MF >::value > | FillPatchTwoLevels (MF &mf, IntVect const &nghost, IntVect const &nghost_outside_domain, Real time, const Vector< MF * > &cmf, const Vector< Real > &ct, const Vector< MF * > &fmf, const Vector< Real > &ft, int scomp, int dcomp, int ncomp, const Geometry &cgeom, const Geometry &fgeom, const IntVect &ratio, Interp *mapper, const Vector< BCRec > &bcs, int bcscomp) |
| FillPatch with data from the current level and the level below. More...
|
|
template<typename MF , typename BC , typename Interp > |
std::enable_if_t< IsFabArray< MF >::value > | FillPatchNLevels (MF &mf, int level, const IntVect &nghost, Real time, const Vector< Vector< MF * >> &smf, const Vector< Vector< Real >> &st, int scomp, int dcomp, int ncomp, const Vector< Geometry > &geom, Vector< BC > &bc, int bccomp, const Vector< IntVect > &ratio, Interp *mapper, const Vector< BCRec > &bcr, int bcrcomp) |
| FillPatch with data from AMR levels. More...
|
|
template<typename MF , typename Interp > |
std::enable_if_t< IsFabArray< MF >::value &&!std::is_same_v< Interp, MFInterpolater > > | FillPatchInterp (MF &mf_fine_patch, int fcomp, MF const &mf_crse_patch, int ccomp, int ncomp, IntVect const &ng, const Geometry &cgeom, const Geometry &fgeom, Box const &dest_domain, const IntVect &ratio, Interp *mapper, const Vector< BCRec > &bcs, int bcscomp) |
|
template<typename MF > |
std::enable_if_t< IsFabArray< MF >::value > | FillPatchInterp (MF &mf_fine_patch, int fcomp, MF const &mf_crse_patch, int ccomp, int ncomp, IntVect const &ng, const Geometry &cgeom, const Geometry &fgeom, Box const &dest_domain, const IntVect &ratio, InterpBase *mapper, const Vector< BCRec > &bcs, int bcscomp) |
|
template<typename MF , typename iMF , typename Interp > |
std::enable_if_t< IsFabArray< MF >::value &&!std::is_same_v< Interp, MFInterpolater > > | InterpFace (Interp *interp, MF const &mf_crse_patch, int crse_comp, MF &mf_refined_patch, int fine_comp, int ncomp, const IntVect &ratio, const iMF &solve_mask, const Geometry &crse_geom, const Geometry &fine_geom, int bcscomp, RunOn gpu_or_cpu, const Vector< BCRec > &bcs) |
|
template<typename MF , typename iMF > |
std::enable_if_t< IsFabArray< MF >::value > | InterpFace (InterpBase *interp, MF const &mf_crse_patch, int crse_comp, MF &mf_refined_patch, int fine_comp, int ncomp, const IntVect &ratio, const iMF &solve_mask, const Geometry &crse_geom, const Geometry &fine_geom, int bccomp, RunOn gpu_or_cpu, const Vector< BCRec > &bcs) |
|
AMREX_GPU_HOST_DEVICE void | fluxreg_fineadd (Box const &bx, Array4< Real > const ®, const int rcomp, Array4< Real const > const &flx, const int fcomp, const int ncomp, const int, Dim3 const &ratio, const Real mult) noexcept |
| Add fine grid flux to flux register. The flux array is a fine-grid edge-based object; the register is a coarse-grid edge-based object. It is assumed that the coarsened flux region contains the register region. More...
|
|
AMREX_GPU_HOST_DEVICE void | fluxreg_fineareaadd (Box const &bx, Array4< Real > const ®, const int rcomp, Array4< Real const > const &area, Array4< Real const > const &flx, const int fcomp, const int ncomp, const int, Dim3 const &ratio, const Real mult) noexcept |
| Add fine grid flux times area to flux register. The flux array is a fine-grid edge-based object; the register is a coarse-grid edge-based object. It is assumed that the coarsened flux region contains the register region. More...
|
|
AMREX_GPU_HOST_DEVICE void | fluxreg_reflux (Box const &bx, Array4< Real > const &s, const int scomp, Array4< Real const > const &f, Array4< Real const > const &v, const int ncomp, const Real mult, const Orientation face) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | pcinterp_interp (Box const &bx, Array4< Real > const &fine, const int fcomp, const int ncomp, Array4< Real const > const &crse, const int ccomp, IntVect const &ratio) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | nodebilin_slopes (Box const &bx, Array4< T > const &slope, Array4< T const > const &u, const int icomp, const int ncomp, IntVect const &ratio) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | nodebilin_interp (Box const &bx, Array4< T > const &fine, const int fcomp, const int ncomp, Array4< T const > const &slope, Array4< T const > const &crse, const int ccomp, IntVect const &ratio) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | facediv_face_interp (int, int, int, int, int, int, Array4< T const > const &, Array4< T > const &, Array4< const int > const &, IntVect const &) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | facediv_int (int, int, int, int, GpuArray< Array4< T >, AMREX_SPACEDIM > const &, IntVect const &, GpuArray< Real, AMREX_SPACEDIM > const &) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | face_linear_interp_x (int i, int, int, int n, Array4< T > const &fine, Array4< T const > const &crse, IntVect const &ratio) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | ccquartic_interp (int i, int, int, int n, Array4< Real const > const &crse, Array4< Real > const &fine) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | face_linear_interp_y (int i, int j, int, int n, Array4< T > const &fine, Array4< T const > const &crse, IntVect const &ratio) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | ccprotect_2d (int ic, int jc, int, int nvar, Box const &fine_bx, IntVect const &ratio, GeometryData cs_geomdata, GeometryData fn_geomdata, Array4< T > const &fine, Array4< T const > const &fine_state) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | face_linear_interp_z (int i, int j, int k, int n, Array4< T > const &fine, Array4< T const > const &crse, IntVect const &ratio) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | ccprotect_3d (int ic, int jc, int kc, int nvar, Box const &fine_bx, IntVect const &ratio, Array4< T > const &fine, Array4< T const > const &fine_state) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | face_linear_face_interp_x (int fi, int fj, int fk, int n, Array4< T > const &fine, Array4< T const > const &crse, Array4< int const > const &mask, IntVect const &ratio) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | face_linear_face_interp_y (int fi, int fj, int fk, int n, Array4< T > const &fine, Array4< T const > const &crse, Array4< int const > const &mask, IntVect const &ratio) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | face_linear_face_interp_z (int fi, int fj, int fk, int n, Array4< T > const &fine, Array4< T const > const &crse, Array4< int const > const &mask, IntVect const &ratio) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | face_cons_linear_face_interp (int i, int j, int k, int n, Array4< T > const &fine, Array4< T const > const &crse, Array4< int const > const &mask, IntVect const &ratio, Box const &per_grown_domain, int dim) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | face_linear_interp_x (int i, int j, int k, int n, amrex::Array4< amrex::Real > const &fine, IntVect const &ratio) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | face_linear_interp_y (int i, int j, int k, int n, amrex::Array4< amrex::Real > const &fine, IntVect const &ratio) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | face_linear_interp_z (int i, int j, int k, int n, amrex::Array4< amrex::Real > const &fine, IntVect const &ratio) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | cell_quartic_interp_x (int i, int j, int k, int n, Array4< Real > const &fine, Array4< Real const > const &crse) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | cell_quartic_interp_y (int i, int j, int k, int n, Array4< Real > const &fine, Array4< Real const > const &crse) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | cell_quartic_interp_z (int i, int j, int k, int n, Array4< Real > const &fine, Array4< Real const > const &crse) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | interp_face_reg (int i, int j, IntVect const &rr, Array4< Real > const &fine, int scomp, Array4< Real const > const &crse, Array4< Real > const &slope, int ncomp, Box const &domface, int idim) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | interp_face_reg (int i, int j, int k, IntVect const &rr, Array4< Real > const &fine, int scomp, Array4< Real const > const &crse, Array4< Real > const &slope, int ncomp, Box const &domface, int idim) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mf_cell_cons_lin_interp_limit_minmax_llslope (int i, int, int, Array4< Real > const &slope, Array4< Real const > const &u, int scomp, int ncomp, Box const &domain, IntVect const &ratio, BCRec const *bc) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mf_cell_cons_lin_interp_llslope (int i, int, int, Array4< Real > const &slope, Array4< Real const > const &u, int scomp, int ncomp, Box const &domain, IntVect const &, BCRec const *bc) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mf_cell_cons_lin_interp_mcslope (int i, int, int, int ns, Array4< Real > const &slope, Array4< Real const > const &u, int scomp, int, Box const &domain, IntVect const &ratio, BCRec const *bc) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mf_cell_cons_lin_interp (int i, int, int, int ns, Array4< Real > const &fine, int fcomp, Array4< Real const > const &slope, Array4< Real const > const &crse, int ccomp, int, IntVect const &ratio) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mf_cell_cons_lin_interp_mcslope_sph (int i, int ns, Array4< Real > const &slope, Array4< Real const > const &u, int scomp, int, Box const &domain, IntVect const &ratio, BCRec const *bc, Real drf, Real rlo) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mf_cell_cons_lin_interp_sph (int i, int ns, Array4< Real > const &fine, int fcomp, Array4< Real const > const &slope, Array4< Real const > const &crse, int ccomp, int, IntVect const &ratio, Real drf, Real rlo) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mf_cell_bilin_interp (int i, int, int, int n, Array4< T > const &fine, int fcomp, Array4< T const > const &crse, int ccomp, IntVect const &ratio) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mf_nodebilin_interp (int i, int, int, int n, Array4< Real > const &fine, int fcomp, Array4< Real const > const &crse, int ccomp, IntVect const &ratio) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mf_cell_cons_lin_interp_mcslope_rz (int i, int j, int ns, Array4< Real > const &slope, Array4< Real const > const &u, int scomp, int ncomp, Box const &domain, IntVect const &ratio, BCRec const *bc, Real drf, Real rlo) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mf_cell_cons_lin_interp_rz (int i, int j, int ns, Array4< Real > const &fine, int fcomp, Array4< Real const > const &slope, Array4< Real const > const &crse, int ccomp, int ncomp, IntVect const &ratio, Real drf, Real rlo) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mf_cell_quadratic_calcslope (int i, int j, int, int n, Array4< Real const > const &crse, int ccomp, Array4< Real > const &slope, Box const &domain, BCRec const *bc) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mf_cell_quadratic_interp (int i, int j, int, int n, Array4< Real > const &fine, int fcomp, Array4< Real const > const &crse, int ccomp, Array4< Real const > const &slope, IntVect const &ratio) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mf_cell_quadratic_interp_rz (int i, int j, int, int n, Array4< Real > const &fine, int fcomp, Array4< Real const > const &crse, int ccomp, Array4< Real const > const &slope, IntVect const &ratio, GeometryData const &cs_geomdata, GeometryData const &fn_geomdata) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | mf_compute_slopes_x (int i, int j, int k, Array4< Real const > const &u, int nu, Box const &domain, BCRec const &bc) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | mf_compute_slopes_y (int i, int j, int k, Array4< Real const > const &u, int nu, Box const &domain, BCRec const &bc) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | mf_compute_slopes_z (int i, int j, int k, Array4< Real const > const &u, int nu, Box const &domain, BCRec const &bc) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | mf_cell_quadratic_compute_slopes_xx (int i, int j, int k, Array4< Real const > const &u, int nu, Box const &domain, BCRec const &bc) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | mf_cell_quadratic_compute_slopes_yy (int i, int j, int k, Array4< Real const > const &u, int nu, Box const &domain, BCRec const &bc) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | mf_cell_quadratic_compute_slopes_zz (int i, int j, int k, Array4< Real const > const &u, int nu, Box const &domain, BCRec const &bc) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | mf_cell_quadratic_compute_slopes_xy (int i, int j, int k, Array4< Real const > const &u, int nu, Box const &domain, BCRec const &bc) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | mf_cell_quadratic_compute_slopes_xz (int i, int j, int k, Array4< Real const > const &u, int nu, Box const &domain, BCRec const &bc) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | mf_cell_quadratic_compute_slopes_yz (int i, int j, int k, Array4< Real const > const &u, int nu, Box const &domain, BCRec const &bc) |
|
FPExcept | getFPExcept () |
| Return currently enabled FP exceptions. Linux only. More...
|
|
FPExcept | setFPExcept (FPExcept excepts) |
|
FPExcept | disableFPExcept (FPExcept excepts) |
| Disable FP exceptions. Linux only. More...
|
|
FPExcept | enableFPExcept (FPExcept excepts) |
| Enable FP exceptions. Linux only. More...
|
|
std::string | Version () |
|
AMReX * | Initialize (MPI_Comm mpi_comm, std::ostream &a_osout=std::cout, std::ostream &a_oserr=std::cerr, ErrorHandler a_errhandler=nullptr) |
|
AMReX * | Initialize (int &argc, char **&argv, bool build_parm_parse=true, MPI_Comm mpi_comm=MPI_COMM_WORLD, const std::function< void()> &func_parm_parse={}, std::ostream &a_osout=std::cout, std::ostream &a_oserr=std::cerr, ErrorHandler a_errhandler=nullptr) |
|
bool | Initialized () |
| Returns true if there are any currently-active and initialized AMReX instances (i.e. one for which amrex::Initialize has been called, and amrex::Finalize has not). Otherwise false. More...
|
|
void | Finalize (AMReX *pamrex) |
|
void | Finalize () |
|
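The usual pattern in an application's main() (a standard sketch; the inner scope ensures all AMReX objects are destroyed before Finalize runs):

    int main (int argc, char* argv[])
    {
        amrex::Initialize(argc, argv);
        {
            // ... build BoxArrays, MultiFabs, run the solver ...
        }
        amrex::Finalize();
    }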
void | ExecOnFinalize (std::function< void()>) |
| We maintain a stack of functions that need to be called in Finalize(). The functions are called in LIFO order. The idea here is to allow classes to clean up any "global" state that they maintain when we're exiting from AMReX. More...
|
|
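For example, a class managing global state (MyCache is a hypothetical name) can register its cleanup once during initialization:

    // Registered callbacks run in LIFO order from amrex::Finalize().
    amrex::ExecOnFinalize([] () { MyCache::clear(); });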
void | ExecOnInitialize (std::function< void()>) |
|
template<class... Ts> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | ignore_unused (const Ts &...) |
| Suppress compiler warnings about unused variables. More...
|
|
void | Error (const std::string &msg) |
| Print out message to cerr and exit via amrex::Abort(). More...
|
|
void | Error_host (const char *type, const char *msg) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | Error (const char *msg=nullptr) |
|
void | Warning (const std::string &msg) |
| Print out warning message to cerr. More...
|
|
void | Warning_host (const char *msg) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | Warning (const char *msg) |
|
void | Abort (const std::string &msg) |
| Print out message to cerr and exit via abort(). More...
|
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | Abort (const char *msg=nullptr) |
|
void | Assert_host (const char *EX, const char *file, int line, const char *msg) |
| Prints assertion failed messages to cerr and exits via abort(). Intended for use by the BL_ASSERT() macro in <AMReX_BLassert.H>. More...
|
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | Assert (const char *EX, const char *file, int line, const char *msg=nullptr) |
|
void | write_to_stderr_without_buffering (const char *str) |
| This is used by amrex::Error(), amrex::Abort(), and amrex::Assert() to ensure that no additional heap-based memory is allocated when the message is written to stderr. More...
|
|
void | SetErrorHandler (ErrorHandler f) |
|
std::ostream & | OutStream () |
|
std::ostream & | ErrorStream () |
|
int | Verbose () noexcept |
|
void | SetVerbose (int v) noexcept |
|
bool | InitSNaN () noexcept |
|
void | SetInitSNaN (bool v) noexcept |
|
std::string | get_command () |
|
int | command_argument_count () |
|
std::string | get_command_argument (int number) |
| Get the command-line argument with the given number as a std::string. The executable name is the zero-th argument. Returns an empty string if there are not that many arguments. More...
|
|
void | GccPlacater () |
|
bool | any (FPExcept a) |
|
FPExcept | operator| (FPExcept a, FPExcept b) |
|
FPExcept | operator& (FPExcept a, FPExcept b) |
|
template<class T > |
AMREX_GPU_HOST_DEVICE constexpr AMREX_FORCE_INLINE const T & | min (const T &a, const T &b) noexcept |
|
template<class T , class ... Ts> |
AMREX_GPU_HOST_DEVICE constexpr AMREX_FORCE_INLINE const T & | min (const T &a, const T &b, const Ts &... c) noexcept |
|
template<class T > |
AMREX_GPU_HOST_DEVICE constexpr AMREX_FORCE_INLINE const T & | max (const T &a, const T &b) noexcept |
|
template<class T , class ... Ts> |
AMREX_GPU_HOST_DEVICE constexpr AMREX_FORCE_INLINE const T & | max (const T &a, const T &b, const Ts &... c) noexcept |
|
template<class T > |
AMREX_GPU_HOST_DEVICE constexpr AMREX_FORCE_INLINE T | elemwiseMin (T const &a, T const &b) noexcept |
|
template<class T , class ... Ts> |
AMREX_GPU_HOST_DEVICE constexpr AMREX_FORCE_INLINE T | elemwiseMin (const T &a, const T &b, const Ts &... c) noexcept |
|
template<class T > |
AMREX_GPU_HOST_DEVICE constexpr AMREX_FORCE_INLINE T | elemwiseMax (T const &a, T const &b) noexcept |
|
template<class T , class ... Ts> |
AMREX_GPU_HOST_DEVICE constexpr AMREX_FORCE_INLINE T | elemwiseMax (const T &a, const T &b, const Ts &... c) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | Swap (T &t1, T &t2) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE constexpr AMREX_FORCE_INLINE const T & | Clamp (const T &v, const T &lo, const T &hi) |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE std::enable_if_t< std::is_floating_point_v< T >, bool > | almostEqual (T x, T y, int ulp=2) |
|
template<class T , class F , std::enable_if_t< std::is_floating_point_v< T >, int > FOO = 0> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE T | bisect (T lo, T hi, F f, T tol=1e-12, int max_iter=100) |
|
template<typename T , typename I , std::enable_if_t< std::is_integral_v< I >, int > = 0> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE I | bisect (T const *d, I lo, I hi, T const &v) |
|
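The scalar overload finds a root of f in [lo, hi], assuming f changes sign on that interval; for example:

    // Root of x^2 - 2 in [0, 2], i.e. sqrt(2) to within the default tolerance.
    double root = amrex::bisect(0.0, 2.0, [] (double x) { return x*x - 2.0; });

The pointer overload instead searches a sorted array d between indices lo and hi for the interval containing the value v.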
template<typename ItType , typename ValType > |
AMREX_GPU_HOST_DEVICE ItType | upper_bound (ItType first, ItType last, const ValType &val) |
|
template<typename ItType , typename ValType > |
AMREX_GPU_HOST_DEVICE ItType | lower_bound (ItType first, ItType last, const ValType &val) |
|
template<typename ItType , typename ValType , std::enable_if_t< std::is_floating_point_v< typename std::iterator_traits< ItType >::value_type > &&std::is_floating_point_v< ValType >, int > = 0> |
AMREX_GPU_HOST_DEVICE void | linspace (ItType first, const ItType &last, const ValType &start, const ValType &stop) |
|
template<typename ItType , typename ValType , std::enable_if_t< std::is_floating_point_v< typename std::iterator_traits< ItType >::value_type > &&std::is_floating_point_v< ValType >, int > = 0> |
AMREX_GPU_HOST_DEVICE void | logspace (ItType first, const ItType &last, const ValType &start, const ValType &stop, const ValType &base) |
|
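A small sketch of linspace, assuming numpy-style endpoint-inclusive semantics (logspace does the same for exponents of the given base):

    amrex::Real x[5];
    amrex::linspace(x, x + 5, amrex::Real(0.0), amrex::Real(1.0));
    // x is expected to hold 5 evenly spaced values from 0.0 to 1.0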
template<class T , std::enable_if_t< std::is_same_v< std::decay_t< T >, std::uint8_t >||std::is_same_v< std::decay_t< T >, std::uint16_t >||std::is_same_v< std::decay_t< T >, std::uint32_t >||std::is_same_v< std::decay_t< T >, std::uint64_t >, int > = 0> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE int | clz (T x) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE int | clz_generic (std::uint8_t x) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE int | clz_generic (std::uint16_t x) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE int | clz_generic (std::uint32_t x) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE int | clz_generic (std::uint64_t x) noexcept |
|
Arena * | The_Arena () |
|
Arena * | The_Async_Arena () |
|
Arena * | The_Device_Arena () |
|
Arena * | The_Managed_Arena () |
|
Arena * | The_Pinned_Arena () |
|
Arena * | The_Cpu_Arena () |
|
Arena * | The_Comms_Arena () |
|
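These accessors return the memory pools used throughout AMReX; a minimal sketch:

    // Allocate 256 doubles from the default arena (typically managed/device
    // memory when GPU support is enabled, host memory otherwise) and release them.
    void* p = amrex::The_Arena()->alloc(256 * sizeof(double));
    // ... use p ...
    amrex::The_Arena()->free(p);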
std::size_t | aligned_size (std::size_t align_requirement, std::size_t size) noexcept |
| Given a minimum required size of size bytes, this returns the next largest arena size that will align to align_requirement bytes. More...
|
|
bool | is_aligned (const void *p, std::size_t alignment) noexcept |
|
template<class T , typename = typename T::FABType> |
std::array< T *, AMREX_SPACEDIM > | GetArrOfPtrs (std::array< T, AMREX_SPACEDIM > &a) noexcept |
|
template<class T > |
std::array< T *, AMREX_SPACEDIM > | GetArrOfPtrs (const std::array< std::unique_ptr< T >, AMREX_SPACEDIM > &a) noexcept |
|
template<class T > |
std::array< T const *, AMREX_SPACEDIM > | GetArrOfConstPtrs (const std::array< T, AMREX_SPACEDIM > &a) noexcept |
|
template<class T > |
std::array< T const *, AMREX_SPACEDIM > | GetArrOfConstPtrs (const std::array< T *, AMREX_SPACEDIM > &a) noexcept |
|
template<class T > |
std::array< T const *, AMREX_SPACEDIM > | GetArrOfConstPtrs (const std::array< std::unique_ptr< T >, AMREX_SPACEDIM > &a) noexcept |
|
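These helpers convert an Array of objects (or unique_ptrs) into the array-of-pointers form expected by many AMReX interfaces; for example, with face-centered fluxes defined elsewhere:

    amrex::Array<amrex::MultiFab, AMREX_SPACEDIM> fluxes;   // assumed defined
    amrex::Array<amrex::MultiFab*, AMREX_SPACEDIM> fp  = amrex::GetArrOfPtrs(fluxes);
    amrex::Array<amrex::MultiFab const*, AMREX_SPACEDIM> cfp = amrex::GetArrOfConstPtrs(fluxes);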
XDim3 | makeXDim3 (const Array< Real, AMREX_SPACEDIM > &a) noexcept |
|
template<class Tto , class Tfrom > |
AMREX_GPU_HOST_DEVICE Array4< Tto > | ToArray4 (Array4< Tfrom > const &a_in) noexcept |
|
template<class T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Dim3 | lbound (Array4< T > const &a) noexcept |
|
template<class T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Dim3 | ubound (Array4< T > const &a) noexcept |
|
template<class T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Dim3 | length (Array4< T > const &a) noexcept |
|
template<typename T > |
std::ostream & | operator<< (std::ostream &os, const Array4< T > &a) |
|
template<typename T > |
PolymorphicArray4< T > | makePolymorphic (Array4< T > const &a) |
|
void | BaseFab_Initialize () |
|
void | BaseFab_Finalize () |
|
Long | TotalBytesAllocatedInFabs () noexcept |
|
Long | TotalBytesAllocatedInFabsHWM () noexcept |
|
Long | TotalCellsAllocatedInFabs () noexcept |
|
Long | TotalCellsAllocatedInFabsHWM () noexcept |
|
void | ResetTotalBytesAllocatedInFabsHWM () noexcept |
|
void | update_fab_stats (Long n, Long s, size_t szt) noexcept |
|
void | update_fab_stats (Long n, Long s, std::size_t szt) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Array4< T > | makeArray4 (T *p, Box const &bx, int ncomp) noexcept |
|
template<typename T > |
std::enable_if_t< std::is_arithmetic_v< T > > | placementNew (T *const, Long) |
|
template<typename T > |
std::enable_if_t< std::is_trivially_default_constructible_v< T > &&!std::is_arithmetic_v< T > > | placementNew (T *const ptr, Long n) |
|
template<typename T > |
std::enable_if_t<!std::is_trivially_default_constructible_v< T > > | placementNew (T *const ptr, Long n) |
|
template<typename T > |
std::enable_if_t< std::is_trivially_destructible_v< T > > | placementDelete (T *const, Long) |
|
template<typename T > |
std::enable_if_t<!std::is_trivially_destructible_v< T > > | placementDelete (T *const ptr, Long n) |
|
template<class Tto , class Tfrom > |
AMREX_GPU_HOST_DEVICE void | cast (BaseFab< Tto > &tofab, BaseFab< Tfrom > const &fromfab, Box const &bx, SrcComp scomp, DestComp dcomp, NumComps ncomp) noexcept |
|
template<typename STRUCT , typename F , std::enable_if_t<(sizeof(STRUCT)<=36 *8) &&AMREX_IS_TRIVIALLY_COPYABLE(STRUCT) &&std::is_trivially_destructible_v< STRUCT >, int > FOO = 0> |
void | fill (BaseFab< STRUCT > &aos_fab, F const &f) |
|
void | setBC (const Box &bx, const Box &domain, int src_comp, int dest_comp, int ncomp, const Vector< BCRec > &bc_dom, Vector< BCRec > &bcr) noexcept |
| Function for setting array of BCs. More...
|
|
std::ostream & | operator<< (std::ostream &os, const BCRec &b) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | setBC (const Box &bx, const Box &domain, const BCRec &bc_dom, BCRec &bcr) noexcept |
| Function for setting a BC. More...
|
|
void | FillDomainBoundary (MultiFab &phi, const Geometry &geom, const Vector< BCRec > &bc) |
|
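A short sketch of FillDomainBoundary, filling the domain-boundary ghost cells of a MultiFab phi on Geometry geom (the choice of first-order extrapolation is illustrative):

    amrex::Vector<amrex::BCRec> bc(phi.nComp());
    for (int n = 0; n < phi.nComp(); ++n) {
        for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
            bc[n].setLo(idim, amrex::BCType::foextrap);
            bc[n].setHi(idim, amrex::BCType::foextrap);
        }
    }
    amrex::FillDomainBoundary(phi, geom, bc);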
void | AllGatherBoxes (Vector< Box > &bxs, int n_extra_reserve) |
|
template<int dim> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > | grow (const BoxND< dim > &b, int i) noexcept |
| Grow BoxND in all directions by given amount. More...
|
|
template<int dim> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > | grow (const BoxND< dim > &b, const IntVectND< dim > &v) noexcept |
| Grow BoxND in each direction by specified amount. More...
|
|
template<int dim> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > | grow (const BoxND< dim > &b, int idir, int n_cell) noexcept |
| Grow BoxND in direction idir by n_cell cells. More...
|
|
template<int dim> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > | grow (const BoxND< dim > &b, Direction d, int n_cell) noexcept |
|
template<int dim> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > | growLo (const BoxND< dim > &b, int idir, int n_cell) noexcept |
|
template<int dim> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > | growLo (const BoxND< dim > &b, Direction d, int n_cell) noexcept |
|
template<int dim> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > | growHi (const BoxND< dim > &b, int idir, int n_cell) noexcept |
|
template<int dim> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > | growHi (const BoxND< dim > &b, Direction d, int n_cell) noexcept |
|
template<int dim> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > | coarsen (const BoxND< dim > &b, int ref_ratio) noexcept |
| Coarsen BoxND by given (positive) refinement ratio. NOTE: if type(dir) = CELL centered: lo <- lo/ratio and hi <- hi/ratio. NOTE: if type(dir) = NODE centered: lo <- lo/ratio and hi <- hi/ratio + ((hi%ratio)==0 ? 0 : 1). That is, refinement of the coarsened BoxND must contain the original BoxND. More...
|
|
template<int dim> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > | coarsen (const BoxND< dim > &b, const IntVectND< dim > &ref_ratio) noexcept |
| Coarsen BoxND by given (positive) refinement ratio. NOTE: if type(dir) = CELL centered: lo <- lo/ratio and hi <- hi/ratio. NOTE: if type(dir) = NODE centered: lo <- lo/ratio and hi <- hi/ratio + ((hi%ratio)==0 ? 0 : 1). That is, refinement of the coarsened BoxND must contain the original BoxND. More...
|
|
template<int dim> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > | refine (const BoxND< dim > &b, int ref_ratio) noexcept |
|
template<int dim> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > | refine (const BoxND< dim > &b, const IntVectND< dim > &ref_ratio) noexcept |
| Refine BoxND by given (positive) refinement ratio. NOTE: if type(dir) = CELL centered: lo <- lo*ratio and hi <- (hi+1)*ratio - 1. NOTE: if type(dir) = NODE centered: lo <- lo*ratio and hi <- hi*ratio. More...
|
|
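A few of these free functions in action (illustrative values; Box is BoxND<AMREX_SPACEDIM>):

    amrex::Box b(amrex::IntVect(0), amrex::IntVect(63));  // cell-centered box
    amrex::Box gb = amrex::grow(b, 2);     // grown by 2 cells in every direction
    amrex::Box cb = amrex::coarsen(b, 2);  // lo 0, hi 31
    amrex::Box rb = amrex::refine(cb, 2);  // back to lo 0, hi 63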
template<int dim> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > | shift (const BoxND< dim > &b, int dir, int nzones) noexcept |
| Return a BoxND with indices shifted by nzones in dir direction. More...
|
|
template<int dim> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > | shift (const BoxND< dim > &b, const IntVectND< dim > &nzones) noexcept |
|
template<int dim> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > | surroundingNodes (const BoxND< dim > &b, int dir) noexcept |
| Returns a BoxND with NODE based coordinates in direction dir that encloses BoxND b. NOTE: equivalent to b.convert(dir,NODE) NOTE: error if b.type(dir) == NODE. More...
|
|
template<int dim> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > | surroundingNodes (const BoxND< dim > &b, Direction d) noexcept |
|
template<int dim> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > | surroundingNodes (const BoxND< dim > &b) noexcept |
| Returns a BoxND with NODE based coordinates in all directions that encloses BoxND b. More...
|
|
template<int dim> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > | convert (const BoxND< dim > &b, const IntVectND< dim > &typ) noexcept |
| Returns a BoxND with different type. More...
|
|
template<int dim> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > | convert (const BoxND< dim > &b, const IndexTypeND< dim > &typ) noexcept |
|
template<int dim> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > | enclosedCells (const BoxND< dim > &b, int dir) noexcept |
| Returns a BoxND with CELL based coordinates in direction dir that is enclosed by b. NOTE: equivalent to b.convert(dir,CELL) NOTE: error if b.type(dir) == CELL. More...
|
|
template<int dim> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > | enclosedCells (const BoxND< dim > &b, Direction d) noexcept |
|
template<int dim> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > | enclosedCells (const BoxND< dim > &b) noexcept |
| Returns a BoxND with CELL based coordinates in all directions that is enclosed by b. More...
|
|
template<int dim> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > | bdryLo (const BoxND< dim > &b, int dir, int len=1) noexcept |
| Returns the edge-centered BoxND (in direction dir) defining the low side of BoxND b. More...
|
|
template<int dim> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > | bdryHi (const BoxND< dim > &b, int dir, int len=1) noexcept |
| Returns the edge-centered BoxND (in direction dir) defining the high side of BoxND b. More...
|
|
template<int dim> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > | bdryNode (const BoxND< dim > &b, Orientation face, int len=1) noexcept |
| Similar to bdryLo and bdryHi except that it operates on the given face of box b. More...
|
|
template<int dim> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > | adjCellLo (const BoxND< dim > &b, int dir, int len=1) noexcept |
| Returns the cell-centered BoxND of length len adjacent to b on the low end along the coordinate direction dir. The returned BoxND is identical to b in the other directions. The returned BoxND and b have an empty intersection. NOTE: len >= 1. NOTE: BoxND retval = adjCellLo(b,dir,len) is equivalent to the following set of operations: BoxND retval(b); retval.convert(dir,BoxND::CELL); retval.setrange(dir,retval.smallEnd(dir)-len,len);. More...
|
|
template<int dim> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > | adjCellHi (const BoxND< dim > &b, int dir, int len=1) noexcept |
| Similar to adjCellLo but builds an adjacent BoxND on the high end. More...
|
|
template<int dim> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > | adjCell (const BoxND< dim > &b, Orientation face, int len=1) noexcept |
| Similar to adjCellLo and adjCellHi; operates on given face. More...
|
|
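A few index-type conversions and adjacent-box constructions side by side (illustrative, for an existing cell-centered Box bx):

    amrex::Box nbx = amrex::surroundingNodes(bx);  // all-node box enclosing bx
    amrex::Box xbx = amrex::convert(bx, amrex::IntVect::TheDimensionVector(0)); // x-face type
    amrex::Box ccx = amrex::enclosedCells(nbx);    // back to cell-centered
    amrex::Box gcl = amrex::adjCellLo(bx, 0, 1);   // one cell layer just below the low-x side
    amrex::Box flo = amrex::bdryLo(bx, 0);         // the low-x face of bx (node-centered in x)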
template<int dim> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > | minBox (const BoxND< dim > &b1, const BoxND< dim > &b2) noexcept |
| Returns the minimum BoxND containing both the original BoxND and the argument. Both BoxNDs must have identical type. More...
|
|
template<int dim> |
std::ostream & | operator<< (std::ostream &os, const BoxND< dim > &bx) |
| Write an ASCII representation to the ostream. More...
|
|
template<int dim> |
std::istream & | operator>> (std::istream &is, BoxND< dim > &bx) |
| Read from istream. More...
|
|
template<int d, int... dims> |
AMREX_GPU_HOST_DEVICE constexpr AMREX_FORCE_INLINE BoxND< detail::get_sum< d, dims... >()> | BoxCat (const BoxND< d > &bx, const BoxND< dims > &...boxes) noexcept |
| Returns a BoxND obtained by concatenating the input BoxNDs. The dimension of the return value equals the sum of the dimensions of the input BoxNDs. More...
|
|
template<int d, int... dims> |
AMREX_GPU_HOST_DEVICE constexpr AMREX_FORCE_INLINE GpuTuple< BoxND< d >, BoxND< dims >... > | BoxSplit (const BoxND< detail::get_sum< d, dims... >()> &bx) noexcept |
| Returns a tuple of BoxNDs obtained by splitting the input BoxND according to the dimensions specified by the template arguments. More...
|
|
template<int new_dim, int old_dim> |
AMREX_GPU_HOST_DEVICE constexpr AMREX_FORCE_INLINE BoxND< new_dim > | BoxShrink (const BoxND< old_dim > &bx) noexcept |
| Returns a new BoxND of dimension new_dim and assigns the first new_dim dimensions of this BoxND to it. More...
|
|
template<int new_dim, int old_dim> |
AMREX_GPU_HOST_DEVICE constexpr AMREX_FORCE_INLINE BoxND< new_dim > | BoxExpand (const BoxND< old_dim > &bx) noexcept |
| Returns a new BoxND of size new_dim and assigns all values of this BoxND to it and (small=0, big=0, typ=CELL) to the remaining elements. More...
|
|
template<int new_dim, int old_dim> |
AMREX_GPU_HOST_DEVICE constexpr AMREX_FORCE_INLINE BoxND< new_dim > | BoxResize (const BoxND< old_dim > &bx) noexcept |
| Returns a new BoxND of size new_dim by either shrinking or expanding this BoxND. More...
|
|
template<int dim> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE IntVectND< dim > | lbound_iv (BoxND< dim > const &box) noexcept |
|
template<int dim> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE IntVectND< dim > | ubound_iv (BoxND< dim > const &box) noexcept |
|
template<int dim> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE IntVectND< dim > | begin_iv (BoxND< dim > const &box) noexcept |
|
template<int dim> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE IntVectND< dim > | end_iv (BoxND< dim > const &box) noexcept |
|
template<int dim> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE IntVectND< dim > | length_iv (BoxND< dim > const &box) noexcept |
|
template<int dim> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE IntVectND< dim > | max_lbound_iv (BoxND< dim > const &b1, BoxND< dim > const &b2) noexcept |
|
template<int dim> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE IntVectND< dim > | max_lbound_iv (BoxND< dim > const &b1, IntVectND< dim > const &lo) noexcept |
|
template<int dim> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE IntVectND< dim > | min_ubound_iv (BoxND< dim > const &b1, BoxND< dim > const &b2) noexcept |
|
template<int dim> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE IntVectND< dim > | min_ubound_iv (BoxND< dim > const &b1, IntVectND< dim > const &hi) noexcept |
|
template<int dim, std::enable_if_t<(1<=dim &&dim<=3), int > = 0> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Dim3 | lbound (BoxND< dim > const &box) noexcept |
|
template<int dim, std::enable_if_t<(1<=dim &&dim<=3), int > = 0> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Dim3 | ubound (BoxND< dim > const &box) noexcept |
|
template<int dim, std::enable_if_t<(1<=dim &&dim<=3), int > = 0> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Dim3 | begin (BoxND< dim > const &box) noexcept |
|
template<int dim, std::enable_if_t<(1<=dim &&dim<=3), int > = 0> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Dim3 | end (BoxND< dim > const &box) noexcept |
|
template<int dim, std::enable_if_t<(1<=dim &&dim<=3), int > = 0> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Dim3 | length (BoxND< dim > const &box) noexcept |
|
template<int dim, std::enable_if_t<(1<=dim &&dim<=3), int > = 0> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Dim3 | max_lbound (BoxND< dim > const &b1, BoxND< dim > const &b2) noexcept |
|
template<int dim, std::enable_if_t<(1<=dim &&dim<=3), int > = 0> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Dim3 | max_lbound (BoxND< dim > const &b1, Dim3 const &lo) noexcept |
|
template<int dim, std::enable_if_t<(1<=dim &&dim<=3), int > = 0> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Dim3 | min_ubound (BoxND< dim > const &b1, BoxND< dim > const &b2) noexcept |
|
template<int dim, std::enable_if_t<(1<=dim &&dim<=3), int > = 0> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Dim3 | min_ubound (BoxND< dim > const &b1, Dim3 const &hi) noexcept |
|
template<int dim> |
AMREX_FORCE_INLINE BoxND< dim > | getIndexBounds (BoxND< dim > const &b1) noexcept |
|
template<int dim> |
AMREX_FORCE_INLINE BoxND< dim > | getIndexBounds (BoxND< dim > const &b1, BoxND< dim > const &b2) noexcept |
|
template<class T , class ... Ts> |
AMREX_FORCE_INLINE auto | getIndexBounds (T const &b1, T const &b2, Ts const &... b3) noexcept |
|
template<int dim> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE IntVectND< dim > | getCell (BoxND< dim > const *boxes, int nboxes, Long icell) noexcept |
|
template<int dim> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > | makeSlab (BoxND< dim > const &b, int direction, int slab_index) noexcept |
|
template<int dim = AMREX_SPACEDIM, std::enable_if_t<(1<=dim &&dim<=3), int > = 0> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > | makeSingleCellBox (int i, int j, int k, IndexTypeND< dim > typ=IndexTypeND< dim >::TheCellType()) |
|
template<int dim> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > | makeSingleCellBox (IntVectND< dim > const &vect, IndexTypeND< dim > typ=IndexTypeND< dim >::TheCellType()) |
|
BoxArray | boxComplement (const Box &b1in, const Box &b2) |
| Make a BoxArray from the complement of b2 in b1in. More...
|
|
BoxArray | complementIn (const Box &b, const BoxArray &ba) |
| Make a BoxArray from the complement of BoxArray ba in Box b. More...
|
|
BoxArray | intersect (const BoxArray &ba, const Box &b, int ng=0) |
| Make a BoxArray from the intersection of Box b and BoxArray(+ghostcells). More...
|
|
BoxArray | intersect (const BoxArray &ba, const Box &b, const IntVect &ng) |
|
BoxArray | intersect (const BoxArray &lhs, const BoxArray &rhs) |
| Make a BoxArray from the intersection of two BoxArrays. More...
|
|
BoxList | intersect (const BoxArray &ba, const BoxList &bl) |
| Make a BoxList from the intersection of BoxArray and BoxList. More...
|
|
BoxArray | convert (const BoxArray &ba, IndexType typ) |
|
BoxArray | convert (const BoxArray &ba, const IntVect &typ) |
|
BoxArray | coarsen (const BoxArray &ba, int ratio) |
|
BoxArray | coarsen (const BoxArray &ba, const IntVect &ratio) |
|
BoxArray | refine (const BoxArray &ba, int ratio) |
|
BoxArray | refine (const BoxArray &ba, const IntVect &ratio) |
|
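For example, given a level's BoxArray grids and its problem-domain Box domain (a sketch):

    amrex::BoxArray cba = amrex::coarsen(grids, 2);   // coarsened layout
    amrex::BoxArray nba = amrex::convert(grids, amrex::IndexType::TheNodeType()); // nodal version
    amrex::BoxArray uncovered = amrex::complementIn(domain, grids); // parts of domain not covered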
BoxList | GetBndryCells (const BoxArray &ba, int ngrow) |
| Find the ghost cells of a given BoxArray. More...
|
|
void | readBoxArray (BoxArray &ba, std::istream &s, bool b=false) |
| Read a BoxArray from a stream. If b is true, read in a special way. More...
|
|
bool | match (const BoxArray &x, const BoxArray &y) |
| Note that two BoxArrays that match are not necessarily equal. More...
|
|
BoxArray | decompose (Box const &domain, int nboxes, Array< bool, AMREX_SPACEDIM > const &decomp={AMREX_D_DECL(true, true, true)}, bool no_overlap=false) |
| Decompose domain box into BoxArray. More...
|
|
std::ostream & | operator<< (std::ostream &os, const BoxArray &ba) |
| Write a BoxArray to an ostream in ASCII format. More...
|
|
std::ostream & | operator<< (std::ostream &os, const BoxArray::RefID &id) |
|
void | intersect (BoxDomain &dest, const BoxDomain &fin, const Box &b) |
| Compute the intersection of BoxDomain fin with Box b and place the result into BoxDomain dest. More...
|
|
void | refine (BoxDomain &dest, const BoxDomain &fin, int ratio) |
| Refine all Boxes in the domain by the refinement ratio and return the result in dest. More...
|
|
void | accrete (BoxDomain &dest, const BoxDomain &fin, int sz=1) |
| Grow each Box in BoxDomain fin by size sz and place the result into BoxDomain dest. More...
|
|
void | coarsen (BoxDomain &dest, const BoxDomain &fin, int ratio) |
| Coarsen all Boxes in the domain by the refinement ratio. The result is placed into a new BoxDomain. More...
|
|
BoxDomain | complementIn (const Box &b, const BoxDomain &bl) |
| Returns the complement of BoxDomain bl in Box b. More...
|
|
std::ostream & | operator<< (std::ostream &os, const BoxDomain &bd) |
| Output a BoxDomain to an ostream in ASCII format. More...
|
|
BoxList | complementIn (const Box &b, const BoxList &bl) |
| Returns a BoxList defining the complement of BoxList bl in Box b. More...
|
|
BoxList | boxDiff (const Box &b1in, const Box &b2) |
| Returns a BoxList defining the complement of b2 in b1in. More...
|
|
void | boxDiff (BoxList &bl_diff, const Box &b1in, const Box &b2) |
|
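For instance, boxDiff can build the one-cell-wide shell of ghost cells around a Box bx (illustrative):

    // Cells in grow(bx,1) that are not in bx itself.
    amrex::BoxList shell = amrex::boxDiff(amrex::grow(bx, 1), bx);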
BoxList | refine (const BoxList &bl, int ratio) |
| Returns a new BoxList in which each Box is refined by the given ratio. More...
|
|
BoxList | coarsen (const BoxList &bl, int ratio) |
| Returns a new BoxList in which each Box is coarsened by the given ratio. More...
|
|
BoxList | intersect (const BoxList &bl, const Box &b) |
| Returns a BoxList defining the intersection of bl with b. More...
|
|
BoxList | accrete (const BoxList &bl, int sz) |
| Returns a new BoxList in which each Box is grown by the given size. More...
|
|
BoxList | removeOverlap (const BoxList &bl) |
| Return BoxList which covers the same area but has no overlapping boxes. More...
|
|
std::ostream & | operator<< (std::ostream &os, const BoxList &blist) |
| Output a BoxList to an ostream in ASCII format. More...
|
|
std::ostream & | operator<< (std::ostream &os, const CArena &arena) |
|
template<auto I, auto N, class F > |
AMREX_GPU_HOST_DEVICE constexpr AMREX_INLINE void | constexpr_for (F const &f) |
|
std::ostream & | operator<< (std::ostream &os, const CoordSys &c) |
|
std::istream & | operator>> (std::istream &is, CoordSys &c) |
|
AMREX_GPU_HOST_DEVICE void | amrex_setvol (Box const &bx, Array4< Real > const &vol, GpuArray< Real, 1 > const &offset, GpuArray< Real, 1 > const &dx, const int coord) noexcept |
|
AMREX_GPU_HOST_DEVICE void | amrex_setarea (Box const &bx, Array4< Real > const &area, GpuArray< Real, 1 > const &offset, GpuArray< Real, 1 > const &dx, const int, const int coord) noexcept |
|
AMREX_GPU_HOST_DEVICE void | amrex_setdloga (Box const &bx, Array4< Real > const &dloga, GpuArray< Real, 1 > const &offset, GpuArray< Real, 1 > const &dx, const int, const int coord) noexcept |
|
AMREX_GPU_HOST_DEVICE void | amrex_setvol (Box const &bx, Array4< Real > const &vol, GpuArray< Real, 2 > const &offset, GpuArray< Real, 2 > const &dx, const int coord) noexcept |
|
AMREX_GPU_HOST_DEVICE void | amrex_setarea (Box const &bx, Array4< Real > const &area, GpuArray< Real, 2 > const &offset, GpuArray< Real, 2 > const &dx, const int dir, const int coord) noexcept |
|
AMREX_GPU_HOST_DEVICE void | amrex_setdloga (Box const &bx, Array4< Real > const &dloga, GpuArray< Real, 2 > const &offset, GpuArray< Real, 2 > const &dx, const int dir, const int coord) noexcept |
|
template<class L , class... Fs, typename... CTOs> |
void | AnyCTO ([[maybe_unused]] TypeList< CTOs... > list_of_compile_time_options, std::array< int, sizeof...(CTOs)> const &runtime_options, L &&l, Fs &&...cto_functs) |
| Compile time optimization of kernels with run time options. More...
|
|
template<int MT, typename T , class F , typename... CTOs> |
std::enable_if_t< std::is_integral_v< T > > | ParallelFor (TypeList< CTOs... > ctos, std::array< int, sizeof...(CTOs)> const &runtime_options, T N, F &&f) |
|
template<int MT, class F , int dim, typename... CTOs> |
void | ParallelFor (TypeList< CTOs... > ctos, std::array< int, sizeof...(CTOs)> const &runtime_options, BoxND< dim > const &box, F &&f) |
|
template<int MT, typename T , class F , int dim, typename... CTOs> |
std::enable_if_t< std::is_integral_v< T > > | ParallelFor (TypeList< CTOs... > ctos, std::array< int, sizeof...(CTOs)> const &runtime_options, BoxND< dim > const &box, T ncomp, F &&f) |
|
template<typename T , class F , typename... CTOs> |
std::enable_if_t< std::is_integral_v< T > > | ParallelFor (TypeList< CTOs... > ctos, std::array< int, sizeof...(CTOs)> const &option, T N, F &&f) |
| ParallelFor with compile time optimization of kernels with run time options. More...
|
|
template<class F , int dim, typename... CTOs> |
void | ParallelFor (TypeList< CTOs... > ctos, std::array< int, sizeof...(CTOs)> const &option, BoxND< dim > const &box, F &&f) |
| ParallelFor with compile time optimization of kernels with run time options. More...
|
|
template<typename T , class F , int dim, typename... CTOs> |
std::enable_if_t< std::is_integral_v< T > > | ParallelFor (TypeList< CTOs... > ctos, std::array< int, sizeof...(CTOs)> const &option, BoxND< dim > const &box, T ncomp, F &&f) |
| ParallelFor with compile time optimization of kernels with run time options. More...
|
|
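A sketch of the compile-time-option ParallelFor: one runtime integer selects which branch is instantiated, and the other branches are compiled out (the enum, fab, and box names are illustrative):

    enum A_options : int { A0 = 0, A1 };
    int A_runtime = 1;  // chosen at run time, e.g. from ParmParse
    amrex::ParallelFor(amrex::TypeList<amrex::CompileTimeOptions<A0, A1>>{},
                       {A_runtime}, box,
        [=] AMREX_GPU_DEVICE (int i, int j, int k, auto A_control)
        {
            if constexpr (A_control.value == A0) {
                fab(i,j,k) = 0.0;     // instantiated only for option A0
            } else {
                fab(i,j,k) *= 2.0;    // instantiated only for option A1
            }
        });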
std::string | demangle (const char *name) |
| Demangle C++ name. More...
|
|
template<typename T , std::enable_if_t< std::is_same_v< T, Dim3 >||std::is_same_v< T, XDim3 >> * = nullptr> |
std::ostream & | operator<< (std::ostream &os, const T &d) |
|
std::ostream & | operator<< (std::ostream &os, const DistributionMapping &pmap) |
| Our output operator. More...
|
|
std::ostream & | operator<< (std::ostream &os, const DistributionMapping::RefID &id) |
|
DistributionMapping | MakeSimilarDM (const BoxArray &ba, const MultiFab &mf, const IntVect &ng) |
| Function that creates a DistributionMapping "similar" to that of a MultiFab. More...
|
|
DistributionMapping | MakeSimilarDM (const BoxArray &ba, const BoxArray &src_ba, const DistributionMapping &src_dm, const IntVect &ng) |
| Function that creates a DistributionMapping "similar" to a given source BoxArray and DistributionMapping. More...
|
|
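Both MakeSimilarDM overloads build a distribution for a new BoxArray that follows the ownership of an existing layout, so a subsequent ParallelCopy moves little data between ranks. A sketch of typical use; mf and new_ba are placeholder names for an existing MultiFab and the new BoxArray:

    amrex::DistributionMapping new_dm =
        amrex::MakeSimilarDM(new_ba, mf, amrex::IntVect(0));
    amrex::MultiFab new_mf(new_ba, new_dm, mf.nComp(), 0);
    new_mf.ParallelCopy(mf);   // mostly on-rank copies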
template<typename T , typename ET = amrex_enum_traits<T>, std::enable_if_t< ET::value, int > = 0> |
std::vector< std::pair< std::string, T > > | getEnumNameValuePairs () |
|
template<typename T , typename ET = amrex_enum_traits<T>, std::enable_if_t< ET::value, int > = 0> |
T | getEnum (std::string_view const &s) |
|
template<typename T , typename ET = amrex_enum_traits<T>, std::enable_if_t< ET::value, int > = 0> |
T | getEnumCaseInsensitive (std::string_view const &s) |
|
template<typename T , typename ET = amrex_enum_traits<T>, std::enable_if_t< ET::value, int > = 0> |
std::string | getEnumNameString (T const &v) |
|
template<typename T , typename ET = amrex_enum_traits<T>, std::enable_if_t< ET::value, int > = 0> |
std::vector< std::string > | getEnumNameStrings () |
|
template<typename T , typename ET = amrex_enum_traits<T>, std::enable_if_t< ET::value, int > = 0> |
std::string | getEnumClassName () |
|
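These enum helpers work with enums that carry reflection metadata in amrex_enum_traits, typically registered with the AMREX_ENUM macro. A minimal sketch; the enum name and values are illustrative:

    #include <AMReX_Enum.H>
    #include <string>
    #include <vector>

    AMREX_ENUM(MyBC, periodic, reflect, outflow);   // registers amrex_enum_traits<MyBC>

    std::string enum_demo ()
    {
        MyBC bc = amrex::getEnum<MyBC>("reflect");                  // exact, case-sensitive match
        bc      = amrex::getEnumCaseInsensitive<MyBC>("OUTFLOW");
        std::vector<std::string> names = amrex::getEnumNameStrings<MyBC>();
        return amrex::getEnumNameString(bc) + " is one of " + std::to_string(names.size());
    }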
template<typename T , std::enable_if_t<!IsBaseFab< T >::value, int > = 0> |
Long | nBytesOwned (T const &) noexcept |
|
template<typename T > |
Long | nBytesOwned (BaseFab< T > const &fab) noexcept |
|
template<class DFAB , class SFAB , std::enable_if_t< std::conjunction_v< IsBaseFab< DFAB >, IsBaseFab< SFAB >, std::is_convertible< typename SFAB::value_type, typename DFAB::value_type >>, int > BAR = 0> |
void | Copy (FabArray< DFAB > &dst, FabArray< SFAB > const &src, int srccomp, int dstcomp, int numcomp, int nghost) |
|
template<class DFAB , class SFAB , std::enable_if_t< std::conjunction_v< IsBaseFab< DFAB >, IsBaseFab< SFAB >, std::is_convertible< typename SFAB::value_type, typename DFAB::value_type >>, int > BAR = 0> |
void | Copy (FabArray< DFAB > &dst, FabArray< SFAB > const &src, int srccomp, int dstcomp, int numcomp, const IntVect &nghost) |
|
template<class FAB , class bar = std::enable_if_t<IsBaseFab<FAB>::value>> |
void | Add (FabArray< FAB > &dst, FabArray< FAB > const &src, int srccomp, int dstcomp, int numcomp, int nghost) |
|
template<class FAB , class bar = std::enable_if_t<IsBaseFab<FAB>::value>> |
void | Add (FabArray< FAB > &dst, FabArray< FAB > const &src, int srccomp, int dstcomp, int numcomp, const IntVect &nghost) |
|
int | nComp (FabArrayBase const &fa) |
|
IntVect | nGrowVect (FabArrayBase const &fa) |
|
BoxArray const & | boxArray (FabArrayBase const &fa) |
|
DistributionMapping const & | DistributionMap (FabArrayBase const &fa) |
|
std::ostream & | operator<< (std::ostream &os, const FabArrayBase::BDKey &id) |
|
template<class FAB , class F , class bar = std::enable_if_t<IsBaseFab<FAB>::value>> |
FAB::value_type | ReduceSum (FabArray< FAB > const &fa, int nghost, F &&f) |
|
template<class FAB , class F , class bar = std::enable_if_t<IsBaseFab<FAB>::value>> |
FAB::value_type | ReduceSum (FabArray< FAB > const &fa, IntVect const &nghost, F &&f) |
|
template<class FAB1 , class FAB2 , class F , class bar = std::enable_if_t<IsBaseFab<FAB1>::value>> |
FAB1::value_type | ReduceSum (FabArray< FAB1 > const &fa1, FabArray< FAB2 > const &fa2, int nghost, F &&f) |
|
template<class FAB1 , class FAB2 , class F , class bar = std::enable_if_t<IsBaseFab<FAB1>::value>> |
FAB1::value_type | ReduceSum (FabArray< FAB1 > const &fa1, FabArray< FAB2 > const &fa2, IntVect const &nghost, F &&f) |
|
template<class FAB1 , class FAB2 , class FAB3 , class F , class bar = std::enable_if_t<IsBaseFab<FAB1>::value>> |
FAB1::value_type | ReduceSum (FabArray< FAB1 > const &fa1, FabArray< FAB2 > const &fa2, FabArray< FAB3 > const &fa3, int nghost, F &&f) |
|
template<class FAB1 , class FAB2 , class FAB3 , class F , class bar = std::enable_if_t<IsBaseFab<FAB1>::value>> |
FAB1::value_type | ReduceSum (FabArray< FAB1 > const &fa1, FabArray< FAB2 > const &fa2, FabArray< FAB3 > const &fa3, IntVect const &nghost, F &&f) |
|
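The ReduceSum/ReduceMin/ReduceMax family hands the callable a sub-box and the matching Array4 and combines the returned partial results over the whole FabArray; the caller is responsible for any MPI reduction. A sketch that sums component 0 of a MultiFab over valid cells, assuming this callable signature:

    amrex::Real local_sum = amrex::ReduceSum(mf, 0,   // 0 ghost cells
        [=] AMREX_GPU_HOST_DEVICE (amrex::Box const& bx,
                                   amrex::Array4<amrex::Real const> const& a) -> amrex::Real
        {
            amrex::Real s = 0.;
            amrex::Loop(bx, [&] (int i, int j, int k) { s += a(i,j,k); });
            return s;
        });
    amrex::ParallelDescriptor::ReduceRealSum(local_sum);   // combine across MPI ranks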
template<class FAB , class F , class bar = std::enable_if_t<IsBaseFab<FAB>::value>> |
FAB::value_type | ReduceMin (FabArray< FAB > const &fa, int nghost, F &&f) |
|
template<class FAB , class F , class bar = std::enable_if_t<IsBaseFab<FAB>::value>> |
FAB::value_type | ReduceMin (FabArray< FAB > const &fa, IntVect const &nghost, F &&f) |
|
template<class FAB1 , class FAB2 , class F , class bar = std::enable_if_t<IsBaseFab<FAB1>::value>> |
FAB1::value_type | ReduceMin (FabArray< FAB1 > const &fa1, FabArray< FAB2 > const &fa2, int nghost, F &&f) |
|
template<class FAB1 , class FAB2 , class F , class bar = std::enable_if_t<IsBaseFab<FAB1>::value>> |
FAB1::value_type | ReduceMin (FabArray< FAB1 > const &fa1, FabArray< FAB2 > const &fa2, IntVect const &nghost, F &&f) |
|
template<class FAB1 , class FAB2 , class FAB3 , class F , class bar = std::enable_if_t<IsBaseFab<FAB1>::value>> |
FAB1::value_type | ReduceMin (FabArray< FAB1 > const &fa1, FabArray< FAB2 > const &fa2, FabArray< FAB3 > const &fa3, int nghost, F &&f) |
|
template<class FAB1 , class FAB2 , class FAB3 , class F , class bar = std::enable_if_t<IsBaseFab<FAB1>::value>> |
FAB1::value_type | ReduceMin (FabArray< FAB1 > const &fa1, FabArray< FAB2 > const &fa2, FabArray< FAB3 > const &fa3, IntVect const &nghost, F &&f) |
|
template<class FAB , class F , class bar = std::enable_if_t<IsBaseFab<FAB>::value>> |
FAB::value_type | ReduceMax (FabArray< FAB > const &fa, int nghost, F &&f) |
|
template<class FAB , class F , class bar = std::enable_if_t<IsBaseFab<FAB>::value>> |
FAB::value_type | ReduceMax (FabArray< FAB > const &fa, IntVect const &nghost, F &&f) |
|
template<class FAB1 , class FAB2 , class F , class bar = std::enable_if_t<IsBaseFab<FAB1>::value>> |
FAB1::value_type | ReduceMax (FabArray< FAB1 > const &fa1, FabArray< FAB2 > const &fa2, int nghost, F &&f) |
|
template<class FAB1 , class FAB2 , class F , class bar = std::enable_if_t<IsBaseFab<FAB1>::value>> |
FAB1::value_type | ReduceMax (FabArray< FAB1 > const &fa1, FabArray< FAB2 > const &fa2, IntVect const &nghost, F &&f) |
|
template<class FAB1 , class FAB2 , class FAB3 , class F , class bar = std::enable_if_t<IsBaseFab<FAB1>::value>> |
FAB1::value_type | ReduceMax (FabArray< FAB1 > const &fa1, FabArray< FAB2 > const &fa2, FabArray< FAB3 > const &fa3, int nghost, F &&f) |
|
template<class FAB1 , class FAB2 , class FAB3 , class F , class bar = std::enable_if_t<IsBaseFab<FAB1>::value>> |
FAB1::value_type | ReduceMax (FabArray< FAB1 > const &fa1, FabArray< FAB2 > const &fa2, FabArray< FAB3 > const &fa3, IntVect const &nghost, F &&f) |
|
template<class FAB , class F , class bar = std::enable_if_t<IsBaseFab<FAB>::value>> |
bool | ReduceLogicalAnd (FabArray< FAB > const &fa, int nghost, F &&f) |
|
template<class FAB , class F , class bar = std::enable_if_t<IsBaseFab<FAB>::value>> |
bool | ReduceLogicalAnd (FabArray< FAB > const &fa, IntVect const &nghost, F &&f) |
|
template<class FAB1 , class FAB2 , class F , class bar = std::enable_if_t<IsBaseFab<FAB1>::value>> |
bool | ReduceLogicalAnd (FabArray< FAB1 > const &fa1, FabArray< FAB2 > const &fa2, int nghost, F &&f) |
|
template<class FAB1 , class FAB2 , class F , class bar = std::enable_if_t<IsBaseFab<FAB1>::value>> |
bool | ReduceLogicalAnd (FabArray< FAB1 > const &fa1, FabArray< FAB2 > const &fa2, IntVect const &nghost, F &&f) |
|
template<class FAB , class F , class bar = std::enable_if_t<IsBaseFab<FAB>::value>> |
bool | ReduceLogicalOr (FabArray< FAB > const &fa, int nghost, F &&f) |
|
template<class FAB , class F , class bar = std::enable_if_t<IsBaseFab<FAB>::value>> |
bool | ReduceLogicalOr (FabArray< FAB > const &fa, IntVect const &nghost, F &&f) |
|
template<class FAB1 , class FAB2 , class F , class bar = std::enable_if_t<IsBaseFab<FAB1>::value>> |
bool | ReduceLogicalOr (FabArray< FAB1 > const &fa1, FabArray< FAB2 > const &fa2, int nghost, F &&f) |
|
template<class FAB1 , class FAB2 , class F , class bar = std::enable_if_t<IsBaseFab<FAB1>::value>> |
bool | ReduceLogicalOr (FabArray< FAB1 > const &fa1, FabArray< FAB2 > const &fa2, IntVect const &nghost, F &&f) |
|
template<class FAB , class bar = std::enable_if_t<IsBaseFab<FAB>::value>> |
void | printCell (FabArray< FAB > const &mf, const IntVect &cell, int comp=-1, const IntVect &ng=IntVect::TheZeroVector()) |
|
template<class FAB , class bar = std::enable_if_t<IsBaseFab<FAB>::value>> |
void | Subtract (FabArray< FAB > &dst, FabArray< FAB > const &src, int srccomp, int dstcomp, int numcomp, int nghost) |
|
template<class FAB , class bar = std::enable_if_t<IsBaseFab<FAB>::value>> |
void | Subtract (FabArray< FAB > &dst, FabArray< FAB > const &src, int srccomp, int dstcomp, int numcomp, const IntVect &nghost) |
|
template<class FAB , class bar = std::enable_if_t<IsBaseFab<FAB>::value>> |
void | Multiply (FabArray< FAB > &dst, FabArray< FAB > const &src, int srccomp, int dstcomp, int numcomp, int nghost) |
|
template<class FAB , class bar = std::enable_if_t<IsBaseFab<FAB>::value>> |
void | Multiply (FabArray< FAB > &dst, FabArray< FAB > const &src, int srccomp, int dstcomp, int numcomp, const IntVect &nghost) |
|
template<class FAB , class bar = std::enable_if_t<IsBaseFab<FAB>::value>> |
void | Divide (FabArray< FAB > &dst, FabArray< FAB > const &src, int srccomp, int dstcomp, int numcomp, int nghost) |
|
template<class FAB , class bar = std::enable_if_t<IsBaseFab<FAB>::value>> |
void | Divide (FabArray< FAB > &dst, FabArray< FAB > const &src, int srccomp, int dstcomp, int numcomp, const IntVect &nghost) |
|
template<class FAB , class bar = std::enable_if_t<IsBaseFab<FAB>::value>> |
void | Abs (FabArray< FAB > &fa, int icomp, int numcomp, int nghost) |
|
template<class FAB , class bar = std::enable_if_t<IsBaseFab<FAB>::value>> |
void | Abs (FabArray< FAB > &fa, int icomp, int numcomp, const IntVect &nghost) |
|
template<class FAB , class bar = std::enable_if_t<IsBaseFab<FAB>::value>> |
void | prefetchToHost (FabArray< FAB > const &fa, const bool synchronous=true) |
|
template<class FAB , class bar = std::enable_if_t<IsBaseFab<FAB>::value>> |
void | prefetchToDevice (FabArray< FAB > const &fa, const bool synchronous=true) |
|
template<class FAB , class IFAB , class bar = std::enable_if_t<IsBaseFab<FAB>::value && IsBaseFab<IFAB>::value>> |
void | OverrideSync (FabArray< FAB > &fa, FabArray< IFAB > const &msk, const Periodicity &period) |
|
template<class FAB , class IFAB , class bar = std::enable_if_t<IsBaseFab<FAB>::value && IsBaseFab<IFAB>::value>> |
void | OverrideSync_nowait (FabArray< FAB > &fa, FabArray< IFAB > const &msk, const Periodicity &period) |
|
template<class FAB , class bar = std::enable_if_t<IsBaseFab<FAB>::value>> |
void | OverrideSync_finish (FabArray< FAB > &fa) |
|
template<class FAB , class foo = std::enable_if_t<IsBaseFab<FAB>::value>> |
void | dtoh_memcpy (FabArray< FAB > &dst, FabArray< FAB > const &src, int scomp, int dcomp, int ncomp) |
|
template<class FAB , class foo = std::enable_if_t<IsBaseFab<FAB>::value>> |
void | dtoh_memcpy (FabArray< FAB > &dst, FabArray< FAB > const &src) |
|
template<class FAB , class foo = std::enable_if_t<IsBaseFab<FAB>::value>> |
void | htod_memcpy (FabArray< FAB > &dst, FabArray< FAB > const &src, int scomp, int dcomp, int ncomp) |
|
template<class FAB , class foo = std::enable_if_t<IsBaseFab<FAB>::value>> |
void | htod_memcpy (FabArray< FAB > &dst, FabArray< FAB > const &src) |
|
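dtoh_memcpy and htod_memcpy perform raw device-to-host and host-to-device copies between two FabArrays with identical layouts, which is useful for staging GPU-resident data into pinned host memory, e.g. before I/O. A sketch (mf is a placeholder for a device-resident MultiFab; in CPU-only builds this reduces to a plain copy):

    amrex::MultiFab host_mf(mf.boxArray(), mf.DistributionMap(), mf.nComp(), mf.nGrowVect(),
                            amrex::MFInfo().SetArena(amrex::The_Pinned_Arena()));
    amrex::dtoh_memcpy(host_mf, mf);    // all components, device -> host
    // ... work on host_mf on the CPU ...
    amrex::htod_memcpy(mf, host_mf);    // copy back if modified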
template<class FAB , class foo = std::enable_if_t<IsBaseFab<FAB>::value>> |
IntVect | indexFromValue (FabArray< FAB > const &mf, int comp, IntVect const &nghost, typename FAB::value_type value) |
|
template<typename FAB , std::enable_if_t< IsBaseFab< FAB >::value, int > FOO = 0> |
FAB::value_type | Dot (FabArray< FAB > const &x, int xcomp, FabArray< FAB > const &y, int ycomp, int ncomp, IntVect const &nghost, bool local=false) |
| Compute the dot product of two FabArrays. More...
|
|
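Dot reduces the pointwise product of the selected components of two FabArrays over the given ghost region; following the usual AMReX convention, the result is reduced across MPI ranks unless local is set to true. For example (a sketch; x and y are placeholders for MultiFabs sharing a BoxArray and DistributionMapping):

    amrex::Real d       = amrex::Dot(x, 0, y, 0, 1, amrex::IntVect(0));        // global dot product
    amrex::Real d_local = amrex::Dot(x, 0, y, 0, 1, amrex::IntVect(0), true);  // this rank's partial sum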
template<class MF , std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0> |
void | setVal (MF &dst, typename MF::value_type val) |
| dst = val More...
|
|
template<class MF , std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0> |
void | setBndry (MF &dst, typename MF::value_type val, int scomp, int ncomp) |
| dst = val in ghost cells. More...
|
|
template<class MF , std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0> |
void | Scale (MF &dst, typename MF::value_type val, int scomp, int ncomp, int nghost) |
| dst *= val More...
|
|
template<class DMF , class SMF , std::enable_if_t< IsMultiFabLike_v< DMF > &&IsMultiFabLike_v< SMF >, int > = 0> |
void | LocalCopy (DMF &dst, SMF const &src, int scomp, int dcomp, int ncomp, IntVect const &nghost) |
| dst = src More...
|
|
template<class MF , std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0> |
void | LocalAdd (MF &dst, MF const &src, int scomp, int dcomp, int ncomp, IntVect const &nghost) |
| dst += src More...
|
|
template<class MF , std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0> |
void | Saxpy (MF &dst, typename MF::value_type a, MF const &src, int scomp, int dcomp, int ncomp, IntVect const &nghost) |
| dst += a * src More...
|
|
template<class MF , std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0> |
void | Xpay (MF &dst, typename MF::value_type a, MF const &src, int scomp, int dcomp, int ncomp, IntVect const &nghost) |
| dst = src + a * dst More...
|
|
template<class MF , std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0> |
void | LinComb (MF &dst, typename MF::value_type a, MF const &src_a, int acomp, typename MF::value_type b, MF const &src_b, int bcomp, int dcomp, int ncomp, IntVect const &nghost) |
| dst = a*src_a + b*src_b More...
|
|
template<class MF , std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0> |
void | ParallelCopy (MF &dst, MF const &src, int scomp, int dcomp, int ncomp, IntVect const &ng_src=IntVect(0), IntVect const &ng_dst=IntVect(0), Periodicity const &period=Periodicity::NonPeriodic()) |
| dst = src with MPI communication More...
|
|
template<class MF , std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0> |
MF::value_type | norminf (MF const &mf, int scomp, int ncomp, IntVect const &nghost, bool local=false) |
|
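The free functions above provide BLAS-1 style updates for any MultiFab-like type. A short sketch chaining several of them; u, v, and w are placeholder names for single-component MultiFabs defined on the same BoxArray and DistributionMapping:

    amrex::setVal(u, amrex::Real(0.0));                                        // u = 0
    amrex::LocalAdd(u, v, 0, 0, 1, amrex::IntVect(0));                         // u += v
    amrex::Saxpy(u, amrex::Real(2.0), v, 0, 0, 1, amrex::IntVect(0));          // u += 2*v
    amrex::LinComb(w, amrex::Real(1.0), u, 0, amrex::Real(-1.0), v, 0,
                   0, 1, amrex::IntVect(0));                                   // w = u - v
    amrex::Real nmax = amrex::norminf(w, 0, 1, amrex::IntVect(0));             // max norm (global unless local=true)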
template<class MF , std::size_t N, std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0> |
void | setVal (Array< MF, N > &dst, typename MF::value_type val) |
| dst = val More...
|
|
template<class MF , std::size_t N, std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0> |
void | setBndry (Array< MF, N > &dst, typename MF::value_type val, int scomp, int ncomp) |
| dst = val in ghost cells. More...
|
|
template<class MF , std::size_t N, std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0> |
void | Scale (Array< MF, N > &dst, typename MF::value_type val, int scomp, int ncomp, int nghost) |
| dst *= val More...
|
|
template<class DMF , class SMF , std::size_t N, std::enable_if_t< IsMultiFabLike_v< DMF > &&IsMultiFabLike_v< SMF >, int > = 0> |
void | LocalCopy (Array< DMF, N > &dst, Array< SMF, N > const &src, int scomp, int dcomp, int ncomp, IntVect const &nghost) |
| dst = src More...
|
|
template<class MF , std::size_t N, std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0> |
void | LocalAdd (Array< MF, N > &dst, Array< MF, N > const &src, int scomp, int dcomp, int ncomp, IntVect const &nghost) |
| dst += src More...
|
|
template<class MF , std::size_t N, std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0> |
void | Saxpy (Array< MF, N > &dst, typename MF::value_type a, Array< MF, N > const &src, int scomp, int dcomp, int ncomp, IntVect const &nghost) |
| dst += a * src More...
|
|
template<class MF , std::size_t N, std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0> |
void | Xpay (Array< MF, N > &dst, typename MF::value_type a, Array< MF, N > const &src, int scomp, int dcomp, int ncomp, IntVect const &nghost) |
| dst = src + a * dst More...
|
|
template<class MF , std::size_t N, std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0> |
void | LinComb (Array< MF, N > &dst, typename MF::value_type a, Array< MF, N > const &src_a, int acomp, typename MF::value_type b, Array< MF, N > const &src_b, int bcomp, int dcomp, int ncomp, IntVect const &nghost) |
| dst = a*src_a + b*src_b More...
|
|
template<class MF , std::size_t N, std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0> |
void | ParallelCopy (Array< MF, N > &dst, Array< MF, N > const &src, int scomp, int dcomp, int ncomp, IntVect const &ng_src=IntVect(0), IntVect const &ng_dst=IntVect(0), Periodicity const &period=Periodicity::NonPeriodic()) |
| dst = src with MPI communication More...
|
|
template<class MF , std::size_t N, std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0> |
MF::value_type | norminf (Array< MF, N > const &mf, int scomp, int ncomp, IntVect const &nghost, bool local=false) |
|
template<class MF , std::size_t N, std::enable_if_t< IsMultiFabLike_v< MF > &&(N > 0), int > = 0> |
int | nComp (Array< MF, N > const &mf) |
|
template<class MF , std::size_t N, std::enable_if_t< IsMultiFabLike_v< MF > &&(N > 0), int > = 0> |
IntVect | nGrowVect (Array< MF, N > const &mf) |
|
template<class MF , std::size_t N, std::enable_if_t< IsMultiFabLike_v< MF > &&(N > 0), int > = 0> |
BoxArray const & | boxArray (Array< MF, N > const &mf) |
|
template<class MF , std::size_t N, std::enable_if_t< IsMultiFabLike_v< MF > &&(N > 0), int > = 0> |
DistributionMapping const & | DistributionMap (Array< MF, N > const &mf) |
|
std::ostream & | operator<< (std::ostream &os, const IntDescriptor &id) |
|
std::istream & | operator>> (std::istream &is, IntDescriptor &id) |
|
void | ONES_COMP_NEG (Long &n, int nb, Long incr) |
|
int | _pd_get_bit (char const *base, int offs, int nby, const int *ord) |
|
void | _pd_insert_field (Long in_long, int nb, char *out, int offs, int l_order, int l_bytes) |
|
void | _pd_set_bit (char *base, int offs) |
|
std::ostream & | operator<< (std::ostream &os, const RealDescriptor &rd) |
|
std::istream & | operator>> (std::istream &is, RealDescriptor &rd) |
|
std::ostream & | operator<< (std::ostream &os, const FArrayBox &f) |
|
std::istream & | operator>> (std::istream &is, FArrayBox &f) |
|
void | fab_filcc (Box const &bx, Array4< Real > const &qn, int ncomp, Box const &domain, Real const *, Real const *, BCRec const *bcn) |
|
void | fab_filfc (Box const &bx, Array4< Real > const &qn, int ncomp, Box const &domain, Real const *, Real const *, BCRec const *bcn) |
|
void | fab_filnd (Box const &bx, Array4< Real > const &qn, int ncomp, Box const &domain, Real const *, Real const *, BCRec const *bcn) |
|
std::ostream & | operator<< (std::ostream &, const Geometry &) |
| ASCII output operator for Geometry. More...
|
|
std::istream & | operator>> (std::istream &, Geometry &) |
| ASCII input operator for Geometry. More...
|
|
Geometry | coarsen (Geometry const &fine, IntVect const &rr) |
|
Geometry | coarsen (Geometry const &fine, int rr) |
|
Geometry | refine (Geometry const &crse, IntVect const &rr) |
|
Geometry | refine (Geometry const &crse, int rr) |
|
const Geometry & | DefaultGeometry () |
|
template<typename A1 , typename A2 , std::enable_if_t< IsArenaAllocator< A1 >::value &&IsArenaAllocator< A2 >::value, int > = 0> |
bool | operator== (A1 const &a1, A2 const &a2) |
|
template<typename A1 , typename A2 , std::enable_if_t< IsArenaAllocator< A1 >::value &&IsArenaAllocator< A2 >::value, int > = 0> |
bool | operator!= (A1 const &a1, A2 const &a2) |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE T | norm (const GpuComplex< T > &a_z) noexcept |
| Return the norm (magnitude squared) of a complex number. More...
|
|
template<typename U > |
std::ostream & | operator<< (std::ostream &out, const GpuComplex< U > &c) |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE GpuComplex< T > | operator+ (const GpuComplex< T > &a_x) |
| Identity operation on a complex number. More...
|
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE GpuComplex< T > | operator- (const GpuComplex< T > &a_x) |
| Negate a complex number. More...
|
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE GpuComplex< T > | operator- (const GpuComplex< T > &a_x, const GpuComplex< T > &a_y) noexcept |
| Subtract two complex numbers. More...
|
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE GpuComplex< T > | operator- (const GpuComplex< T > &a_x, const T &a_y) noexcept |
| Subtract a real number from a complex one. More...
|
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE GpuComplex< T > | operator- (const T &a_x, const GpuComplex< T > &a_y) noexcept |
| Subtract a complex number from a real one. More...
|
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE GpuComplex< T > | operator+ (const GpuComplex< T > &a_x, const GpuComplex< T > &a_y) noexcept |
| Add two complex numbers. More...
|
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE GpuComplex< T > | operator+ (const GpuComplex< T > &a_x, const T &a_y) noexcept |
| Add a real number to a complex one. More...
|
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE GpuComplex< T > | operator+ (const T &a_x, const GpuComplex< T > &a_y) noexcept |
| Add a complex number to a real one. More...
|
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE GpuComplex< T > | operator* (const GpuComplex< T > &a_x, const GpuComplex< T > &a_y) noexcept |
| Multiply two complex numbers. More...
|
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE GpuComplex< T > | operator* (const GpuComplex< T > &a_x, const T &a_y) noexcept |
| Multiply a complex number by a real one. More...
|
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE GpuComplex< T > | operator* (const T &a_x, const GpuComplex< T > &a_y) noexcept |
| Multiply a real number by a complex one. More...
|
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE GpuComplex< T > | operator/ (const GpuComplex< T > &a_x, const GpuComplex< T > &a_y) noexcept |
| Divide a complex number by another one. More...
|
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE GpuComplex< T > | operator/ (const GpuComplex< T > &a_x, const T &a_y) noexcept |
| Divide a complex number by a real number. More...
|
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE GpuComplex< T > | operator/ (const T &a_x, const GpuComplex< T > &a_y) noexcept |
| Divide a real number by a complex one. More...
|
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE GpuComplex< T > | polar (const T &a_r, const T &a_theta) noexcept |
| Return a complex number given its polar representation. More...
|
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE GpuComplex< T > | exp (const GpuComplex< T > &a_z) noexcept |
| Complex exponential function. More...
|
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE T | abs (const GpuComplex< T > &a_z) noexcept |
| Return the absolute value of a complex number. More...
|
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE GpuComplex< T > | sqrt (const GpuComplex< T > &a_z) noexcept |
| Return the square root of a complex number. More...
|
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE T | arg (const GpuComplex< T > &a_z) noexcept |
| Return the angle of a complex number's polar representation. More...
|
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE GpuComplex< T > | log (const GpuComplex< T > &a_z) noexcept |
| Complex natural logarithm function. More...
|
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE GpuComplex< T > | pow (const GpuComplex< T > &a_z, const T &a_y) noexcept |
| Raise a complex number to a (real) power. More...
|
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE GpuComplex< T > | pow (const GpuComplex< T > &a_z, int a_n) noexcept |
| Raise a complex number to an integer power. More...
|
|
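GpuComplex<T> provides a std::complex-like value type that is usable inside device code, together with the free functions listed above. A small host-side sketch:

    using amrex::GpuComplex;
    GpuComplex<amrex::Real> z = amrex::polar(amrex::Real(2.0), amrex::Real(0.5)); // 2*exp(0.5i)
    auto w  = z*z + GpuComplex<amrex::Real>(1.0, -1.0);
    auto m2 = amrex::norm(w);    // |w|^2
    auto m  = amrex::abs(w);     // |w|
    auto th = amrex::arg(w);     // phase angle
    auto p  = amrex::pow(w, 3);  // integer power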
gpuError_t | gpuGetLastError () |
|
const char * | gpuGetErrorString (gpuError_t error) |
|
template<class L , class... Lambdas> |
AMREX_GPU_GLOBAL void | launch_global (L f0, Lambdas... fs) |
|
template<class L > |
AMREX_GPU_DEVICE void | call_device (L &&f0) noexcept |
|
template<class L , class... Lambdas> |
AMREX_GPU_DEVICE void | call_device (L &&f0, Lambdas &&... fs) noexcept |
|
template<class L > |
void | launch_host (L &&f0) noexcept |
|
template<class L , class... Lambdas> |
void | launch_host (L &&f0, Lambdas &&... fs) noexcept |
|
template<typename T , typename L > |
void | launch (T const &n, L &&f) noexcept |
|
template<int MT, typename T , typename L > |
void | launch (T const &n, L &&f) noexcept |
|
template<typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>> |
AMREX_ATTRIBUTE_FLATTEN_FOR void | For (T n, L const &f) noexcept |
|
template<int MT, typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>> |
void | For (T n, L &&f) noexcept |
|
template<typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>> |
void | For (Gpu::KernelInfo const &, T n, L &&f) noexcept |
|
template<int MT, typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>> |
void | For (Gpu::KernelInfo const &, T n, L &&f) noexcept |
|
template<typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>> |
AMREX_ATTRIBUTE_FLATTEN_FOR void | ParallelFor (T n, L const &f) noexcept |
|
template<int MT, typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>> |
void | ParallelFor (T n, L &&f) noexcept |
|
template<typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>> |
void | ParallelFor (Gpu::KernelInfo const &, T n, L &&f) noexcept |
|
template<int MT, typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>> |
void | ParallelFor (Gpu::KernelInfo const &, T n, L &&f) noexcept |
|
template<typename L , int dim> |
AMREX_ATTRIBUTE_FLATTEN_FOR void | For (BoxND< dim > const &box, L const &f) noexcept |
|
template<int MT, typename L , int dim> |
void | For (BoxND< dim > const &box, L &&f) noexcept |
|
template<typename L , int dim> |
void | For (Gpu::KernelInfo const &, BoxND< dim > const &box, L &&f) noexcept |
|
template<int MT, typename L , int dim> |
void | For (Gpu::KernelInfo const &, BoxND< dim > const &box, L &&f) noexcept |
|
template<typename L , int dim> |
AMREX_ATTRIBUTE_FLATTEN_FOR void | ParallelFor (BoxND< dim > const &box, L const &f) noexcept |
|
template<int MT, typename L , int dim> |
void | ParallelFor (BoxND< dim > const &box, L &&f) noexcept |
|
template<typename L , int dim> |
void | ParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box, L &&f) noexcept |
|
template<int MT, typename L , int dim> |
void | ParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box, L &&f) noexcept |
|
template<typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>> |
AMREX_ATTRIBUTE_FLATTEN_FOR void | For (BoxND< dim > const &box, T ncomp, L const &f) noexcept |
|
template<int MT, typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>> |
void | For (BoxND< dim > const &box, T ncomp, L &&f) noexcept |
|
template<typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>> |
void | For (Gpu::KernelInfo const &, BoxND< dim > const &box, T ncomp, L &&f) noexcept |
|
template<int MT, typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>> |
void | For (Gpu::KernelInfo const &, BoxND< dim > const &box, T ncomp, L &&f) noexcept |
|
template<typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>> |
AMREX_ATTRIBUTE_FLATTEN_FOR void | ParallelFor (BoxND< dim > const &box, T ncomp, L const &f) noexcept |
|
template<int MT, typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>> |
void | ParallelFor (BoxND< dim > const &box, T ncomp, L &&f) noexcept |
|
template<typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>> |
void | ParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box, T ncomp, L &&f) noexcept |
|
template<int MT, typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>> |
void | ParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box, T ncomp, L &&f) noexcept |
|
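The Box/ncomp ParallelFor overloads above are the standard way to write portable cell loops: one GPU thread (or one host loop iteration) per (i,j,k[,n]). A typical MFIter + ParallelFor sketch over a MultiFab mf (placeholder name):

    for (amrex::MFIter mfi(mf, amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi)
    {
        const amrex::Box& bx = mfi.tilebox();
        auto const& a = mf.array(mfi);
        amrex::ParallelFor(bx, mf.nComp(),
        [=] AMREX_GPU_DEVICE (int i, int j, int k, int n)
        {
            a(i,j,k,n) *= amrex::Real(2.0);
        });
    }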
template<typename L1 , typename L2 , int dim> |
void | For (BoxND< dim > const &box1, BoxND< dim > const &box2, L1 &&f1, L2 &&f2) noexcept |
|
template<int MT, typename L1 , typename L2 , int dim> |
void | For (BoxND< dim > const &box1, BoxND< dim > const &box2, L1 &&f1, L2 &&f2) noexcept |
|
template<typename L1 , typename L2 , int dim> |
void | For (Gpu::KernelInfo const &, BoxND< dim > const &box1, BoxND< dim > const &box2, L1 &&f1, L2 &&f2) noexcept |
|
template<int MT, typename L1 , typename L2 , int dim> |
void | For (Gpu::KernelInfo const &, BoxND< dim > const &box1, BoxND< dim > const &box2, L1 &&f1, L2 &&f2) noexcept |
|
template<typename L1 , typename L2 , typename L3 , int dim> |
void | For (BoxND< dim > const &box1, BoxND< dim > const &box2, BoxND< dim > const &box3, L1 &&f1, L2 &&f2, L3 &&f3) noexcept |
|
template<int MT, typename L1 , typename L2 , typename L3 , int dim> |
void | For (BoxND< dim > const &box1, BoxND< dim > const &box2, BoxND< dim > const &box3, L1 &&f1, L2 &&f2, L3 &&f3) noexcept |
|
template<typename L1 , typename L2 , typename L3 , int dim> |
void | For (Gpu::KernelInfo const &, BoxND< dim > const &box1, BoxND< dim > const &box2, BoxND< dim > const &box3, L1 &&f1, L2 &&f2, L3 &&f3) noexcept |
|
template<int MT, typename L1 , typename L2 , typename L3 , int dim> |
void | For (Gpu::KernelInfo const &, BoxND< dim > const &box1, BoxND< dim > const &box2, BoxND< dim > const &box3, L1 &&f1, L2 &&f2, L3 &&f3) noexcept |
|
template<typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>> |
void | For (BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2) noexcept |
|
template<int MT, typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>> |
void | For (BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2) noexcept |
|
template<typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>> |
void | For (Gpu::KernelInfo const &, BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2) noexcept |
|
template<int MT, typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>> |
void | For (Gpu::KernelInfo const &, BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2) noexcept |
|
template<typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>, typename M3 = std::enable_if_t<std::is_integral_v<T3>>> |
void | For (BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2, BoxND< dim > const &box3, T3 ncomp3, L3 &&f3) noexcept |
|
template<int MT, typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>, typename M3 = std::enable_if_t<std::is_integral_v<T3>>> |
void | For (BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2, BoxND< dim > const &box3, T3 ncomp3, L3 &&f3) noexcept |
|
template<typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>, typename M3 = std::enable_if_t<std::is_integral_v<T3>>> |
void | For (Gpu::KernelInfo const &, BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2, BoxND< dim > const &box3, T3 ncomp3, L3 &&f3) noexcept |
|
template<int MT, typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>, typename M3 = std::enable_if_t<std::is_integral_v<T3>>> |
void | For (Gpu::KernelInfo const &, BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2, BoxND< dim > const &box3, T3 ncomp3, L3 &&f3) noexcept |
|
template<typename L1 , typename L2 , int dim> |
void | ParallelFor (BoxND< dim > const &box1, BoxND< dim > const &box2, L1 &&f1, L2 &&f2) noexcept |
|
template<int MT, typename L1 , typename L2 , int dim> |
void | ParallelFor (BoxND< dim > const &box1, BoxND< dim > const &box2, L1 &&f1, L2 &&f2) noexcept |
|
template<typename L1 , typename L2 , int dim> |
void | ParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, BoxND< dim > const &box2, L1 &&f1, L2 &&f2) noexcept |
|
template<int MT, typename L1 , typename L2 , int dim> |
void | ParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, BoxND< dim > const &box2, L1 &&f1, L2 &&f2) noexcept |
|
template<typename L1 , typename L2 , typename L3 , int dim> |
void | ParallelFor (BoxND< dim > const &box1, BoxND< dim > const &box2, BoxND< dim > const &box3, L1 &&f1, L2 &&f2, L3 &&f3) noexcept |
|
template<int MT, typename L1 , typename L2 , typename L3 , int dim> |
void | ParallelFor (BoxND< dim > const &box1, BoxND< dim > const &box2, BoxND< dim > const &box3, L1 &&f1, L2 &&f2, L3 &&f3) noexcept |
|
template<typename L1 , typename L2 , typename L3 , int dim> |
void | ParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, BoxND< dim > const &box2, BoxND< dim > const &box3, L1 &&f1, L2 &&f2, L3 &&f3) noexcept |
|
template<int MT, typename L1 , typename L2 , typename L3 , int dim> |
void | ParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, BoxND< dim > const &box2, BoxND< dim > const &box3, L1 &&f1, L2 &&f2, L3 &&f3) noexcept |
|
template<typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>> |
void | ParallelFor (BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2) noexcept |
|
template<int MT, typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>> |
void | ParallelFor (BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2) noexcept |
|
template<typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>> |
void | ParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2) noexcept |
|
template<int MT, typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>> |
void | ParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2) noexcept |
|
template<typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>, typename M3 = std::enable_if_t<std::is_integral_v<T3>>> |
void | ParallelFor (BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2, BoxND< dim > const &box3, T3 ncomp3, L3 &&f3) noexcept |
|
template<int MT, typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>, typename M3 = std::enable_if_t<std::is_integral_v<T3>>> |
void | ParallelFor (BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2, BoxND< dim > const &box3, T3 ncomp3, L3 &&f3) noexcept |
|
template<typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>, typename M3 = std::enable_if_t<std::is_integral_v<T3>>> |
void | ParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2, BoxND< dim > const &box3, T3 ncomp3, L3 &&f3) noexcept |
|
template<int MT, typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>, typename M3 = std::enable_if_t<std::is_integral_v<T3>>> |
void | ParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2, BoxND< dim > const &box3, T3 ncomp3, L3 &&f3) noexcept |
|
template<typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>> |
void | HostDeviceParallelFor (T n, L &&f) noexcept |
|
template<int MT, typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>> |
void | HostDeviceParallelFor (T n, L &&f) noexcept |
|
template<typename L , int dim> |
void | HostDeviceParallelFor (BoxND< dim > const &box, L &&f) noexcept |
|
template<int MT, typename L , int dim> |
void | HostDeviceParallelFor (BoxND< dim > const &box, L &&f) noexcept |
|
template<typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>> |
void | HostDeviceParallelFor (BoxND< dim > const &box, T ncomp, L &&f) noexcept |
|
template<int MT, typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>> |
void | HostDeviceParallelFor (BoxND< dim > const &box, T ncomp, L &&f) noexcept |
|
template<typename L1 , typename L2 , int dim> |
void | HostDeviceParallelFor (BoxND< dim > const &box1, BoxND< dim > const &box2, L1 &&f1, L2 &&f2) noexcept |
|
template<int MT, typename L1 , typename L2 , int dim> |
void | HostDeviceParallelFor (BoxND< dim > const &box1, BoxND< dim > const &box2, L1 &&f1, L2 &&f2) noexcept |
|
template<typename L1 , typename L2 , typename L3 , int dim> |
void | HostDeviceParallelFor (BoxND< dim > const &box1, BoxND< dim > const &box2, BoxND< dim > const &box3, L1 &&f1, L2 &&f2, L3 &&f3) noexcept |
|
template<int MT, typename L1 , typename L2 , typename L3 , int dim> |
void | HostDeviceParallelFor (BoxND< dim > const &box1, BoxND< dim > const &box2, BoxND< dim > const &box3, L1 &&f1, L2 &&f2, L3 &&f3) noexcept |
|
template<typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>> |
void | HostDeviceParallelFor (BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2) noexcept |
|
template<int MT, typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>> |
void | HostDeviceParallelFor (BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2) noexcept |
|
template<typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>, typename M3 = std::enable_if_t<std::is_integral_v<T3>>> |
void | HostDeviceParallelFor (BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2, BoxND< dim > const &box3, T3 ncomp3, L3 &&f3) noexcept |
|
template<int MT, typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>, typename M3 = std::enable_if_t<std::is_integral_v<T3>>> |
void | HostDeviceParallelFor (BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2, BoxND< dim > const &box3, T3 ncomp3, L3 &&f3) noexcept |
|
template<typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>> |
void | HostDeviceFor (T n, L &&f) noexcept |
|
template<int MT, typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>> |
void | HostDeviceFor (T n, L &&f) noexcept |
|
template<typename L , int dim> |
void | HostDeviceFor (BoxND< dim > const &box, L &&f) noexcept |
|
template<int MT, typename L , int dim> |
void | HostDeviceFor (BoxND< dim > const &box, L &&f) noexcept |
|
template<typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>> |
void | HostDeviceFor (BoxND< dim > const &box, T ncomp, L &&f) noexcept |
|
template<int MT, typename T , int dim, typename L , typename M = std::enable_if_t<std::is_integral_v<T>>> |
void | HostDeviceFor (BoxND< dim > const &box, T ncomp, L &&f) noexcept |
|
template<typename L1 , typename L2 , int dim> |
void | HostDeviceFor (BoxND< dim > const &box1, BoxND< dim > const &box2, L1 &&f1, L2 &&f2) noexcept |
|
template<int MT, typename L1 , typename L2 , int dim> |
void | HostDeviceFor (BoxND< dim > const &box1, BoxND< dim > const &box2, L1 &&f1, L2 &&f2) noexcept |
|
template<typename L1 , typename L2 , typename L3 , int dim> |
void | HostDeviceFor (BoxND< dim > const &box1, BoxND< dim > const &box2, BoxND< dim > const &box3, L1 &&f1, L2 &&f2, L3 &&f3) noexcept |
|
template<int MT, typename L1 , typename L2 , typename L3 , int dim> |
void | HostDeviceFor (BoxND< dim > const &box1, BoxND< dim > const &box2, BoxND< dim > const &box3, L1 &&f1, L2 &&f2, L3 &&f3) noexcept |
|
template<typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>> |
void | HostDeviceFor (BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2) noexcept |
|
template<int MT, typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>> |
void | HostDeviceFor (BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2) noexcept |
|
template<typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>, typename M3 = std::enable_if_t<std::is_integral_v<T3>>> |
void | HostDeviceFor (BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2, BoxND< dim > const &box3, T3 ncomp3, L3 &&f3) noexcept |
|
template<int MT, typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>, typename M3 = std::enable_if_t<std::is_integral_v<T3>>> |
void | HostDeviceFor (BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2, BoxND< dim > const &box3, T3 ncomp3, L3 &&f3) noexcept |
|
template<typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>> |
void | HostDeviceParallelFor (Gpu::KernelInfo const &, T n, L &&f) noexcept |
|
template<int MT, typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>> |
void | HostDeviceParallelFor (Gpu::KernelInfo const &, T n, L &&f) noexcept |
|
template<typename L , int dim> |
void | HostDeviceParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box, L &&f) noexcept |
|
template<int MT, typename L , int dim> |
void | HostDeviceParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box, L &&f) noexcept |
|
template<typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>> |
void | HostDeviceParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box, T ncomp, L &&f) noexcept |
|
template<int MT, typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>> |
void | HostDeviceParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box, T ncomp, L &&f) noexcept |
|
template<typename L1 , typename L2 , int dim> |
void | HostDeviceParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, BoxND< dim > const &box2, L1 &&f1, L2 &&f2) noexcept |
|
template<int MT, typename L1 , typename L2 , int dim> |
void | HostDeviceParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, BoxND< dim > const &box2, L1 &&f1, L2 &&f2) noexcept |
|
template<typename L1 , typename L2 , typename L3 , int dim> |
void | HostDeviceParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, BoxND< dim > const &box2, BoxND< dim > const &box3, L1 &&f1, L2 &&f2, L3 &&f3) noexcept |
|
template<int MT, typename L1 , typename L2 , typename L3 , int dim> |
void | HostDeviceParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, BoxND< dim > const &box2, BoxND< dim > const &box3, L1 &&f1, L2 &&f2, L3 &&f3) noexcept |
|
template<typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>> |
void | HostDeviceParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2) noexcept |
|
template<int MT, typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>> |
void | HostDeviceParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2) noexcept |
|
template<typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>, typename M3 = std::enable_if_t<std::is_integral_v<T3>>> |
void | HostDeviceParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2, BoxND< dim > const &box3, T3 ncomp3, L3 &&f3) noexcept |
|
template<int MT, typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>, typename M3 = std::enable_if_t<std::is_integral_v<T3>>> |
void | HostDeviceParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2, BoxND< dim > const &box3, T3 ncomp3, L3 &&f3) noexcept |
|
template<typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>> |
void | HostDeviceFor (Gpu::KernelInfo const &, T n, L &&f) noexcept |
|
template<int MT, typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>> |
void | HostDeviceFor (Gpu::KernelInfo const &, T n, L &&f) noexcept |
|
template<typename L , int dim> |
void | HostDeviceFor (Gpu::KernelInfo const &, BoxND< dim > const &box, L &&f) noexcept |
|
template<int MT, typename L , int dim> |
void | HostDeviceFor (Gpu::KernelInfo const &, BoxND< dim > const &box, L &&f) noexcept |
|
template<typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>> |
void | HostDeviceFor (Gpu::KernelInfo const &, BoxND< dim > const &box, T ncomp, L &&f) noexcept |
|
template<int MT, typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>> |
void | HostDeviceFor (Gpu::KernelInfo const &, BoxND< dim > const &box, T ncomp, L &&f) noexcept |
|
template<typename L1 , typename L2 , int dim> |
void | HostDeviceFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, BoxND< dim > const &box2, L1 &&f1, L2 &&f2) noexcept |
|
template<int MT, typename L1 , typename L2 , int dim> |
void | HostDeviceFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, BoxND< dim > const &box2, L1 &&f1, L2 &&f2) noexcept |
|
template<typename L1 , typename L2 , typename L3 , int dim> |
void | HostDeviceFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, BoxND< dim > const &box2, BoxND< dim > const &box3, L1 &&f1, L2 &&f2, L3 &&f3) noexcept |
|
template<int MT, typename L1 , typename L2 , typename L3 , int dim> |
void | HostDeviceFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, BoxND< dim > const &box2, BoxND< dim > const &box3, L1 &&f1, L2 &&f2, L3 &&f3) noexcept |
|
template<typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>> |
void | HostDeviceFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2) noexcept |
|
template<int MT, typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>> |
void | HostDeviceFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2) noexcept |
|
template<typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>, typename M3 = std::enable_if_t<std::is_integral_v<T3>>> |
void | HostDeviceFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2, BoxND< dim > const &box3, T3 ncomp3, L3 &&f3) noexcept |
|
template<int MT, typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>, typename M3 = std::enable_if_t<std::is_integral_v<T3>>> |
void | HostDeviceFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2, BoxND< dim > const &box3, T3 ncomp3, L3 &&f3) noexcept |
|
template<typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>> |
AMREX_ATTRIBUTE_FLATTEN_FOR void | ParallelForRNG (T n, L const &f) noexcept |
|
template<typename L , int dim> |
AMREX_ATTRIBUTE_FLATTEN_FOR void | ParallelForRNG (BoxND< dim > const &box, L const &f) noexcept |
|
template<typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>> |
AMREX_ATTRIBUTE_FLATTEN_FOR void | ParallelForRNG (BoxND< dim > const &box, T ncomp, L const &f) noexcept |
|
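ParallelForRNG behaves like ParallelFor but additionally passes a RandomEngine to each work item, so amrex::Random and related generators can be called safely in device code. A sketch, reusing the placeholder names bx and a from the ParallelFor example above:

    amrex::ParallelForRNG(bx,
    [=] AMREX_GPU_DEVICE (int i, int j, int k, amrex::RandomEngine const& engine)
    {
        a(i,j,k) = amrex::Random(engine);   // uniform random number
    });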
template<typename L > |
void | single_task (L &&f) noexcept |
|
template<typename L > |
void | single_task (gpuStream_t stream, L const &f) noexcept |
|
template<int MT, typename L > |
void | launch (int nblocks, std::size_t shared_mem_bytes, gpuStream_t stream, L const &f) noexcept |
|
template<int MT, typename L > |
void | launch (int nblocks, gpuStream_t stream, L const &f) noexcept |
|
template<typename L > |
void | launch (int nblocks, int nthreads_per_block, std::size_t shared_mem_bytes, gpuStream_t stream, L const &f) noexcept |
|
template<typename L > |
void | launch (int nblocks, int nthreads_per_block, gpuStream_t stream, L &&f) noexcept |
|
template<int MT, typename T , typename L , std::enable_if_t< std::is_integral_v< T >, int > FOO = 0> |
void | launch (T const &n, L const &f) noexcept |
|
template<int MT, int dim, typename L > |
void | launch (BoxND< dim > const &box, L const &f) noexcept |
|
template<int MT, typename T , typename L , typename M = std::enable_if_t<std::is_integral<T>::value>> |
std::enable_if_t< MaybeDeviceRunnable< L >::value > | ParallelFor (Gpu::KernelInfo const &, T n, L const &f) noexcept |
|
template<int MT, typename L , int dim> |
std::enable_if_t< MaybeDeviceRunnable< L >::value > | ParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box, L const &f) noexcept |
|
template<int MT, typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral<T>::value>> |
std::enable_if_t< MaybeDeviceRunnable< L >::value > | ParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box, T ncomp, L const &f) noexcept |
|
template<typename T , typename L , typename M = std::enable_if_t<std::is_integral<T>::value>> |
std::enable_if_t< MaybeDeviceRunnable< L >::value > | ParallelForRNG (T n, L const &f) noexcept |
|
template<typename L , int dim> |
std::enable_if_t< MaybeDeviceRunnable< L >::value > | ParallelForRNG (BoxND< dim > const &box, L const &f) noexcept |
|
template<typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral<T>::value>> |
std::enable_if_t< MaybeDeviceRunnable< L >::value > | ParallelForRNG (BoxND< dim > const &box, T ncomp, L const &f) noexcept |
|
template<int MT, typename L1 , typename L2 , int dim> |
std::enable_if_t< MaybeDeviceRunnable< L1 >::value &&MaybeDeviceRunnable< L2 >::value > | ParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, BoxND< dim > const &box2, L1 &&f1, L2 &&f2) noexcept |
|
template<int MT, typename L1 , typename L2 , typename L3 , int dim> |
std::enable_if_t< MaybeDeviceRunnable< L1 >::value &&MaybeDeviceRunnable< L2 >::value &&MaybeDeviceRunnable< L3 >::value > | ParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, BoxND< dim > const &box2, BoxND< dim > const &box3, L1 &&f1, L2 &&f2, L3 &&f3) noexcept |
|
template<int MT, typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral<T1>::value>, typename M2 = std::enable_if_t<std::is_integral<T2>::value>> |
std::enable_if_t< MaybeDeviceRunnable< L1 >::value &&MaybeDeviceRunnable< L2 >::value > | ParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2) noexcept |
|
template<int MT, typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral<T1>::value>, typename M2 = std::enable_if_t<std::is_integral<T2>::value>, typename M3 = std::enable_if_t<std::is_integral<T3>::value>> |
std::enable_if_t< MaybeDeviceRunnable< L1 >::value &&MaybeDeviceRunnable< L2 >::value &&MaybeDeviceRunnable< L3 >::value > | ParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2, BoxND< dim > const &box3, T3 ncomp3, L3 &&f3) noexcept |
|
template<typename T , typename L , typename M = std::enable_if_t<std::is_integral<T>::value>> |
std::enable_if_t< MaybeDeviceRunnable< L >::value > | ParallelFor (Gpu::KernelInfo const &info, T n, L &&f) noexcept |
|
template<typename L , int dim> |
std::enable_if_t< MaybeDeviceRunnable< L >::value > | ParallelFor (Gpu::KernelInfo const &info, BoxND< dim > const &box, L &&f) noexcept |
|
template<typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral<T>::value>> |
std::enable_if_t< MaybeDeviceRunnable< L >::value > | ParallelFor (Gpu::KernelInfo const &info, BoxND< dim > const &box, T ncomp, L &&f) noexcept |
|
template<typename L1 , typename L2 , int dim> |
std::enable_if_t< MaybeDeviceRunnable< L1 >::value &&MaybeDeviceRunnable< L2 >::value > | ParallelFor (Gpu::KernelInfo const &info, BoxND< dim > const &box1, BoxND< dim > const &box2, L1 &&f1, L2 &&f2) noexcept |
|
template<typename L1 , typename L2 , typename L3 , int dim> |
std::enable_if_t< MaybeDeviceRunnable< L1 >::value &&MaybeDeviceRunnable< L2 >::value &&MaybeDeviceRunnable< L3 >::value > | ParallelFor (Gpu::KernelInfo const &info, BoxND< dim > const &box1, BoxND< dim > const &box2, BoxND< dim > const &box3, L1 &&f1, L2 &&f2, L3 &&f3) noexcept |
|
template<typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral<T1>::value>, typename M2 = std::enable_if_t<std::is_integral<T2>::value>> |
std::enable_if_t< MaybeDeviceRunnable< L1 >::value &&MaybeDeviceRunnable< L2 >::value > | ParallelFor (Gpu::KernelInfo const &info, BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2) noexcept |
|
template<typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral<T1>::value>, typename M2 = std::enable_if_t<std::is_integral<T2>::value>, typename M3 = std::enable_if_t<std::is_integral<T3>::value>> |
std::enable_if_t< MaybeDeviceRunnable< L1 >::value &&MaybeDeviceRunnable< L2 >::value &&MaybeDeviceRunnable< L3 >::value > | ParallelFor (Gpu::KernelInfo const &info, BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2, BoxND< dim > const &box3, T3 ncomp3, L3 &&f3) noexcept |
|
template<typename T , typename L , typename M = std::enable_if_t<std::is_integral<T>::value>> |
void | ParallelFor (T n, L &&f) noexcept |
|
template<typename L , int dim> |
void | ParallelFor (BoxND< dim > const &box, L &&f) noexcept |
|
template<typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral<T>::value>> |
void | ParallelFor (BoxND< dim > const &box, T ncomp, L &&f) noexcept |
|
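For illustration, a minimal sketch of a ParallelFor call (assuming an existing Box bx, a component count ncomp, and an Array4<Real> a obtained from a MultiFab or FArrayBox; these names are not part of the listing above):

    // One work item per cell and per component; runs on the device when GPU support is enabled.
    amrex::ParallelFor(bx, ncomp,
        [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept
        {
            a(i,j,k,n) = 0.0;
        });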
template<typename T , typename L , typename M = std::enable_if_t<std::is_integral<T>::value>> |
void | For (T n, L &&f) noexcept |
|
template<typename L , int dim> |
void | For (BoxND< dim > const &box, L &&f) noexcept |
|
template<typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral<T>::value>> |
void | For (BoxND< dim > const &box, T ncomp, L &&f) noexcept |
|
template<typename T , typename L , typename M = std::enable_if_t<std::is_integral<T>::value>> |
std::enable_if_t< MaybeHostDeviceRunnable< L >::value > | HostDeviceParallelFor (Gpu::KernelInfo const &info, T n, L &&f) noexcept |
|
template<int MT, typename T , typename L , typename M = std::enable_if_t<std::is_integral<T>::value>> |
std::enable_if_t< MaybeHostDeviceRunnable< L >::value > | HostDeviceParallelFor (Gpu::KernelInfo const &info, T n, L &&f) noexcept |
|
template<typename T , typename L , typename M = std::enable_if_t<std::is_integral<T>::value>> |
std::enable_if_t< MaybeHostDeviceRunnable< L >::value > | HostDeviceParallelFor (T n, L &&f) noexcept |
|
template<int MT, typename T , typename L , typename M = std::enable_if_t<std::is_integral<T>::value>> |
std::enable_if_t< MaybeHostDeviceRunnable< L >::value > | HostDeviceParallelFor (T n, L &&f) noexcept |
|
template<typename L , int dim> |
std::enable_if_t< MaybeHostDeviceRunnable< L >::value > | HostDeviceParallelFor (Gpu::KernelInfo const &info, BoxND< dim > const &box, L &&f) noexcept |
|
template<int MT, typename L , int dim> |
std::enable_if_t< MaybeHostDeviceRunnable< L >::value > | HostDeviceParallelFor (Gpu::KernelInfo const &info, BoxND< dim > const &box, L &&f) noexcept |
|
template<typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral<T>::value>> |
std::enable_if_t< MaybeHostDeviceRunnable< L >::value > | HostDeviceParallelFor (Gpu::KernelInfo const &info, BoxND< dim > const &box, T ncomp, L &&f) noexcept |
|
template<int MT, typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral<T>::value>> |
std::enable_if_t< MaybeHostDeviceRunnable< L >::value > | HostDeviceParallelFor (Gpu::KernelInfo const &info, BoxND< dim > const &box, T ncomp, L &&f) noexcept |
|
template<typename L1 , typename L2 , int dim> |
std::enable_if_t< MaybeHostDeviceRunnable< L1 >::value &&MaybeHostDeviceRunnable< L2 >::value > | HostDeviceParallelFor (Gpu::KernelInfo const &info, BoxND< dim > const &box1, BoxND< dim > const &box2, L1 &&f1, L2 &&f2) noexcept |
|
template<int MT, typename L1 , typename L2 , int dim> |
std::enable_if_t< MaybeHostDeviceRunnable< L1 >::value &&MaybeHostDeviceRunnable< L2 >::value > | HostDeviceParallelFor (Gpu::KernelInfo const &info, BoxND< dim > const &box1, BoxND< dim > const &box2, L1 &&f1, L2 &&f2) noexcept |
|
template<int MT, typename L1 , typename L2 , typename L3 , int dim> |
std::enable_if_t< MaybeHostDeviceRunnable< L1 >::value &&MaybeHostDeviceRunnable< L2 >::value &&MaybeHostDeviceRunnable< L3 >::value > | HostDeviceParallelFor (Gpu::KernelInfo const &info, BoxND< dim > const &box1, BoxND< dim > const &box2, BoxND< dim > const &box3, L1 &&f1, L2 &&f2, L3 &&f3) noexcept |
|
template<typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral<T1>::value>, typename M2 = std::enable_if_t<std::is_integral<T2>::value>> |
std::enable_if_t< MaybeHostDeviceRunnable< L1 >::value &&MaybeHostDeviceRunnable< L2 >::value > | HostDeviceParallelFor (Gpu::KernelInfo const &info, BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2) noexcept |
|
template<int MT, typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral<T1>::value>, typename M2 = std::enable_if_t<std::is_integral<T2>::value>> |
std::enable_if_t< MaybeHostDeviceRunnable< L1 >::value &&MaybeHostDeviceRunnable< L2 >::value > | HostDeviceParallelFor (Gpu::KernelInfo const &info, BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2) noexcept |
|
template<typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral<T1>::value>, typename M2 = std::enable_if_t<std::is_integral<T2>::value>, typename M3 = std::enable_if_t<std::is_integral<T3>::value>> |
std::enable_if_t< MaybeHostDeviceRunnable< L1 >::value &&MaybeHostDeviceRunnable< L2 >::value &&MaybeHostDeviceRunnable< L3 >::value > | HostDeviceParallelFor (Gpu::KernelInfo const &info, BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2, BoxND< dim > const &box3, T3 ncomp3, L3 &&f3) noexcept |
|
template<int MT, typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral<T1>::value>, typename M2 = std::enable_if_t<std::is_integral<T2>::value>, typename M3 = std::enable_if_t<std::is_integral<T3>::value>> |
std::enable_if_t< MaybeHostDeviceRunnable< L1 >::value &&MaybeHostDeviceRunnable< L2 >::value &&MaybeHostDeviceRunnable< L3 >::value > | HostDeviceParallelFor (Gpu::KernelInfo const &info, BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2, BoxND< dim > const &box3, T3 ncomp3, L3 &&f3) noexcept |
|
template<class L > |
AMREX_GPU_GLOBAL void | launch_global (L f0) |
|
template<int amrex_launch_bounds_max_threads, class L > |
| __launch_bounds__ (amrex_launch_bounds_max_threads) AMREX_GPU_GLOBAL void launch_global(L f0) |
|
template<int amrex_launch_bounds_max_threads, int min_blocks, class L > |
| __launch_bounds__ (amrex_launch_bounds_max_threads, min_blocks) AMREX_GPU_GLOBAL void launch_global(L f0) |
|
template<typename T , std::enable_if_t< std::is_integral_v< T >, int > = 0> |
bool | isEmpty (T n) noexcept |
|
template<int dim> |
AMREX_FORCE_INLINE bool | isEmpty (BoxND< dim > const &b) noexcept |
|
std::ostream & | operator<< (std::ostream &os, const dim3 &d) |
|
std::unique_ptr< iMultiFab > | OwnerMask (FabArrayBase const &mf, const Periodicity &period, const IntVect &ngrow) |
|
template<int dim> |
AMREX_GPU_HOST_DEVICE | IndexTypeND (const IntVectND< dim > &) -> IndexTypeND< dim > |
|
template<class... Args, std::enable_if_t< IsConvertible_v< IndexType::CellIndex, Args... >, int > = 0> |
AMREX_GPU_HOST_DEVICE | IndexTypeND (IndexType::CellIndex, Args...) -> IndexTypeND< sizeof...(Args)+1 > |
|
template<int dim> |
std::ostream & | operator<< (std::ostream &os, const IndexTypeND< dim > &it) |
| Write an IndexTypeND to an ostream in ASCII. More...
|
|
template<int dim> |
std::istream & | operator>> (std::istream &is, IndexTypeND< dim > &it) |
| Read an IndexTypeND from an istream. More...
|
|
template<int d, int... dims> |
AMREX_GPU_HOST_DEVICE constexpr AMREX_FORCE_INLINE IndexTypeND< detail::get_sum< d, dims... >()> | IndexTypeCat (const IndexTypeND< d > &v, const IndexTypeND< dims > &...vects) noexcept |
| Returns a IndexTypeND obtained by concatenating the input IndexTypeNDs. The dimension of the return value equals the sum of the dimensions of the inputted IndexTypeNDs. More...
|
|
template<int d, int... dims> |
AMREX_GPU_HOST_DEVICE constexpr AMREX_FORCE_INLINE GpuTuple< IndexTypeND< d >, IndexTypeND< dims >... > | IndexTypeSplit (const IndexTypeND< detail::get_sum< d, dims... >()> &v) noexcept |
| Returns a tuple of IndexTypeND obtained by splitting the input IndexTypeND according to the dimensions specified by the template arguments. More...
|
|
template<int new_dim, int old_dim> |
AMREX_GPU_HOST_DEVICE constexpr AMREX_FORCE_INLINE IndexTypeND< new_dim > | IndexTypeShrink (const IndexTypeND< old_dim > &v) noexcept |
| Returns a new IndexTypeND of size new_dim and assigns the first new_dim values of v to it. More...
|
|
template<int new_dim, int old_dim> |
AMREX_GPU_HOST_DEVICE constexpr AMREX_FORCE_INLINE IndexTypeND< new_dim > | IndexTypeExpand (const IndexTypeND< old_dim > &v, IndexType::CellIndex fill_extra=IndexType::CellIndex::CELL) noexcept |
| Returns a new IndexTypeND of size new_dim and assigns all values of iv to it and fill_extra to the remaining elements. More...
|
|
template<int new_dim, int old_dim> |
AMREX_GPU_HOST_DEVICE constexpr AMREX_FORCE_INLINE IndexTypeND< new_dim > | IndexTypeResize (const IndexTypeND< old_dim > &v, IndexType::CellIndex fill_extra=IndexType::CellIndex::CELL) noexcept |
| Returns a new IndexTypeND of size new_dim by either shrinking or expanding iv. More...
|
|
std::int16_t | swapBytes (std::int16_t val) |
|
std::int32_t | swapBytes (std::int32_t val) |
|
std::int64_t | swapBytes (std::int64_t val) |
|
std::uint16_t | swapBytes (std::uint16_t val) |
|
std::uint32_t | swapBytes (std::uint32_t val) |
|
std::uint64_t | swapBytes (std::uint64_t val) |
|
template<typename To , typename From > |
void | writeIntData (const From *data, std::size_t size, std::ostream &os, const amrex::IntDescriptor &id) |
|
template<typename To , typename From > |
void | readIntData (To *data, std::size_t size, std::istream &is, const amrex::IntDescriptor &id) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE int | coarsen (int i, int ratio) noexcept |
|
template<std::size_t dim> |
AMREX_GPU_HOST_DEVICE | IntVectND (const Array< int, dim > &) -> IntVectND< dim > |
|
template<class... Args, std::enable_if_t< IsConvertible_v< int, Args... >, int > = 0> |
AMREX_GPU_HOST_DEVICE | IntVectND (int, int, Args...) -> IntVectND< sizeof...(Args)+2 > |
|
template<int dim> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE IntVectND< dim > | operator+ (int s, const IntVectND< dim > &p) noexcept |
| Returns p + s. More...
|
|
template<int dim> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE IntVectND< dim > | operator- (int s, const IntVectND< dim > &p) noexcept |
| Returns -p + s. More...
|
|
template<int dim> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE IntVectND< dim > | operator* (int s, const IntVectND< dim > &p) noexcept |
| Returns p * s. More...
|
|
template<int dim> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE IntVectND< dim > | min (const IntVectND< dim > &p1, const IntVectND< dim > &p2) noexcept |
| Returns the IntVectND that is the component-wise minimum of two argument IntVectNDs. More...
|
|
template<int dim> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE IntVectND< dim > | elemwiseMin (const IntVectND< dim > &p1, const IntVectND< dim > &p2) noexcept |
|
template<int dim> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE IntVectND< dim > | max (const IntVectND< dim > &p1, const IntVectND< dim > &p2) noexcept |
| Returns the IntVectND that is the component-wise maximum of two argument IntVectNDs. More...
|
|
template<int dim> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE IntVectND< dim > | elemwiseMax (const IntVectND< dim > &p1, const IntVectND< dim > &p2) noexcept |
|
template<int dim = AMREX_SPACEDIM> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE IntVectND< dim > | BASISV (int dir) noexcept |
| Returns a basis vector in the given coordinate direction; e.g., IntVectND<3> BASISV<3>(1) == (0,1,0). Note that the coordinate directions are zero-based. More...
|
|
template<int dim> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE IntVectND< dim > | scale (const IntVectND< dim > &p, int s) noexcept |
| Returns a IntVectND obtained by multiplying each of the components of this IntVectND by s. More...
|
|
template<int dim> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE IntVectND< dim > | reflect (const IntVectND< dim > &a, int ref_ix, int idir) noexcept |
| Returns an IntVectND that is the reflection of input in the plane which passes through ref_ix and normal to the coordinate direction idir. More...
|
|
template<int dim> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE IntVectND< dim > | diagShift (const IntVectND< dim > &p, int s) noexcept |
| Returns IntVectND obtained by adding s to each of the components of this IntVectND. More...
|
|
template<int dim> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE IntVectND< dim > | coarsen (const IntVectND< dim > &p, int s) noexcept |
| Returns an IntVectND that is the component-wise integer projection of p by s. More...
|
|
template<int dim> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE IntVectND< dim > | coarsen (const IntVectND< dim > &p1, const IntVectND< dim > &p2) noexcept |
| Returns an IntVectND which is the component-wise integer projection of IntVectND p1 by IntVectND p2. More...
|
|
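As a small worked example of the coarsen helpers (assuming AMREX_SPACEDIM == 3; the values are illustrative only):

    amrex::IntVect fine_cell(7, 13, 21);
    // Component-wise integer projection, rounding toward minus infinity:
    amrex::IntVect c1 = amrex::coarsen(fine_cell, 2);                       // (3, 6, 10)
    amrex::IntVect c2 = amrex::coarsen(fine_cell, amrex::IntVect(2, 2, 4)); // (3, 6, 5)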
template<int dim, std::enable_if_t<(1<=dim &&dim<=3), int > = 0> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Dim3 | refine (Dim3 const &coarse, IntVectND< dim > const &ratio) noexcept |
|
template<int dim, std::enable_if_t<(1<=dim &&dim<=3), int > = 0> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Dim3 | coarsen (Dim3 const &fine, IntVectND< dim > const &ratio) noexcept |
|
template<int dim> |
std::ostream & | operator<< (std::ostream &os, const IntVectND< dim > &iv) |
|
template<int dim> |
std::istream & | operator>> (std::istream &is, IntVectND< dim > &iv) |
|
template<int d, int... dims> |
AMREX_GPU_HOST_DEVICE constexpr AMREX_FORCE_INLINE IntVectND< detail::get_sum< d, dims... >()> | IntVectCat (const IntVectND< d > &v, const IntVectND< dims > &...vects) noexcept |
| Returns a IntVectND obtained by concatenating the input IntVectNDs. The dimension of the return value equals the sum of the dimensions of the inputted IntVectNDs. More...
|
|
template<int d, int... dims> |
AMREX_GPU_HOST_DEVICE constexpr AMREX_FORCE_INLINE GpuTuple< IntVectND< d >, IntVectND< dims >... > | IntVectSplit (const IntVectND< detail::get_sum< d, dims... >()> &v) noexcept |
| Returns a tuple of IntVectND obtained by splitting the input IntVectND according to the dimensions specified by the template arguments. More...
|
|
template<int new_dim, int old_dim> |
AMREX_GPU_HOST_DEVICE constexpr AMREX_FORCE_INLINE IntVectND< new_dim > | IntVectShrink (const IntVectND< old_dim > &iv) noexcept |
| Returns a new IntVectND of size new_dim and assigns the first new_dim values of iv to it. More...
|
|
template<int new_dim, int old_dim> |
AMREX_GPU_HOST_DEVICE constexpr AMREX_FORCE_INLINE IntVectND< new_dim > | IntVectExpand (const IntVectND< old_dim > &iv, int fill_extra=0) noexcept |
| Returns a new IntVectND of size new_dim and assigns all values of iv to it and fill_extra to the remaining elements. More...
|
|
template<int new_dim, int old_dim> |
AMREX_GPU_HOST_DEVICE constexpr AMREX_FORCE_INLINE IntVectND< new_dim > | IntVectResize (const IntVectND< old_dim > &iv, int fill_extra=0) noexcept |
| Returns a new IntVectND of size new_dim by either shrinking or expanding iv. More...
|
|
template<class F > |
AMREX_GPU_HOST_DEVICE AMREX_ATTRIBUTE_FLATTEN_FOR void | Loop (Dim3 lo, Dim3 hi, F const &f) noexcept |
|
template<class F > |
AMREX_GPU_HOST_DEVICE AMREX_ATTRIBUTE_FLATTEN_FOR void | Loop (Dim3 lo, Dim3 hi, int ncomp, F const &f) noexcept |
|
template<class F > |
AMREX_GPU_HOST_DEVICE AMREX_ATTRIBUTE_FLATTEN_FOR void | LoopConcurrent (Dim3 lo, Dim3 hi, F const &f) noexcept |
|
template<class F > |
AMREX_GPU_HOST_DEVICE AMREX_ATTRIBUTE_FLATTEN_FOR void | LoopConcurrent (Dim3 lo, Dim3 hi, int ncomp, F const &f) noexcept |
|
template<class F , int dim> |
AMREX_GPU_HOST_DEVICE AMREX_ATTRIBUTE_FLATTEN_FOR void | Loop (BoxND< dim > const &bx, F const &f) noexcept |
|
template<class F , int dim> |
AMREX_GPU_HOST_DEVICE AMREX_ATTRIBUTE_FLATTEN_FOR void | Loop (BoxND< dim > const &bx, int ncomp, F const &f) noexcept |
|
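A minimal sketch of host-side Loop usage (assuming an existing Box bx and an Array4<Real> a; the loop body is arbitrary):

    amrex::Loop(bx, [&] (int i, int j, int k)
    {
        a(i,j,k) += 1.0;   // visits every cell of bx
    });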
template<class F , int dim> |
AMREX_GPU_HOST_DEVICE AMREX_ATTRIBUTE_FLATTEN_FOR void | LoopConcurrent (BoxND< dim > const &bx, F const &f) noexcept |
|
template<class F , int dim> |
AMREX_GPU_HOST_DEVICE AMREX_ATTRIBUTE_FLATTEN_FOR void | LoopConcurrent (BoxND< dim > const &bx, int ncomp, F const &f) noexcept |
|
template<class F > |
AMREX_ATTRIBUTE_FLATTEN_FOR void | LoopOnCpu (Dim3 lo, Dim3 hi, F const &f) noexcept |
|
template<class F > |
AMREX_ATTRIBUTE_FLATTEN_FOR void | LoopOnCpu (Dim3 lo, Dim3 hi, int ncomp, F const &f) noexcept |
|
template<class F > |
AMREX_ATTRIBUTE_FLATTEN_FOR void | LoopConcurrentOnCpu (Dim3 lo, Dim3 hi, F const &f) noexcept |
|
template<class F > |
AMREX_ATTRIBUTE_FLATTEN_FOR void | LoopConcurrentOnCpu (Dim3 lo, Dim3 hi, int ncomp, F const &f) noexcept |
|
template<class F , int dim> |
AMREX_ATTRIBUTE_FLATTEN_FOR void | LoopOnCpu (BoxND< dim > const &bx, F const &f) noexcept |
|
template<class F , int dim> |
AMREX_ATTRIBUTE_FLATTEN_FOR void | LoopOnCpu (BoxND< dim > const &bx, int ncomp, F const &f) noexcept |
|
template<class F , int dim> |
AMREX_ATTRIBUTE_FLATTEN_FOR void | LoopConcurrentOnCpu (BoxND< dim > const &bx, F const &f) noexcept |
|
template<class F , int dim> |
AMREX_ATTRIBUTE_FLATTEN_FOR void | LoopConcurrentOnCpu (BoxND< dim > const &bx, int ncomp, F const &f) noexcept |
|
AMREX_GPU_HOST_DEVICE double | abs (double) |
|
AMREX_GPU_HOST_DEVICE float | abs (float) |
|
AMREX_GPU_HOST_DEVICE long double | abs (long double) |
|
AMREX_GPU_HOST_DEVICE int | abs (int) |
|
AMREX_GPU_HOST_DEVICE long | abs (long) |
|
AMREX_GPU_HOST_DEVICE long long | abs (long long) |
|
template<RunOn run_on, typename T , std::enable_if_t< std::is_same_v< T, double >||std::is_same_v< T, float >, int > FOO = 0> |
void | fill_snan (T *p, std::size_t nelems) |
|
std::ostream & | operator<< (std::ostream &os, const MemProfiler::Bytes &bytes) |
|
std::ostream & | operator<< (std::ostream &os, const MemProfiler::Builds &builds) |
|
void | InterpAddBox (MultiFabCopyDescriptor &fabCopyDesc, BoxList *returnUnfilledBoxes, Vector< FillBoxId > &returnedFillBoxIds, const Box &subbox, MultiFabId faid1, MultiFabId faid2, Real t1, Real t2, Real t, int src_comp, int dest_comp, int num_comp, bool extrap) |
|
void | InterpFillFab (MultiFabCopyDescriptor &fabCopyDesc, const Vector< FillBoxId > &fillBoxIds, MultiFabId faid1, MultiFabId faid2, FArrayBox &dest, Real t1, Real t2, Real t, int src_comp, int dest_comp, int num_comp, bool extrap) |
|
bool | TilingIfNotGPU () noexcept |
|
bool | isMFIterSafe (const FabArrayBase &x, const FabArrayBase &y) |
|
void | GccPlacaterMF () |
|
void | average_node_to_cellcenter (MultiFab &cc, int dcomp, const MultiFab &nd, int scomp, int ncomp, int ngrow=0) |
| Average nodal-based MultiFab onto cell-centered MultiFab. More...
|
|
void | average_edge_to_cellcenter (MultiFab &cc, int dcomp, const Vector< const MultiFab * > &edge, int ngrow=0) |
| Average edge-based MultiFab onto cell-centered MultiFab. More...
|
|
void | average_face_to_cellcenter (MultiFab &cc, int dcomp, const Vector< const MultiFab * > &fc, int ngrow=0) |
| Average face-based MultiFab onto cell-centered MultiFab. More...
|
|
void | average_face_to_cellcenter (MultiFab &cc, const Vector< const MultiFab * > &fc, const Geometry &geom) |
| Average face-based MultiFab onto cell-centered MultiFab with geometric weighting. More...
|
|
void | average_face_to_cellcenter (MultiFab &cc, const Array< const MultiFab *, AMREX_SPACEDIM > &fc, const Geometry &geom) |
| Average face-based MultiFab onto cell-centered MultiFab with geometric weighting. More...
|
|
void | average_cellcenter_to_face (const Vector< MultiFab * > &fc, const MultiFab &cc, const Geometry &geom, int ncomp=1, bool use_harmonic_averaging=false) |
| Average cell-centered MultiFab onto face-based MultiFab with geometric weighting. More...
|
|
void | average_cellcenter_to_face (const Array< MultiFab *, AMREX_SPACEDIM > &fc, const MultiFab &cc, const Geometry &geom, int ncomp=1, bool use_harmonic_averaging=false) |
| Average cell-centered MultiFab onto face-based MultiFab with geometric weighting. More...
|
|
void | average_down (const MultiFab &S_fine, MultiFab &S_crse, const Geometry &fgeom, const Geometry &cgeom, int scomp, int ncomp, int rr) |
|
void | average_down (const MultiFab &S_fine, MultiFab &S_crse, const Geometry &fgeom, const Geometry &cgeom, int scomp, int ncomp, const IntVect &ratio) |
| Volume weighted average of fine MultiFab onto coarse MultiFab. More...
|
|
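A typical average_down call, sketched under the assumption that S_fine and S_crse are existing MultiFabs with geometries fgeom and cgeom, and that ref_ratio is the fine-to-coarse refinement ratio:

    // Replace coarse data under the fine grids by the volume-weighted average
    // of the overlying fine data, for components [0, ncomp).
    amrex::average_down(S_fine, S_crse, fgeom, cgeom, 0, ncomp, ref_ratio);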
void | sum_fine_to_coarse (const MultiFab &S_fine, MultiFab &S_crse, int scomp, int ncomp, const IntVect &ratio, const Geometry &cgeom, const Geometry &) |
|
void | average_down_edges (const Vector< const MultiFab * > &fine, const Vector< MultiFab * > &crse, const IntVect &ratio, int ngcrse=0) |
| Average fine edge-based MultiFab onto crse edge-based MultiFab. More...
|
|
void | average_down_edges (const Array< const MultiFab *, AMREX_SPACEDIM > &fine, const Array< MultiFab *, AMREX_SPACEDIM > &crse, const IntVect &ratio, int ngcrse) |
|
void | average_down_edges (const MultiFab &fine, MultiFab &crse, const IntVect &ratio, int ngcrse) |
|
void | print_state (const MultiFab &mf, const IntVect &cell, int n=-1, const IntVect &ng=IntVect::TheZeroVector()) |
| Output state data for a single zone. More...
|
|
void | writeFabs (const MultiFab &mf, const std::string &name) |
| Write each fab individually. More...
|
|
void | writeFabs (const MultiFab &mf, int comp, int ncomp, const std::string &name) |
|
MultiFab | ToMultiFab (const iMultiFab &imf) |
| Convert iMultiFab to MultiFab. More...
|
|
FabArray< BaseFab< Long > > | ToLongMultiFab (const iMultiFab &imf) |
| Convert iMultiFab to a FabArray<BaseFab<Long>>. More...
|
|
std::unique_ptr< MultiFab > | get_slice_data (int dir, Real coord, const MultiFab &cc, const Geometry &geom, int start_comp, int ncomp, bool interpolate, RealBox const &bnd_rbx) |
|
iMultiFab | makeFineMask (const BoxArray &cba, const DistributionMapping &cdm, const BoxArray &fba, const IntVect &ratio, int crse_value, int fine_value) |
|
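A sketch of makeFineMask, assuming cba and cdm describe the coarse level and fba is the fine-level BoxArray:

    // Coarse cells covered by the fine BoxArray (coarsened by ratio) receive
    // fine_value (1); all other coarse cells receive crse_value (0).
    amrex::iMultiFab mask = amrex::makeFineMask(cba, cdm, fba, ratio, 0, 1);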
template<typename FAB > |
void | makeFineMask_doit (FabArray< FAB > &mask, const BoxArray &fba, const IntVect &ratio, Periodicity const &period, typename FAB::value_type crse_value, typename FAB::value_type fine_value) |
|
iMultiFab | makeFineMask (const BoxArray &cba, const DistributionMapping &cdm, const IntVect &cnghost, const BoxArray &fba, const IntVect &ratio, Periodicity const &period, int crse_value, int fine_value) |
|
MultiFab | makeFineMask (const BoxArray &cba, const DistributionMapping &cdm, const BoxArray &fba, const IntVect &ratio, Real crse_value, Real fine_value) |
|
void | computeDivergence (MultiFab &divu, const Array< MultiFab const *, AMREX_SPACEDIM > &umac, const Geometry &geom) |
| Computes divergence of face-data stored in the umac MultiFab. More...
|
|
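A sketch of computeDivergence, assuming umac is an Array<MultiFab,AMREX_SPACEDIM> of face-centered data and divu is a cell-centered MultiFab on the same grids:

    amrex::computeDivergence(divu, amrex::GetArrOfConstPtrs(umac), geom);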
void | computeGradient (MultiFab &grad, const Array< MultiFab const *, AMREX_SPACEDIM > &umac, const Geometry &geom) |
| Computes gradient of face-data stored in the umac MultiFab. More...
|
|
MultiFab | periodicShift (MultiFab const &mf, IntVect const &offset, Periodicity const &period) |
| Periodic shift MultiFab. More...
|
|
Gpu::HostVector< Real > | sumToLine (MultiFab const &mf, int icomp, int ncomp, Box const &domain, int direction, bool local=false) |
| Sum MultiFab data to line. More...
|
|
Real | volumeWeightedSum (Vector< MultiFab const * > const &mf, int icomp, Vector< Geometry > const &geom, Vector< IntVect > const &ratio, bool local=false) |
| Volume weighted sum for a vector of MultiFabs. More...
|
|
void | FourthOrderInterpFromFineToCoarse (MultiFab &cmf, int scomp, int ncomp, MultiFab const &fmf, IntVect const &ratio) |
| Fourth-order interpolation from fine to coarse level. More...
|
|
void | FillRandom (MultiFab &mf, int scomp, int ncomp) |
| Fill MultiFab with random numbers from a uniform distribution. More...
|
|
void | FillRandomNormal (MultiFab &mf, int scomp, int ncomp, Real mean, Real stddev) |
| Fill MultiFab with random numbers from a normal distribution. More...
|
|
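A minimal sketch of filling an existing MultiFab mf with random data:

    amrex::FillRandom(mf, 0, mf.nComp());                  // uniform on [0,1)
    amrex::FillRandomNormal(mf, 0, mf.nComp(), 0.0, 1.0);  // mean 0, stddev 1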
Vector< MultiFab > | convexify (Vector< MultiFab const * > const &mf, Vector< IntVect > const &refinement_ratio) |
| Convexify AMR data. More...
|
|
template<typename CMF , typename FMF , std::enable_if_t< IsFabArray_v< CMF > &&IsFabArray_v< FMF >, int > = 0> |
void | average_face_to_cellcenter (CMF &cc, int dcomp, const Array< const FMF *, AMREX_SPACEDIM > &fc, int ngrow=0) |
| Average face-based FabArray onto cell-centered FabArray. More...
|
|
template<typename MF , std::enable_if_t< IsFabArray< MF >::value, int > = 0> |
void | average_down_faces (const Vector< const MF * > &fine, const Vector< MF * > &crse, const IntVect &ratio, int ngcrse=0) |
| Average fine face-based FabArray onto crse face-based FabArray. More...
|
|
template<typename MF , std::enable_if_t< IsFabArray< MF >::value, int > = 0> |
void | average_down_faces (const Vector< const MF * > &fine, const Vector< MF * > &crse, int ratio, int ngcrse=0) |
| Average fine face-based FabArray onto crse face-based FabArray. More...
|
|
template<typename MF , std::enable_if_t< IsFabArray< MF >::value, int > = 0> |
void | average_down_faces (const Array< const MF *, AMREX_SPACEDIM > &fine, const Array< MF *, AMREX_SPACEDIM > &crse, const IntVect &ratio, int ngcrse=0) |
| Average fine face-based FabArray onto crse face-based FabArray. More...
|
|
template<typename MF , std::enable_if_t< IsFabArray< MF >::value, int > = 0> |
void | average_down_faces (const Array< const MF *, AMREX_SPACEDIM > &fine, const Array< MF *, AMREX_SPACEDIM > &crse, int ratio, int ngcrse=0) |
| Average fine face-based FabArray onto crse face-based FabArray. More...
|
|
template<typename FAB > |
void | average_down_faces (const FabArray< FAB > &fine, FabArray< FAB > &crse, const IntVect &ratio, int ngcrse=0) |
| This version does average down for one face direction. More...
|
|
template<typename MF , std::enable_if_t< IsFabArray< MF >::value, int > = 0> |
void | average_down_faces (const Array< const MF *, AMREX_SPACEDIM > &fine, const Array< MF *, AMREX_SPACEDIM > &crse, const IntVect &ratio, const Geometry &crse_geom) |
|
template<typename FAB > |
void | average_down_faces (const FabArray< FAB > &fine, FabArray< FAB > &crse, const IntVect &ratio, const Geometry &crse_geom) |
|
template<typename FAB > |
void | average_down_nodal (const FabArray< FAB > &S_fine, FabArray< FAB > &S_crse, const IntVect &ratio, int ngcrse=0, bool mfiter_is_definitely_safe=false) |
| Average fine node-based MultiFab onto crse node-centered MultiFab. More...
|
|
template<typename FAB > |
void | average_down (const FabArray< FAB > &S_fine, FabArray< FAB > &S_crse, int scomp, int ncomp, const IntVect &ratio) |
|
template<typename FAB > |
void | average_down (const FabArray< FAB > &S_fine, FabArray< FAB > &S_crse, int scomp, int ncomp, int rr) |
|
template<typename MF , std::enable_if_t< IsFabArray< MF >::value, int > FOO = 0> |
Vector< typename MF::value_type > | get_cell_data (MF const &mf, IntVect const &cell) |
| Get data in a cell of MultiFab/FabArray. More...
|
|
template<typename MF , std::enable_if_t< IsFabArray< MF >::value, int > FOO = 0> |
MF | get_line_data (MF const &mf, int dir, IntVect const &cell, Box const &bnd_bx=Box()) |
| Get data in a line of MultiFab/FabArray. More...
|
|
template<typename FAB > |
iMultiFab | makeFineMask (const FabArray< FAB > &cmf, const BoxArray &fba, const IntVect &ratio, int crse_value=0, int fine_value=1) |
|
template<typename FAB > |
iMultiFab | makeFineMask (const FabArray< FAB > &cmf, const BoxArray &fba, const IntVect &ratio, Periodicity const &period, int crse_value, int fine_value) |
|
template<typename FAB > |
iMultiFab | makeFineMask (const FabArray< FAB > &cmf, const FabArray< FAB > &fmf, const IntVect &cnghost, const IntVect &ratio, Periodicity const &period, int crse_value, int fine_value) |
|
template<typename FAB > |
iMultiFab | makeFineMask (const FabArray< FAB > &cmf, const FabArray< FAB > &fmf, const IntVect &cnghost, const IntVect &ratio, Periodicity const &period, int crse_value, int fine_value, LayoutData< int > &has_cf) |
|
template<typename T , typename U > |
T | cast (U const &mf_in) |
| example: auto mf = amrex::cast<MultiFab>(imf); More...
|
|
template<typename Op , typename T , typename FAB , typename F , std::enable_if_t< IsBaseFab< FAB >::value, int > FOO = 0> |
BaseFab< T > | ReduceToPlane (int direction, Box const &domain, FabArray< FAB > const &mf, F const &f) |
| Reduce FabArray/MultiFab data to a plane. More...
|
|
template<typename F > |
Real | NormHelper (const MultiFab &x, int xcomp, const MultiFab &y, int ycomp, F const &f, int numcomp, IntVect nghost, bool local) |
| Returns part of a norm based on two MultiFabs. More...
|
|
template<typename MMF , typename Pred , typename F > |
Real | NormHelper (const MMF &mask, const MultiFab &x, int xcomp, const MultiFab &y, int ycomp, Pred const &pf, F const &f, int numcomp, IntVect nghost, bool local) |
| Returns part of a norm based on three MultiFabs. More...
|
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | amrex_avg_nd_to_cc (int i, int, int, int n, Array4< Real > const &cc, Array4< Real const > const &nd, int cccomp, int ndcomp) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | amrex_avg_eg_to_cc (int i, int, int, Array4< Real > const &cc, Array4< Real const > const &Ex, int cccomp) noexcept |
|
template<typename CT , typename FT > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | amrex_avg_fc_to_cc (int i, int, int, Array4< CT > const &cc, Array4< FT const > const &fx, int cccomp, GeometryData const &gd) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | amrex_avg_cc_to_fc (int i, int, int, int n, Box const &xbx, Array4< Real > const &fx, Array4< Real const > const &cc, GeometryData const &gd, bool use_harmonic_averaging) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | amrex_avgdown_faces (Box const &bx, Array4< T > const &crse, Array4< T const > const &fine, int ccomp, int fcomp, int ncomp, IntVect const &ratio, int) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | amrex_avgdown_faces (int i, int, int, int n, Array4< T > const &crse, Array4< T const > const &fine, int ccomp, int fcomp, IntVect const &ratio, int) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | amrex_avgdown_edges (Box const &bx, Array4< Real > const &crse, Array4< Real const > const &fine, int ccomp, int fcomp, int ncomp, IntVect const &ratio, int) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | amrex_avgdown_edges (int i, int, int, int n, Array4< Real > const &crse, Array4< Real const > const &fine, int ccomp, int fcomp, IntVect const &ratio, int) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | amrex_avgdown (Box const &bx, Array4< T > const &crse, Array4< T const > const &fine, int ccomp, int fcomp, int ncomp, IntVect const &ratio) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | amrex_avgdown (int i, int, int, int n, Array4< T > const &crse, Array4< T const > const &fine, int ccomp, int fcomp, IntVect const &ratio) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | amrex_avgdown_with_vol (int i, int, int, int n, Array4< T > const &crse, Array4< T const > const &fine, Array4< T const > const &fv, int ccomp, int fcomp, IntVect const &ratio) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | amrex_avgdown_nodes (Box const &bx, Array4< T > const &crse, Array4< T const > const &fine, int ccomp, int fcomp, int ncomp, IntVect const &ratio) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | amrex_avgdown_nodes (int i, int, int, int n, Array4< T > const &crse, Array4< T const > const &fine, int ccomp, int fcomp, IntVect const &ratio) noexcept |
|
AMREX_GPU_HOST_DEVICE void | amrex_compute_divergence (Box const &bx, Array4< Real > const &divu, Array4< Real const > const &u, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
AMREX_GPU_HOST_DEVICE void | amrex_compute_gradient (Box const &bx, Array4< Real > const &grad, Array4< Real const > const &u, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | amrex_avg_eg_to_cc (int i, int j, int, Array4< Real > const &cc, Array4< Real const > const &Ex, Array4< Real const > const &Ey, int cccomp) noexcept |
|
template<typename CT , typename FT > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | amrex_avg_fc_to_cc (int i, int j, int, Array4< CT > const &cc, Array4< FT const > const &fx, Array4< FT const > const &fy, int cccomp) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | amrex_avg_cc_to_fc (int i, int j, int, int n, Box const &xbx, Box const &ybx, Array4< Real > const &fx, Array4< Real > const &fy, Array4< Real const > const &cc, bool use_harmonic_averaging) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | amrex_avgdown_with_vol (int i, int j, int, int n, Array4< Real > const &crse, Array4< Real const > const &fine, Array4< Real const > const &fv, int ccomp, int fcomp, IntVect const &ratio) noexcept |
|
AMREX_GPU_HOST_DEVICE void | amrex_compute_divergence (Box const &bx, Array4< Real > const &divu, Array4< Real const > const &u, Array4< Real const > const &v, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
AMREX_GPU_HOST_DEVICE void | amrex_compute_gradient (Box const &bx, Array4< Real > const &grad, Array4< Real const > const &u, Array4< Real const > const &v, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
AMREX_GPU_HOST_DEVICE void | amrex_compute_convective_difference (Box const &bx, Array4< amrex::Real > const &diff, Array4< Real const > const &u_face, Array4< Real const > const &v_face, Array4< Real const > const &s_on_x_face, Array4< Real const > const &s_on_y_face, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
AMREX_GPU_HOST_DEVICE void | amrex_compute_divergence_rz (Box const &bx, Array4< Real > const &divu, Array4< Real const > const &u, Array4< Real const > const &v, Array4< Real const > const &ax, Array4< Real const > const &ay, Array4< Real const > const &vol) noexcept |
|
AMREX_GPU_HOST_DEVICE void | amrex_compute_gradient_rz (Box const &bx, Array4< Real > const &grad, Array4< Real const > const &u, Array4< Real const > const &v, Array4< Real const > const &ax, Array4< Real const > const &ay, Array4< Real const > const &vol) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | amrex_avg_eg_to_cc (int i, int j, int k, Array4< Real > const &cc, Array4< Real const > const &Ex, Array4< Real const > const &Ey, Array4< Real const > const &Ez, int cccomp) noexcept |
|
template<typename CT , typename FT > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | amrex_avg_fc_to_cc (int i, int j, int k, Array4< CT > const &cc, Array4< FT const > const &fx, Array4< FT const > const &fy, Array4< FT const > const &fz, int cccomp) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | amrex_avg_cc_to_fc (int i, int j, int k, int n, Box const &xbx, Box const &ybx, Box const &zbx, Array4< Real > const &fx, Array4< Real > const &fy, Array4< Real > const &fz, Array4< Real const > const &cc, bool use_harmonic_averaging) noexcept |
|
AMREX_GPU_HOST_DEVICE void | amrex_compute_divergence (Box const &bx, Array4< Real > const &divu, Array4< Real const > const &u, Array4< Real const > const &v, Array4< Real const > const &w, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
AMREX_GPU_HOST_DEVICE void | amrex_compute_gradient (Box const &bx, Array4< Real > const &grad, Array4< Real const > const &u, Array4< Real const > const &v, Array4< Real const > const &w, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
AMREX_GPU_HOST_DEVICE void | amrex_compute_convective_difference (Box const &bx, Array4< Real > const &diff, Array4< Real const > const &u_face, Array4< Real const > const &v_face, Array4< Real const > const &w_face, Array4< Real const > const &s_on_x_face, Array4< Real const > const &s_on_y_face, Array4< Real const > const &s_on_z_face, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
AMREX_GPU_HOST_DEVICE void | amrex_fill_slice_interp (Box const &bx, Array4< Real > slice, Array4< Real const > const &full, int scomp, int fcomp, int ncomp, int dir, Real coord, GeometryData const &gd) noexcept |
|
int | numUniquePhysicalCores () |
|
std::ostream & | operator<< (std::ostream &os, const Orientation &o) |
| Write to an ostream in ASCII format. More...
|
|
std::istream & | operator>> (std::istream &is, Orientation &o) |
|
template<typename... Ops, typename... Ts, typename FAB , typename F , typename foo = std::enable_if_t<IsBaseFab<FAB>::value>> |
ReduceData< Ts... >::Type | ParReduce (TypeList< Ops... > operation_list, TypeList< Ts... > type_list, FabArray< FAB > const &fa, IntVect const &nghost, F &&f) |
| Parallel reduce for MultiFab/FabArray. More...
|
|
template<typename Op , typename T , typename FAB , typename F , typename foo = std::enable_if_t<IsBaseFab<FAB>::value>> |
T | ParReduce (TypeList< Op > operation_list, TypeList< T > type_list, FabArray< FAB > const &fa, IntVect const &nghost, F &&f) |
| Parallel reduce for MultiFab/FabArray. More...
|
|
template<typename... Ops, typename... Ts, typename FAB , typename F , typename foo = std::enable_if_t<IsBaseFab<FAB>::value>> |
ReduceData< Ts... >::Type | ParReduce (TypeList< Ops... > operation_list, TypeList< Ts... > type_list, FabArray< FAB > const &fa, IntVect const &nghost, int ncomp, F &&f) |
| Parallel reduce for MultiFab/FabArray. More...
|
|
template<typename Op , typename T , typename FAB , typename F , typename foo = std::enable_if_t<IsBaseFab<FAB>::value>> |
T | ParReduce (TypeList< Op > operation_list, TypeList< T > type_list, FabArray< FAB > const &fa, IntVect const &nghost, int ncomp, F &&f) |
| Parallel reduce for MultiFab/FabArray. More...
|
|
template<typename... Ops, typename... Ts, typename FAB , typename F , typename foo = std::enable_if_t<IsBaseFab<FAB>::value>> |
ReduceData< Ts... >::Type | ParReduce (TypeList< Ops... > operation_list, TypeList< Ts... > type_list, FabArray< FAB > const &fa, F &&f) |
| Parallel reduce for MultiFab/FabArray. More...
|
|
template<typename Op , typename T , typename FAB , typename F , typename foo = std::enable_if_t<IsBaseFab<FAB>::value>> |
T | ParReduce (TypeList< Op > operation_list, TypeList< T > type_list, FabArray< FAB > const &fa, F &&f) |
| Parallel reduce for MultiFab/FabArray. More...
|
|
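A sketch of ParReduce computing a single sum over the valid cells of a MultiFab mf (assuming one component; const_arrays() supplies the per-box Array4 views):

    auto const& ma = mf.const_arrays();
    amrex::Real total = amrex::ParReduce(
        amrex::TypeList<amrex::ReduceOpSum>{}, amrex::TypeList<amrex::Real>{},
        mf, amrex::IntVect(0),
        [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k)
            -> amrex::GpuTuple<amrex::Real>
        {
            return { ma[box_no](i,j,k) };
        });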
std::ostream & | pout () |
| the stream that all output except error msgs should use More...
|
|
void | setPoutBaseName (const std::string &a_Name) |
| Set the base name for the parallel output files used by pout(). More...
|
|
const std::string & | poutFileName () |
| return the current filename as used by pout() More...
|
|
template<typename T , typename F > |
int | Partition (T *data, int beg, int end, F &&f) |
| A GPU-capable partition function for contiguous data. More...
|
|
template<typename T , typename F > |
int | Partition (T *data, int n, F &&f) |
| A GPU-capable partition function for contiguous data. More...
|
|
template<typename T , typename F > |
int | Partition (Gpu::DeviceVector< T > &v, F &&f) |
| A GPU-capable partition function for contiguous data. More...
|
|
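A sketch of Partition on a Gpu::DeviceVector (the element type and predicate are illustrative):

    amrex::Gpu::DeviceVector<int> v(n);   // assume v has been filled elsewhere
    // Elements for which the predicate returns true are moved to the front;
    // the return value is the number of such elements.
    int num_positive = amrex::Partition(v, [=] AMREX_GPU_DEVICE (int x) { return x > 0; });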
template<typename T , typename F > |
int | StablePartition (T *data, int beg, int end, F &&f) |
| A GPU-capable partition function for contiguous data. More...
|
|
template<typename T , typename F > |
int | StablePartition (T *data, int n, F &&f) |
| A GPU-capable partition function for contiguous data. More...
|
|
template<typename T , typename F > |
int | StablePartition (Gpu::DeviceVector< T > &v, F &&f) |
| A GPU-capable partition function for contiguous data. More...
|
|
std::string | LevelPath (int level, const std::string &levelPrefix="Level_") |
| return the name of the level directory, e.g., Level_5 More...
|
|
std::string | MultiFabHeaderPath (int level, const std::string &levelPrefix="Level_", const std::string &mfPrefix="Cell") |
| return the path of the multifab to write to the header, e.g., Level_5/Cell More...
|
|
std::string | LevelFullPath (int level, const std::string &plotfilename, const std::string &levelPrefix="Level_") |
| return the full path of the level directory, e.g., plt00005/Level_5 More...
|
|
std::string | MultiFabFileFullPrefix (int level, const std::string &plotfilename, const std::string &levelPrefix="Level_", const std::string &mfPrefix="Cell") |
| return the full path multifab prefix, e.g., plt00005/Level_5/Cell More...
|
|
void | PreBuildDirectorHierarchy (const std::string &dirName, const std::string &subDirPrefix, int nSubDirs, bool callBarrier) |
| Prebuild a hierarchy of directories. dirName is built first; if dirName already exists, it is renamed. Then dirName/subDirPrefix_0 .. dirName/subDirPrefix_nSubDirs-1 are built. If callBarrier is true, ParallelDescriptor::Barrier() is called after all directories are built. ParallelDescriptor::IOProcessor() creates the directories. More...
|
|
void | WriteGenericPlotfileHeader (std::ostream &HeaderFile, int nlevels, const Vector< BoxArray > &bArray, const Vector< std::string > &varnames, const Vector< Geometry > &geom, Real time, const Vector< int > &level_steps, const Vector< IntVect > &ref_ratio, const std::string &versionName, const std::string &levelPrefix, const std::string &mfPrefix) |
|
void | WriteMultiLevelPlotfile (const std::string &plotfilename, int nlevels, const Vector< const MultiFab * > &mf, const Vector< std::string > &varnames, const Vector< Geometry > &geom, Real time, const Vector< int > &level_steps, const Vector< IntVect > &ref_ratio, const std::string &versionName, const std::string &levelPrefix, const std::string &mfPrefix, const Vector< std::string > &extra_dirs) |
|
void | WriteMLMF (const std::string &plotfilename, const Vector< const MultiFab * > &mf, const Vector< Geometry > &geom) |
| Write a plotfile to disk given a plotfile name, a vector of MultiFabs, and a vector of Geometrys. Variable names are written as "Var0", "Var1", etc.; the refinement ratio is computed from the Geometry vector; "time" and "level_steps" are set to zero. More...
|
|
void | WriteMultiLevelPlotfileHeaders (const std::string &plotfilename, int nlevels, const Vector< const MultiFab * > &mf, const Vector< std::string > &varnames, const Vector< Geometry > &geom, Real time, const Vector< int > &level_steps, const Vector< IntVect > &ref_ratio, const std::string &versionName, const std::string &levelPrefix, const std::string &mfPrefix, const Vector< std::string > &extra_dirs) |
|
void | WriteSingleLevelPlotfile (const std::string &plotfilename, const MultiFab &mf, const Vector< std::string > &varnames, const Geometry &geom, Real time, int level_step, const std::string &versionName, const std::string &levelPrefix, const std::string &mfPrefix, const Vector< std::string > &extra_dirs) |
|
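A sketch of writing a single-level plotfile (assuming an existing MultiFab mf, Geometry geom, time t, and step count istep; the remaining arguments of the overload above are left at the library's defaults):

    amrex::WriteSingleLevelPlotfile("plt00000", mf,
                                    {"density", "xvel", "yvel"},
                                    geom, t, istep);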
template<typename T > |
std::ostream & | operator<< (std::ostream &os, Array< T, AMREX_SPACEDIM > const &a) |
|
template<typename T , typename S > |
std::ostream & | operator<< (std::ostream &os, const std::pair< T, S > &v) |
|
void | InitRandom (ULong cpu_seed, int nprocs=ParallelDescriptor::NProcs(), ULong gpu_seed=detail::DefaultGpuSeed()) |
| Set the seed of the random number generator. More...
|
|
Real | RandomNormal (Real mean, Real stddev) |
| Generate a pseudo-random double from a normal distribution. More...
|
|
Real | Random () |
| Generate a pseudo-random double from a uniform distribution. More...
|
|
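A minimal sketch of host-side random-number usage (the seed choice is illustrative):

    // Give each MPI rank a distinct seed.
    amrex::InitRandom(42ULL + amrex::ParallelDescriptor::MyProc());
    amrex::Real u = amrex::Random();                // uniform on [0,1)
    amrex::Real g = amrex::RandomNormal(0.0, 1.0);  // mean 0, stddev 1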
unsigned int | RandomPoisson (Real lambda) |
| Generate a pseudo-random integer from a Poisson distribution. More...
|
|
Real | RandomGamma (Real alpha, Real beta) |
| Generate a pseudo-random floating point number from the Gamma distribution. More...
|
|
unsigned int | Random_int (unsigned int n) |
| Generates one pseudo-random unsigned integer, uniformly distributed on the interval [0, n-1], for each call. More...
|
|
ULong | Random_long (ULong n) |
| Generates one pseudo-random unsigned long, uniformly distributed on the interval [0, n-1], for each call. More...
|
|
void | SaveRandomState (std::ostream &os) |
| Save and restore random state. More...
|
|
void | RestoreRandomState (std::istream &is, int nthreads_old, int nstep_old) |
|
void | UniqueRandomSubset (Vector< int > &uSet, int setSize, int poolSize, bool printSet=false) |
| Create a unique subset of random numbers from a pool of integers in the range [0, poolSize - 1]. The set will be in the order the numbers are found. setSize must be <= poolSize, and uSet will be resized to setSize. If you want all processors to have the same set, call this on one processor and broadcast the array. More...
|
|
void | ResetRandomSeed (ULong cpu_seed, ULong gpu_seed) |
|
void | DeallocateRandomSeedDevArray () |
|
void | FillRandom (Real *p, Long N) |
|
void | FillRandomNormal (Real *p, Long N, Real mean, Real stddev) |
| Fill an array with random numbers from a normal distribution. More...
|
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | Random (RandomEngine const &random_engine) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | RandomNormal (Real mean, Real stddev, RandomEngine const &random_engine) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE unsigned int | RandomPoisson (Real lambda, RandomEngine const &random_engine) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | RandomGamma (Real alpha, Real beta, RandomEngine const &random_engine) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE unsigned int | Random_int (unsigned int n, RandomEngine const &random_engine) |
|
AMREX_FORCE_INLINE randState_t * | getRandState () |
|
std::ostream & | operator<< (std::ostream &, const RealBox &) |
| Nice ASCII output. More...
|
|
std::istream & | operator>> (std::istream &, RealBox &) |
| Nice ASCII input. More...
|
|
bool | AlmostEqual (const RealBox &box1, const RealBox &box2, Real eps=0.0) noexcept |
| Check for equality of real boxes within a certain tolerance. More...
|
|
std::ostream & | operator<< (std::ostream &ostr, const RealVect &p) |
|
std::istream & | operator>> (std::istream &is, RealVect &iv) |
|
AMREX_GPU_HOST_DEVICE RealVect | scale (const RealVect &p, Real s) noexcept |
|
AMREX_GPU_HOST_DEVICE RealVect | min (const RealVect &p1, const RealVect &p2) noexcept |
|
AMREX_GPU_HOST_DEVICE RealVect | max (const RealVect &p1, const RealVect &p2) noexcept |
|
AMREX_GPU_HOST_DEVICE RealVect | BASISREALV (int dir) noexcept |
|
AMREX_GPU_HOST_DEVICE RealVect | operator/ (Real s, const RealVect &p) noexcept |
|
AMREX_GPU_HOST_DEVICE RealVect | operator+ (Real s, const RealVect &p) noexcept |
|
AMREX_GPU_HOST_DEVICE RealVect | operator- (Real s, const RealVect &p) noexcept |
|
AMREX_GPU_HOST_DEVICE RealVect | operator* (Real s, const RealVect &p) noexcept |
|
AMREX_GPU_HOST_DEVICE RealVect | operator/ (const RealVect &s, const RealVect &p) noexcept |
|
AMREX_GPU_HOST_DEVICE RealVect | operator+ (const RealVect &s, const RealVect &p) noexcept |
|
AMREX_GPU_HOST_DEVICE RealVect | operator- (const RealVect &s, const RealVect &p) noexcept |
|
AMREX_GPU_HOST_DEVICE RealVect | operator* (const RealVect &s, const RealVect &p) noexcept |
|
template<typename... Ts, typename... Ps> |
constexpr AMREX_GPU_HOST_DEVICE GpuTuple< Ts... > | IdentityTuple (GpuTuple< Ts... >, ReduceOps< Ps... >) noexcept |
| Return a GpuTuple containing the identity element for each operation in ReduceOps. For example 0, +inf and -inf for ReduceOpSum, ReduceOpMin and ReduceOpMax respectively. More...
|
|
template<typename... Ts, typename... Ps> |
constexpr AMREX_GPU_HOST_DEVICE GpuTuple< Ts... > | IdentityTuple (GpuTuple< Ts... >, TypeList< Ps... >) noexcept |
| Return a GpuTuple containing the identity element for each ReduceOp in TypeList. For example 0, +inf and -inf for ReduceOpSum, ReduceOpMin and ReduceOpMax respectively. More...
|
|
AMREX_GPU_DEVICE AMREX_FORCE_INLINE Real | amrex_calc_xslope (int i, int j, int k, int n, int order, amrex::Array4< Real const > const &q) noexcept |
|
AMREX_GPU_DEVICE AMREX_FORCE_INLINE Real | amrex_calc_xslope_extdir (int i, int j, int k, int n, int order, amrex::Array4< Real const > const &q, bool edlo, bool edhi, int domlo, int domhi) noexcept |
|
AMREX_GPU_DEVICE AMREX_FORCE_INLINE Real | amrex_calc_yslope (int i, int j, int k, int n, int order, amrex::Array4< Real const > const &q) noexcept |
|
AMREX_GPU_DEVICE AMREX_FORCE_INLINE Real | amrex_calc_yslope_extdir (int i, int j, int k, int n, int order, amrex::Array4< Real const > const &q, bool edlo, bool edhi, int domlo, int domhi) noexcept |
|
template<class U , int N1, int N2, int N3, Order Ord, int SI> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE SmallMatrix< U, N1, N3, Ord, SI > | operator* (SmallMatrix< U, N1, N2, Ord, SI > const &lhs, SmallMatrix< U, N2, N3, Ord, SI > const &rhs) |
|
template<class T , int NRows, int NCols, Order ORDER, int SI> |
std::ostream & | operator<< (std::ostream &os, SmallMatrix< T, NRows, NCols, ORDER, SI > const &mat) |
|
std::string | toLower (std::string s) |
| Converts all characters of the string into lower case based on std::locale. More...
|
|
std::string | toUpper (std::string s) |
| Converts all characters of the string into uppercase based on std::locale. More...
|
|
std::string | trim (std::string s, std::string const &space) |
|
std::string | Concatenate (const std::string &root, int num, int mindigits=5) |
| Returns rootNNNN where NNNN == num. More...
|
|
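For example, with the default mindigits of 5:

    std::string plotname = amrex::Concatenate("plt", 42);    // "plt00042"
    std::string chkname  = amrex::Concatenate("chk", 42, 7); // "chk0000042"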
std::vector< std::string > | split (std::string const &s, std::string const &sep=" \t") |
| Split a string using the given separator tokens in sep. More...
|
|
template<class TagType , class F > |
std::enable_if_t< std::is_same< std::decay_t< decltype(std::declval< TagType >().box())>, Box >::value > | ParallelFor (Vector< TagType > const &tags, int ncomp, F &&f) |
|
template<class TagType , class F > |
std::enable_if_t< std::is_same< std::decay_t< decltype(std::declval< TagType >().box())>, Box >::value > | ParallelFor (Vector< TagType > const &tags, F &&f) |
|
template<class TagType , class F > |
std::enable_if_t< std::is_integral< std::decay_t< decltype(std::declval< TagType >().size())> >::value > | ParallelFor (Vector< TagType > const &tags, F &&f) |
|
template<std::size_t I, typename... Ts> |
constexpr AMREX_GPU_HOST_DEVICE GpuTupleElement< I, GpuTuple< Ts... > >::type & | get (GpuTuple< Ts... > &tup) noexcept |
|
template<std::size_t I, typename... Ts> |
constexpr AMREX_GPU_HOST_DEVICE GpuTupleElement< I, GpuTuple< Ts... > >::type const & | get (GpuTuple< Ts... > const &tup) noexcept |
|
template<std::size_t I, typename... Ts> |
constexpr AMREX_GPU_HOST_DEVICE GpuTupleElement< I, GpuTuple< Ts... > >::type && | get (GpuTuple< Ts... > &&tup) noexcept |
|
template<typename... Ts> |
constexpr AMREX_GPU_HOST_DEVICE GpuTuple< detail::tuple_decay_t< Ts >... > | makeTuple (Ts &&... args) |
|
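A small sketch of GpuTuple construction and element access:

    auto t = amrex::makeTuple(1, 2.5, 3.0f);
    int    a = amrex::get<0>(t);  // 1
    double b = amrex::get<1>(t);  // 2.5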
template<typename TP > |
constexpr AMREX_GPU_HOST_DEVICE auto | TupleCat (TP &&a) -> typename detail::tuple_cat_result< detail::tuple_decay_t< TP > >::type |
|
template<typename TP1 , typename TP2 > |
constexpr AMREX_GPU_HOST_DEVICE auto | TupleCat (TP1 &&a, TP2 &&b) -> typename detail::tuple_cat_result< detail::tuple_decay_t< TP1 >, detail::tuple_decay_t< TP2 > >::type |
|
template<typename TP1 , typename TP2 , typename... TPs> |
constexpr AMREX_GPU_HOST_DEVICE auto | TupleCat (TP1 &&a, TP2 &&b, TPs &&... args) -> typename detail::tuple_cat_result< detail::tuple_decay_t< TP1 >, detail::tuple_decay_t< TP2 >, detail::tuple_decay_t< TPs >... >::type |
|
template<std::size_t... Is, typename... Args> |
constexpr AMREX_GPU_HOST_DEVICE auto | TupleSplit (const GpuTuple< Args... > &tup) noexcept |
| Returns a GpuTuple of GpuTuples obtained by splitting the input GpuTuple according to the sizes specified by the template arguments. More...
|
|
template<typename F , typename TP > |
constexpr AMREX_GPU_HOST_DEVICE auto | Apply (F &&f, TP &&t) -> typename detail::apply_result< F, detail::tuple_decay_t< TP > >::type |
|
template<typename... Args> |
constexpr AMREX_GPU_HOST_DEVICE GpuTuple< Args &... > | Tie (Args &... args) noexcept |
|
template<typename... Ts> |
constexpr AMREX_GPU_HOST_DEVICE GpuTuple< Ts &&... > | ForwardAsTuple (Ts &&... args) noexcept |
|
template<typename... Ts> |
constexpr AMREX_GPU_HOST_DEVICE GpuTuple< Ts... > | MakeZeroTuple (GpuTuple< Ts... >) noexcept |
| Return a GpuTuple containing all zeros. Note that a default-constructed GpuTuple can have uninitialized values. More...
|
|
template<typename T > |
constexpr AMREX_GPU_HOST_DEVICE auto | tupleToArray (GpuTuple< T > const &tup) |
|
template<typename T , typename T2 , typename... Ts, std::enable_if_t< Same< T, T2, Ts... >::value, int > = 0> |
constexpr AMREX_GPU_HOST_DEVICE auto | tupleToArray (GpuTuple< T, T2, Ts... > const &tup) |
| Convert GpuTuple<T,T2,Ts...> to GpuArray. More...
|
|
template<typename... Ts, typename F > |
constexpr void | ForEach (TypeList< Ts... >, F &&f) |
| For each type t in TypeList, call f(t) More...
|
|
template<typename... Ts, typename F > |
constexpr bool | ForEachUntil (TypeList< Ts... >, F &&f) |
| For each type t in TypeList, call f(t) until true is returned. More...
|
|
template<typename... As, typename... Bs> |
constexpr auto | operator+ (TypeList< As... >, TypeList< Bs... >) |
| Concatenate two TypeLists. More...
|
|
template<typename... Ls, typename A > |
constexpr auto | single_product (TypeList< Ls... >, A) |
|
template<typename LLs , typename... As> |
constexpr auto | operator* (LLs, TypeList< As... >) |
|
template<typename... Ls> |
constexpr auto | CartesianProduct (Ls...) |
| Cartesian Product of TypeLists. More...
|
|
bool | is_integer (const char *str) |
| Return true if str represents an integer. More...
|
|
template<typename T > |
bool | is_it (std::string const &s, T &v) |
| Return true and store value in v if string s is type T. More...
|
|
const std::vector< std::string > & | Tokenize (const std::string &instr, const std::string &separators) |
| Splits "instr" into separate pieces based on "separators". More...
|
|
bool | UtilCreateDirectory (const std::string &path, mode_t mode, bool verbose=false) |
| Creates the specified directories. path may be either a full pathname or a relative pathname. It will create all the directories in the pathname, if they don't already exist, so that on successful return the pathname refers to an existing directory. Returns true or false depending upon whether or not it was successful. Also returns true if path is NULL or "/". mode is the mode passed to mkdir() for any directories that must be created (for example: 0755). verbose will print out the directory creation steps. More...
|
|
void | CreateDirectoryFailed (const std::string &dir) |
| Output a message and abort when couldn't create the directory. More...
|
|
void | FileOpenFailed (const std::string &file) |
| Output a message and abort when couldn't open the file. More...
|
|
bool | FileExists (const std::string &filename) |
| Check if a file already exists. Return true if the filename is an existing file, directory, or link. For links, this operates on the link and not what the link points to. More...
|
|
std::string | UniqueString () |
| Create a (probably) unique string. More...
|
|
void | UtilCreateCleanDirectory (const std::string &path, bool callbarrier=true) |
| Create a new directory, renaming the old one if it exists. More...
|
|
void | UtilCreateDirectoryDestructive (const std::string &path, bool callbarrier=true) |
|
void | UtilRenameDirectoryToOld (const std::string &path, bool callbarrier=true) |
| Rename a current directory if it exists. More...
|
|
void | OutOfMemory () |
| Aborts after printing message indicating out-of-memory; i.e. operator new has failed. This is the "supported" set_new_handler() function for AMReX applications. More...
|
|
double | InvNormDist (double p) |
| This function returns an approximation of the inverse cumulative standard normal distribution function. I.e., given P, it returns an approximation to the X satisfying P = Pr{Z <= X} where Z is a random variable from the standard normal distribution. More...
|
|
double | InvNormDistBest (double p) |
| This function returns an approximation of the inverse cumulative standard normal distribution function. I.e., given P, it returns an approximation to the X satisfying P = Pr{Z <= X} where Z is a random variable from the standard normal distribution. More...
|
|
int | CRRBetweenLevels (int fromlevel, int tolevel, const Vector< int > &refratios) |
|
std::istream & | operator>> (std::istream &, const expect &exp) |
|
Vector< char > | SerializeStringArray (const Vector< std::string > &stringArray) |
|
Vector< std::string > | UnSerializeStringArray (const Vector< char > &charArray) |
|
void | SyncStrings (const Vector< std::string > &localStrings, Vector< std::string > &syncedStrings, bool &alreadySynced) |
|
template<typename T > |
Long | bytesOf (const std::vector< T > &v) |
|
template<typename Key , typename T , class Compare > |
Long | bytesOf (const std::map< Key, T, Compare > &m) |
|
void | BroadcastBool (bool &bBool, int myLocalId, int rootId, const MPI_Comm &localComm) |
|
void | BroadcastString (std::string &bStr, int myLocalId, int rootId, const MPI_Comm &localComm) |
|
void | BroadcastStringArray (Vector< std::string > &bSA, int myLocalId, int rootId, const MPI_Comm &localComm) |
|
template<class T > |
void | BroadcastArray (Vector< T > &aT, int myLocalId, int rootId, const MPI_Comm &localComm) |
|
void | Sleep (double sleepsec) |
|
double | second () noexcept |
|
template<typename T > |
void | hash_combine (uint64_t &seed, const T &val) noexcept |
|
template<typename T > |
uint64_t | hash_vector (const Vector< T > &vec, uint64_t seed=0xDEADBEEFDEADBEEF) noexcept |
|
template<class T , typename = typename T::FABType> |
Vector< T * > | GetVecOfPtrs (Vector< T > &a) |
|
template<class T > |
Vector< T * > | GetVecOfPtrs (const Vector< std::unique_ptr< T > > &a) |
|
template<class T , typename = typename T::FABType> |
Vector< const T * > | GetVecOfConstPtrs (const Vector< T > &a) |
|
template<class T > |
Vector< const T * > | GetVecOfConstPtrs (const Vector< std::unique_ptr< T > > &a) |
|
template<class T , typename = typename T::FABType> |
Vector< const T * > | GetVecOfConstPtrs (const Vector< T * > &a) |
|
template<class T > |
Vector< Vector< T * > > | GetVecOfVecOfPtrs (const Vector< Vector< std::unique_ptr< T > > > &a) |
|
template<class T > |
Vector< std::array< T *, AMREX_SPACEDIM > > | GetVecOfArrOfPtrs (const Vector< std::array< std::unique_ptr< T >, AMREX_SPACEDIM > > &a) |
|
template<class T > |
Vector< std::array< T const *, AMREX_SPACEDIM > > | GetVecOfArrOfPtrsConst (const Vector< std::array< std::unique_ptr< T >, AMREX_SPACEDIM > > &a) |
|
template<class T > |
Vector< std::array< T const *, AMREX_SPACEDIM > > | GetVecOfArrOfConstPtrs (const Vector< std::array< std::unique_ptr< T >, AMREX_SPACEDIM > > &a) |
|
template<class T , std::enable_if_t< IsFabArray< T >::value||IsBaseFab< T >::value, int > = 0> |
Vector< std::array< T const *, AMREX_SPACEDIM > > | GetVecOfArrOfConstPtrs (const Vector< std::array< T, AMREX_SPACEDIM > > &a) |
|
template<class T , std::enable_if_t< IsFabArray< T >::value||IsBaseFab< T >::value, int > = 0> |
Vector< std::array< T *, AMREX_SPACEDIM > > | GetVecOfArrOfPtrs (Vector< std::array< T, AMREX_SPACEDIM > > &a) |
|
template<class T > |
void | FillNull (Vector< T * > &a) |
|
template<class T > |
void | FillNull (Vector< std::unique_ptr< T > > &a) |
|
template<class T > |
void | RemoveDuplicates (Vector< T > &vec) |
|
template<class T , class H > |
void | RemoveDuplicates (Vector< T > &vec) |
|
void | writeIntData (const int *data, std::size_t size, std::ostream &os, const IntDescriptor &id=FPC::NativeIntDescriptor()) |
| Functions for writing integer data to disk in a portable, self-describing manner. More...
|
|
void | readIntData (int *data, std::size_t size, std::istream &is, const IntDescriptor &id) |
|
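A minimal round-trip sketch for the portable integer I/O above (header locations are assumptions; real checkpoint code goes through the VisMF/NFiles machinery rather than bare streams):

    #include <AMReX_FPC.H>       // FPC native descriptors
    #include <AMReX_Utility.H>   // assumed header for writeIntData/readIntData
    #include <fstream>
    #include <vector>

    int main ()
    {
        std::vector<int> out = {1, 2, 3, 4};
        {
            std::ofstream ofs("ints.bin", std::ios::binary);
            amrex::writeIntData(out.data(), out.size(), ofs);   // NativeIntDescriptor by default
        }
        std::vector<int> in(out.size());
        std::ifstream ifs("ints.bin", std::ios::binary);
        amrex::readIntData(in.data(), in.size(), ifs, amrex::FPC::NativeIntDescriptor());
    }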
void | writeLongData (const Long *data, std::size_t size, std::ostream &os, const IntDescriptor &id=FPC::NativeLongDescriptor()) |
|
void | readLongData (Long *data, std::size_t size, std::istream &is, const IntDescriptor &id) |
|
void | writeRealData (const Real *data, std::size_t size, std::ostream &os, const RealDescriptor &rd=FPC::NativeRealDescriptor()) |
|
void | readRealData (Real *data, std::size_t size, std::istream &is, const RealDescriptor &rd) |
|
void | writeFloatData (const float *data, std::size_t size, std::ostream &os, const RealDescriptor &rd=FPC::Native32RealDescriptor()) |
|
void | readFloatData (float *data, std::size_t size, std::istream &is, const RealDescriptor &rd) |
|
void | writeDoubleData (const double *data, std::size_t size, std::ostream &os, const RealDescriptor &rd=FPC::Native64RealDescriptor()) |
|
void | readDoubleData (double *data, std::size_t size, std::istream &is, const RealDescriptor &rd) |
|
void | writeData (int const *data, std::size_t size, std::ostream &os) |
|
void | writeData (Long const *data, std::size_t size, std::ostream &os) |
|
void | writeData (float const *data, std::size_t size, std::ostream &os) |
|
void | writeData (double const *data, std::size_t size, std::ostream &os) |
|
void | readData (int *data, std::size_t size, std::istream &is) |
|
void | readData (Long *data, std::size_t size, std::istream &is) |
|
void | readData (float *data, std::size_t size, std::istream &is) |
|
void | readData (double *data, std::size_t size, std::istream &is) |
|
std::ostream & | operator<< (std::ostream &os, const VisMF::FabOnDisk &fod) |
| Write a FabOnDisk to an ostream in ASCII. More...
|
|
std::istream & | operator>> (std::istream &is, VisMF::FabOnDisk &fod) |
| Read a FabOnDisk from an istream. More...
|
|
std::ostream & | operator<< (std::ostream &os, const Vector< VisMF::FabOnDisk > &fa) |
| Write a Vector<FabOnDisk> to an ostream in ASCII. More...
|
|
std::istream & | operator>> (std::istream &is, Vector< VisMF::FabOnDisk > &fa) |
| Read a Vector<FabOnDisk> from an istream. More...
|
|
std::ostream & | operator<< (std::ostream &os, const VisMF::Header &hd) |
| Write a VisMF::Header to an ostream in ASCII. More...
|
|
std::istream & | operator>> (std::istream &is, VisMF::Header &hd) |
| Read a VisMF::Header from an istream. More...
|
|
template<typename FAB > |
std::enable_if_t< std::is_same_v< FAB, IArrayBox > > | Write (const FabArray< FAB > &fa, const std::string &name) |
| Write an iMultiFab/FabArray<IArrayBox>. More...
|
|
template<typename FAB > |
std::enable_if_t< std::is_same_v< FAB, IArrayBox > > | Read (FabArray< FAB > &fa, const std::string &name) |
| Read an iMultiFab/FabArray<IArrayBox>. More...
|
|
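A minimal sketch of the iMultiFab Write/Read pair above (the header location is an assumption, and it is assumed here that the destination FabArray is already defined on a compatible BoxArray/DistributionMapping):

    #include <AMReX_iMultiFab.H>
    #include <AMReX_VisMF.H>   // assumed header for these templates
    #include <string>

    void save_and_restore_mask (const amrex::iMultiFab& mask,
                                amrex::iMultiFab& restored,    // assumed to be defined compatibly
                                const std::string& name)
    {
        amrex::Write(mask, name);      // dump FabArray<IArrayBox> data under "name"
        amrex::Read(restored, name);   // read it back
    }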
void | iparser_compile_exe_size (struct iparser_node *node, char *&p, std::size_t &exe_size, int &max_stack_size, int &stack_size, Vector< char * > &local_variables) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE long long | iparser_exe_eval (const char *p, long long const *x) |
|
std::size_t | iparser_exe_size (struct amrex_iparser *parser, int &max_stack_size, int &stack_size) |
|
void | iparser_compile (struct amrex_iparser *parser, char *p) |
|
void | iparser_defexpr (struct iparser_node *body) |
|
struct iparser_symbol * | iparser_makesymbol (char *name) |
|
struct iparser_node * | iparser_newnode (enum iparser_node_t type, struct iparser_node *l, struct iparser_node *r) |
|
struct iparser_node * | iparser_newnumber (long long d) |
|
struct iparser_node * | iparser_newsymbol (struct iparser_symbol *symbol) |
|
struct iparser_node * | iparser_newf1 (enum iparser_f1_t ftype, struct iparser_node *l) |
|
struct iparser_node * | iparser_newf2 (enum iparser_f2_t ftype, struct iparser_node *l, struct iparser_node *r) |
|
struct iparser_node * | iparser_newf3 (enum iparser_f3_t ftype, struct iparser_node *n1, struct iparser_node *n2, struct iparser_node *n3) |
|
struct iparser_node * | iparser_newassign (struct iparser_symbol *sym, struct iparser_node *v) |
|
struct iparser_node * | iparser_newlist (struct iparser_node *nl, struct iparser_node *nr) |
|
struct amrex_iparser * | amrex_iparser_new () |
|
void | amrex_iparser_delete (struct amrex_iparser *iparser) |
|
struct amrex_iparser * | iparser_dup (struct amrex_iparser *source) |
|
std::size_t | iparser_ast_size (struct iparser_node *node) |
|
struct iparser_node * | iparser_ast_dup (struct amrex_iparser *my_iparser, struct iparser_node *node, int move) |
|
void | iparser_ast_optimize (struct iparser_node *node) |
|
void | iparser_ast_print (struct iparser_node *node, std::string const &space, AllPrint &printer) |
|
int | iparser_ast_depth (struct iparser_node *node) |
|
void | iparser_ast_regvar (struct iparser_node *node, char const *name, int i) |
|
void | iparser_ast_setconst (struct iparser_node *node, char const *name, long long c) |
|
void | iparser_ast_get_symbols (struct iparser_node *node, std::set< std::string > &symbols, std::set< std::string > &local_symbols) |
|
void | iparser_regvar (struct amrex_iparser *iparser, char const *name, int i) |
|
void | iparser_setconst (struct amrex_iparser *iparser, char const *name, long long c) |
|
void | iparser_print (struct amrex_iparser *iparser) |
|
std::set< std::string > | iparser_get_symbols (struct amrex_iparser *iparser) |
|
int | iparser_depth (struct amrex_iparser *iparser) |
|
long long | iparser_atoll (const char *str) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE long long | iparser_call_f1 (enum iparser_f1_t, long long a) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE long long | iparser_call_f2 (enum iparser_f2_t type, long long a, long long b) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE long long | iparser_call_f3 (enum iparser_f3_t, long long a, long long b, long long c) |
|
void | parser_compile_exe_size (struct parser_node *node, char *&p, std::size_t &exe_size, int &max_stack_size, int &stack_size, Vector< char const * > &local_variables) |
|
void | parser_exe_print (char const *p, Vector< std::string > const &vars, Vector< char const * > const &locals) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE double | parser_exe_eval (const char *p, double const *x) |
|
std::size_t | parser_exe_size (struct amrex_parser *parser, int &max_stack_size, int &stack_size) |
|
Vector< char const * > | parser_compile (struct amrex_parser *parser, char *p) |
|
void | parser_defexpr (struct parser_node *body) |
|
struct parser_symbol * | parser_makesymbol (char *name) |
|
struct parser_node * | parser_newnode (enum parser_node_t type, struct parser_node *l, struct parser_node *r) |
|
struct parser_node * | parser_newneg (struct parser_node *n) |
|
struct parser_node * | parser_newnumber (double d) |
|
struct parser_node * | parser_newsymbol (struct parser_symbol *symbol) |
|
struct parser_node * | parser_newf1 (enum parser_f1_t ftype, struct parser_node *l) |
|
struct parser_node * | parser_newf2 (enum parser_f2_t ftype, struct parser_node *l, struct parser_node *r) |
|
struct parser_node * | parser_newf3 (enum parser_f3_t ftype, struct parser_node *n1, struct parser_node *n2, struct parser_node *n3) |
|
struct parser_node * | parser_newassign (struct parser_symbol *sym, struct parser_node *v) |
|
struct parser_node * | parser_newlist (struct parser_node *nl, struct parser_node *nr) |
|
struct amrex_parser * | amrex_parser_new () |
|
void | amrex_parser_delete (struct amrex_parser *parser) |
|
struct amrex_parser * | parser_dup (struct amrex_parser *source) |
|
std::size_t | parser_ast_size (struct parser_node *node) |
|
struct parser_node * | parser_ast_dup (struct amrex_parser *my_parser, struct parser_node *node, int move) |
|
bool | parser_node_equal (struct parser_node *a, struct parser_node *b) |
|
void | parser_ast_optimize (struct parser_node *node) |
|
void | parser_ast_print (struct parser_node *node, std::string const &space, std::ostream &printer) |
|
int | parser_ast_depth (struct parser_node *node) |
|
void | parser_ast_sort (struct parser_node *node) |
|
void | parser_ast_regvar (struct parser_node *node, char const *name, int i) |
|
void | parser_ast_setconst (struct parser_node *node, char const *name, double c) |
|
void | parser_ast_get_symbols (struct parser_node *node, std::set< std::string > &symbols, std::set< std::string > &local_symbols) |
|
void | parser_regvar (struct amrex_parser *parser, char const *name, int i) |
|
void | parser_setconst (struct amrex_parser *parser, char const *name, double c) |
|
void | parser_print (struct amrex_parser *parser) |
|
std::set< std::string > | parser_get_symbols (struct amrex_parser *parser) |
|
int | parser_depth (struct amrex_parser *parser) |
|
double | parser_get_number (struct parser_node *node) |
|
void | parser_set_number (struct parser_node *node, double v) |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_NO_INLINE T | parser_math_exp (T a) |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_NO_INLINE T | parser_math_log (T a) |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_NO_INLINE T | parser_math_log10 (T a) |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_NO_INLINE T | parser_math_sin (T a) |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_NO_INLINE T | parser_math_cos (T a) |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_NO_INLINE T | parser_math_tan (T a) |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_NO_INLINE T | parser_math_asin (T a) |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_NO_INLINE T | parser_math_acos (T a) |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_NO_INLINE T | parser_math_atan (T a) |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_NO_INLINE T | parser_math_sinh (T a) |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_NO_INLINE T | parser_math_cosh (T a) |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_NO_INLINE T | parser_math_tanh (T a) |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_NO_INLINE T | parser_math_asinh (T a) |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_NO_INLINE T | parser_math_acosh (T a) |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_NO_INLINE T | parser_math_atanh (T a) |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE T | parser_math_comp_ellint_1 (T k) |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE T | parser_math_comp_ellint_2 (T k) |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_NO_INLINE T | parser_math_erf (T a) |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_NO_INLINE T | parser_math_pow (T a, T b) |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_NO_INLINE T | parser_math_atan2 (T a, T b) |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_NO_INLINE T | parser_math_jn (int a, T b) |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_NO_INLINE T | parser_math_yn (int a, T b) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE double | parser_call_f1 (enum parser_f1_t type, double a) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE double | parser_call_f2 (enum parser_f2_t type, double a, double b) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE double | parser_call_f3 (enum parser_f3_t, double a, double b, double c) |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | interpbndrydata_o1 (int i, int, int, int n, Array4< T > const &bdry, int nb, Array4< T const > const &crse, int nc, Dim3 const &r) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | interpbndrydata_x_o3 (int i, int, int, int n, Array4< T > const &bdry, int nb, Array4< T const > const &crse, int nc, Dim3 const &r, Array4< int const > const &, int, int) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | interpbndrydata_y_o3 (int i, int j, int, int n, Array4< T > const &bdry, int nb, Array4< T const > const &crse, int nc, Dim3 const &r, Array4< int const > const &mask, int not_covered, int max_width) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | interpbndrydata_z_o3 (int i, int j, int k, int n, Array4< T > const &bdry, int nb, Array4< T const > const &crse, int nc, Dim3 const &r, Array4< int const > const &mask, int not_covered, int) noexcept |
|
std::ostream & | operator<< (std::ostream &os, const LinOpBCType &t) |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | poly_interp_coeff (T xInt, T const *AMREX_RESTRICT x, int N, T *AMREX_RESTRICT c) noexcept |
|
template<int N, typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | poly_interp_coeff (T xInt, T const *AMREX_RESTRICT x, T *AMREX_RESTRICT c) noexcept |
|
std::ostream & | operator<< (std::ostream &os, const Mask &m) |
|
std::istream & | operator>> (std::istream &is, Mask &m) |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | yafluxreg_crseadd (Box const &bx, Array4< T > const &d, Array4< int const > const &flag, Array4< T const > const &fx, T dtdx, int nc) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | yafluxreg_fineadd (Box const &bx, Array4< T > const &d, Array4< T const > const &f, T dtdx, int nc, int dirside, Dim3 const &rr) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | yafluxreg_crseadd (Box const &bx, Array4< T > const &d, Array4< int const > const &flag, Array4< T const > const &fx, Array4< T const > const &fy, T dtdx, T dtdy, int nc) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | yafluxreg_crseadd (Box const &bx, Array4< T > const &d, Array4< int const > const &flag, Array4< T const > const &fx, Array4< T const > const &fy, Array4< T const > const &fz, T dtdx, T dtdy, T dtdz, int nc) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | decomp_chol_np6 (Array2D< Real, 0, 5, 0, 5 > &aa) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | cholsol_np6 (Array2D< Real, 0, 11, 0, 5 > &Amatrix, Array1D< Real, 0, 5 > &b) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | cholsol_for_eb (Array2D< Real, 0, 17, 0, 5 > &Amatrix, Array1D< Real, 0, 5 > &b) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | grad_x_of_phi_on_centroids (int i, int j, int k, int n, Array4< Real const > const &phi, Array4< Real const > const &phieb, Array4< EBCellFlag const > const &flag, Array4< Real const > const &ccent, Array4< Real const > const &bcent, Real &yloc_on_xface, bool is_eb_dirichlet, bool is_eb_inhomog) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | grad_y_of_phi_on_centroids (int i, int j, int k, int n, Array4< Real const > const &phi, Array4< Real const > const &phieb, Array4< EBCellFlag const > const &flag, Array4< Real const > const &ccent, Array4< Real const > const &bcent, Real &xloc_on_yface, bool is_eb_dirichlet, bool is_eb_inhomog) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | grad_eb_of_phi_on_centroids (int i, int j, int k, int n, Array4< Real const > const &phi, Array4< Real const > const &phieb, Array4< EBCellFlag const > const &flag, Array4< Real const > const &ccent, Array4< Real const > const &bcent, Real &nrmx, Real &nrmy, bool is_eb_inhomog) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | grad_x_of_phi_on_centroids_extdir (int i, int j, int k, int n, Array4< Real const > const &phi, Array4< Real const > const &phieb, Array4< EBCellFlag const > const &flag, Array4< Real const > const &ccent, Array4< Real const > const &bcent, Array4< Real const > const &vfrac, Real &yloc_on_xface, bool is_eb_dirichlet, bool is_eb_inhomog, const bool on_x_face, const int domlo_x, const int domhi_x, const bool on_y_face, const int domlo_y, const int domhi_y) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | grad_y_of_phi_on_centroids_extdir (int i, int j, int k, int n, Array4< Real const > const &phi, Array4< Real const > const &phieb, Array4< EBCellFlag const > const &flag, Array4< Real const > const &ccent, Array4< Real const > const &bcent, Array4< Real const > const &vfrac, Real &xloc_on_yface, bool is_eb_dirichlet, bool is_eb_inhomog, const bool on_x_face, const int domlo_x, const int domhi_x, const bool on_y_face, const int domlo_y, const int domhi_y) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | grad_eb_of_phi_on_centroids_extdir (int i, int j, int k, int n, Array4< Real const > const &phi, Array4< Real const > const &phieb, Array4< EBCellFlag const > const &flag, Array4< Real const > const &ccent, Array4< Real const > const &bcent, Array4< Real const > const &vfrac, Real &nrmx, Real &nrmy, bool is_eb_inhomog, const bool on_x_face, const int domlo_x, const int domhi_x, const bool on_y_face, const int domlo_y, const int domhi_y) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | decomp_chol_np10 (Array2D< Real, 0, 9, 0, 9 > &aa) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | cholsol_np10 (Array2D< Real, 0, 35, 0, 9 > &Amatrix, Array1D< Real, 0, 9 > &b) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | cholsol_for_eb (Array2D< Real, 0, 53, 0, 9 > &Amatrix, Array1D< Real, 0, 9 > &b) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | grad_x_of_phi_on_centroids (int i, int j, int k, int n, Array4< Real const > const &phi, Array4< Real const > const &phieb, Array4< EBCellFlag const > const &flag, Array4< Real const > const &ccent, Array4< Real const > const &bcent, Real &yloc_on_xface, Real &zloc_on_xface, bool is_eb_dirichlet, bool is_eb_inhomog) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | grad_y_of_phi_on_centroids (int i, int j, int k, int n, Array4< Real const > const &phi, Array4< Real const > const &phieb, Array4< EBCellFlag const > const &flag, Array4< Real const > const &ccent, Array4< Real const > const &bcent, Real &xloc_on_yface, Real &zloc_on_yface, bool is_eb_dirichlet, bool is_eb_inhomog) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | grad_z_of_phi_on_centroids (int i, int j, int k, int n, Array4< Real const > const &phi, Array4< Real const > const &phieb, Array4< EBCellFlag const > const &flag, Array4< Real const > const &ccent, Array4< Real const > const &bcent, Real &xloc_on_zface, Real &yloc_on_zface, bool is_eb_dirichlet, bool is_eb_inhomog) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | grad_eb_of_phi_on_centroids (int i, int j, int k, int n, Array4< Real const > const &phi, Array4< Real const > const &phieb, Array4< EBCellFlag const > const &flag, Array4< Real const > const &ccent, Array4< Real const > const &bcent, Real &nrmx, Real &nrmy, Real &nrmz, bool is_eb_inhomog) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | grad_x_of_phi_on_centroids_extdir (int i, int j, int k, int n, Array4< Real const > const &phi, Array4< Real const > const &phieb, Array4< EBCellFlag const > const &flag, Array4< Real const > const &ccent, Array4< Real const > const &bcent, Array4< Real const > const &vfrac, Real &yloc_on_xface, Real &zloc_on_xface, bool is_eb_dirichlet, bool is_eb_inhomog, const bool on_x_face, const int domlo_x, const int domhi_x, const bool on_y_face, const int domlo_y, const int domhi_y, const bool on_z_face, const int domlo_z, const int domhi_z) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | grad_y_of_phi_on_centroids_extdir (int i, int j, int k, int n, Array4< Real const > const &phi, Array4< Real const > const &phieb, Array4< EBCellFlag const > const &flag, Array4< Real const > const &ccent, Array4< Real const > const &bcent, Array4< Real const > const &vfrac, Real &xloc_on_yface, Real &zloc_on_yface, bool is_eb_dirichlet, bool is_eb_inhomog, const bool on_x_face, const int domlo_x, const int domhi_x, const bool on_y_face, const int domlo_y, const int domhi_y, const bool on_z_face, const int domlo_z, const int domhi_z) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | grad_z_of_phi_on_centroids_extdir (int i, int j, int k, int n, Array4< Real const > const &phi, Array4< Real const > const &phieb, Array4< EBCellFlag const > const &flag, Array4< Real const > const &ccent, Array4< Real const > const &bcent, Array4< Real const > const &vfrac, Real &xloc_on_zface, Real &yloc_on_zface, bool is_eb_dirichlet, bool is_eb_inhomog, const bool on_x_face, const int domlo_x, const int domhi_x, const bool on_y_face, const int domlo_y, const int domhi_y, const bool on_z_face, const int domlo_z, const int domhi_z) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | grad_eb_of_phi_on_centroids_extdir (int i, int j, int k, int n, Array4< Real const > const &phi, Array4< Real const > const &phieb, Array4< EBCellFlag const > const &flag, Array4< Real const > const &ccent, Array4< Real const > const &bcent, Array4< Real const > const &vfrac, Real &nrmx, Real &nrmy, Real &nrmz, bool is_eb_inhomog, const bool on_x_face, const int domlo_x, const int domhi_x, const bool on_y_face, const int domlo_y, const int domhi_y, const bool on_z_face, const int domlo_z, const int domhi_z) |
|
void | single_level_redistribute (amrex::MultiFab &div_tmp_in, amrex::MultiFab &div_out, int div_comp, int ncomp, const amrex::Geometry &geom) |
|
void | single_level_weighted_redistribute (amrex::MultiFab &div_tmp_in, amrex::MultiFab &div_out, const amrex::MultiFab &weights, int div_comp, int ncomp, const amrex::Geometry &geom, bool use_wts_in_divnc) |
|
void | apply_flux_redistribution (const amrex::Box &bx, amrex::Array4< amrex::Real > const &div, amrex::Array4< amrex::Real const > const &divc, amrex::Array4< amrex::Real const > const &wt, int icomp, int ncomp, amrex::Array4< amrex::EBCellFlag const > const &flag_arr, amrex::Array4< amrex::Real const > const &vfrac, const amrex::Geometry &geom, bool use_wts_in_divnc) |
|
void | amrex_flux_redistribute (const amrex::Box &bx, amrex::Array4< amrex::Real > const &dqdt, amrex::Array4< amrex::Real const > const &divc, amrex::Array4< amrex::Real const > const &wt, amrex::Array4< amrex::Real const > const &vfrac, amrex::Array4< amrex::EBCellFlag const > const &flag, int as_crse, amrex::Array4< amrex::Real > const &rr_drho_crse, amrex::Array4< int const > const &rr_flag_crse, int as_fine, amrex::Array4< amrex::Real > const &dm_as_fine, amrex::Array4< int const > const &levmsk, const amrex::Geometry &geom, bool use_wts_in_divnc, int level_mask_not_covered, int icomp, int ncomp, amrex::Real dt) |
|
void | ApplyRedistribution (amrex::Box const &bx, int ncomp, amrex::Array4< amrex::Real > const &dUdt_out, amrex::Array4< amrex::Real > const &dUdt_in, amrex::Array4< amrex::Real const > const &U_in, amrex::Array4< amrex::Real > const &scratch, amrex::Array4< amrex::EBCellFlag const > const &flag, AMREX_D_DECL(amrex::Array4< amrex::Real const > const &apx, amrex::Array4< amrex::Real const > const &apy, amrex::Array4< amrex::Real const > const &apz), amrex::Array4< amrex::Real const > const &vfrac, AMREX_D_DECL(amrex::Array4< amrex::Real const > const &fcx, amrex::Array4< amrex::Real const > const &fcy, amrex::Array4< amrex::Real const > const &fcz), amrex::Array4< amrex::Real const > const &ccc, amrex::BCRec const *d_bcrec_ptr, amrex::Geometry const &lev_geom, amrex::Real dt, std::string const &redistribution_type, bool use_wts_in_divnc=false, int srd_max_order=2, amrex::Real target_volfrac=0.5_rt, amrex::Array4< amrex::Real const > const &update_scale={}) |
|
void | ApplyMLRedistribution (amrex::Box const &bx, int ncomp, amrex::Array4< amrex::Real > const &dUdt_out, amrex::Array4< amrex::Real > const &dUdt_in, amrex::Array4< amrex::Real const > const &U_in, amrex::Array4< amrex::Real > const &scratch, amrex::Array4< amrex::EBCellFlag const > const &flag, AMREX_D_DECL(amrex::Array4< amrex::Real const > const &apx, amrex::Array4< amrex::Real const > const &apy, amrex::Array4< amrex::Real const > const &apz), amrex::Array4< amrex::Real const > const &vfrac, AMREX_D_DECL(amrex::Array4< amrex::Real const > const &fcx, amrex::Array4< amrex::Real const > const &fcy, amrex::Array4< amrex::Real const > const &fcz), amrex::Array4< amrex::Real const > const &ccc, amrex::BCRec const *d_bcrec_ptr, amrex::Geometry const &lev_geom, amrex::Real dt, std::string const &redistribution_type, int as_crse, amrex::Array4< amrex::Real > const &rr_drho_crse, amrex::Array4< int const > const &rr_flag_crse, int as_fine, amrex::Array4< amrex::Real > const &dm_as_fine, amrex::Array4< int const > const &levmsk, int level_mask_not_covered, amrex::Real fac_for_deltaR=1.0_rt, bool use_wts_in_divnc=false, int icomp=0, int srd_max_order=2, amrex::Real target_volfrac=0.5_rt, amrex::Array4< amrex::Real const > const &update_scale={}) |
|
void | ApplyInitialRedistribution (amrex::Box const &bx, int ncomp, amrex::Array4< amrex::Real > const &U_out, amrex::Array4< amrex::Real > const &U_in, amrex::Array4< amrex::EBCellFlag const > const &flag, AMREX_D_DECL(amrex::Array4< amrex::Real const > const &apx, amrex::Array4< amrex::Real const > const &apy, amrex::Array4< amrex::Real const > const &apz), amrex::Array4< amrex::Real const > const &vfrac, AMREX_D_DECL(amrex::Array4< amrex::Real const > const &fcx, amrex::Array4< amrex::Real const > const &fcy, amrex::Array4< amrex::Real const > const &fcz), amrex::Array4< amrex::Real const > const &ccc, amrex::BCRec const *d_bcrec_ptr, amrex::Geometry const &geom, std::string const &redistribution_type, int srd_max_order=2, amrex::Real target_volfrac=0.5_rt) |
|
void | StateRedistribute (amrex::Box const &bx, int ncomp, amrex::Array4< amrex::Real > const &U_out, amrex::Array4< amrex::Real > const &U_in, amrex::Array4< amrex::EBCellFlag const > const &flag, amrex::Array4< amrex::Real const > const &vfrac, AMREX_D_DECL(amrex::Array4< amrex::Real const > const &fcx, amrex::Array4< amrex::Real const > const &fcy, amrex::Array4< amrex::Real const > const &fcz), amrex::Array4< amrex::Real const > const &ccent, amrex::BCRec const *d_bcrec_ptr, amrex::Array4< int const > const &itracker, amrex::Array4< amrex::Real const > const &nrs, amrex::Array4< amrex::Real const > const &alpha, amrex::Array4< amrex::Real const > const &nbhd_vol, amrex::Array4< amrex::Real const > const ¢_hat, amrex::Geometry const &geom, int max_order=2) |
|
void | MLStateRedistribute (amrex::Box const &bx, int ncomp, amrex::Array4< amrex::Real > const &U_out, amrex::Array4< amrex::Real > const &U_in, amrex::Array4< amrex::EBCellFlag const > const &flag, amrex::Array4< amrex::Real const > const &vfrac, AMREX_D_DECL(amrex::Array4< amrex::Real const > const &fcx, amrex::Array4< amrex::Real const > const &fcy, amrex::Array4< amrex::Real const > const &fcz), amrex::Array4< amrex::Real const > const &ccent, amrex::BCRec const *d_bcrec_ptr, amrex::Array4< int const > const &itracker, amrex::Array4< amrex::Real const > const &nrs, amrex::Array4< amrex::Real const > const &alpha, amrex::Array4< amrex::Real const > const &nbhd_vol, amrex::Array4< amrex::Real const > const ¢_hat, amrex::Geometry const &geom, int as_crse, Array4< Real > const &drho_as_crse, Array4< int const > const &flag_as_crse, int as_fine, Array4< Real > const &dm_as_fine, Array4< int const > const &levmsk, int is_ghost_cell, amrex::Real fac_for_deltaR, int max_order=2) |
|
void | MakeITracker (amrex::Box const &bx, AMREX_D_DECL(amrex::Array4< amrex::Real const > const &apx, amrex::Array4< amrex::Real const > const &apy, amrex::Array4< amrex::Real const > const &apz), amrex::Array4< amrex::Real const > const &vfrac, amrex::Array4< int > const &itracker, amrex::Geometry const &geom, amrex::Real target_volfrac) |
|
void | MakeStateRedistUtils (amrex::Box const &bx, amrex::Array4< amrex::EBCellFlag const > const &flag, amrex::Array4< amrex::Real const > const &vfrac, amrex::Array4< amrex::Real const > const &ccent, amrex::Array4< int const > const &itracker, amrex::Array4< amrex::Real > const &nrs, amrex::Array4< amrex::Real > const &alpha, amrex::Array4< amrex::Real > const &nbhd_vol, amrex::Array4< amrex::Real > const ¢_hat, amrex::Geometry const &geom, amrex::Real target_volfrac) |
|
void | ApplyRedistribution (Box const &bx, int ncomp, Array4< Real > const &dUdt_out, Array4< Real > const &dUdt_in, Array4< Real const > const &U_in, Array4< Real > const &scratch, Array4< EBCellFlag const > const &flag, AMREX_D_DECL(Array4< Real const > const &apx, Array4< Real const > const &apy, Array4< Real const > const &apz), Array4< amrex::Real const > const &vfrac, AMREX_D_DECL(Array4< Real const > const &fcx, Array4< Real const > const &fcy, Array4< Real const > const &fcz), Array4< Real const > const &ccc, amrex::BCRec const *d_bcrec_ptr, Geometry const &lev_geom, Real dt, std::string const &redistribution_type, bool use_wts_in_divnc, int srd_max_order, amrex::Real target_volfrac, Array4< Real const > const &srd_update_scale) |
|
void | ApplyMLRedistribution (Box const &bx, int ncomp, Array4< Real > const &dUdt_out, Array4< Real > const &dUdt_in, Array4< Real const > const &U_in, Array4< Real > const &scratch, Array4< EBCellFlag const > const &flag, AMREX_D_DECL(Array4< Real const > const &apx, Array4< Real const > const &apy, Array4< Real const > const &apz), Array4< amrex::Real const > const &vfrac, AMREX_D_DECL(Array4< Real const > const &fcx, Array4< Real const > const &fcy, Array4< Real const > const &fcz), Array4< Real const > const &ccc, amrex::BCRec const *d_bcrec_ptr, Geometry const &lev_geom, Real dt, std::string const &redistribution_type, int as_crse, Array4< Real > const &rr_drho_crse, Array4< int const > const &rr_flag_crse, int as_fine, Array4< Real > const &dm_as_fine, Array4< int const > const &levmsk, int level_mask_not_covered, Real fac_for_deltaR, bool use_wts_in_divnc, int icomp, int srd_max_order, amrex::Real target_volfrac, Array4< Real const > const &srd_update_scale) |
|
void | ApplyInitialRedistribution (Box const &bx, int ncomp, Array4< Real > const &U_out, Array4< Real > const &U_in, Array4< EBCellFlag const > const &flag, AMREX_D_DECL(amrex::Array4< amrex::Real const > const &apx, amrex::Array4< amrex::Real const > const &apy, amrex::Array4< amrex::Real const > const &apz), amrex::Array4< amrex::Real const > const &vfrac, AMREX_D_DECL(amrex::Array4< amrex::Real const > const &fcx, amrex::Array4< amrex::Real const > const &fcy, amrex::Array4< amrex::Real const > const &fcz), amrex::Array4< amrex::Real const > const &ccc, amrex::BCRec const *d_bcrec_ptr, Geometry const &lev_geom, std::string const &redistribution_type, int srd_max_order, amrex::Real target_volfrac) |
|
AMREX_GPU_DEVICE AMREX_FORCE_INLINE amrex::Real | amrex_calc_alpha_stencil (Real q_hat, Real q_max, Real q_min, Real state) noexcept |
|
AMREX_GPU_DEVICE AMREX_FORCE_INLINE amrex::GpuArray< amrex::Real, AMREX_SPACEDIM > | amrex_calc_centroid_limiter (int i, int j, int k, int n, amrex::Array4< amrex::Real const > const &state, amrex::Array4< amrex::EBCellFlag const > const &flag, const amrex::GpuArray< amrex::Real, AMREX_SPACEDIM > &slopes, amrex::Array4< amrex::Real const > const &ccent) noexcept |
|
void | MakeStateRedistUtils (Box const &bx, Array4< EBCellFlag const > const &flag, Array4< Real const > const &vfrac, Array4< Real const > const &ccent, Array4< int const > const &itracker, Array4< Real > const &nrs, Array4< Real > const &alpha, Array4< Real > const &nbhd_vol, Array4< Real > const ¢_hat, Geometry const &lev_geom, Real target_vol) |
|
void | FillSignedDistance (MultiFab &mf, bool fluid_has_positive_sign=true) |
| Fill MultiFab with signed distance. More...
|
|
void | FillSignedDistance (MultiFab &mf, EB2::Level const &ls_lev, EBFArrayBoxFactory const &eb_fac, int refratio, bool fluid_has_positive_sign=true) |
| Fill MultiFab with signed distance. More...
|
|
template<typename G > |
void | FillImpFunc (MultiFab &mf, G const &gshop, Geometry const &geom) |
| Fill MultiFab with implicit function. More...
|
|
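A minimal sketch of FillImpFunc (requires an EB-enabled AMReX build; the FillImpFunc header location and the SphereIF sign convention are assumptions): build a GeometryShop from an implicit function and fill a cell-centered MultiFab with its values.

    #include <AMReX.H>
    #include <AMReX_MultiFab.H>
    #include <AMReX_EB2.H>
    #include <AMReX_EB2_IF_Sphere.H>
    #include <AMReX_EB_utils.H>   // assumed header for FillImpFunc

    int main (int argc, char* argv[])
    {
        amrex::Initialize(argc, argv);
        {
            using namespace amrex;

            Box domain(IntVect(AMREX_D_DECL(0,0,0)), IntVect(AMREX_D_DECL(31,31,31)));
            RealBox rb({AMREX_D_DECL(0.0,0.0,0.0)}, {AMREX_D_DECL(1.0,1.0,1.0)});
            Geometry geom(domain, rb, 0, {AMREX_D_DECL(0,0,0)});

            BoxArray ba(domain);
            ba.maxSize(16);
            DistributionMapping dm(ba);
            MultiFab mf(ba, dm, 1, 0);

            // Implicit function of a sphere of radius 0.25 centered in the unit box.
            EB2::SphereIF sphere(0.25, {AMREX_D_DECL(0.5,0.5,0.5)}, false);
            auto gshop = EB2::makeShop(sphere);
            FillImpFunc(mf, gshop, geom);   // mf now holds the implicit-function values
        }
        amrex::Finalize();
    }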
void | TagCutCells (TagBoxArray &tags, const MultiFab &state) |
|
void | TagVolfrac (TagBoxArray &tags, const MultiFab &volfrac, Real tol) |
|
std::ostream & | operator<< (std::ostream &os, const EBCellFlag &flag) |
|
std::unique_ptr< EBFArrayBoxFactory > | makeEBFabFactory (const Geometry &a_geom, const BoxArray &a_ba, const DistributionMapping &a_dm, const Vector< int > &a_ngrow, EBSupport a_support) |
|
std::unique_ptr< EBFArrayBoxFactory > | makeEBFabFactory (const EB2::Level *eb_level, const BoxArray &a_ba, const DistributionMapping &a_dm, const Vector< int > &a_ngrow, EBSupport a_support) |
|
std::unique_ptr< EBFArrayBoxFactory > | makeEBFabFactory (const EB2::IndexSpace *index_space, const Geometry &a_geom, const BoxArray &a_ba, const DistributionMapping &a_dm, const Vector< int > &a_ngrow, EBSupport a_support) |
|
const EBCellFlagFab & | getEBCellFlagFab (const FArrayBox &fab) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | eb_flux_reg_crseadd_va (int i, int j, int k, Array4< Real > const &d, Array4< int const > const &flag, Array4< Real const > const &fx, Array4< Real const > const &fy, Array4< Real const > const &vfrac, Array4< Real const > const &ax, Array4< Real const > const &ay, Real dtdx, Real dtdy, int ncomp) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | eb_flux_reg_cvol (int i, int j, Array4< Real const > const &vfrac, Dim3 const &ratio, Real threshold) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | eb_flux_reg_fineadd_va_xlo (int i, int j, int k, int n, Array4< Real > const &d, Array4< Real const > const &f, Array4< Real const > const &vfrac, Array4< Real const > const &a, Real fac, Dim3 const &ratio) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | eb_flux_reg_fineadd_va_xhi (int i, int j, int k, int n, Array4< Real > const &d, Array4< Real const > const &f, Array4< Real const > const &vfrac, Array4< Real const > const &a, Real fac, Dim3 const &ratio) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | eb_flux_reg_fineadd_va_ylo (int i, int j, int k, int n, Array4< Real > const &d, Array4< Real const > const &f, Array4< Real const > const &vfrac, Array4< Real const > const &a, Real fac, Dim3 const &ratio) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | eb_flux_reg_fineadd_va_yhi (int i, int j, int k, int n, Array4< Real > const &d, Array4< Real const > const &f, Array4< Real const > const &vfrac, Array4< Real const > const &a, Real fac, Dim3 const &ratio) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | eb_flux_reg_fineadd_dm (int i, int j, int k, int n, Box const &dmbx, Array4< Real > const &d, Array4< Real const > const &dm, Array4< Real const > const &vfrac, Dim3 const &ratio, Real threshold) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | eb_rereflux_from_crse (int i, int j, int k, int n, Box const &bx, Array4< Real > const &d, Array4< Real const > const &s, Array4< int const > const &amrflg, Array4< EBCellFlag const > const &ebflg, Array4< Real const > const &vfrac) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | eb_rereflux_to_fine (int i, int j, int, int n, Array4< Real > const &d, Array4< Real const > const &s, Array4< int const > const &msk, Dim3 ratio) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | eb_flux_reg_crseadd_va (int i, int j, int k, Array4< Real > const &d, Array4< int const > const &flag, Array4< Real const > const &fx, Array4< Real const > const &fy, Array4< Real const > const &fz, Array4< Real const > const &vfrac, Array4< Real const > const &ax, Array4< Real const > const &ay, Array4< Real const > const &az, Real dtdx, Real dtdy, Real dtdz, int ncomp) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | eb_flux_reg_cvol (int i, int j, int k, Array4< Real const > const &vfrac, Dim3 const &ratio, Real small) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | eb_flux_reg_fineadd_va_zlo (int i, int j, int k, int n, Array4< Real > const &d, Array4< Real const > const &f, Array4< Real const > const &vfrac, Array4< Real const > const &a, Real fac, Dim3 const &ratio) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | eb_flux_reg_fineadd_va_zhi (int i, int j, int k, int n, Array4< Real > const &d, Array4< Real const > const &f, Array4< Real const > const &vfrac, Array4< Real const > const &a, Real fac, Dim3 const &ratio) |
|
void | EB_set_covered (MultiFab &mf, Real val) |
|
void | EB_set_covered (MultiFab &mf, int icomp, int ncomp, int ngrow, Real val) |
|
void | EB_set_covered (MultiFab &mf, int icomp, int ncomp, const Vector< Real > &vals) |
|
void | EB_set_covered (MultiFab &mf, int icomp, int ncomp, int ngrow, const Vector< Real > &a_vals) |
|
void | EB_set_covered_faces (const Array< MultiFab *, AMREX_SPACEDIM > &umac, Real val) |
|
void | EB_set_covered_faces (const Array< MultiFab *, AMREX_SPACEDIM > &umac, const int scomp, const int ncomp, const Vector< Real > &a_vals) |
|
void | EB_average_down (const MultiFab &S_fine, MultiFab &S_crse, const MultiFab &vol_fine, const MultiFab &vfrac_fine, int scomp, int ncomp, const IntVect &ratio) |
|
void | EB_average_down (const MultiFab &S_fine, MultiFab &S_crse, int scomp, int ncomp, int ratio) |
|
void | EB_average_down (const MultiFab &S_fine, MultiFab &S_crse, int scomp, int ncomp, const IntVect &ratio) |
|
void | EB_average_down_faces (const Array< const MultiFab *, AMREX_SPACEDIM > &fine, const Array< MultiFab *, AMREX_SPACEDIM > &crse, int ratio, int ngcrse) |
|
void | EB_average_down_faces (const Array< const MultiFab *, AMREX_SPACEDIM > &fine, const Array< MultiFab *, AMREX_SPACEDIM > &crse, const IntVect &ratio, int ngcrse) |
|
void | EB_average_down_faces (const Array< const MultiFab *, AMREX_SPACEDIM > &fine, const Array< MultiFab *, AMREX_SPACEDIM > &crse, const IntVect &ratio, const Geometry &crse_geom) |
|
void | EB_average_down_boundaries (const MultiFab &fine, MultiFab &crse, int ratio, int ngcrse) |
|
void | EB_average_down_boundaries (const MultiFab &fine, MultiFab &crse, const IntVect &ratio, int ngcrse) |
|
void | EB_computeDivergence (MultiFab &divu, const Array< MultiFab const *, AMREX_SPACEDIM > &umac, const Geometry &geom, bool already_on_centroids) |
|
void | EB_computeDivergence (MultiFab &divu, const Array< MultiFab const *, AMREX_SPACEDIM > &umac, const Geometry &geom, bool already_on_centroids, const MultiFab &vel_eb) |
|
void | EB_average_face_to_cellcenter (MultiFab &ccmf, int dcomp, const Array< MultiFab const *, AMREX_SPACEDIM > &fmf) |
|
void | EB_interp_CC_to_Centroid (MultiFab ¢, const MultiFab &cc, int scomp, int dcomp, int ncomp, const Geometry &geom) |
|
void | EB_interp_CC_to_FaceCentroid (const MultiFab &cc, AMREX_D_DECL(MultiFab &fc_x, MultiFab &fc_y, MultiFab &fc_z), int scomp, int dcomp, int ncomp, const Geometry &a_geom, const Vector< BCRec > &a_bcs) |
|
void | EB_interp_CellCentroid_to_FaceCentroid (const MultiFab &phi_centroid, const Array< MultiFab *, AMREX_SPACEDIM > &phi_faces, int scomp, int dcomp, int nc, const Geometry &geom, const amrex::Vector< amrex::BCRec > &a_bcs) |
|
void | EB_interp_CellCentroid_to_FaceCentroid (const MultiFab &phi_centroid, const Vector< MultiFab * > &phi_faces, int scomp, int dcomp, int nc, const Geometry &geom, const amrex::Vector< amrex::BCRec > &a_bcs) |
|
void | EB_interp_CellCentroid_to_FaceCentroid (const MultiFab &phi_centroid, AMREX_D_DECL(MultiFab &phi_xface, MultiFab &phi_yface, MultiFab &phi_zface), int scomp, int dcomp, int ncomp, const Geometry &a_geom, const Vector< BCRec > &a_bcs) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | eb_set_covered_nodes (int i, int j, int k, int n, int icomp, Array4< Real > const &d, Array4< EBCellFlag const > const &f, Real v) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | eb_set_covered_nodes (int i, int j, int k, int n, int icomp, Array4< Real > const &d, Array4< EBCellFlag const > const &f, Real const *AMREX_RESTRICT v) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | eb_avgdown_with_vol (int i, int j, int k, Array4< Real const > const &fine, int fcomp, Array4< Real > const &crse, int ccomp, Array4< Real const > const &fv, Array4< Real const > const &vfrc, Dim3 const &ratio, int ncomp) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | eb_avgdown (int i, int j, int k, Array4< Real const > const &fine, int fcomp, Array4< Real > const &crse, int ccomp, Array4< Real const > const &vfrc, Dim3 const &ratio, int ncomp) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | eb_avgdown_face_x (int i, int j, int k, Array4< Real const > const &fine, int fcomp, Array4< Real > const &crse, int ccomp, Array4< Real const > const &area, Dim3 const &ratio, int ncomp) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | eb_avgdown_face_y (int i, int j, int k, Array4< Real const > const &fine, int fcomp, Array4< Real > const &crse, int ccomp, Array4< Real const > const &area, Dim3 const &ratio, int ncomp) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | eb_avgdown_boundaries (int i, int j, int k, Array4< Real const > const &fine, int fcomp, Array4< Real > const &crse, int ccomp, Array4< Real const > const &ba, Dim3 const &ratio, int ncomp) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | eb_compute_divergence (int i, int j, int k, int n, Array4< Real > const &divu, Array4< Real const > const &u, Array4< Real const > const &v, Array4< int const > const &ccm, Array4< EBCellFlag const > const &flag, Array4< Real const > const &vfrc, Array4< Real const > const &apx, Array4< Real const > const &apy, Array4< Real const > const &fcx, Array4< Real const > const &fcy, GpuArray< Real, 2 > const &dxinv, bool already_on_centroids) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | eb_avg_fc_to_cc (int i, int j, int k, int n, Array4< Real > const &cc, Array4< Real const > const &fx, Array4< Real const > const &fy, Array4< Real const > const &ax, Array4< Real const > const &ay, Array4< EBCellFlag const > const &flag) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | eb_interp_cc2cent (Box const &box, const Array4< Real > &phicent, Array4< Real const > const &phicc, Array4< EBCellFlag const > const &flag, Array4< Real const > const ¢, int ncomp) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | eb_interp_cc2facecent_x (Box const &ubx, Array4< Real const > const &phi, Array4< Real const > const &apx, Array4< Real const > const &fcx, Array4< Real > const &edg_x, int ncomp, const Box &domain, const BCRec *bc) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | eb_interp_cc2facecent_y (Box const &vbx, Array4< Real const > const &phi, Array4< Real const > const &apy, Array4< Real const > const &fcy, Array4< Real > const &edg_y, int ncomp, const Box &domain, const BCRec *bc) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | eb_interp_centroid2facecent_x (Box const &ubx, Array4< Real const > const &phi, Array4< Real const > const &apx, Array4< Real const > const &cvol, Array4< Real const > const &ccent, Array4< Real const > const &fcx, Array4< Real > const &edg_x, int ncomp, const Box &domain, const BCRec *bc) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | eb_interp_centroid2facecent_y (Box const &vbx, Array4< Real const > const &phi, Array4< Real const > const &apy, Array4< Real const > const &cvol, Array4< Real const > const &ccent, Array4< Real const > const &fcy, Array4< Real > const &edg_y, int ncomp, const Box &domain, const BCRec *bc) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | eb_interp_cc2face_x (Box const &ubx, Array4< Real const > const &phi, Array4< Real > const &edg_x, int ncomp, const Box &domain, const BCRec *bc) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | eb_interp_cc2face_y (Box const &vbx, Array4< Real const > const &phi, Array4< Real > const &edg_y, int ncomp, const Box &domain, const BCRec *bc) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | eb_add_divergence_from_flow (int i, int j, int k, int n, Array4< Real > const &divu, Array4< Real const > const &vel_eb, Array4< EBCellFlag const > const &flag, Array4< Real const > const &vfrc, Array4< Real const > const &bnorm, Array4< Real const > const &barea, GpuArray< Real, 2 > const &dxinv) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | EB_interp_in_quad (Real xint, Real yint, Real v0, Real v1, Real v2, Real v3, Real x0, Real y0, Real x1, Real y1, Real x2, Real y2, Real x3, Real y3) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | eb_avgdown_face_z (int i, int j, int k, Array4< Real const > const &fine, int fcomp, Array4< Real > const &crse, int ccomp, Array4< Real const > const &area, Dim3 const &ratio, int ncomp) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | eb_compute_divergence (int i, int j, int k, int n, Array4< Real > const &divu, Array4< Real const > const &u, Array4< Real const > const &v, Array4< Real const > const &w, Array4< int const > const &ccm, Array4< EBCellFlag const > const &flag, Array4< Real const > const &vfrc, Array4< Real const > const &apx, Array4< Real const > const &apy, Array4< Real const > const &apz, Array4< Real const > const &fcx, Array4< Real const > const &fcy, Array4< Real const > const &fcz, GpuArray< Real, 3 > const &dxinv, bool already_on_centroids) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | eb_avg_fc_to_cc (int i, int j, int k, int n, Array4< Real > const &cc, Array4< Real const > const &fx, Array4< Real const > const &fy, Array4< Real const > const &fz, Array4< Real const > const &ax, Array4< Real const > const &ay, Array4< Real const > const &az, Array4< EBCellFlag const > const &flag) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | eb_interp_cc2facecent_z (Box const &wbx, Array4< Real const > const &phi, Array4< Real const > const &apz, Array4< Real const > const &fcz, Array4< Real > const &edg_z, int ncomp, const Box &domain, const BCRec *bc) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | eb_interp_centroid2facecent_z (Box const &wbx, Array4< Real const > const &phi, Array4< Real const > const &apz, Array4< Real const > const &cvol, Array4< Real const > const &ccent, Array4< Real const > const &fcz, Array4< Real > const &phi_z, int ncomp, const Box &domain, const BCRec *bc) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | eb_interp_cc2face_z (Box const &wbx, Array4< Real const > const &phi, Array4< Real > const &edg_z, int ncomp, const Box &domain, const BCRec *bc) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | eb_add_divergence_from_flow (int i, int j, int k, int n, Array4< Real > const &divu, Array4< Real const > const &vel_eb, Array4< EBCellFlag const > const &flag, Array4< Real const > const &vfrc, Array4< Real const > const &bnorm, Array4< Real const > const &barea, GpuArray< Real, 3 > const &dxinv) |
|
void | WriteEBSurface (const BoxArray &ba, const DistributionMapping &dmap, const Geometry &geom, const EBFArrayBoxFactory *ebf) |
|
static std::string | thePlotFileType () |
|
void | writePlotFile (const std::string &dir, std::ostream &os, int level, const MultiFab &mf, const Geometry &geom, const IntVect &refRatio, Real bgVal, const Vector< std::string > &names) |
|
void | writePlotFile (const char *name, const MultiFab &mf, const Geometry &geom, const IntVect &refRatio, Real bgVal, const Vector< std::string > &names) |
|
void | WritePlotFile (const Vector< MultiFab * > &mfa, const Vector< Box > &probDomain, AmrData &amrdToMimic, const std::string &oFile, bool verbose, const Vector< std::string > &varNames) |
|
void | WritePlotFile (const Vector< MultiFab * > &mfa, AmrData &amrdToMimic, const std::string &oFile, bool verbose, const Vector< std::string > &varNames) |
|
void | writePlotFile (const char *name, const amrex::MultiFab &mf, const amrex::Geometry &geom, const amrex::IntVect &refRatio, amrex::Real bgVal, const amrex::Vector< std::string > &names) |
|
bool | Nestsets (const int level, const int n_levels, const FArrayBox &fab, const Vector< const BoxArray * > box_arrays, const Vector< IntVect > &ref_ratio, const Vector< int > &domain_offsets, conduit::Node &nestset) |
|
void | FabToBlueprintTopology (const Geometry &geom, const FArrayBox &fab, Node &res) |
|
void | AddFabGhostIndicatorField (const FArrayBox &fab, int ngrow, Node &res) |
|
void | FabToBlueprintFields (const FArrayBox &fab, const Vector< std::string > &varnames, Node &res) |
|
void | SingleLevelToBlueprint (const MultiFab &mf, const Vector< std::string > &varnames, const Geometry &geom, Real time_value, int level_step, Node &res) |
|
void | MultiLevelToBlueprint (int n_levels, const Vector< const MultiFab * > &mfs, const Vector< std::string > &varnames, const Vector< Geometry > &geoms, Real time_value, const Vector< int > &level_steps, const Vector< IntVect > &ref_ratio, Node &res) |
|
void | WriteBlueprintFiles (const conduit::Node &bp_mesh, const std::string &fname_base, int step, const std::string &protocol) |
|
void | SingleLevelToBlueprint (const MultiFab &mf, const Vector< std::string > &varnames, const Geometry &geom, Real time_value, int level_step, conduit::Node &bp_mesh) |
|
void | MultiLevelToBlueprint (int n_levels, const Vector< const MultiFab * > &mfs, const Vector< std::string > &varnames, const Vector< Geometry > &geoms, Real time_value, const Vector< int > &level_steps, const Vector< IntVect > &ref_ratio, conduit::Node &bp_mesh) |
|
template<typename ParticleType , int NArrayReal, int NArrayInt> |
void | ParticleTileToBlueprint (const ParticleTile< ParticleType, NArrayReal, NArrayInt > &ptile, const Vector< std::string > &real_comp_names, const Vector< std::string > &int_comp_names, conduit::Node &res, const std::string &topology_name) |
|
template<typename ParticleType , int NArrayReal, int NArrayInt> |
void | ParticleContainerToBlueprint (const ParticleContainer_impl< ParticleType, NArrayReal, NArrayInt > &pc, const Vector< std::string > &real_comp_names, const Vector< std::string > &int_comp_names, conduit::Node &res, const std::string &topology_name) |
|
static int | CreateWriteHDF5AttrDouble (hid_t loc, const char *name, hsize_t n, const double *data) |
|
static int | CreateWriteHDF5AttrInt (hid_t loc, const char *name, hsize_t n, const int *data) |
|
static int | CreateWriteHDF5AttrString (hid_t loc, const char *name, const char *str) |
|
static void | SetHDF5fapl (hid_t fapl, MPI_Comm comm) |
|
static void | WriteGenericPlotfileHeaderHDF5 (hid_t fid, int nlevels, const Vector< const MultiFab * > &mf, const Vector< BoxArray > &bArray, const Vector< std::string > &varnames, const Vector< Geometry > &geom, Real time, const Vector< int > &level_steps, const Vector< IntVect > &ref_ratio, const std::string &versionName, const std::string &levelPrefix, const std::string &mfPrefix, const Vector< std::string > &extra_dirs) |
|
void | WriteMultiLevelPlotfileHDF5SingleDset (const std::string &plotfilename, int nlevels, const Vector< const MultiFab * > &mf, const Vector< std::string > &varnames, const Vector< Geometry > &geom, Real time, const Vector< int > &level_steps, const Vector< IntVect > &ref_ratio, const std::string &compression, const std::string &versionName, const std::string &levelPrefix, const std::string &mfPrefix, const Vector< std::string > &extra_dirs) |
|
void | WriteMultiLevelPlotfileHDF5MultiDset (const std::string &plotfilename, int nlevels, const Vector< const MultiFab * > &mf, const Vector< std::string > &varnames, const Vector< Geometry > &geom, Real time, const Vector< int > &level_steps, const Vector< IntVect > &ref_ratio, const std::string &compression, const std::string &versionName, const std::string &levelPrefix, const std::string &mfPrefix, const Vector< std::string > &extra_dirs) |
|
void | WriteSingleLevelPlotfileHDF5 (const std::string &plotfilename, const MultiFab &mf, const Vector< std::string > &varnames, const Geometry &geom, Real time, int level_step, const std::string &compression, const std::string &versionName, const std::string &levelPrefix, const std::string &mfPrefix, const Vector< std::string > &extra_dirs) |
|
void | WriteSingleLevelPlotfileHDF5SingleDset (const std::string &plotfilename, const MultiFab &mf, const Vector< std::string > &varnames, const Geometry &geom, Real time, int level_step, const std::string &compression, const std::string &versionName, const std::string &levelPrefix, const std::string &mfPrefix, const Vector< std::string > &extra_dirs) |
|
void | WriteSingleLevelPlotfileHDF5MultiDset (const std::string &plotfilename, const MultiFab &mf, const Vector< std::string > &varnames, const Geometry &geom, Real time, int level_step, const std::string &compression, const std::string &versionName, const std::string &levelPrefix, const std::string &mfPrefix, const Vector< std::string > &extra_dirs) |
|
void | WriteMultiLevelPlotfileHDF5 (const std::string &plotfilename, int nlevels, const Vector< const MultiFab * > &mf, const Vector< std::string > &varnames, const Vector< Geometry > &geom, Real time, const Vector< int > &level_steps, const Vector< IntVect > &ref_ratio, const std::string &compression, const std::string &versionName, const std::string &levelPrefix, const std::string &mfPrefix, const Vector< std::string > &extra_dirs) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | habec_mat (GpuArray< Real, 2 *AMREX_SPACEDIM+1 > &sten, int i, int j, int k, Dim3 const &boxlo, Dim3 const &boxhi, Real sa, Array4< Real const > const &a, Real sb, GpuArray< Real, AMREX_SPACEDIM > const &dx, GpuArray< Array4< Real const >, AMREX_SPACEDIM > const &b, GpuArray< int, AMREX_SPACEDIM *2 > const &bctype, GpuArray< Real, AMREX_SPACEDIM *2 > const &bcl, int bho, GpuArray< Array4< int const >, AMREX_SPACEDIM *2 > const &msk, Array4< Real > const &diaginv) |
|
template<typename Int > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | habec_ijmat (GpuArray< Real, 2 *AMREX_SPACEDIM+1 > &sten, Array4< Int > const &ncols, Array4< Real > const &diaginv, int i, int j, int k, Array4< Int const > const &cell_id, Real sa, Array4< Real const > const &a, Real sb, GpuArray< Real, AMREX_SPACEDIM > const &dx, GpuArray< Array4< Real const >, AMREX_SPACEDIM > const &b, GpuArray< int, AMREX_SPACEDIM *2 > const &bctype, GpuArray< Real, AMREX_SPACEDIM *2 > const &bcl, int bho, Array4< int const > const &osm) |
|
template<typename Int > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | habec_cols (GpuArray< Int, 2 *AMREX_SPACEDIM+1 > &sten, int i, int j, int, Array4< Int const > const &cell_id) |
|
std::unique_ptr< Hypre > | makeHypre (const BoxArray &grids, const DistributionMapping &dmap, const Geometry &geom, MPI_Comm comm_, Hypre::Interface interface, const iMultiFab *overset_mask) |
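
makeHypre returns an owning pointer to the HYPRE wrapper selected by the Hypre::Interface argument (structured, semi-structured, or IJ), with a non-null overset mask presumably restricting the solve to unmasked cells; makePetsc further down follows the same factory pattern. A construction-only sketch, with the caveats noted in the comments:

    // Sketch only: obtain the HYPRE wrapper through the factory above. The header
    // name AMReX_Hypre.H and the enumerator Hypre::Interface::ij are assumptions;
    // coefficient setup and the actual solve go through the returned object and
    // are not shown here.
    #include <memory>
    #include <mpi.h>
    #include <AMReX_Hypre.H>

    std::unique_ptr<amrex::Hypre>
    make_ij_solver (amrex::BoxArray const& grids,
                    amrex::DistributionMapping const& dmap,
                    amrex::Geometry const& geom,
                    MPI_Comm comm)
    {
        return amrex::makeHypre(grids, dmap, geom, comm,
                                amrex::Hypre::Interface::ij,  // assumed enumerator name
                                nullptr);                     // no overset mask
    }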
|
AMREX_GPU_DEVICE AMREX_FORCE_INLINE void | hypmlabeclap_f2c_set_values (IntVect const &cell, Real *values, GpuArray< Real, AMREX_SPACEDIM > const &dx, Real sb, GpuArray< Array4< Real const >, AMREX_SPACEDIM > const &b, GpuArray< Array4< int const >, AMREX_SPACEDIM *2 > const &bmask, IntVect const &refratio, int not_covered) |
|
AMREX_GPU_DEVICE AMREX_FORCE_INLINE void | hypmlabeclap_c2f (int i, int j, int k, Array4< GpuArray< Real, 2 *AMREX_SPACEDIM+1 >> const &stencil, GpuArray< HYPRE_Int, AMREX_SPACEDIM > *civ, HYPRE_Int *nentries, int *entry_offset, Real *entry_values, Array4< int const > const &offset_from, Array4< int const > const &nentries_to, Array4< int const > const &offset_to, GpuArray< Real, AMREX_SPACEDIM > const &dx, Real sb, Array4< int const > const &offset_bx, Array4< int const > const &offset_by, Real const *bx, Real const *by, Array4< int const > const &fine_mask, IntVect const &rr, Array4< int const > const &crse_mask) |
|
AMREX_GPU_DEVICE AMREX_FORCE_INLINE void | hypmlabeclap_c2f (int i, int j, int k, Array4< GpuArray< Real, 2 *AMREX_SPACEDIM+1 >> const &stencil, GpuArray< HYPRE_Int, AMREX_SPACEDIM > *civ, HYPRE_Int *nentries, int *entry_offset, Real *entry_values, Array4< int const > const &offset_from, Array4< int const > const &nentries_to, Array4< int const > const &offset_to, GpuArray< Real, AMREX_SPACEDIM > const &dx, Real sb, Array4< int const > const &offset_bx, Array4< int const > const &offset_by, Array4< int const > const &offset_bz, Real const *bx, Real const *by, Real const *bz, Array4< int const > const &fine_mask, IntVect const &rr, Array4< int const > const &crse_mask) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | hypmlabeclap_mat (GpuArray< Real, 2 *AMREX_SPACEDIM+1 > &sten, int i, int j, int k, Dim3 const &boxlo, Dim3 const &boxhi, Real sa, Array4< Real const > const &a, Real sb, GpuArray< Real, AMREX_SPACEDIM > const &dx, GpuArray< Array4< Real const >, AMREX_SPACEDIM > const &b, GpuArray< int, AMREX_SPACEDIM *2 > const &bctype, GpuArray< Real, AMREX_SPACEDIM *2 > const &bcl, GpuArray< Array4< int const >, AMREX_SPACEDIM *2 > const &bcmsk, GpuArray< Array4< Real const >, AMREX_SPACEDIM *2 > const &bcval, GpuArray< Array4< Real >, AMREX_SPACEDIM *2 > const &bcrhs, int level, IntVect const &fixed_pt) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | hypmlabeclap_rhs (int i, int j, int k, Dim3 const &boxlo, Dim3 const &boxhi, Array4< Real > const &rhs1, Array4< Real const > const &rhs0, GpuArray< Array4< int const >, AMREX_SPACEDIM *2 > const &bcmsk, GpuArray< Array4< Real const >, AMREX_SPACEDIM *2 > const &bcrhs) |
|
std::unique_ptr< PETScABecLap > | makePetsc (const BoxArray &grids, const DistributionMapping &dmap, const Geometry &geom, MPI_Comm comm_) |
|
std::string | SanitizeName (const std::string &sname) |
|
void | SimpleRemoveOverlap (BoxArray &ba) |
|
void | avgDown_doit (const FArrayBox &fine_fab, FArrayBox &crse_fab, const Box &ovlp, int scomp, int dcomp, int ncomp, Vector< int > &ratio) |
|
Box | FixCoarseBoxSize (const Box &fineBox, int rr) |
|
void | avgDown (MultiFab &S_crse, MultiFab &S_fine, int scomp, int dcomp, int ncomp, Vector< int > &ratio) |
|
void | PrintTimeRangeList (const std::list< RegionsProfStats::TimeRange > &trList) |
|
void | RedistFiles () |
|
int | NHops (const Box &tbox, const IntVect &ivfrom, const IntVect &ivto) |
|
void | Write2DFab (const std::string &filenameprefix, const int xdim, const int ydim, const double *data) |
|
void | Write2DText (const std::string &filenameprefix, const int xdim, const int ydim, const double *data) |
|
void | Write3DFab (const std::string &filenameprefix, const int xdim, const int ydim, const int zdim, const double *data) |
|
void | WriteFab (const std::string &filenameprefix, const int xdim, const int ydim, const double *data) |
|
long | FileSize (const std::string &filename) |
|
void | MakeFuncPctTimesMF (const Vector< Vector< BLProfStats::FuncStat > > &funcStats, const Vector< std::string > &blpFNames, const std::map< std::string, BLProfiler::ProfStats > &mProfStats, Real runTime, int dataNProcs) |
|
void | CollectMProfStats (std::map< std::string, BLProfiler::ProfStats > &mProfStats, const Vector< Vector< BLProfStats::FuncStat > > &funcStats, const Vector< std::string > &fNames, Real runTime, int whichProc) |
|
void | GraphTopPct (const std::map< std::string, BLProfiler::ProfStats > &mProfStats, const Vector< Vector< BLProfStats::FuncStat > > &funcStats, const Vector< std::string > &fNames, Real runTime, int dataNProcs, Real gPercent) |
|
void | WritePlotfile (const std::string &pfversion, const Vector< MultiFab > &data, const Real time, const Vector< Real > &probLo, const Vector< Real > &probHi, const Vector< int > &refRatio, const Vector< Box > &probDomain, const Vector< Vector< Real > > &dxLevel, const int coordSys, const std::string &oFile, const Vector< std::string > &names, const bool verbose, const bool isCartGrid, const Real *vfeps, const int *levelSteps) |
|
std::string | VisMFBaseName (const std::string &filename) |
|
void | Write2DBoxFrom3D (const Box &box, std::ostream &os, int whichPlane) |
|
VisMF::FabOnDisk | VisMFWrite (const FArrayBox &fabIn, const std::string &filename, std::ostream &os, long &bytes, int whichPlane) |
|
static std::ostream & | operator<< (std::ostream &os, const Vector< Vector< Real > > &ar) |
|
long | VisMFWriteHeader (const std::string &mf_name, VisMF::Header &hdr, int whichPlane) |
|
void | WritePlotfile2DFrom3D (const std::string &pfversion, const Vector< MultiFab > &data, const Real time, const Vector< Real > &probLo, const Vector< Real > &probHi, const Vector< int > &refRatio, const Vector< Box > &probDomain, const Vector< Vector< Real > > &dxLevel, const int coordSys, const std::string &oFile, const Vector< std::string > &names, const bool verbose, const bool isCartGrid, const Real *vfeps, const int *levelSteps) |
|
| senseiNewMacro (AmrDataAdaptor) |
|
| senseiNewMacro (AmrMeshDataAdaptor) |
|
template<typename V1 , typename F > |
std::enable_if_t< IsAlgVector< std::decay_t< V1 > >::value > | ForEach (V1 &x, F const &f) |
|
template<typename V1 , typename V2 , typename F > |
std::enable_if_t< IsAlgVector< std::decay_t< V1 > >::value &&IsAlgVector< std::decay_t< V2 > >::value > | ForEach (V1 &x, V2 &y, F const &f) |
|
template<typename V1 , typename V2 , typename V3 , typename F > |
std::enable_if_t< IsAlgVector< std::decay_t< V1 > >::value &&IsAlgVector< std::decay_t< V2 > >::value &&IsAlgVector< std::decay_t< V3 > >::value > | ForEach (V1 &x, V2 &y, V3 &z, F const &f) |
|
template<typename V1 , typename V2 , typename V3 , typename V4 , typename F > |
std::enable_if_t< IsAlgVector< std::decay_t< V1 > >::value &&IsAlgVector< std::decay_t< V2 > >::value &&IsAlgVector< std::decay_t< V3 > >::value &&IsAlgVector< std::decay_t< V4 > >::value > | ForEach (V1 &x, V2 &y, V3 &z, V4 &a, F const &f) |
|
template<typename V1 , typename V2 , typename V3 , typename V4 , typename V5 , typename F > |
std::enable_if_t< IsAlgVector< std::decay_t< V1 > >::value &&IsAlgVector< std::decay_t< V2 > >::value &&IsAlgVector< std::decay_t< V3 > >::value &&IsAlgVector< std::decay_t< V4 > >::value &&IsAlgVector< std::decay_t< V5 > >::value > | ForEach (V1 &x, V2 &y, V3 &z, V4 &a, V5 &b, F const &f) |
|
template<typename T > |
T | Dot (AlgVector< T > const &x, AlgVector< T > const &y, bool local=false) |
|
template<typename T > |
void | Axpy (AlgVector< T > &y, T a, AlgVector< T > const &x, bool async=false) |
|
template<typename T > |
void | LinComb (AlgVector< T > &y, T a, AlgVector< T > const &xa, T b, AlgVector< T > const &xb, bool async=false) |
|
template<typename T > |
void | SpMV (AlgVector< T > &y, SpMatrix< T > const &A, AlgVector< T > const &x) |
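
The ForEach overloads above visit one to five AlgVectors elementwise, while Dot, Axpy, LinComb, and SpMV supply the usual BLAS-style operations; the semantics assumed below (Axpy: y += a*x, LinComb: y = a*xa + b*xb, SpMV: y = A*x, Dot: globally reduced unless local == true) are inferred from the names and argument lists rather than verified.

    // Hedged sketch using the helpers listed above: compute ||A*x - b||^2.
    // Header names are assumed; ForEach is assumed to hand each element to the
    // callback by reference, and the vectors are assumed to share a partition.
    // The ForEach line is redundant (SpMV is assumed to overwrite r) and is
    // included only to show the calling pattern.
    #include <AMReX_GpuQualifiers.H>
    #include <AMReX_AlgVector.H>
    #include <AMReX_SpMatrix.H>

    template <typename T>
    T residual_norm2 (amrex::SpMatrix<T> const& A,
                      amrex::AlgVector<T> const& x,
                      amrex::AlgVector<T> const& b,
                      amrex::AlgVector<T>& r)          // scratch vector
    {
        amrex::ForEach(r, [=] AMREX_GPU_DEVICE (T& ri) { ri = T(0); });
        amrex::SpMV(r, A, x);                          // r = A*x
        amrex::Axpy(r, T(-1), b);                      // r = A*x - b
        return amrex::Dot(r, r);                       // <r,r>, MPI-reduced
    }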
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlabeclap_adotx (int i, int, int, int n, Array4< T > const &y, Array4< T const > const &x, Array4< T const > const &a, Array4< T const > const &bX, GpuArray< T, AMREX_SPACEDIM > const &dxinv, T alpha, T beta) noexcept |
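
For orientation: the mlabeclap_* and abec_* kernels that follow are the pointwise pieces of AMReX's cell-centered MLABecLaplacian operator, A(phi) = alpha * a(x) * phi - beta * div( b(x) grad phi ). Judging by the names, adotx applies the operator, normalize divides by its diagonal, flux_* computes the face fluxes, gsrb and jacobi are the red-black Gauss-Seidel and Jacobi smoothers, and the _os variants take an additional overset mask; the 1-D overloads come first, with the 2-D and 3-D versions (extra bY/bZ coefficients and masks) further down.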
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlabeclap_adotx_os (int i, int, int, int n, Array4< T > const &y, Array4< T const > const &x, Array4< T const > const &a, Array4< T const > const &bX, Array4< int const > const &osm, GpuArray< T, AMREX_SPACEDIM > const &dxinv, T alpha, T beta) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlabeclap_normalize (int i, int, int, int n, Array4< T > const &x, Array4< T const > const &a, Array4< T const > const &bX, GpuArray< T, AMREX_SPACEDIM > const &dxinv, T alpha, T beta) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlabeclap_flux_x (Box const &box, Array4< T > const &fx, Array4< T const > const &sol, Array4< T const > const &bx, T fac, int ncomp) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlabeclap_flux_xface (Box const &box, Array4< T > const &fx, Array4< T const > const &sol, Array4< T const > const &bx, T fac, int xlen, int ncomp) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | abec_gsrb (int i, int, int, int n, Array4< T > const &phi, Array4< T const > const &rhs, T alpha, Array4< T const > const &a, T dhx, Array4< T const > const &bX, Array4< int const > const &m0, Array4< int const > const &m1, Array4< T const > const &f0, Array4< T const > const &f1, Box const &vbox, int redblack) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | abec_gsrb_os (int i, int, int, int n, Array4< T > const &phi, Array4< T const > const &rhs, T alpha, Array4< T const > const &a, T dhx, Array4< T const > const &bX, Array4< int const > const &m0, Array4< int const > const &m1, Array4< T const > const &f0, Array4< T const > const &f1, Array4< int const > const &osm, Box const &vbox, int redblack) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | abec_jacobi (int i, int, int, int n, Array4< T > const &phi, Array4< T const > const &rhs, Array4< T const > const &Ax, T alpha, Array4< T const > const &a, T dhx, Array4< T const > const &bX, Array4< int const > const &m0, Array4< int const > const &m1, Array4< T const > const &f0, Array4< T const > const &f1, Box const &vbox) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | abec_jacobi_os (int i, int, int, int n, Array4< T > const &phi, Array4< T const > const &rhs, Array4< T const > const &Ax, T alpha, Array4< T const > const &a, T dhx, Array4< T const > const &bX, Array4< int const > const &m0, Array4< int const > const &m1, Array4< T const > const &f0, Array4< T const > const &f1, Array4< int const > const &osm, Box const &vbox) noexcept |
|
template<typename T > |
AMREX_FORCE_INLINE void | abec_gsrb_with_line_solve (Box const &, Array4< T > const &, Array4< T const > const &, T, Array4< T const > const &, T, Array4< T const > const &, Array4< int const > const &, Array4< int const > const &, Array4< T const > const &, Array4< T const > const &, Box const &, int, int) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | overset_rescale_bcoef_x (Box const &box, Array4< T > const &bX, Array4< int const > const &osm, int ncomp, T osfac) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlabeclap_adotx (int i, int j, int, int n, Array4< T > const &y, Array4< T const > const &x, Array4< T const > const &a, Array4< T const > const &bX, Array4< T const > const &bY, GpuArray< T, AMREX_SPACEDIM > const &dxinv, T alpha, T beta) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlabeclap_adotx_os (int i, int j, int, int n, Array4< T > const &y, Array4< T const > const &x, Array4< T const > const &a, Array4< T const > const &bX, Array4< T const > const &bY, Array4< int const > const &osm, GpuArray< T, AMREX_SPACEDIM > const &dxinv, T alpha, T beta) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlabeclap_normalize (int i, int j, int, int n, Array4< T > const &x, Array4< T const > const &a, Array4< T const > const &bX, Array4< T const > const &bY, GpuArray< T, AMREX_SPACEDIM > const &dxinv, T alpha, T beta) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlabeclap_flux_y (Box const &box, Array4< T > const &fy, Array4< T const > const &sol, Array4< T const > const &by, T fac, int ncomp) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlabeclap_flux_yface (Box const &box, Array4< T > const &fy, Array4< T const > const &sol, Array4< T const > const &by, T fac, int ylen, int ncomp) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | abec_gsrb (int i, int j, int, int n, Array4< T > const &phi, Array4< T const > const &rhs, T alpha, Array4< T const > const &a, T dhx, T dhy, Array4< T const > const &bX, Array4< T const > const &bY, Array4< int const > const &m0, Array4< int const > const &m2, Array4< int const > const &m1, Array4< int const > const &m3, Array4< T const > const &f0, Array4< T const > const &f2, Array4< T const > const &f1, Array4< T const > const &f3, Box const &vbox, int redblack) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | abec_gsrb_os (int i, int j, int, int n, Array4< T > const &phi, Array4< T const > const &rhs, T alpha, Array4< T const > const &a, T dhx, T dhy, Array4< T const > const &bX, Array4< T const > const &bY, Array4< int const > const &m0, Array4< int const > const &m2, Array4< int const > const &m1, Array4< int const > const &m3, Array4< T const > const &f0, Array4< T const > const &f2, Array4< T const > const &f1, Array4< T const > const &f3, Array4< int const > const &osm, Box const &vbox, int redblack) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | abec_jacobi (int i, int j, int, int n, Array4< T > const &phi, Array4< T const > const &rhs, Array4< T const > const &Ax, T alpha, Array4< T const > const &a, T dhx, T dhy, Array4< T const > const &bX, Array4< T const > const &bY, Array4< int const > const &m0, Array4< int const > const &m2, Array4< int const > const &m1, Array4< int const > const &m3, Array4< T const > const &f0, Array4< T const > const &f2, Array4< T const > const &f1, Array4< T const > const &f3, Box const &vbox) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | abec_jacobi_os (int i, int j, int, int n, Array4< T > const &phi, Array4< T const > const &rhs, Array4< T const > const &Ax, T alpha, Array4< T const > const &a, T dhx, T dhy, Array4< T const > const &bX, Array4< T const > const &bY, Array4< int const > const &m0, Array4< int const > const &m2, Array4< int const > const &m1, Array4< int const > const &m3, Array4< T const > const &f0, Array4< T const > const &f2, Array4< T const > const &f1, Array4< T const > const &f3, Array4< int const > const &osm, Box const &vbox) noexcept |
|
template<typename T > |
AMREX_FORCE_INLINE void | abec_gsrb_with_line_solve (Box const &box, Array4< T > const &phi, Array4< T const > const &rhs, T alpha, Array4< T const > const &a, T dhx, T dhy, Array4< T const > const &bX, Array4< T const > const &bY, Array4< int const > const &m0, Array4< int const > const &m2, Array4< int const > const &m1, Array4< int const > const &m3, Array4< T const > const &f0, Array4< T const > const &f2, Array4< T const > const &f1, Array4< T const > const &f3, Box const &vbox, int redblack, int nc) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | overset_rescale_bcoef_y (Box const &box, Array4< T > const &bY, Array4< int const > const &osm, int ncomp, T osfac) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlabeclap_adotx (int i, int j, int k, int n, Array4< T > const &y, Array4< T const > const &x, Array4< T const > const &a, Array4< T const > const &bX, Array4< T const > const &bY, Array4< T const > const &bZ, GpuArray< T, AMREX_SPACEDIM > const &dxinv, T alpha, T beta) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlabeclap_adotx_os (int i, int j, int k, int n, Array4< T > const &y, Array4< T const > const &x, Array4< T const > const &a, Array4< T const > const &bX, Array4< T const > const &bY, Array4< T const > const &bZ, Array4< int const > const &osm, GpuArray< T, AMREX_SPACEDIM > const &dxinv, T alpha, T beta) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlabeclap_normalize (int i, int j, int k, int n, Array4< T > const &x, Array4< T const > const &a, Array4< T const > const &bX, Array4< T const > const &bY, Array4< T const > const &bZ, GpuArray< T, AMREX_SPACEDIM > const &dxinv, T alpha, T beta) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlabeclap_flux_z (Box const &box, Array4< T > const &fz, Array4< T const > const &sol, Array4< T const > const &bz, T fac, int ncomp) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlabeclap_flux_zface (Box const &box, Array4< T > const &fz, Array4< T const > const &sol, Array4< T const > const &bz, T fac, int zlen, int ncomp) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | abec_gsrb (int i, int j, int k, int n, Array4< T > const &phi, Array4< T const > const &rhs, T alpha, Array4< T const > const &a, T dhx, T dhy, T dhz, Array4< T const > const &bX, Array4< T const > const &bY, Array4< T const > const &bZ, Array4< int const > const &m0, Array4< int const > const &m2, Array4< int const > const &m4, Array4< int const > const &m1, Array4< int const > const &m3, Array4< int const > const &m5, Array4< T const > const &f0, Array4< T const > const &f2, Array4< T const > const &f4, Array4< T const > const &f1, Array4< T const > const &f3, Array4< T const > const &f5, Box const &vbox, int redblack) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | abec_gsrb_os (int i, int j, int k, int n, Array4< T > const &phi, Array4< T const > const &rhs, T alpha, Array4< T const > const &a, T dhx, T dhy, T dhz, Array4< T const > const &bX, Array4< T const > const &bY, Array4< T const > const &bZ, Array4< int const > const &m0, Array4< int const > const &m2, Array4< int const > const &m4, Array4< int const > const &m1, Array4< int const > const &m3, Array4< int const > const &m5, Array4< T const > const &f0, Array4< T const > const &f2, Array4< T const > const &f4, Array4< T const > const &f1, Array4< T const > const &f3, Array4< T const > const &f5, Array4< int const > const &osm, Box const &vbox, int redblack) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | abec_jacobi (int i, int j, int k, int n, Array4< T > const &phi, Array4< T const > const &rhs, Array4< T const > const &Ax, T alpha, Array4< T const > const &a, T dhx, T dhy, T dhz, Array4< T const > const &bX, Array4< T const > const &bY, Array4< T const > const &bZ, Array4< int const > const &m0, Array4< int const > const &m2, Array4< int const > const &m4, Array4< int const > const &m1, Array4< int const > const &m3, Array4< int const > const &m5, Array4< T const > const &f0, Array4< T const > const &f2, Array4< T const > const &f4, Array4< T const > const &f1, Array4< T const > const &f3, Array4< T const > const &f5, Box const &vbox) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | abec_jacobi_os (int i, int j, int k, int n, Array4< T > const &phi, Array4< T const > const &rhs, Array4< T const > const &Ax, T alpha, Array4< T const > const &a, T dhx, T dhy, T dhz, Array4< T const > const &bX, Array4< T const > const &bY, Array4< T const > const &bZ, Array4< int const > const &m0, Array4< int const > const &m2, Array4< int const > const &m4, Array4< int const > const &m1, Array4< int const > const &m3, Array4< int const > const &m5, Array4< T const > const &f0, Array4< T const > const &f2, Array4< T const > const &f4, Array4< T const > const &f1, Array4< T const > const &f3, Array4< T const > const &f5, Array4< int const > const &osm, Box const &vbox) noexcept |
|
template<typename T > |
AMREX_FORCE_INLINE void | tridiagonal_solve (Array1D< T, 0, 31 > &a_ls, Array1D< T, 0, 31 > &b_ls, Array1D< T, 0, 31 > &c_ls, Array1D< T, 0, 31 > &r_ls, Array1D< T, 0, 31 > &u_ls, Array1D< T, 0, 31 > &gam, int ilen) noexcept |
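
The fixed Array1D<T,0,31> bounds suggest the line solve works on pencils of at most 32 cells, and the name indicates the standard Thomas algorithm for a tridiagonal system a_i*u_{i-1} + b_i*u_i + c_i*u_{i+1} = r_i. For reference, a generic (non-AMReX) version of that recurrence looks like this:

    // Reference sketch of the Thomas algorithm; this is NOT the AMReX kernel
    // itself, just the textbook recurrence its name and argument list suggest.
    // a[0] and c[n-1] are unused; gam is the forward-sweep scratch array.
    #include <vector>

    template <typename T>
    void thomas_solve (std::vector<T> const& a, std::vector<T> const& b,
                       std::vector<T> const& c, std::vector<T> const& r,
                       std::vector<T>& u, std::vector<T>& gam, int n)
    {
        T bet = b[0];
        u[0] = r[0] / bet;
        for (int i = 1; i < n; ++i) {        // forward elimination
            gam[i] = c[i-1] / bet;
            bet    = b[i] - a[i] * gam[i];
            u[i]   = (r[i] - a[i] * u[i-1]) / bet;
        }
        for (int i = n - 2; i >= 0; --i) {   // back substitution
            u[i] -= gam[i+1] * u[i+1];
        }
    }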
|
template<typename T > |
AMREX_FORCE_INLINE void | abec_gsrb_with_line_solve (Box const &box, Array4< T > const &phi, Array4< T const > const &rhs, T alpha, Array4< T const > const &a, T dhx, T dhy, T dhz, Array4< T const > const &bX, Array4< T const > const &bY, Array4< T const > const &bZ, Array4< int const > const &m0, Array4< int const > const &m2, Array4< int const > const &m4, Array4< int const > const &m1, Array4< int const > const &m3, Array4< int const > const &m5, Array4< T const > const &f0, Array4< T const > const &f2, Array4< T const > const &f4, Array4< T const > const &f1, Array4< T const > const &f3, Array4< T const > const &f5, Box const &vbox, int redblack, int nc) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | overset_rescale_bcoef_z (Box const &box, Array4< T > const &bZ, Array4< int const > const &osm, int ncomp, T osfac) noexcept |
|
template<typename RT > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlalap_adotx (Box const &box, Array4< RT > const &y, Array4< RT const > const &x, Array4< RT const > const &a, GpuArray< RT, AMREX_SPACEDIM > const &dxinv, RT alpha, RT beta, int ncomp) noexcept |
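
Similarly, the mlalap_* kernels below belong to MLALaplacian, the variant without face-centered b coefficients, A(phi) = alpha * a(x) * phi - beta * laplacian(phi); the _m overloads, which take the extra dx and probxlo arguments, appear to carry the metric factors needed for non-Cartesian (e.g. cylindrical) coordinates, though that reading is inferred from the argument lists rather than documented here.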
|
template<typename RT > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlalap_adotx_m (Box const &box, Array4< RT > const &y, Array4< RT const > const &x, Array4< RT const > const &a, GpuArray< RT, AMREX_SPACEDIM > const &dxinv, RT alpha, RT beta, RT dx, RT probxlo, int ncomp) noexcept |
|
template<typename RT > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlalap_normalize (Box const &box, Array4< RT > const &x, Array4< RT const > const &a, GpuArray< RT, AMREX_SPACEDIM > const &dxinv, RT alpha, RT beta, int ncomp) noexcept |
|
template<typename RT > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlalap_normalize_m (Box const &box, Array4< RT > const &x, Array4< RT const > const &a, GpuArray< RT, AMREX_SPACEDIM > const &dxinv, RT alpha, RT beta, RT dx, RT probxlo, int ncomp) noexcept |
|
template<typename RT > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlalap_flux_x (Box const &box, Array4< RT > const &fx, Array4< RT const > const &sol, RT fac, int ncomp) noexcept |
|
template<typename RT > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlalap_flux_x_m (Box const &box, Array4< RT > const &fx, Array4< RT const > const &sol, RT fac, RT dx, RT probxlo, int ncomp) noexcept |
|
template<typename RT > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlalap_flux_xface (Box const &box, Array4< RT > const &fx, Array4< RT const > const &sol, RT fac, int xlen, int ncomp) noexcept |
|
template<typename RT > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlalap_flux_xface_m (Box const &box, Array4< RT > const &fx, Array4< RT const > const &sol, RT fac, int xlen, RT dx, RT probxlo, int ncomp) noexcept |
|
template<typename RT > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlalap_gsrb (Box const &box, Array4< RT > const &phi, Array4< RT const > const &rhs, RT alpha, RT dhx, Array4< RT const > const &a, Array4< RT const > const &f0, Array4< int const > const &m0, Array4< RT const > const &f1, Array4< int const > const &m1, Box const &vbox, int redblack, int ncomp) noexcept |
|
template<typename RT > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlalap_gsrb_m (Box const &box, Array4< RT > const &phi, Array4< RT const > const &rhs, RT alpha, RT dhx, Array4< RT const > const &a, Array4< RT const > const &f0, Array4< int const > const &m0, Array4< RT const > const &f1, Array4< int const > const &m1, Box const &vbox, int redblack, RT dx, RT probxlo, int ncomp) |
|
template<typename RT > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlalap_flux_y (Box const &box, Array4< RT > const &fy, Array4< RT const > const &sol, RT fac, int ncomp) noexcept |
|
template<typename RT > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlalap_flux_yface (Box const &box, Array4< RT > const &fy, Array4< RT const > const &sol, RT fac, int ylen, int ncomp) noexcept |
|
template<typename RT > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlalap_flux_z (Box const &box, Array4< RT > const &fz, Array4< RT const > const &sol, RT fac, int ncomp) noexcept |
|
template<typename RT > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlalap_flux_zface (Box const &box, Array4< RT > const &fz, Array4< RT const > const &sol, RT fac, int zlen, int ncomp) noexcept |
|
template<typename RT > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlalap_gsrb (Box const &box, Array4< RT > const &phi, Array4< RT const > const &rhs, RT alpha, RT dhx, RT dhy, RT dhz, Array4< RT const > const &a, Array4< RT const > const &f0, Array4< int const > const &m0, Array4< RT const > const &f1, Array4< int const > const &m1, Array4< RT const > const &f2, Array4< int const > const &m2, Array4< RT const > const &f3, Array4< int const > const &m3, Array4< RT const > const &f4, Array4< int const > const &m4, Array4< RT const > const &f5, Array4< int const > const &m5, Box const &vbox, int redblack, int ncomp) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE int | coarsen_overset_mask (Box const &bx, Array4< int > const &cmsk, Array4< int const > const &fmsk) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | coarsen_overset_mask (int i, int, int, Array4< int > const &cmsk, Array4< int const > const &fmsk) noexcept |
|
AMREX_GPU_DEVICE AMREX_FORCE_INLINE void | mlcurlcurl_adotx_x (int i, int j, int k, Array4< Real > const &Ax, Array4< Real const > const &ex, Array4< Real const > const &ey, Array4< Real const > const &ez, Real beta, GpuArray< Real, AMREX_SPACEDIM > const &adxinv) |
|
AMREX_GPU_DEVICE AMREX_FORCE_INLINE void | mlcurlcurl_adotx_y (int i, int j, int k, Array4< Real > const &Ay, Array4< Real const > const &ex, Array4< Real const > const &ey, Array4< Real const > const &ez, Real beta, GpuArray< Real, AMREX_SPACEDIM > const &adxinv) |
|
AMREX_GPU_DEVICE AMREX_FORCE_INLINE void | mlcurlcurl_adotx_z (int i, int j, int k, Array4< Real > const &Az, Array4< Real const > const &ex, Array4< Real const > const &ey, Array4< Real const > const &ez, Real beta, GpuArray< Real, AMREX_SPACEDIM > const &adxinv) |
|
AMREX_GPU_DEVICE AMREX_FORCE_INLINE void | mlcurlcurl_1D (int i, int j, int k, Array4< Real > const &ex, Array4< Real > const &ey, Array4< Real > const &ez, Array4< Real const > const &rhsx, Array4< Real const > const &rhsy, Array4< Real const > const &rhsz, Real beta, GpuArray< Real, AMREX_SPACEDIM > const &adxinv, int color, CurlCurlDirichletInfo const &dinfo) |
|
AMREX_GPU_DEVICE AMREX_FORCE_INLINE void | mlcurlcurl_1D (int i, int j, int k, Array4< Real > const &ex, Array4< Real > const &ey, Array4< Real > const &ez, Array4< Real const > const &rhsx, Array4< Real const > const &rhsy, Array4< Real const > const &rhsz, Array4< Real const > const &betax, Array4< Real const > const &betay, Array4< Real const > const &betaz, GpuArray< Real, AMREX_SPACEDIM > const &adxinv, int color, CurlCurlDirichletInfo const &dinfo) |
|
AMREX_GPU_DEVICE AMREX_FORCE_INLINE void | mlcurlcurl_interpadd (int dir, int i, int j, int k, Array4< Real > const &fine, Array4< Real const > const &crse) |
|
AMREX_GPU_DEVICE AMREX_FORCE_INLINE void | mlcurlcurl_restriction (int dir, int i, int j, int k, Array4< Real > const &crse, Array4< Real const > const &fine, CurlCurlDirichletInfo const &dinfo) |
|
AMREX_GPU_DEVICE AMREX_FORCE_INLINE void | mlcurlcurl_bc_symmetry (int i, int j, int k, Orientation face, IndexType it, Array4< Real > const &a) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebabeclap_adotx_centroid (Box const &box, Array4< Real > const &y, Array4< Real const > const &x, Array4< Real const > const &a, Array4< Real const > const &bX, Array4< Real const > const &bY, Array4< EBCellFlag const > const &flag, Array4< Real const > const &vfrc, Array4< Real const > const &apx, Array4< Real const > const &apy, Array4< Real const > const &fcx, Array4< Real const > const &fcy, Array4< Real const > const &ccent, Array4< Real const > const &ba, Array4< Real const > const &bcent, Array4< Real const > const &beb, Array4< Real const > const &phieb, const int &domlo_x, const int &domlo_y, const int &domhi_x, const int &domhi_y, const bool &on_x_face, const bool &on_y_face, bool is_eb_dirichlet, bool is_eb_inhomog, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, Real alpha, Real beta, int ncomp) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebabeclap_adotx (Box const &box, Array4< Real > const &y, Array4< Real const > const &x, Array4< Real const > const &a, Array4< Real const > const &bX, Array4< Real const > const &bY, Array4< const int > const &ccm, Array4< EBCellFlag const > const &flag, Array4< Real const > const &vfrc, Array4< Real const > const &apx, Array4< Real const > const &apy, Array4< Real const > const &fcx, Array4< Real const > const &fcy, Array4< Real const > const &ba, Array4< Real const > const &bc, Array4< Real const > const &beb, bool is_dirichlet, Array4< Real const > const &phieb, bool is_inhomog, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, Real alpha, Real beta, int ncomp, bool beta_on_centroid, bool phi_on_centroid) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebabeclap_ebflux (int i, int j, int k, int n, Array4< Real > const &feb, Array4< Real const > const &x, Array4< EBCellFlag const > const &flag, Array4< Real const > const &vfrc, Array4< Real const > const &apx, Array4< Real const > const &apy, Array4< Real const > const &bc, Array4< Real const > const &beb, Array4< Real const > const &phieb, bool is_inhomog, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebabeclap_gsrb (Box const &box, Array4< Real > const &phi, Array4< Real const > const &rhs, Real alpha, Array4< Real const > const &a, Real dhx, Real dhy, Real dh, GpuArray< Real, AMREX_SPACEDIM > const &dx, Array4< Real const > const &bX, Array4< Real const > const &bY, Array4< int const > const &m0, Array4< int const > const &m2, Array4< int const > const &m1, Array4< int const > const &m3, Array4< Real const > const &f0, Array4< Real const > const &f2, Array4< Real const > const &f1, Array4< Real const > const &f3, Array4< const int > const &ccm, Array4< Real const > const &beb, EBData const &ebdata, bool is_dirichlet, bool beta_on_centroid, bool phi_on_centroid, Box const &vbox, int redblack, int ncomp) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebabeclap_flux_x (Box const &box, Array4< Real > const &fx, Array4< Real const > const &apx, Array4< Real const > const &fcx, Array4< Real const > const &sol, Array4< Real const > const &bX, Array4< int const > const &ccm, Real dhx, int face_only, int ncomp, Box const &xbox, bool beta_on_centroid, bool phi_on_centroid) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebabeclap_flux_y (Box const &box, Array4< Real > const &fy, Array4< Real const > const &apy, Array4< Real const > const &fcy, Array4< Real const > const &sol, Array4< Real const > const &bY, Array4< int const > const &ccm, Real dhy, int face_only, int ncomp, Box const &ybox, bool beta_on_centroid, bool phi_on_centroid) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebabeclap_flux_x_0 (Box const &box, Array4< Real > const &fx, Array4< Real const > const &apx, Array4< Real const > const &sol, Array4< Real const > const &bX, Real dhx, int face_only, int ncomp, Box const &xbox) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebabeclap_flux_y_0 (Box const &box, Array4< Real > const &fy, Array4< Real const > const &apy, Array4< Real const > const &sol, Array4< Real const > const &bY, Real dhy, int face_only, int ncomp, Box const &ybox) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebabeclap_grad_x (Box const &box, Array4< Real > const &gx, Array4< Real const > const &sol, Array4< Real const > const &apx, Array4< Real const > const &fcx, Array4< int const > const &ccm, Real dxi, int ncomp, bool phi_on_centroid) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebabeclap_grad_y (Box const &box, Array4< Real > const &gy, Array4< Real const > const &sol, Array4< Real const > const &apy, Array4< Real const > const &fcy, Array4< int const > const &ccm, Real dyi, int ncomp, bool phi_on_centroid) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebabeclap_grad_x_0 (Box const &box, Array4< Real > const &gx, Array4< Real const > const &sol, Array4< Real const > const &apx, Real dxi, int ncomp) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebabeclap_grad_y_0 (Box const &box, Array4< Real > const &gy, Array4< Real const > const &sol, Array4< Real const > const &apy, Real dyi, int ncomp) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebabeclap_normalize (Box const &box, Array4< Real > const &phi, Real alpha, Array4< Real const > const &a, Real dhx, Real dhy, Real dh, const amrex::GpuArray< Real, AMREX_SPACEDIM > &dx, Array4< Real const > const &bX, Array4< Real const > const &bY, Array4< const int > const &ccm, Array4< EBCellFlag const > const &flag, Array4< Real const > const &vfrc, Array4< Real const > const &apx, Array4< Real const > const &apy, Array4< Real const > const &fcx, Array4< Real const > const &fcy, Array4< Real const > const &ba, Array4< Real const > const &bc, Array4< Real const > const &beb, bool is_dirichlet, bool beta_on_centroid, int ncomp) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebabeclap_adotx_centroid (Box const &box, Array4< Real > const &y, Array4< Real const > const &x, Array4< Real const > const &a, Array4< Real const > const &bX, Array4< Real const > const &bY, Array4< Real const > const &bZ, Array4< EBCellFlag const > const &flag, Array4< Real const > const &vfrc, Array4< Real const > const &apx, Array4< Real const > const &apy, Array4< Real const > const &apz, Array4< Real const > const &fcx, Array4< Real const > const &fcy, Array4< Real const > const &fcz, Array4< Real const > const &ccent, Array4< Real const > const &ba, Array4< Real const > const &bcent, Array4< Real const > const &beb, Array4< Real const > const &phieb, const int &domlo_x, const int &domlo_y, const int &domlo_z, const int &domhi_x, const int &domhi_y, const int &domhi_z, const bool &on_x_face, const bool &on_y_face, const bool &on_z_face, bool is_eb_dirichlet, bool is_eb_inhomog, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, Real alpha, Real beta, int ncomp) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebabeclap_adotx (Box const &box, Array4< Real > const &y, Array4< Real const > const &x, Array4< Real const > const &a, Array4< Real const > const &bX, Array4< Real const > const &bY, Array4< Real const > const &bZ, Array4< const int > const &ccm, Array4< EBCellFlag const > const &flag, Array4< Real const > const &vfrc, Array4< Real const > const &apx, Array4< Real const > const &apy, Array4< Real const > const &apz, Array4< Real const > const &fcx, Array4< Real const > const &fcy, Array4< Real const > const &fcz, Array4< Real const > const &ba, Array4< Real const > const &bc, Array4< Real const > const &beb, bool is_dirichlet, Array4< Real const > const &phieb, bool is_inhomog, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, Real alpha, Real beta, int ncomp, bool beta_on_centroid, bool phi_on_centroid) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebabeclap_ebflux (int i, int j, int k, int n, Array4< Real > const &feb, Array4< Real const > const &x, Array4< EBCellFlag const > const &flag, Array4< Real const > const &vfrc, Array4< Real const > const &apx, Array4< Real const > const &apy, Array4< Real const > const &apz, Array4< Real const > const &bc, Array4< Real const > const &beb, Array4< Real const > const &phieb, bool is_inhomog, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebabeclap_gsrb (Box const &box, Array4< Real > const &phi, Array4< Real const > const &rhs, Real alpha, Array4< Real const > const &a, Real dhx, Real dhy, Real dhz, Array4< Real const > const &bX, Array4< Real const > const &bY, Array4< Real const > const &bZ, Array4< int const > const &m0, Array4< int const > const &m2, Array4< int const > const &m4, Array4< int const > const &m1, Array4< int const > const &m3, Array4< int const > const &m5, Array4< Real const > const &f0, Array4< Real const > const &f2, Array4< Real const > const &f4, Array4< Real const > const &f1, Array4< Real const > const &f3, Array4< Real const > const &f5, Array4< const int > const &ccm, Array4< Real const > const &beb, EBData const &ebdata, bool is_dirichlet, bool beta_on_centroid, bool phi_on_centroid, Box const &vbox, int redblack, int ncomp) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebabeclap_flux_z (Box const &box, Array4< Real > const &fz, Array4< Real const > const &apz, Array4< Real const > const &fcz, Array4< Real const > const &sol, Array4< Real const > const &bZ, Array4< int const > const &ccm, Real dhz, int face_only, int ncomp, Box const &zbox, bool beta_on_centroid, bool phi_on_centroid) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebabeclap_flux_z_0 (Box const &box, Array4< Real > const &fz, Array4< Real const > const &apz, Array4< Real const > const &sol, Array4< Real const > const &bZ, Real dhz, int face_only, int ncomp, Box const &zbox) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebabeclap_grad_z (Box const &box, Array4< Real > const &gz, Array4< Real const > const &sol, Array4< Real const > const &apz, Array4< Real const > const &fcz, Array4< int const > const &ccm, Real dzi, int ncomp, bool phi_on_centroid) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebabeclap_grad_z_0 (Box const &box, Array4< Real > const &gz, Array4< Real const > const &sol, Array4< Real const > const &apz, Real dzi, int ncomp) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebabeclap_normalize (Box const &box, Array4< Real > const &phi, Real alpha, Array4< Real const > const &a, Real dhx, Real dhy, Real dhz, Array4< Real const > const &bX, Array4< Real const > const &bY, Array4< Real const > const &bZ, Array4< const int > const &ccm, Array4< EBCellFlag const > const &flag, Array4< Real const > const &vfrc, Array4< Real const > const &apx, Array4< Real const > const &apy, Array4< Real const > const &apz, Array4< Real const > const &fcx, Array4< Real const > const &fcy, Array4< Real const > const &fcz, Array4< Real const > const &ba, Array4< Real const > const &bc, Array4< Real const > const &beb, bool is_dirichlet, bool beta_on_centroid, int ncomp) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebabeclap_apply_bc_x (int side, Box const &box, int blen, Array4< Real > const &phi, Array4< int const > const &mask, Array4< Real const > const &area, BoundCond bct, Real bcl, Array4< Real const > const &bcval, int maxorder, Real dxinv, int inhomog, int icomp) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebabeclap_apply_bc_y (int side, Box const &box, int blen, Array4< Real > const &phi, Array4< int const > const &mask, Array4< Real const > const &area, BoundCond bct, Real bcl, Array4< Real const > const &bcval, int maxorder, Real dyinv, int inhomog, int icomp) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebabeclap_apply_bc_z (int side, Box const &box, int blen, Array4< Real > const &phi, Array4< int const > const &mask, Array4< Real const > const &area, BoundCond bct, Real bcl, Array4< Real const > const &bcval, int maxorder, Real dzinv, int inhomog, int icomp) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebndfdlap_adotx (int, int, int, Array4< Real > const &, Array4< Real const > const &, Array4< int const > const &, Real) noexcept |
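
The mlebndfdlap_* kernels below serve the nodal finite-difference Laplacian with embedded boundaries (MLEBNodeFDLaplacian). Going by the names, the _eb overloads add the cut-cell closures using the level set and edge-centroid data, the _sig variants carry a spatially varying sigma coefficient, the _rz variants handle cylindrical (r-z) coordinates, and adotx/gsrb/grad_* play the same apply/smoother/gradient roles as in the cell-centered operators above. This grouping is inferred from the signatures, not from separate documentation.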
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebndfdlap_gsrb (int, int, int, Array4< Real > const &, Array4< Real const > const &, Array4< int const > const &, Real, int) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebndfdlap_sig_adotx (int, int, int, Array4< Real > const &, Array4< Real const > const &, Array4< int const > const &, Array4< Real const > const &, Real) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebndfdlap_sig_gsrb (int, int, int, Array4< Real > const &, Array4< Real const > const &, Array4< int const > const &, Array4< Real const > const &, Real, int) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebndfdlap_scale_rhs (int i, int j, int, Array4< Real > const &rhs, Array4< int const > const &dmsk, Array4< Real const > const &ecx, Array4< Real const > const &ecy) noexcept |
|
template<typename F > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebndfdlap_adotx_eb_doit (int i, int j, int k, Array4< Real > const &y, Array4< Real const > const &x, Array4< Real const > const &levset, Array4< int const > const &dmsk, Array4< Real const > const &ecx, Array4< Real const > const &ecy, F const &xeb, Real bx, Real by) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebndfdlap_adotx_eb (int i, int j, int k, Array4< Real > const &y, Array4< Real const > const &x, Array4< Real const > const &levset, Array4< int const > const &dmsk, Array4< Real const > const &ecx, Array4< Real const > const &ecy, Real xeb, Real bx, Real by) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebndfdlap_adotx_eb (int i, int j, int k, Array4< Real > const &y, Array4< Real const > const &x, Array4< Real const > const &levset, Array4< int const > const &dmsk, Array4< Real const > const &ecx, Array4< Real const > const &ecy, Array4< Real const > const &xeb, Real bx, Real by) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebndfdlap_adotx (int i, int j, int k, Array4< Real > const &y, Array4< Real const > const &x, Array4< int const > const &dmsk, Real bx, Real by) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebndfdlap_gsrb_eb (int i, int j, int k, Array4< Real > const &x, Array4< Real const > const &rhs, Array4< Real const > const &levset, Array4< int const > const &dmsk, Array4< Real const > const &ecx, Array4< Real const > const &ecy, Real bx, Real by, int redblack) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebndfdlap_gsrb (int i, int j, int k, Array4< Real > const &x, Array4< Real const > const &rhs, Array4< int const > const &dmsk, Real bx, Real by, int redblack) noexcept |
|
template<typename F > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebndfdlap_adotx_rz_eb_doit (int i, int j, int k, Array4< Real > const &y, Array4< Real const > const &x, Array4< Real const > const &levset, Array4< int const > const &dmsk, Array4< Real const > const &ecx, Array4< Real const > const &ecy, F const &xeb, Real sigr, Real dr, Real dz, Real rlo, Real alpha) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebndfdlap_adotx_rz_eb (int i, int j, int k, Array4< Real > const &y, Array4< Real const > const &x, Array4< Real const > const &levset, Array4< int const > const &dmsk, Array4< Real const > const &ecx, Array4< Real const > const &ecy, Real xeb, Real sigr, Real dr, Real dz, Real rlo, Real alpha) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebndfdlap_adotx_rz_eb (int i, int j, int k, Array4< Real > const &y, Array4< Real const > const &x, Array4< Real const > const &levset, Array4< int const > const &dmsk, Array4< Real const > const &ecx, Array4< Real const > const &ecy, Array4< Real const > const &xeb, Real sigr, Real dr, Real dz, Real rlo, Real alpha) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebndfdlap_adotx_rz (int i, int j, int k, Array4< Real > const &y, Array4< Real const > const &x, Array4< int const > const &dmsk, Real sigr, Real dr, Real dz, Real rlo, Real alpha) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebndfdlap_gsrb_rz_eb (int i, int j, int k, Array4< Real > const &x, Array4< Real const > const &rhs, Array4< Real const > const &levset, Array4< int const > const &dmsk, Array4< Real const > const &ecx, Array4< Real const > const &ecy, Real sigr, Real dr, Real dz, Real rlo, int redblack, Real alpha) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebndfdlap_gsrb_rz (int i, int j, int k, Array4< Real > const &x, Array4< Real const > const &rhs, Array4< int const > const &dmsk, Real sigr, Real dr, Real dz, Real rlo, int redblack, Real alpha) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebndfdlap_sig_adotx (int i, int j, int k, Array4< Real > const &y, Array4< Real const > const &x, Array4< int const > const &dmsk, Array4< Real const > const &sig, Real bx, Real by) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebndfdlap_sig_gsrb (int i, int j, int k, Array4< Real > const &x, Array4< Real const > const &rhs, Array4< int const > const &dmsk, Array4< Real const > const &sig, Real bx, Real by, int redblack) noexcept |
|
template<typename F > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebndfdlap_sig_adotx_eb_doit (int i, int j, int k, Array4< Real > const &y, Array4< Real const > const &x, Array4< Real const > const &levset, Array4< int const > const &dmsk, Array4< Real const > const &ecx, Array4< Real const > const &ecy, Array4< Real const > const &sig, Array4< Real const > const &vfrc, F const &xeb, Real bx, Real by) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebndfdlap_sig_adotx_eb (int i, int j, int k, Array4< Real > const &y, Array4< Real const > const &x, Array4< Real const > const &levset, Array4< int const > const &dmsk, Array4< Real const > const &ecx, Array4< Real const > const &ecy, Array4< Real const > const &sig, Array4< Real const > const &vfrc, Real xeb, Real bx, Real by) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebndfdlap_sig_adotx_eb (int i, int j, int k, Array4< Real > const &y, Array4< Real const > const &x, Array4< Real const > const &levset, Array4< int const > const &dmsk, Array4< Real const > const &ecx, Array4< Real const > const &ecy, Array4< Real const > const &sig, Array4< Real const > const &vfrc, Array4< Real const > const &xeb, Real bx, Real by) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebndfdlap_sig_gsrb_eb (int i, int j, int k, Array4< Real > const &x, Array4< Real const > const &rhs, Array4< Real const > const &levset, Array4< int const > const &dmsk, Array4< Real const > const &ecx, Array4< Real const > const &ecy, Array4< Real const > const &sig, Array4< Real const > const &vfrc, Real bx, Real by, int redblack) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebndfdlap_scale_rhs (int i, int j, int k, Array4< Real > const &rhs, Array4< int const > const &dmsk, Array4< Real const > const &ecx, Array4< Real const > const &ecy, Array4< Real const > const &ecz) noexcept |
|
template<typename F > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebndfdlap_adotx_eb_doit (int i, int j, int k, Array4< Real > const &y, Array4< Real const > const &x, Array4< Real const > const &levset, Array4< int const > const &dmsk, Array4< Real const > const &ecx, Array4< Real const > const &ecy, Array4< Real const > const &ecz, F const &xeb, Real bx, Real by, Real bz) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebndfdlap_adotx_eb (int i, int j, int k, Array4< Real > const &y, Array4< Real const > const &x, Array4< Real const > const &levset, Array4< int const > const &dmsk, Array4< Real const > const &ecx, Array4< Real const > const &ecy, Array4< Real const > const &ecz, Real xeb, Real bx, Real by, Real bz) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebndfdlap_adotx_eb (int i, int j, int k, Array4< Real > const &y, Array4< Real const > const &x, Array4< Real const > const &levset, Array4< int const > const &dmsk, Array4< Real const > const &ecx, Array4< Real const > const &ecy, Array4< Real const > const &ecz, Array4< Real const > const &xeb, Real bx, Real by, Real bz) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebndfdlap_adotx (int i, int j, int k, Array4< Real > const &y, Array4< Real const > const &x, Array4< int const > const &dmsk, Real bx, Real by, Real bz) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebndfdlap_gsrb_eb (int i, int j, int k, Array4< Real > const &x, Array4< Real const > const &rhs, Array4< Real const > const &levset, Array4< int const > const &dmsk, Array4< Real const > const &ecx, Array4< Real const > const &ecy, Array4< Real const > const &ecz, Real bx, Real by, Real bz, int redblack) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebndfdlap_gsrb (int i, int j, int k, Array4< Real > const &x, Array4< Real const > const &rhs, Array4< int const > const &dmsk, Real bx, Real by, Real bz, int redblack) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebndfdlap_sig_adotx (int i, int j, int k, Array4< Real > const &y, Array4< Real const > const &x, Array4< int const > const &dmsk, Array4< Real const > const &sig, Real bx, Real by, Real bz) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebndfdlap_sig_gsrb (int i, int j, int k, Array4< Real > const &x, Array4< Real const > const &rhs, Array4< int const > const &dmsk, Array4< Real const > const &sig, Real bx, Real by, Real bz, int redblack) noexcept |
|
template<typename F > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebndfdlap_sig_adotx_eb_doit (int i, int j, int k, Array4< Real > const &y, Array4< Real const > const &x, Array4< Real const > const &levset, Array4< int const > const &dmsk, Array4< Real const > const &ecx, Array4< Real const > const &ecy, Array4< Real const > const &ecz, Array4< Real const > const &sig, Array4< Real const > const &vfrc, F const &xeb, Real bx, Real by, Real bz) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebndfdlap_sig_adotx_eb (int i, int j, int k, Array4< Real > const &y, Array4< Real const > const &x, Array4< Real const > const &levset, Array4< int const > const &dmsk, Array4< Real const > const &ecx, Array4< Real const > const &ecy, Array4< Real const > const &ecz, Array4< Real const > const &sig, Array4< Real const > const &vfrc, Real xeb, Real bx, Real by, Real bz) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebndfdlap_sig_adotx_eb (int i, int j, int k, Array4< Real > const &y, Array4< Real const > const &x, Array4< Real const > const &levset, Array4< int const > const &dmsk, Array4< Real const > const &ecx, Array4< Real const > const &ecy, Array4< Real const > const &ecz, Array4< Real const > const &sig, Array4< Real const > const &vfrc, Array4< Real const > const &xeb, Real bx, Real by, Real bz) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebndfdlap_sig_gsrb_eb (int i, int j, int k, Array4< Real > const &x, Array4< Real const > const &rhs, Array4< Real const > const &levset, Array4< int const > const &dmsk, Array4< Real const > const &ecx, Array4< Real const > const &ecy, Array4< Real const > const &ecz, Array4< Real const > const &sig, Array4< Real const > const &vfrc, Real bx, Real by, Real bz, int redblack) noexcept |
|
template<typename F > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebndfdlap_grad_x_doit (int i, int j, int k, Array4< Real > const &px, Array4< Real const > const &p, Array4< int const > const &dmsk, Array4< Real const > const &ecx, F const &phieb, Real dxi) |
|
template<typename F > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebndfdlap_grad_y_doit (int i, int j, int k, Array4< Real > const &py, Array4< Real const > const &p, Array4< int const > const &dmsk, Array4< Real const > const &ecy, F const &phieb, Real dyi) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebndfdlap_grad_x (Box const &b, Array4< Real > const &px, Array4< Real const > const &p, Array4< int const > const &dmsk, Array4< Real const > const &ecx, Real phieb, Real dxi) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebndfdlap_grad_x (Box const &b, Array4< Real > const &px, Array4< Real const > const &p, Array4< int const > const &dmsk, Array4< Real const > const &ecx, Array4< Real const > const &phieb, Real dxi) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebndfdlap_grad_y (Box const &b, Array4< Real > const &py, Array4< Real const > const &p, Array4< int const > const &dmsk, Array4< Real const > const &ecy, Real phieb, Real dyi) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebndfdlap_grad_y (Box const &b, Array4< Real > const &py, Array4< Real const > const &p, Array4< int const > const &dmsk, Array4< Real const > const &ecy, Array4< Real const > const &phieb, Real dyi) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebndfdlap_grad_x (Box const &b, Array4< Real > const &px, Array4< Real const > const &p, Real dxi) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebtensor_cross_terms_fx (Box const &box, Array4< Real > const &fx, Array4< Real const > const &vel, Array4< Real const > const &etax, Array4< Real const > const &kapx, Array4< Real const > const &apx, Array4< EBCellFlag const > const &flag, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebtensor_cross_terms_fy (Box const &box, Array4< Real > const &fy, Array4< Real const > const &vel, Array4< Real const > const &etay, Array4< Real const > const &kapy, Array4< Real const > const &apy, Array4< EBCellFlag const > const &flag, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebtensor_cross_terms_fx (Box const &box, Array4< Real > const &fx, Array4< Real const > const &vel, Array4< Real const > const &etax, Array4< Real const > const &kapx, Array4< Real const > const &apx, Array4< EBCellFlag const > const &flag, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, Array4< Real const > const &bvxlo, Array4< Real const > const &bvxhi, Array2D< BoundCond, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bct, Dim3 const &dlo, Dim3 const &dhi) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebtensor_cross_terms_fy (Box const &box, Array4< Real > const &fy, Array4< Real const > const &vel, Array4< Real const > const &etay, Array4< Real const > const &kapy, Array4< Real const > const &apy, Array4< EBCellFlag const > const &flag, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, Array4< Real const > const &bvylo, Array4< Real const > const &bvyhi, Array2D< BoundCond, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bct, Dim3 const &dlo, Dim3 const &dhi) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebtensor_cross_terms (Box const &box, Array4< Real > const &Ax, Array4< Real const > const &fx, Array4< Real const > const &fy, Array4< Real const > const &vel, Array4< Real const > const &velb, Array4< Real const > const &etab, Array4< Real const > const &kapb, Array4< int const > const &ccm, Array4< EBCellFlag const > const &flag, Array4< Real const > const &vol, Array4< Real const > const &apx, Array4< Real const > const &apy, Array4< Real const > const &fcx, Array4< Real const > const &fcy, Array4< Real const > const &bc, bool is_dirichlet, bool is_inhomog, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, Real bscalar) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebtensor_flux_0 (Box const &box, Array4< Real > const &Ax, Array4< Real const > const &fx, Array4< Real const > const &ap, Real bscalar) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebtensor_flux_x (Box const &box, Array4< Real > const &Ax, Array4< Real const > const &fx, Array4< Real const > const &apx, Array4< Real const > const &fcx, Real const bscalar, Array4< int const > const &ccm, int face_only, Box const &xbox) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebtensor_flux_y (Box const &box, Array4< Real > const &Ay, Array4< Real const > const &fy, Array4< Real const > const &apy, Array4< Real const > const &fcy, Real const bscalar, Array4< int const > const &ccm, int face_only, Box const &ybox) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebtensor_vel_grads_fx (Box const &box, Array4< Real > const &fx, Array4< Real const > const &vel, Array4< Real const > const &apx, Array4< EBCellFlag const > const &flag, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebtensor_vel_grads_fy (Box const &box, Array4< Real > const &fy, Array4< Real const > const &vel, Array4< Real const > const &apy, Array4< EBCellFlag const > const &flag, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebtensor_vel_grads_fx (Box const &box, Array4< Real > const &fx, Array4< Real const > const &vel, Array4< Real const > const &apx, Array4< EBCellFlag const > const &flag, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, Array4< Real const > const &bvxlo, Array4< Real const > const &bvxhi, Array2D< BoundCond, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bct, Dim3 const &dlo, Dim3 const &dhi) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebtensor_vel_grads_fy (Box const &box, Array4< Real > const &fy, Array4< Real const > const &vel, Array4< Real const > const &apy, Array4< EBCellFlag const > const &flag, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, Array4< Real const > const &bvylo, Array4< Real const > const &bvyhi, Array2D< BoundCond, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bct, Dim3 const &dlo, Dim3 const &dhi) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebtensor_vel_grads_fx (Box const &box, Array4< Real > const &fx, Array4< Real const > const &vel, Array4< Real const > const &apx, Array4< EBCellFlag const > const &flag, Array4< int const > const &ccm, Array4< Real const > const &fcx, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebtensor_vel_grads_fy (Box const &box, Array4< Real > const &fy, Array4< Real const > const &vel, Array4< Real const > const &apy, Array4< EBCellFlag const > const &flag, Array4< int const > const &ccm, Array4< Real const > const &fcy, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebtensor_vel_grads_fx (Box const &box, Array4< Real > const &fx, Array4< Real const > const &vel, Array4< Real const > const &apx, Array4< EBCellFlag const > const &flag, Array4< int const > const &ccm, Array4< Real const > const &fcx, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, Array4< Real const > const &bvxlo, Array4< Real const > const &bvxhi, Array2D< BoundCond, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bct, Dim3 const &dlo, Dim3 const &dhi) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebtensor_vel_grads_fy (Box const &box, Array4< Real > const &fy, Array4< Real const > const &vel, Array4< Real const > const &apy, Array4< EBCellFlag const > const &flag, Array4< int const > const &ccm, Array4< Real const > const &fcy, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, Array4< Real const > const &bvylo, Array4< Real const > const &bvyhi, Array2D< BoundCond, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bct, Dim3 const &dlo, Dim3 const &dhi) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | mlebtensor_dz_on_xface (int i, int j, int, int n, Array4< Real const > const &vel, Real dzi, Real whi, Real wlo, int khip, int khim, int klop, int klom) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | mlebtensor_dz_on_yface (int i, int j, int, int n, Array4< Real const > const &vel, Real dzi, Real whi, Real wlo, int khip, int khim, int klop, int klom) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | mlebtensor_dx_on_zface (int, int j, int k, int n, Array4< Real const > const &vel, Real dxi, Real whi, Real wlo, int ihip, int ihim, int ilop, int ilom) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | mlebtensor_dy_on_zface (int i, int, int k, int n, Array4< Real const > const &vel, Real dyi, Real whi, Real wlo, int jhip, int jhim, int jlop, int jlom) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebtensor_cross_terms_fz (Box const &box, Array4< Real > const &fz, Array4< Real const > const &vel, Array4< Real const > const &etaz, Array4< Real const > const &kapz, Array4< Real const > const &apz, Array4< EBCellFlag const > const &flag, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | mlebtensor_dz_on_xface (int i, int j, int k, int n, Array4< Real const > const &vel, Real dzi, Array4< Real const > const &bvxlo, Array4< Real const > const &bvxhi, Array2D< BoundCond, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bct, Dim3 const &dlo, Dim3 const &dhi, Real whi, Real wlo, int khip, int khim, int klop, int klom) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | mlebtensor_dz_on_yface (int i, int j, int k, int n, Array4< Real const > const &vel, Real dzi, Array4< Real const > const &bvylo, Array4< Real const > const &bvyhi, Array2D< BoundCond, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bct, Dim3 const &dlo, Dim3 const &dhi, Real whi, Real wlo, int khip, int khim, int klop, int klom) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | mlebtensor_dx_on_zface (int i, int j, int k, int n, Array4< Real const > const &vel, Real dxi, Array4< Real const > const &bvzlo, Array4< Real const > const &bvzhi, Array2D< BoundCond, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bct, Dim3 const &dlo, Dim3 const &dhi, Real whi, Real wlo, int ihip, int ihim, int ilop, int ilom) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | mlebtensor_dy_on_zface (int i, int j, int k, int n, Array4< Real const > const &vel, Real dyi, Array4< Real const > const &bvzlo, Array4< Real const > const &bvzhi, Array2D< BoundCond, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bct, Dim3 const &dlo, Dim3 const &dhi, Real whi, Real wlo, int jhip, int jhim, int jlop, int jlom) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebtensor_cross_terms_fz (Box const &box, Array4< Real > const &fz, Array4< Real const > const &vel, Array4< Real const > const &etaz, Array4< Real const > const &kapz, Array4< Real const > const &apz, Array4< EBCellFlag const > const &flag, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, Array4< Real const > const &bvzlo, Array4< Real const > const &bvzhi, Array2D< BoundCond, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bct, Dim3 const &dlo, Dim3 const &dhi) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebtensor_cross_terms (Box const &box, Array4< Real > const &Ax, Array4< Real const > const &fx, Array4< Real const > const &fy, Array4< Real const > const &fz, Array4< Real const > const &vel, Array4< Real const > const &velb, Array4< Real const > const &etab, Array4< Real const > const &kapb, Array4< int const > const &ccm, Array4< EBCellFlag const > const &flag, Array4< Real const > const &vol, Array4< Real const > const &apx, Array4< Real const > const &apy, Array4< Real const > const &apz, Array4< Real const > const &fcx, Array4< Real const > const &fcy, Array4< Real const > const &fcz, Array4< Real const > const &bc, bool is_dirichlet, bool is_inhomog, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, Real bscalar) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebtensor_flux_z (Box const &box, Array4< Real > const &Az, Array4< Real const > const &fz, Array4< Real const > const &apz, Array4< Real const > const &fcz, Real const bscalar, Array4< int const > const &ccm, int face_only, Box const &zbox) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebtensor_vel_grads_fz (Box const &box, Array4< Real > const &fz, Array4< Real const > const &vel, Array4< Real const > const &apz, Array4< EBCellFlag const > const &flag, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebtensor_vel_grads_fz (Box const &box, Array4< Real > const &fz, Array4< Real const > const &vel, Array4< Real const > const &apz, Array4< EBCellFlag const > const &flag, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, Array4< Real const > const &bvzlo, Array4< Real const > const &bvzhi, Array2D< BoundCond, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bct, Dim3 const &dlo, Dim3 const &dhi) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebtensor_vel_grads_fz (Box const &box, Array4< Real > const &fz, Array4< Real const > const &vel, Array4< Real const > const &apz, Array4< EBCellFlag const > const &flag, Array4< int const > const &ccm, Array4< Real const > const &fcz, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlebtensor_vel_grads_fz (Box const &box, Array4< Real > const &fz, Array4< Real const > const &vel, Array4< Real const > const &apz, Array4< EBCellFlag const > const &flag, Array4< int const > const &ccm, Array4< Real const > const &fcz, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, Array4< Real const > const &bvzlo, Array4< Real const > const &bvzhi, Array2D< BoundCond, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bct, Dim3 const &dlo, Dim3 const &dhi) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | mlebtensor_weight (int d) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | mlebtensor_dy_on_xface (int i, int, int k, int n, Array4< Real const > const &vel, Real dyi, Real whi, Real wlo, int jhip, int jhim, int jlop, int jlom) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | mlebtensor_dx_on_yface (int, int j, int k, int n, Array4< Real const > const &vel, Real dxi, Real whi, Real wlo, int ihip, int ihim, int ilop, int ilom) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | mlebtensor_dy_on_xface (int i, int j, int k, int n, Array4< Real const > const &vel, Real dyi, Array4< Real const > const &bvxlo, Array4< Real const > const &bvxhi, Array2D< BoundCond, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bct, Dim3 const &dlo, Dim3 const &dhi, Real whi, Real wlo, int jhip, int jhim, int jlop, int jlom) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | mlebtensor_dx_on_yface (int i, int j, int k, int n, Array4< Real const > const &vel, Real dxi, Array4< Real const > const &bvylo, Array4< Real const > const &bvyhi, Array2D< BoundCond, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bct, Dim3 const &dlo, Dim3 const &dhi, Real whi, Real wlo, int ihip, int ihim, int ilop, int ilom) noexcept |
|
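The mlebtensor_dy_on_xface / mlebtensor_dx_on_yface entries above compute transverse velocity derivatives on faces, blending a high-side and a low-side one-sided difference so that the stencil can lean away from cut or covered cells near the embedded boundary. Purely as an illustration (hypothetical helper name; whi/wlo are assumed to be the blending weights and the jh*/jl* indices the one-sided stencil offsets, which may not match the library's internals exactly):

    #include <AMReX_FArrayBox.H>   // Array4, Real, GPU macros (transitively)

    // Illustrative sketch, not the AMReX kernel: blend two one-sided
    // y-differences of vel on an x-face and scale by 0.5/dy.
    AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
    amrex::Real dy_on_xface_sketch (int i, int k, int n,
                                    amrex::Array4<amrex::Real const> const& vel,
                                    amrex::Real dyi, amrex::Real whi, amrex::Real wlo,
                                    int jhip, int jhim, int jlop, int jlom) noexcept
    {
        return amrex::Real(0.5)*dyi*( whi*(vel(i,jhip,k,n) - vel(i,jhim,k,n))
                                    + wlo*(vel(i,jlop,k,n) - vel(i,jlom,k,n)) );
    }

|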
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mllinop_apply_bc_x (int side, Box const &box, int blen, Array4< T > const &phi, Array4< int const > const &mask, BoundCond bct, T bcl, Array4< T const > const &bcval, int maxorder, T dxinv, int inhomog, int icomp) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mllinop_apply_bc_x (int side, int i, int j, int k, int blen, Array4< T > const &phi, Array4< int const > const &mask, BoundCond bct, T bcl, Array4< T const > const &bcval, int maxorder, T dxinv, int inhomog, int icomp) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mllinop_apply_bc_y (int side, Box const &box, int blen, Array4< T > const &phi, Array4< int const > const &mask, BoundCond bct, T bcl, Array4< T const > const &bcval, int maxorder, T dyinv, int inhomog, int icomp) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mllinop_apply_bc_y (int side, int i, int j, int k, int blen, Array4< T > const &phi, Array4< int const > const &mask, BoundCond bct, T bcl, Array4< T const > const &bcval, int maxorder, T dyinv, int inhomog, int icomp) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mllinop_apply_bc_z (int side, Box const &box, int blen, Array4< T > const &phi, Array4< int const > const &mask, BoundCond bct, T bcl, Array4< T const > const &bcval, int maxorder, T dzinv, int inhomog, int icomp) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mllinop_apply_bc_z (int side, int i, int j, int k, int blen, Array4< T > const &phi, Array4< int const > const &mask, BoundCond bct, T bcl, Array4< T const > const &bcval, int maxorder, T dzinv, int inhomog, int icomp) noexcept |
|
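The mllinop_apply_bc_{x,y,z} kernels above fill ghost cells next to a domain boundary according to the boundary-condition type (bct), boundary location (bcl), external values (bcval), and interpolation order (maxorder). As a rough sketch of the simplest case only, here is a hypothetical second-order Dirichlet fill on the low-x side, assuming the boundary face lies midway between the ghost cell and the first interior cell; the real kernels also handle Neumann/Robin/inflow types and higher polynomial orders:

    #include <AMReX_FArrayBox.H>   // Array4, GPU macros (transitively)

    // Hypothetical sketch: reflect the first interior value through the
    // prescribed face value to obtain a linearly consistent ghost value.
    template <typename T>
    AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
    void dirichlet_ghost_lo_x_sketch (int i, int j, int k,
                                      amrex::Array4<T> const& phi,
                                      amrex::Array4<T const> const& bcval,
                                      int icomp) noexcept
    {
        // i indexes the ghost cell just outside the low-x face; i+1 is interior.
        phi(i,j,k,icomp) = T(2.0)*bcval(i,j,k,icomp) - phi(i+1,j,k,icomp);
    }

|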
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mllinop_comp_interp_coef0_x (int side, Box const &box, int blen, Array4< T > const &f, Array4< int const > const &mask, BoundCond bct, T bcl, int maxorder, T dxinv, int icomp) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mllinop_comp_interp_coef0_x (int side, int i, int j, int k, int blen, Array4< T > const &f, Array4< int const > const &mask, BoundCond bct, T bcl, int maxorder, T dxinv, int icomp) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mllinop_comp_interp_coef0_y (int side, Box const &box, int blen, Array4< T > const &f, Array4< int const > const &mask, BoundCond bct, T bcl, int maxorder, T dyinv, int icomp) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mllinop_comp_interp_coef0_y (int side, int i, int j, int k, int blen, Array4< T > const &f, Array4< int const > const &mask, BoundCond bct, T bcl, int maxorder, T dyinv, int icomp) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mllinop_comp_interp_coef0_z (int side, Box const &box, int blen, Array4< T > const &f, Array4< int const > const &mask, BoundCond bct, T bcl, int maxorder, T dzinv, int icomp) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mllinop_comp_interp_coef0_z (int side, int i, int j, int k, int blen, Array4< T > const &f, Array4< int const > const &mask, BoundCond bct, T bcl, int maxorder, T dzinv, int icomp) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mllinop_apply_innu_xlo (int i, int j, int k, Array4< T > const &rhs, Array4< int const > const &mask, Array4< T const > const &bcoef, BoundCond bct, T, Array4< T const > const &bcval, T fac, bool has_bcoef, int icomp) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mllinop_apply_innu_xhi (int i, int j, int k, Array4< T > const &rhs, Array4< int const > const &mask, Array4< T const > const &bcoef, BoundCond bct, T, Array4< T const > const &bcval, T fac, bool has_bcoef, int icomp) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mllinop_apply_innu_ylo (int i, int j, int k, Array4< T > const &rhs, Array4< int const > const &mask, Array4< T const > const &bcoef, BoundCond bct, T, Array4< T const > const &bcval, T fac, bool has_bcoef, int icomp) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mllinop_apply_innu_ylo_m (int i, int j, int k, Array4< T > const &rhs, Array4< int const > const &mask, BoundCond bct, T, Array4< T const > const &bcval, T fac, T xlo, T dx, int icomp) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mllinop_apply_innu_yhi (int i, int j, int k, Array4< T > const &rhs, Array4< int const > const &mask, Array4< T const > const &bcoef, BoundCond bct, T, Array4< T const > const &bcval, T fac, bool has_bcoef, int icomp) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mllinop_apply_innu_yhi_m (int i, int j, int k, Array4< T > const &rhs, Array4< int const > const &mask, BoundCond bct, T, Array4< T const > const &bcval, T fac, T xlo, T dx, int icomp) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mllinop_apply_innu_zlo (int i, int j, int k, Array4< T > const &rhs, Array4< int const > const &mask, Array4< T const > const &bcoef, BoundCond bct, T, Array4< T const > const &bcval, T fac, bool has_bcoef, int icomp) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mllinop_apply_innu_zhi (int i, int j, int k, Array4< T > const &rhs, Array4< int const > const &mask, Array4< T const > const &bcoef, BoundCond bct, T, Array4< T const > const &bcval, T fac, bool has_bcoef, int icomp) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlmg_lin_cc_interp_r2 (Box const &bx, Array4< T > const &ff, Array4< T const > const &cc, int nc) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlmg_lin_cc_interp_r4 (Box const &bx, Array4< T > const &ff, Array4< T const > const &cc, int nc) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlmg_lin_nd_interp_r2 (int i, int, int, int n, Array4< T > const &fine, Array4< T const > const &crse) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlmg_lin_nd_interp_r4 (int i, int, int, int n, Array4< T > const &fine, Array4< T const > const &crse) noexcept |
|
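mlmg_lin_cc_interp_r2/_r4 and mlmg_lin_nd_interp_r2/_r4 above are the multigrid prolongation kernels: piecewise-linear interpolation from a coarse to a fine level by refinement ratio 2 or 4, for cell-centered and nodal data respectively. A minimal 1D sketch of the factor-2 nodal case, with a hypothetical helper name: coincident fine nodes inject the coarse value and the in-between nodes average their two coarse neighbours.

    #include <AMReX_FArrayBox.H>   // Array4, GPU macros (transitively)

    // Hypothetical 1D illustration of factor-2 nodal linear prolongation.
    template <typename T>
    AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
    void nd_interp_r2_sketch (int i, int n,
                              amrex::Array4<T> const& fine,
                              amrex::Array4<T const> const& crse) noexcept
    {
        int ic = i/2;                      // coarse node under fine node i (i >= 0 assumed)
        fine(i,0,0,n) = (i == 2*ic)
            ? crse(ic,0,0,n)                               // coincident node: inject
            : T(0.5)*(crse(ic,0,0,n) + crse(ic+1,0,0,n));  // midpoint node: average
    }

|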
void | mlndabeclap_gauss_seidel_aa (Box const &, Array4< Real > const &, Array4< Real const > const &, Real, Real, Array4< Real const > const &, Array4< Real const > const &, Array4< int const > const &, GpuArray< Real, AMREX_SPACEDIM > const &) noexcept |
|
AMREX_GPU_DEVICE AMREX_FORCE_INLINE void | mlndabeclap_jacobi_aa (int, int, int, Array4< Real > const &, Real, Array4< Real const > const &, Real, Real, Array4< Real const > const &, Array4< Real const > const &, Array4< int const > const &, GpuArray< Real, AMREX_SPACEDIM > const &) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_zero_fine (int i, int, int, Array4< Real > const &phi, Array4< int const > const &msk, int fine_flag) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_avgdown_coeff_x (int i, int, int, Array4< Real > const &crse, Array4< Real const > const &fine) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_semi_avgdown_coeff (int i, int j, int k, Array4< Real > const &crse, Array4< Real const > const &fine, int) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | mlndlap_adotx_c (int i, int, int, Array4< Real const > const &x, Real sigma, Array4< int const > const &msk, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | mlndlap_adotx_ha (int i, int, int, Array4< Real const > const &x, Array4< Real const > const &sx, Array4< int const > const &msk, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | mlndlap_adotx_aa (int i, int j, int k, Array4< Real const > const &x, Array4< Real const > const &sx, Array4< int const > const &msk, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_normalize_ha (int i, int, int, Array4< Real > const &x, Array4< Real const > const &sx, Array4< int const > const &msk, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_normalize_aa (int i, int j, int k, Array4< Real > const &x, Array4< Real const > const &sx, Array4< int const > const &msk, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
AMREX_GPU_DEVICE AMREX_FORCE_INLINE void | mlndlap_jacobi_ha (int i, int, int, Array4< Real > const &sol, Real Ax, Array4< Real const > const &rhs, Array4< Real const > const &sx, Array4< int const > const &msk, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
void | mlndlap_jacobi_ha (Box const &bx, Array4< Real > const &sol, Array4< Real const > const &Ax, Array4< Real const > const &rhs, Array4< Real const > const &sx, Array4< int const > const &msk, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
AMREX_GPU_DEVICE AMREX_FORCE_INLINE void | mlndlap_jacobi_aa (int i, int j, int k, Array4< Real > const &sol, Real Ax, Array4< Real const > const &rhs, Array4< Real const > const &sig, Array4< int const > const &msk, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
void | mlndlap_jacobi_aa (Box const &bx, Array4< Real > const &sol, Array4< Real const > const &Ax, Array4< Real const > const &rhs, Array4< Real const > const &sig, Array4< int const > const &msk, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
AMREX_GPU_DEVICE AMREX_FORCE_INLINE void | mlndlap_jacobi_c (int i, int, int, Array4< Real > const &sol, Real Ax, Array4< Real const > const &rhs, Real sig, Array4< int const > const &msk, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
void | mlndlap_jacobi_c (Box const &bx, Array4< Real > const &sol, Array4< Real const > const &Ax, Array4< Real const > const &rhs, Real sig, Array4< int const > const &msk, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
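The mlndlap_jacobi_* family above applies a damped Jacobi relaxation at each node, given the operator applied to the current iterate (Ax), the right-hand side, the coefficients needed to form the diagonal, and a Dirichlet mask. A generic sketch of that update shape, with an assumed damping factor of 2/3 and a precomputed diagonal; the actual kernels build the diagonal from sigma and dxinv and their damping factor may differ:

    #include <AMReX_FArrayBox.H>   // Array4, Real, GPU macros (transitively)

    // Generic masked, damped point-Jacobi update (illustrative only).
    AMREX_GPU_DEVICE AMREX_FORCE_INLINE
    void jacobi_node_sketch (int i, int j, int k,
                             amrex::Array4<amrex::Real> const& sol,
                             amrex::Real Ax,                              // (A*sol)(i,j,k)
                             amrex::Array4<amrex::Real const> const& rhs,
                             amrex::Real diag,                            // assumed diagonal of A here
                             amrex::Array4<int const> const& msk) noexcept
    {
        constexpr amrex::Real omega = amrex::Real(2./3.);  // assumed damping factor
        if (msk(i,j,k)) {
            sol(i,j,k) = amrex::Real(0.0);                 // masked (Dirichlet) node
        } else {
            sol(i,j,k) += omega * (rhs(i,j,k) - Ax) / diag;
        }
    }

|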
void | mlndlap_gauss_seidel_ha (Box const &bx, Array4< Real > const &sol, Array4< Real const > const &rhs, Array4< Real const > const &sx, Array4< int const > const &msk, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
void | mlndlap_gauss_seidel_aa (Box const &bx, Array4< Real > const &sol, Array4< Real const > const &rhs, Array4< Real const > const &sx, Array4< int const > const &msk, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
void | mlndlap_gauss_seidel_c (Box const &bx, Array4< Real > const &sol, Array4< Real const > const &rhs, Real sig, Array4< int const > const &msk, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
void | mlndlap_gauss_seidel_with_line_solve_aa (Box const &, Array4< Real > const &, Array4< Real const > const &, Array4< Real const > const &, Array4< int const > const &, GpuArray< Real, AMREX_SPACEDIM > const &) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_interpadd_c (int i, int, int, Array4< Real > const &fine, Array4< Real const > const &crse, Array4< int const > const &msk) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_interpadd_aa (int i, int, int, Array4< Real > const &fine, Array4< Real const > const &crse, Array4< Real const > const &sig, Array4< int const > const &msk) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_semi_interpadd_aa (int, int, int, Array4< Real > const &, Array4< Real const > const &, Array4< Real const > const &, Array4< int const > const &, int) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_interpadd_ha (int i, int j, int k, Array4< Real > const &fine, Array4< Real const > const &crse, Array4< Real const > const &sig, Array4< int const > const &msk) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_divu (int i, int, int, Array4< Real > const &rhs, Array4< Real const > const &vel, Array4< int const > const &msk, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, Box const &, GpuArray< LinOpBCType, AMREX_SPACEDIM > const &, GpuArray< LinOpBCType, AMREX_SPACEDIM > const &) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | mlndlap_rhcc (int i, int, int, Array4< Real const > const &rhcc, Array4< int const > const &msk) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_mknewu (int i, int, int, Array4< Real > const &u, Array4< Real const > const &p, Array4< Real const > const &sig, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_mknewu_c (int i, int, int, Array4< Real > const &u, Array4< Real const > const &p, Real sig, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
template<int rr> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_rhcc_fine_contrib (int, int, int, Box const &, Array4< Real > const &, Array4< Real const > const &, Array4< int const > const &) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_divu_cf_contrib (int, int, int, Array4< Real > const &, Array4< Real const > const &, Array4< Real const > const &, Array4< Real const > const &, Array4< int const > const &, Array4< int const > const &, Array4< int const > const &, GpuArray< Real, AMREX_SPACEDIM > const &, Box const &, GpuArray< LinOpBCType, AMREX_SPACEDIM > const &, GpuArray< LinOpBCType, AMREX_SPACEDIM > const &, bool) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_crse_resid (int, int, int, Array4< Real > const &, Array4< Real const > const &, Array4< int const > const &, Box const &, GpuArray< LinOpBCType, AMREX_SPACEDIM > const &, GpuArray< LinOpBCType, AMREX_SPACEDIM > const &, bool) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_res_cf_contrib (int, int, int, Array4< Real > const &, Array4< Real const > const &, Array4< Real const > const &, Array4< Real const > const &, Array4< int const > const &, Array4< int const > const &, Array4< int const > const &, Array4< Real const > const &, GpuArray< Real, AMREX_SPACEDIM > const &, Box const &, GpuArray< LinOpBCType, AMREX_SPACEDIM > const &, GpuArray< LinOpBCType, AMREX_SPACEDIM > const &, bool) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_res_cf_contrib_cs (int, int, int, Array4< Real > const &, Array4< Real const > const &, Array4< Real const > const &, Real, Array4< int const > const &, Array4< int const > const &, Array4< int const > const &, Array4< Real const > const &, GpuArray< Real, AMREX_SPACEDIM > const &, Box const &, GpuArray< LinOpBCType, AMREX_SPACEDIM > const &, GpuArray< LinOpBCType, AMREX_SPACEDIM > const &, bool) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_set_stencil (Box const &, Array4< Real > const &, Array4< Real const > const &, GpuArray< Real, AMREX_SPACEDIM > const &) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_set_stencil_s0 (int, int, int, Array4< Real > const &) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_stencil_rap (int, int, int, Array4< Real > const &, Array4< Real const > const &) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | mlndlap_adotx_sten (int, int, int, Array4< Real const > const &, Array4< Real const > const &, Array4< int const > const &) noexcept |
|
void | mlndlap_gauss_seidel_sten (Box const &, Array4< Real > const &, Array4< Real const > const &, Array4< Real const > const &, Array4< int const > const &) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_interpadd_rap (int, int, int, Array4< Real > const &, Array4< Real const > const &, Array4< Real const > const &, Array4< int const > const &) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_restriction_rap (int, int, int, Array4< Real > const &, Array4< Real const > const &, Array4< Real const > const &, Array4< int const > const &) noexcept |
|
AMREX_GPU_DEVICE AMREX_FORCE_INLINE int | mlndlap_color (int i, int, int) |
|
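mlndlap_color and the mlndlap_gscolor_* kernels below implement multi-colored Gauss-Seidel: nodes are partitioned by parity so that all nodes of one color can be relaxed in parallel without races. A minimal sketch of such a parity coloring (the actual color numbering used by mlndlap_color may differ):

    #include <AMReX_GpuQualifiers.H>
    #include <AMReX_Extension.H>

    // Illustrative parity-based coloring: 2^DIM independent colors (0..7 in 3D).
    AMREX_GPU_DEVICE AMREX_FORCE_INLINE
    int node_color_sketch (int i, int j, int k)
    {
        return (i & 1) + 2*(j & 1) + 4*(k & 1);
    }

|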
AMREX_GPU_DEVICE AMREX_FORCE_INLINE void | mlndlap_gscolor_ha (int i, int j, int k, Array4< Real > const &sol, Array4< Real const > const &rhs, Array4< Real const > const &sx, Array4< int const > const &msk, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, int color) noexcept |
|
AMREX_GPU_DEVICE AMREX_FORCE_INLINE void | mlndlap_gscolor_aa (int i, int j, int k, Array4< Real > const &sol, Array4< Real const > const &rhs, Array4< Real const > const &sx, Array4< int const > const &msk, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, int color) noexcept |
|
AMREX_GPU_DEVICE AMREX_FORCE_INLINE void | mlndlap_gscolor_c (int i, int j, int k, Array4< Real > const &sol, Array4< Real const > const &rhs, Real sig, Array4< int const > const &msk, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, int color) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_gscolor_sten (int, int, int, Array4< Real > const &, Array4< Real const > const &, Array4< Real const > const &, Array4< int const > const &, int) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_avgdown_coeff_y (int i, int j, int k, Array4< Real > const &crse, Array4< Real const > const &fine) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | mlndlap_adotx_ha (int i, int j, int k, Array4< Real const > const &x, Array4< Real const > const &sx, Array4< Real const > const &sy, Array4< int const > const &msk, bool is_rz, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | mlndlap_adotx_aa (int i, int j, int k, Array4< Real const > const &x, Array4< Real const > const &sig, Array4< int const > const &msk, bool is_rz, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | mlndlap_adotx_c (int i, int j, int k, Array4< Real const > const &x, Real sigma, Array4< int const > const &msk, bool is_rz, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_normalize_ha (int i, int j, int k, Array4< Real > const &x, Array4< Real const > const &sx, Array4< Real const > const &sy, Array4< int const > const &msk, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
AMREX_GPU_DEVICE AMREX_FORCE_INLINE void | mlndlap_jacobi_ha (int i, int j, int k, Array4< Real > const &sol, Real Ax, Array4< Real const > const &rhs, Array4< Real const > const &sx, Array4< Real const > const &sy, Array4< int const > const &msk, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
void | mlndlap_jacobi_ha (Box const &bx, Array4< Real > const &sol, Array4< Real const > const &Ax, Array4< Real const > const &rhs, Array4< Real const > const &sx, Array4< Real const > const &sy, Array4< int const > const &msk, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
void | mlndlap_gauss_seidel_ha (Box const &bx, Array4< Real > const &sol, Array4< Real const > const &rhs, Array4< Real const > const &sx, Array4< Real const > const &sy, Array4< int const > const &msk, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, bool is_rz) noexcept |
|
void | mlndlap_gauss_seidel_aa (Box const &bx, Array4< Real > const &sol, Array4< Real const > const &rhs, Array4< Real const > const &sig, Array4< int const > const &msk, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, bool is_rz) noexcept |
|
void | mlndlap_gauss_seidel_c (Box const &bx, Array4< Real > const &sol, Array4< Real const > const &rhs, Real sig, Array4< int const > const &msk, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, bool is_rz) noexcept |
|
AMREX_FORCE_INLINE void | tridiagonal_solve (Array1D< Real, 0, 31 > &a_ls, Array1D< Real, 0, 31 > &b_ls, Array1D< Real, 0, 31 > &c_ls, Array1D< Real, 0, 31 > &r_ls, Array1D< Real, 0, 31 > &u_ls, Array1D< Real, 0, 31 > &gam, int ilen) noexcept |
|
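tridiagonal_solve above, used by the line-solve smoother listed next, takes the three bands, the right-hand side, the solution, and a scratch array over fixed-size Array1D buffers. A generic sketch of the textbook Thomas algorithm (forward elimination followed by back substitution) over the same buffer types; the library routine's internals may differ:

    #include <AMReX_Array.H>       // Array1D
    #include <AMReX_Extension.H>   // AMREX_FORCE_INLINE
    #include <AMReX_REAL.H>

    // Textbook Thomas algorithm sketch: solve a_i u_{i-1} + b_i u_i + c_i u_{i+1} = r_i
    // for i = 0..ilen-1, using gam as scratch space.
    AMREX_FORCE_INLINE
    void thomas_solve_sketch (amrex::Array1D<amrex::Real,0,31> const& a,
                              amrex::Array1D<amrex::Real,0,31> const& b,
                              amrex::Array1D<amrex::Real,0,31> const& c,
                              amrex::Array1D<amrex::Real,0,31> const& r,
                              amrex::Array1D<amrex::Real,0,31>& u,
                              amrex::Array1D<amrex::Real,0,31>& gam,
                              int ilen) noexcept
    {
        amrex::Real bet = b(0);
        u(0) = r(0) / bet;                         // forward elimination
        for (int i = 1; i < ilen; ++i) {
            gam(i) = c(i-1) / bet;
            bet    = b(i) - a(i)*gam(i);
            u(i)   = (r(i) - a(i)*u(i-1)) / bet;
        }
        for (int i = ilen-2; i >= 0; --i) {        // back substitution
            u(i) -= gam(i+1)*u(i+1);
        }
    }

|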
void | mlndlap_gauss_seidel_with_line_solve_aa (Box const &bx, Array4< Real > const &sol, Array4< Real const > const &rhs, Array4< Real const > const &sig, Array4< int const > const &msk, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, bool is_rz) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | aa_interp_line_x (Array4< Real const > const &crse, Array4< Real const > const &sig, int i, int j, int ic, int jc) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | aa_interp_line_y (Array4< Real const > const &crse, Array4< Real const > const &sig, int i, int j, int ic, int jc) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | aa_interp_face_xy (Array4< Real const > const &crse, Array4< Real const > const &sig, int i, int j, int ic, int jc) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | ha_interp_face_xy (Array4< Real const > const &crse, Array4< Real const > const &sigx, Array4< Real const > const &sigy, int i, int j, int ic, int jc) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_interpadd_ha (int i, int j, int, Array4< Real > const &fine, Array4< Real const > const &crse, Array4< Real const > const &sigx, Array4< Real const > const &sigy, Array4< int const > const &msk) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_divu (int i, int j, int k, Array4< Real > const &rhs, Array4< Real const > const &vel, Array4< int const > const &msk, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, Box const &nodal_domain, GpuArray< LinOpBCType, AMREX_SPACEDIM > const &bclo, GpuArray< LinOpBCType, AMREX_SPACEDIM > const &bchi, bool is_rz) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_mknewu (int i, int j, int k, Array4< Real > const &u, Array4< Real const > const &p, Array4< Real const > const &sig, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, bool is_rz) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_mknewu_c (int i, int j, int k, Array4< Real > const &u, Array4< Real const > const &p, Real sig, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, bool is_rz) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | mlndlap_sum_Df (int ii, int jj, Real facx, Real facy, Array4< Real const > const &vel, Box const &velbx, bool is_rz) noexcept |
|
template<int rr> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_divu_fine_contrib (int i, int j, int, Box const &fvbx, Box const &velbx, Array4< Real > const &rhs, Array4< Real const > const &vel, Array4< Real const > const &frhs, Array4< int const > const &msk, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, bool is_rz) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | neumann_scale (int i, int j, Box const &nddom, GpuArray< LinOpBCType, AMREX_SPACEDIM > const &bclo, GpuArray< LinOpBCType, AMREX_SPACEDIM > const &bchi) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_divu_cf_contrib (int i, int j, int, Array4< Real > const &rhs, Array4< Real const > const &vel, Array4< Real const > const &fc, Array4< Real const > const &rhcc, Array4< int const > const &dmsk, Array4< int const > const &ndmsk, Array4< int const > const &ccmsk, bool is_rz, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, Box const &ccdom_p, Box const &veldom, Box const &nddom, GpuArray< LinOpBCType, AMREX_SPACEDIM > const &bclo, GpuArray< LinOpBCType, AMREX_SPACEDIM > const &bchi) noexcept |
|
template<typename P , typename S > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | mlndlap_sum_Ax (P const &pred, S const &sig, int i, int j, Real facx, Real facy, Array4< Real const > const &phi, bool is_rz) noexcept |
|
template<int rr, typename S > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_Ax_fine_contrib_doit (S const &sig, int i, int j, Box const &ndbx, Box const &ccbx, Array4< Real > const &f, Array4< Real const > const &res, Array4< Real const > const &rhs, Array4< Real const > const &phi, Array4< int const > const &msk, bool is_rz, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
template<int rr> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_Ax_fine_contrib (int i, int j, int, Box const &ndbx, Box const &ccbx, Array4< Real > const &f, Array4< Real const > const &res, Array4< Real const > const &rhs, Array4< Real const > const &phi, Array4< Real const > const &sig, Array4< int const > const &msk, bool is_rz, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
template<int rr> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_Ax_fine_contrib_cs (int i, int j, int, Box const &ndbx, Box const &ccbx, Array4< Real > const &f, Array4< Real const > const &res, Array4< Real const > const &rhs, Array4< Real const > const &phi, Real const sig, Array4< int const > const &msk, bool is_rz, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_res_cf_contrib (int i, int j, int, Array4< Real > const &res, Array4< Real const > const &phi, Array4< Real const > const &rhs, Array4< Real const > const &sig, Array4< int const > const &dmsk, Array4< int const > const &ndmsk, Array4< int const > const &ccmsk, Array4< Real const > const &fc, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, Box const &ccdom_p, Box const &nddom, bool is_rz, GpuArray< LinOpBCType, AMREX_SPACEDIM > const &bclo, GpuArray< LinOpBCType, AMREX_SPACEDIM > const &bchi, bool neumann_doubling) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_res_cf_contrib_cs (int i, int j, int, Array4< Real > const &res, Array4< Real const > const &phi, Array4< Real const > const &rhs, Real const sig, Array4< int const > const &dmsk, Array4< int const > const &ndmsk, Array4< int const > const &ccmsk, Array4< Real const > const &fc, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, Box const &ccdom_p, Box const &nddom, bool is_rz, GpuArray< LinOpBCType, AMREX_SPACEDIM > const &bclo, GpuArray< LinOpBCType, AMREX_SPACEDIM > const &bchi, bool neumann_doubling) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | mlndlap_adotx_sten_doit (int i, int j, int k, Array4< Real const > const &x, Array4< Real const > const &sten) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_gauss_seidel_sten (int i, int j, int k, Array4< Real > const &sol, Array4< Real const > const &rhs, Array4< Real const > const &sten, Array4< int const > const &msk) noexcept |
|
AMREX_GPU_DEVICE AMREX_FORCE_INLINE void | mlndlap_gscolor_ha (int i, int j, int k, Array4< Real > const &sol, Array4< Real const > const &rhs, Array4< Real const > const &sx, Array4< Real const > const &sy, Array4< int const > const &msk, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, int color, bool is_rz) noexcept |
|
AMREX_GPU_DEVICE AMREX_FORCE_INLINE void | mlndlap_gscolor_aa (int i, int j, int k, Array4< Real > const &sol, Array4< Real const > const &rhs, Array4< Real const > const &sig, Array4< int const > const &msk, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, int color, bool is_rz) noexcept |
|
AMREX_GPU_DEVICE AMREX_FORCE_INLINE void | mlndlap_gscolor_c (int i, int j, int k, Array4< Real > const &sol, Array4< Real const > const &rhs, Real sig, Array4< int const > const &msk, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, int color, bool is_rz) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_avgdown_coeff_z (int i, int j, int k, Array4< Real > const &crse, Array4< Real const > const &fine) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | mlndlap_adotx_ha (int i, int j, int k, Array4< Real const > const &x, Array4< Real const > const &sx, Array4< Real const > const &sy, Array4< Real const > const &sz, Array4< int const > const &msk, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_normalize_ha (int i, int j, int k, Array4< Real > const &x, Array4< Real const > const &sx, Array4< Real const > const &sy, Array4< Real const > const &sz, Array4< int const > const &msk, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
AMREX_GPU_DEVICE AMREX_FORCE_INLINE void | mlndlap_jacobi_ha (int i, int j, int k, Array4< Real > const &sol, Real Ax, Array4< Real const > const &rhs, Array4< Real const > const &sx, Array4< Real const > const &sy, Array4< Real const > const &sz, Array4< int const > const &msk, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
void | mlndlap_jacobi_ha (Box const &bx, Array4< Real > const &sol, Array4< Real const > const &Ax, Array4< Real const > const &rhs, Array4< Real const > const &sx, Array4< Real const > const &sy, Array4< Real const > const &sz, Array4< int const > const &msk, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
void | mlndlap_gauss_seidel_ha (Box const &bx, Array4< Real > const &sol, Array4< Real const > const &rhs, Array4< Real const > const &sx, Array4< Real const > const &sy, Array4< Real const > const &sz, Array4< int const > const &msk, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | aa_interp_line_x (Array4< Real const > const &crse, Array4< Real const > const &sig, int i, int j, int k, int ic, int jc, int kc) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | aa_interp_line_y (Array4< Real const > const &crse, Array4< Real const > const &sig, int i, int j, int k, int ic, int jc, int kc) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | aa_interp_line_z (Array4< Real const > const &crse, Array4< Real const > const &sig, int i, int j, int k, int ic, int jc, int kc) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | aa_interp_face_xy (Array4< Real const > const &crse, Array4< Real const > const &sig, int i, int j, int k, int ic, int jc, int kc) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | aa_interp_face_xz (Array4< Real const > const &crse, Array4< Real const > const &sig, int i, int j, int k, int ic, int jc, int kc) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | aa_interp_face_yz (Array4< Real const > const &crse, Array4< Real const > const &sig, int i, int j, int k, int ic, int jc, int kc) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | ha_interp_face_xy (Array4< Real const > const &crse, Array4< Real const > const &sigx, Array4< Real const > const &sigy, int i, int j, int k, int ic, int jc, int kc) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | ha_interp_face_xz (Array4< Real const > const &crse, Array4< Real const > const &sigx, Array4< Real const > const &sigz, int i, int j, int k, int ic, int jc, int kc) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | ha_interp_face_yz (Array4< Real const > const &crse, Array4< Real const > const &sigy, Array4< Real const > const &sigz, int i, int j, int k, int ic, int jc, int kc) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_interpadd_ha (int i, int j, int k, Array4< Real > const &fine, Array4< Real const > const &crse, Array4< Real const > const &sigx, Array4< Real const > const &sigy, Array4< Real const > const &sigz, Array4< int const > const &msk) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | mlndlap_sum_Df (int ii, int jj, int kk, Real facx, Real facy, Real facz, Array4< Real const > const &vel, Box const &velbx) noexcept |
|
template<int rr> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_divu_fine_contrib (int i, int j, int k, Box const &fvbx, Box const &velbx, Array4< Real > const &rhs, Array4< Real const > const &vel, Array4< Real const > const &frhs, Array4< int const > const &msk, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | neumann_scale (int i, int j, int k, Box const &nddom, GpuArray< LinOpBCType, AMREX_SPACEDIM > const &bclo, GpuArray< LinOpBCType, AMREX_SPACEDIM > const &bchi) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_divu_cf_contrib (int i, int j, int k, Array4< Real > const &rhs, Array4< Real const > const &vel, Array4< Real const > const &fc, Array4< Real const > const &rhcc, Array4< int const > const &dmsk, Array4< int const > const &ndmsk, Array4< int const > const &ccmsk, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, Box const &ccdom_p, Box const &veldom, Box const &nddom, GpuArray< LinOpBCType, AMREX_SPACEDIM > const &bclo, GpuArray< LinOpBCType, AMREX_SPACEDIM > const &bchi) noexcept |
|
template<typename P , typename S > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | mlndlap_sum_Ax (P const &pred, S const &sig, int i, int j, int k, Real facx, Real facy, Real facz, Array4< Real const > const &phi) noexcept |
|
template<int rr, typename S > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_Ax_fine_contrib_doit (S const &sig, int i, int j, int k, Box const &ndbx, Box const &ccbx, Array4< Real > const &f, Array4< Real const > const &res, Array4< Real const > const &rhs, Array4< Real const > const &phi, Array4< int const > const &msk, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
template<int rr> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_Ax_fine_contrib (int i, int j, int k, Box const &ndbx, Box const &ccbx, Array4< Real > const &f, Array4< Real const > const &res, Array4< Real const > const &rhs, Array4< Real const > const &phi, Array4< Real const > const &sig, Array4< int const > const &msk, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
template<int rr> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_Ax_fine_contrib_cs (int i, int j, int k, Box const &ndbx, Box const &ccbx, Array4< Real > const &f, Array4< Real const > const &res, Array4< Real const > const &rhs, Array4< Real const > const &phi, Real const sig, Array4< int const > const &msk, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_res_cf_contrib (int i, int j, int k, Array4< Real > const &res, Array4< Real const > const &phi, Array4< Real const > const &rhs, Array4< Real const > const &sig, Array4< int const > const &dmsk, Array4< int const > const &ndmsk, Array4< int const > const &ccmsk, Array4< Real const > const &fc, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, Box const &ccdom_p, Box const &nddom, GpuArray< LinOpBCType, AMREX_SPACEDIM > const &bclo, GpuArray< LinOpBCType, AMREX_SPACEDIM > const &bchi, bool neumann_doubling) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_res_cf_contrib_cs (int i, int j, int k, Array4< Real > const &res, Array4< Real const > const &phi, Array4< Real const > const &rhs, Real const sig, Array4< int const > const &dmsk, Array4< int const > const &ndmsk, Array4< int const > const &ccmsk, Array4< Real const > const &fc, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, Box const &ccdom_p, Box const &nddom, GpuArray< LinOpBCType, AMREX_SPACEDIM > const &bclo, GpuArray< LinOpBCType, AMREX_SPACEDIM > const &bchi, bool neumann_doubling) noexcept |
|
AMREX_GPU_DEVICE AMREX_FORCE_INLINE void | mlndlap_gscolor_ha (int i, int j, int k, Array4< Real > const &sol, Array4< Real const > const &rhs, Array4< Real const > const &sx, Array4< Real const > const &sy, Array4< Real const > const &sz, Array4< int const > const &msk, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, int color) noexcept |
|
void | mlndlap_scale_neumann_bc (Real s, Box const &bx, Array4< Real > const &rhs, Box const &nddom, GpuArray< LinOpBCType, AMREX_SPACEDIM > const &lobc, GpuArray< LinOpBCType, AMREX_SPACEDIM > const &hibc) noexcept |
|
void | mlndlap_impose_neumann_bc (Box const &bx, Array4< Real > const &rhs, Box const &nddom, GpuArray< LinOpBCType, AMREX_SPACEDIM > const &lobc, GpuArray< LinOpBCType, AMREX_SPACEDIM > const &hibc) noexcept |
|
void | mlndlap_unimpose_neumann_bc (Box const &bx, Array4< Real > const &rhs, Box const &nddom, GpuArray< LinOpBCType, AMREX_SPACEDIM > const &lobc, GpuArray< LinOpBCType, AMREX_SPACEDIM > const &hibc) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_normalize_sten (int i, int j, int k, Array4< Real > const &x, Array4< Real const > const &sten, Array4< int const > const &msk, Real s0_norm0) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_jacobi_sten (int i, int j, int k, Array4< Real > const &sol, Real Ax, Array4< Real const > const &rhs, Array4< Real const > const &sten, Array4< int const > const &msk) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_jacobi_sten (Box const &bx, Array4< Real > const &sol, Array4< Real const > const &Ax, Array4< Real const > const &rhs, Array4< Real const > const &sten, Array4< int const > const &msk) noexcept |
|
AMREX_FORCE_INLINE bool | mlndlap_any_fine_sync_cells (Box const &bx, Array4< int const > const &msk, int fine_flag) noexcept |
|
template<typename T > |
void | mlndlap_bc_doit (Box const &vbx, Array4< T > const &a, Box const &domain, GpuArray< bool, AMREX_SPACEDIM > const &bflo, GpuArray< bool, AMREX_SPACEDIM > const &bfhi) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_restriction (int i, int, int, Array4< Real > const &crse, Array4< Real const > const &fine, Array4< int const > const &msk) noexcept |
|
template<int rr> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_restriction (int i, int, int, Array4< Real > const &crse, Array4< Real const > const &fine, Array4< int const > const &msk, Box const &fdom, GpuArray< LinOpBCType, AMREX_SPACEDIM > const &bclo, GpuArray< LinOpBCType, AMREX_SPACEDIM > const &bchi) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_semi_restriction (int, int, int, Array4< Real > const &, Array4< Real const > const &, Array4< int const > const &, int) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_set_nodal_mask (int i, int, int, Array4< int > const &nmsk, Array4< int const > const &cmsk) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_set_dirichlet_mask (Box const &bx, Array4< int > const &dmsk, Array4< int const > const &omsk, Box const &dom, GpuArray< LinOpBCType, AMREX_SPACEDIM > const &bclo, GpuArray< LinOpBCType, AMREX_SPACEDIM > const &bchi) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndlap_set_dot_mask (Box const &bx, Array4< Real > const &dmsk, Array4< int const > const &omsk, Box const &dom, GpuArray< LinOpBCType, AMREX_SPACEDIM > const &bclo, GpuArray< LinOpBCType, AMREX_SPACEDIM > const &bchi) noexcept |
|
template<typename T > |
void | mlndlap_fillbc_cc (Box const &vbx, Array4< T > const &sigma, Box const &domain, GpuArray< LinOpBCType, AMREX_SPACEDIM > bclo, GpuArray< LinOpBCType, AMREX_SPACEDIM > bchi) noexcept |
|
template<typename T > |
void | mlndlap_applybc (Box const &vbx, Array4< T > const &phi, Box const &domain, GpuArray< LinOpBCType, AMREX_SPACEDIM > bclo, GpuArray< LinOpBCType, AMREX_SPACEDIM > bchi) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndtslap_interpadd (int, int, int, Array4< Real > const &, Array4< Real const > const &, Array4< int const > const &) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndtslap_semi_interpadd (int, int, int, Array4< Real > const &, Array4< Real const > const &, Array4< int const > const &, int) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndtslap_adotx (int i, int j, int k, Array4< Real > const &y, Array4< Real const > const &x, Array4< int const > const &msk, GpuArray< Real, 3 > const &s) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndtslap_gauss_seidel (int i, int j, int k, Array4< Real > const &sol, Array4< Real const > const &rhs, Array4< int const > const &msk, GpuArray< Real, 3 > const &s) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndtslap_adotx (int i, int j, int k, Array4< Real > const &y, Array4< Real const > const &x, Array4< int const > const &msk, GpuArray< Real, 6 > const &s) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlndtslap_gauss_seidel (int i, int j, int k, Array4< Real > const &sol, Array4< Real const > const &rhs, Array4< int const > const &msk, GpuArray< Real, 6 > const &s) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlpoisson_adotx (int i, Array4< T > const &y, Array4< T const > const &x, T dhx) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlpoisson_adotx_os (int i, Array4< T > const &y, Array4< T const > const &x, Array4< int const > const &osm, T dhx) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlpoisson_adotx_m (int i, Array4< T > const &y, Array4< T const > const &x, T dhx, T dx, T probxlo) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlpoisson_flux_x (Box const &box, Array4< T > const &fx, Array4< T const > const &sol, T dxinv) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlpoisson_flux_x_m (Box const &box, Array4< T > const &fx, Array4< T const > const &sol, T dxinv, T dx, T probxlo) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlpoisson_flux_xface (Box const &box, Array4< T > const &fx, Array4< T const > const &sol, T dxinv, int xlen) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlpoisson_flux_xface_m (Box const &box, Array4< T > const &fx, Array4< T const > const &sol, T dxinv, int xlen, T dx, T probxlo) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlpoisson_gsrb (int i, int, int, Array4< T > const &phi, Array4< T const > const &rhs, T dhx, Array4< T const > const &f0, Array4< int const > const &m0, Array4< T const > const &f1, Array4< int const > const &m1, Box const &vbox, int redblack) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlpoisson_gsrb_os (int i, int, int, Array4< T > const &phi, Array4< T const > const &rhs, Array4< int const > const &osm, T dhx, Array4< T const > const &f0, Array4< int const > const &m0, Array4< T const > const &f1, Array4< int const > const &m1, Box const &vbox, int redblack) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlpoisson_gsrb_m (int i, int, int, Array4< T > const &phi, Array4< T const > const &rhs, T dhx, Array4< T const > const &f0, Array4< int const > const &m0, Array4< T const > const &f1, Array4< int const > const &m1, Box const &vbox, int redblack, T dx, T probxlo) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlpoisson_jacobi (int i, int, int, Array4< T > const &phi, Array4< T const > const &rhs, Array4< T const > const &Ax, T dhx, Array4< T const > const &f0, Array4< int const > const &m0, Array4< T const > const &f1, Array4< int const > const &m1, Box const &vbox) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlpoisson_jacobi_os (int i, int, int, Array4< T > const &phi, Array4< T const > const &rhs, Array4< T const > const &Ax, Array4< int const > const &osm, T dhx, Array4< T const > const &f0, Array4< int const > const &m0, Array4< T const > const &f1, Array4< int const > const &m1, Box const &vbox) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlpoisson_jacobi_m (int i, int, int, Array4< T > const &phi, Array4< T const > const &rhs, Array4< T const > const &Ax, T dhx, Array4< T const > const &f0, Array4< int const > const &m0, Array4< T const > const &f1, Array4< int const > const &m1, Box const &vbox, T dx, T probxlo) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlpoisson_normalize (int i, int, int, Array4< T > const &x, T dhx, T dx, T probxlo) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlpoisson_adotx (int i, int j, int k, Array4< T > const &y, Array4< T const > const &x, T dhx, T dhy, T dhz) noexcept |
|
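mlpoisson_adotx above applies the constant-coefficient Laplacian at a single cell. For reference, a schematic 7-point version in 3D, assuming dhx/dhy/dhz already carry the scaled 1/dx^2 factors; this illustrates the stencil and is not a copy of the library kernel:

    #include <AMReX_FArrayBox.H>   // Array4, GPU macros (transitively)

    // Schematic 7-point Laplacian apply: y = L(x) at cell (i,j,k).
    template <typename T>
    AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
    void poisson_adotx_sketch (int i, int j, int k,
                               amrex::Array4<T> const& y,
                               amrex::Array4<T const> const& x,
                               T dhx, T dhy, T dhz) noexcept
    {
        y(i,j,k) = dhx*(x(i-1,j,k) - T(2.0)*x(i,j,k) + x(i+1,j,k))
                 + dhy*(x(i,j-1,k) - T(2.0)*x(i,j,k) + x(i,j+1,k))
                 + dhz*(x(i,j,k-1) - T(2.0)*x(i,j,k) + x(i,j,k+1));
    }

|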
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlpoisson_adotx_os (int i, int j, int k, Array4< T > const &y, Array4< T const > const &x, Array4< int const > const &osm, T dhx, T dhy, T dhz) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlpoisson_flux_y (Box const &box, Array4< T > const &fy, Array4< T const > const &sol, T dyinv) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlpoisson_flux_yface (Box const &box, Array4< T > const &fy, Array4< T const > const &sol, T dyinv, int ylen) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlpoisson_flux_z (Box const &box, Array4< T > const &fz, Array4< T const > const &sol, T dzinv) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlpoisson_flux_zface (Box const &box, Array4< T > const &fz, Array4< T const > const &sol, T dzinv, int zlen) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlpoisson_gsrb (int i, int j, int k, Array4< T > const &phi, Array4< T const > const &rhs, T dhx, T dhy, T dhz, Array4< T const > const &f0, Array4< int const > const &m0, Array4< T const > const &f1, Array4< int const > const &m1, Array4< T const > const &f2, Array4< int const > const &m2, Array4< T const > const &f3, Array4< int const > const &m3, Array4< T const > const &f4, Array4< int const > const &m4, Array4< T const > const &f5, Array4< int const > const &m5, Box const &vbox, int redblack) noexcept |
|
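mlpoisson_gsrb above relaxes the red and black parity classes alternately, which is what makes the sweep safe to run in parallel on GPUs. A minimal interior point-update sketch assuming the usual (i+j+k+redblack) parity test and ignoring the boundary-operator arrays f0..f5/m0..m5 that the real kernel folds into the diagonal:

    #include <AMReX_FArrayBox.H>   // Array4, GPU macros (transitively)

    // Illustrative red-black Gauss-Seidel update for the 7-point Laplacian.
    template <typename T>
    AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
    void gsrb_point_sketch (int i, int j, int k,
                            amrex::Array4<T> const& phi,
                            amrex::Array4<T const> const& rhs,
                            T dhx, T dhy, T dhz, int redblack) noexcept
    {
        if ((i + j + k + redblack) % 2 == 0) {
            T gamma = T(-2.0)*(dhx + dhy + dhz);   // diagonal of the 7-point operator
            T rho = dhx*(phi(i-1,j,k) + phi(i+1,j,k))
                  + dhy*(phi(i,j-1,k) + phi(i,j+1,k))
                  + dhz*(phi(i,j,k-1) + phi(i,j,k+1));
            phi(i,j,k) = (rhs(i,j,k) - rho) / gamma;
        }
    }

|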
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlpoisson_gsrb_os (int i, int j, int k, Array4< T > const &phi, Array4< T const > const &rhs, Array4< int const > const &osm, T dhx, T dhy, T dhz, Array4< T const > const &f0, Array4< int const > const &m0, Array4< T const > const &f1, Array4< int const > const &m1, Array4< T const > const &f2, Array4< int const > const &m2, Array4< T const > const &f3, Array4< int const > const &m3, Array4< T const > const &f4, Array4< int const > const &m4, Array4< T const > const &f5, Array4< int const > const &m5, Box const &vbox, int redblack) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlpoisson_jacobi (int i, int j, int k, Array4< T > const &phi, Array4< T const > const &rhs, Array4< T const > const &Ax, T dhx, T dhy, T dhz, Array4< T const > const &f0, Array4< int const > const &m0, Array4< T const > const &f1, Array4< int const > const &m1, Array4< T const > const &f2, Array4< int const > const &m2, Array4< T const > const &f3, Array4< int const > const &m3, Array4< T const > const &f4, Array4< int const > const &m4, Array4< T const > const &f5, Array4< int const > const &m5, Box const &vbox) noexcept |
|
template<typename T > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mlpoisson_jacobi_os (int i, int j, int k, Array4< T > const &phi, Array4< T const > const &rhs, Array4< T const > const &Ax, Array4< int const > const &osm, T dhx, T dhy, T dhz, Array4< T const > const &f0, Array4< int const > const &m0, Array4< T const > const &f1, Array4< int const > const &m1, Array4< T const > const &f2, Array4< int const > const &m2, Array4< T const > const &f3, Array4< int const > const &m3, Array4< T const > const &f4, Array4< int const > const &m4, Array4< T const > const &f5, Array4< int const > const &m5, Box const &vbox) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mltensor_fill_corners (int icorner, Box const &vbox, Array4< Real > const &vel, Array4< int const > const &mxlo, Array4< int const > const &mylo, Array4< int const > const &mxhi, Array4< int const > const &myhi, Array4< Real const > const &bcvalxlo, Array4< Real const > const &bcvalylo, Array4< Real const > const &bcvalxhi, Array4< Real const > const &bcvalyhi, Array2D< BoundCond, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bct, Array2D< Real, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bcl, int inhomog, int maxorder, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, Dim3 const &dlo, Dim3 const &dhi) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mltensor_cross_terms_fx (Box const &box, Array4< Real > const &fx, Array4< Real const > const &vel, Array4< Real const > const &etax, Array4< Real const > const &kapx, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mltensor_cross_terms_fy (Box const &box, Array4< Real > const &fy, Array4< Real const > const &vel, Array4< Real const > const &etay, Array4< Real const > const &kapy, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mltensor_cross_terms_fx (Box const &box, Array4< Real > const &fx, Array4< Real const > const &vel, Array4< Real const > const &etax, Array4< Real const > const &kapx, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, Array4< Real const > const &bvxlo, Array4< Real const > const &bvxhi, Array2D< BoundCond, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bct, Dim3 const &dlo, Dim3 const &dhi) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mltensor_cross_terms_fy (Box const &box, Array4< Real > const &fy, Array4< Real const > const &vel, Array4< Real const > const &etay, Array4< Real const > const &kapy, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, Array4< Real const > const &bvylo, Array4< Real const > const &bvyhi, Array2D< BoundCond, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bct, Dim3 const &dlo, Dim3 const &dhi) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mltensor_cross_terms (Box const &box, Array4< Real > const &Ax, Array4< Real const > const &fx, Array4< Real const > const &fy, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, Real bscalar) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mltensor_cross_terms_os (Box const &box, Array4< Real > const &Ax, Array4< Real const > const &fx, Array4< Real const > const &fy, Array4< int const > const &osm, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, Real bscalar) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mltensor_vel_grads_fx (Box const &box, Array4< Real > const &fx, Array4< Real const > const &vel, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mltensor_vel_grads_fy (Box const &box, Array4< Real > const &fy, Array4< Real const > const &vel, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mltensor_vel_grads_fx (Box const &box, Array4< Real > const &fx, Array4< Real const > const &vel, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, Array4< Real const > const &bvxlo, Array4< Real const > const &bvxhi, Array2D< BoundCond, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bct, Dim3 const &dlo, Dim3 const &dhi) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mltensor_vel_grads_fy (Box const &box, Array4< Real > const &fy, Array4< Real const > const &vel, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, Array4< Real const > const &bvylo, Array4< Real const > const &bvyhi, Array2D< BoundCond, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bct, Dim3 const &dlo, Dim3 const &dhi) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mltensor_fill_edges_xlo_ylo (int const i, int const j, int const k, Dim3 const &blen, Array4< Real > const &vel, Array4< int const > const &mxlo, Array4< int const > const &mylo, Array4< Real const > const &bcvalxlo, Array4< Real const > const &bcvalylo, Array2D< BoundCond, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bct, Array2D< Real, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bcl, int inhomog, int maxorder, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, bool xlo_domain, bool ylo_domain) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mltensor_fill_edges_xhi_ylo (int const i, int const j, int const k, Dim3 const &blen, Array4< Real > const &vel, Array4< int const > const &mxhi, Array4< int const > const &mylo, Array4< Real const > const &bcvalxhi, Array4< Real const > const &bcvalylo, Array2D< BoundCond, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bct, Array2D< Real, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bcl, int inhomog, int maxorder, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, bool xhi_domain, bool ylo_domain) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mltensor_fill_edges_xlo_yhi (int const i, int const j, int const k, Dim3 const &blen, Array4< Real > const &vel, Array4< int const > const &mxlo, Array4< int const > const &myhi, Array4< Real const > const &bcvalxlo, Array4< Real const > const &bcvalyhi, Array2D< BoundCond, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bct, Array2D< Real, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bcl, int inhomog, int maxorder, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, bool xlo_domain, bool yhi_domain) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mltensor_fill_edges_xhi_yhi (int const i, int const j, int const k, Dim3 const &blen, Array4< Real > const &vel, Array4< int const > const &mxhi, Array4< int const > const &myhi, Array4< Real const > const &bcvalxhi, Array4< Real const > const &bcvalyhi, Array2D< BoundCond, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bct, Array2D< Real, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bcl, int inhomog, int maxorder, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, bool xhi_domain, bool yhi_domain) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mltensor_fill_edges_xlo_zlo (int const i, int const j, int const k, Dim3 const &blen, Array4< Real > const &vel, Array4< int const > const &mxlo, Array4< int const > const &mzlo, Array4< Real const > const &bcvalxlo, Array4< Real const > const &bcvalzlo, Array2D< BoundCond, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bct, Array2D< Real, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bcl, int inhomog, int maxorder, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, bool xlo_domain, bool zlo_domain) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mltensor_fill_edges_xhi_zlo (int const i, int const j, int const k, Dim3 const &blen, Array4< Real > const &vel, Array4< int const > const &mxhi, Array4< int const > const &mzlo, Array4< Real const > const &bcvalxhi, Array4< Real const > const &bcvalzlo, Array2D< BoundCond, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bct, Array2D< Real, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bcl, int inhomog, int maxorder, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, bool xhi_domain, bool zlo_domain) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mltensor_fill_edges_xlo_zhi (int const i, int const j, int const k, Dim3 const &blen, Array4< Real > const &vel, Array4< int const > const &mxlo, Array4< int const > const &mzhi, Array4< Real const > const &bcvalxlo, Array4< Real const > const &bcvalzhi, Array2D< BoundCond, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bct, Array2D< Real, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bcl, int inhomog, int maxorder, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, bool xlo_domain, bool zhi_domain) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mltensor_fill_edges_xhi_zhi (int const i, int const j, int const k, Dim3 const &blen, Array4< Real > const &vel, Array4< int const > const &mxhi, Array4< int const > const &mzhi, Array4< Real const > const &bcvalxhi, Array4< Real const > const &bcvalzhi, Array2D< BoundCond, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bct, Array2D< Real, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bcl, int inhomog, int maxorder, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, bool xhi_domain, bool zhi_domain) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mltensor_fill_edges_ylo_zlo (int const i, int const j, int const k, Dim3 const &blen, Array4< Real > const &vel, Array4< int const > const &mylo, Array4< int const > const &mzlo, Array4< Real const > const &bcvalylo, Array4< Real const > const &bcvalzlo, Array2D< BoundCond, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bct, Array2D< Real, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bcl, int inhomog, int maxorder, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, bool ylo_domain, bool zlo_domain) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mltensor_fill_edges_yhi_zlo (int const i, int const j, int const k, Dim3 const &blen, Array4< Real > const &vel, Array4< int const > const &myhi, Array4< int const > const &mzlo, Array4< Real const > const &bcvalyhi, Array4< Real const > const &bcvalzlo, Array2D< BoundCond, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bct, Array2D< Real, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bcl, int inhomog, int maxorder, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, bool yhi_domain, bool zlo_domain) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mltensor_fill_edges_ylo_zhi (int const i, int const j, int const k, Dim3 const &blen, Array4< Real > const &vel, Array4< int const > const &mylo, Array4< int const > const &mzhi, Array4< Real const > const &bcvalylo, Array4< Real const > const &bcvalzhi, Array2D< BoundCond, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bct, Array2D< Real, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bcl, int inhomog, int maxorder, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, bool ylo_domain, bool zhi_domain) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mltensor_fill_edges_yhi_zhi (int const i, int const j, int const k, Dim3 const &blen, Array4< Real > const &vel, Array4< int const > const &myhi, Array4< int const > const &mzhi, Array4< Real const > const &bcvalyhi, Array4< Real const > const &bcvalzhi, Array2D< BoundCond, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bct, Array2D< Real, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bcl, int inhomog, int maxorder, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, bool yhi_domain, bool zhi_domain) noexcept |
|
void | mltensor_fill_edges (Box const &vbox, Array4< Real > const &vel, Array4< int const > const &mxlo, Array4< int const > const &mylo, Array4< int const > const &mzlo, Array4< int const > const &mxhi, Array4< int const > const &myhi, Array4< int const > const &mzhi, Array4< Real const > const &bcvalxlo, Array4< Real const > const &bcvalylo, Array4< Real const > const &bcvalzlo, Array4< Real const > const &bcvalxhi, Array4< Real const > const &bcvalyhi, Array4< Real const > const &bcvalzhi, Array2D< BoundCond, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bct, Array2D< Real, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bcl, int inhomog, int maxorder, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, Dim3 const &dlo, Dim3 const &dhi) noexcept |
|
AMREX_GPU_DEVICE AMREX_FORCE_INLINE void | mltensor_fill_edges (int const bid, int const tid, int const bdim, Box const &vbox, Array4< Real > const &vel, Array4< int const > const &mxlo, Array4< int const > const &mylo, Array4< int const > const &mzlo, Array4< int const > const &mxhi, Array4< int const > const &myhi, Array4< int const > const &mzhi, Array4< Real const > const &bcvalxlo, Array4< Real const > const &bcvalylo, Array4< Real const > const &bcvalzlo, Array4< Real const > const &bcvalxhi, Array4< Real const > const &bcvalyhi, Array4< Real const > const &bcvalzhi, Array2D< BoundCond, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bct, Array2D< Real, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bcl, int inhomog, int maxorder, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, Dim3 const &dlo, Dim3 const &dhi) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | mltensor_dz_on_xface (int i, int j, int k, int n, Array4< Real const > const &vel, Real dzi) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | mltensor_dz_on_yface (int i, int j, int k, int n, Array4< Real const > const &vel, Real dzi) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | mltensor_dx_on_zface (int i, int j, int k, int n, Array4< Real const > const &vel, Real dxi) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | mltensor_dy_on_zface (int i, int j, int k, int n, Array4< Real const > const &vel, Real dyi) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mltensor_cross_terms_fz (Box const &box, Array4< Real > const &fz, Array4< Real const > const &vel, Array4< Real const > const &etaz, Array4< Real const > const &kapz, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | mltensor_dz_on_xface (int i, int j, int k, int n, Array4< Real const > const &vel, Real dzi, Array4< Real const > const &bvxlo, Array4< Real const > const &bvxhi, Array2D< BoundCond, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bct, Dim3 const &dlo, Dim3 const &dhi) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | mltensor_dz_on_yface (int i, int j, int k, int n, Array4< Real const > const &vel, Real dzi, Array4< Real const > const &bvylo, Array4< Real const > const &bvyhi, Array2D< BoundCond, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bct, Dim3 const &dlo, Dim3 const &dhi) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | mltensor_dx_on_zface (int i, int j, int k, int n, Array4< Real const > const &vel, Real dxi, Array4< Real const > const &bvzlo, Array4< Real const > const &bvzhi, Array2D< BoundCond, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bct, Dim3 const &dlo, Dim3 const &dhi) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | mltensor_dy_on_zface (int i, int j, int k, int n, Array4< Real const > const &vel, Real dyi, Array4< Real const > const &bvzlo, Array4< Real const > const &bvzhi, Array2D< BoundCond, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bct, Dim3 const &dlo, Dim3 const &dhi) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mltensor_cross_terms_fz (Box const &box, Array4< Real > const &fz, Array4< Real const > const &vel, Array4< Real const > const &etaz, Array4< Real const > const &kapz, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, Array4< Real const > const &bvzlo, Array4< Real const > const &bvzhi, Array2D< BoundCond, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bct, Dim3 const &dlo, Dim3 const &dhi) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mltensor_cross_terms (Box const &box, Array4< Real > const &Ax, Array4< Real const > const &fx, Array4< Real const > const &fy, Array4< Real const > const &fz, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, Real bscalar) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mltensor_cross_terms_os (Box const &box, Array4< Real > const &Ax, Array4< Real const > const &fx, Array4< Real const > const &fy, Array4< Real const > const &fz, Array4< int const > const &osm, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, Real bscalar) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mltensor_vel_grads_fz (Box const &box, Array4< Real > const &fz, Array4< Real const > const &vel, GpuArray< Real, AMREX_SPACEDIM > const &dxinv) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mltensor_vel_grads_fz (Box const &box, Array4< Real > const &fz, Array4< Real const > const &vel, GpuArray< Real, AMREX_SPACEDIM > const &dxinv, Array4< Real const > const &bvzlo, Array4< Real const > const &bvzhi, Array2D< BoundCond, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bct, Dim3 const &dlo, Dim3 const &dhi) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | mltensor_dy_on_xface (int i, int j, int k, int n, Array4< Real const > const &vel, Real dyi) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | mltensor_dx_on_yface (int i, int j, int k, int n, Array4< Real const > const &vel, Real dxi) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | mltensor_dy_on_xface (int i, int j, int k, int n, Array4< Real const > const &vel, Real dyi, Array4< Real const > const &bvxlo, Array4< Real const > const &bvxhi, Array2D< BoundCond, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bct, Dim3 const &dlo, Dim3 const &dhi) noexcept |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Real | mltensor_dx_on_yface (int i, int j, int k, int n, Array4< Real const > const &vel, Real dxi, Array4< Real const > const &bvylo, Array4< Real const > const &bvyhi, Array2D< BoundCond, 0, 2 *AMREX_SPACEDIM, 0, AMREX_SPACEDIM > const &bct, Dim3 const &dlo, Dim3 const &dhi) noexcept |
|
template<int N, typename T , typename M , typename P > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE int | pcg_solve (T *AMREX_RESTRICT x, T *AMREX_RESTRICT r, M const &mat, P const &precond, int maxiter, T rel_tol) |
| Preconditioned conjugate gradient solver. More...
|
|
template<class T > |
constexpr decltype(T::is_particle_tile_data) | IsParticleTileData () |
|
template<class T , class... Args> |
constexpr bool | IsParticleTileData (Args...) |
|
template<typename A , typename B , std::enable_if_t< std::is_same_v< std::remove_cv_t< A >, std::remove_cv_t< B > >, int > = 0> |
bool | isSame (A const *pa, B const *pb) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE std::uint64_t | SetParticleIDandCPU (Long id, int cpu) noexcept |
|
template<int NReal, int NInt> |
std::ostream & | operator<< (std::ostream &os, const Particle< NReal, NInt > &p) |
|
template<int NReal> |
std::ostream & | operator<< (std::ostream &os, const Particle< NReal, 0 > &p) |
|
template<int NInt> |
std::ostream & | operator<< (std::ostream &os, const Particle< 0, NInt > &p) |
|
template<int NReal = 0, int NInt = 0> |
std::ostream & | operator<< (std::ostream &os, const Particle< 0, 0 > &p) |
|
template<typename P > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | amrex_deposit_cic (P const &p, int nc, amrex::Array4< amrex::Real > const &rho, amrex::GpuArray< amrex::Real, AMREX_SPACEDIM > const &plo, amrex::GpuArray< amrex::Real, AMREX_SPACEDIM > const &dxi) |
|
template<typename P > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | amrex_deposit_particle_dx_cic (P const &p, int nc, amrex::Array4< amrex::Real > const &rho, amrex::GpuArray< amrex::Real, AMREX_SPACEDIM > const &plo, amrex::GpuArray< amrex::Real, AMREX_SPACEDIM > const &dxi, amrex::GpuArray< amrex::Real, AMREX_SPACEDIM > const &pdxi) |
|
template<class PC , class Buffer , std::enable_if_t< IsParticleContainer< PC >::value &&std::is_base_of_v< PolymorphicArenaAllocator< typename Buffer::value_type >, Buffer >, int > foo = 0> |
void | packBuffer (const PC &pc, const ParticleCopyOp &op, const ParticleCopyPlan &plan, Buffer &snd_buffer) |
|
template<class PC , class Buffer , class UnpackPolicy , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0> |
void | unpackBuffer (PC &pc, const ParticleCopyPlan &plan, const Buffer &snd_buffer, UnpackPolicy const &policy) |
|
template<class PC , class SndBuffer , class RcvBuffer , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0> |
void | communicateParticlesStart (const PC &pc, ParticleCopyPlan &plan, const SndBuffer &snd_buffer, RcvBuffer &rcv_buffer) |
|
void | communicateParticlesFinish (const ParticleCopyPlan &plan) |
|
template<class PC , class Buffer , class UnpackPolicy , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0> |
void | unpackRemotes (PC &pc, const ParticleCopyPlan &plan, Buffer &rcv_buffer, UnpackPolicy const &policy) |
|
template<class PC , class MF , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0> |
void | ParticleToMesh (PC const &pc, MF &mf, int lev, F const &f, bool zero_out_input=true) |
|
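For the single-MultiFab ParticleToMesh overload above, the user-supplied callable is invoked per particle with the destination Array4. A minimal sketch that reuses amrex_deposit_cic from this listing (pc, rho_mf, geom and the container alias MyPC are assumed to exist in the calling code):

    // Deposit one real component per particle onto level 0 with cloud-in-cell weights.
    const auto plo = geom.ProbLoArray();        // physical lower corner of the domain
    const auto dxi = geom.InvCellSizeArray();   // inverse cell sizes
    amrex::ParticleToMesh(pc, rho_mf, 0,
        [=] AMREX_GPU_DEVICE (const MyPC::ParticleType& p,
                              amrex::Array4<amrex::Real> const& rho)
        {
            amrex_deposit_cic(p, 1, rho, plo, dxi);
        });

MeshToParticle below is the mirror-image operation, gathering values from a const MultiFab back onto each particle.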
template<class PC , class MF , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0> |
void | MeshToParticle (PC &pc, MF const &mf, int lev, F const &f) |
|
Long | CountSnds (const std::map< int, Vector< char > > ¬_ours, Vector< Long > &Snds) |
|
Long | doHandShake (const std::map< int, Vector< char > > ¬_ours, Vector< Long > &Snds, Vector< Long > &Rcvs) |
|
Long | doHandShakeLocal (const std::map< int, Vector< char > > ¬_ours, const Vector< int > &neighbor_procs, Vector< Long > &Snds, Vector< Long > &Rcvs) |
|
template<class PC , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0> |
auto | ReduceSum (PC const &pc, F &&f) -> decltype(particle_detail::call_f(f, typename PC::ParticleTileType::ConstParticleTileDataType(), int())) |
| A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates over all particles on all levels. More...
|
|
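The particle reductions in this group share one usage pattern: pass the container and a callable that maps a particle to the value being reduced. A minimal sketch for ReduceSum (the component index, the alias MyPC and the final MPI combine are assumptions):

    // Sum real component 0 over all particles on all levels (local to this rank).
    using PType = MyPC::ParticleType;
    amrex::Real local_sum = amrex::ReduceSum(pc,
        [=] AMREX_GPU_DEVICE (const PType& p) noexcept -> amrex::Real
        {
            return p.rdata(0);
        });
    amrex::ParallelDescriptor::ReduceRealSum(local_sum);   // combine across MPI ranks

ReduceMax, ReduceMin, ReduceLogicalAnd and ReduceLogicalOr below take the same kind of callable; only the combining operation (and the matching ParallelDescriptor call) changes.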
template<class PC , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0> |
auto | ReduceSum (PC const &pc, int lev, F &&f) -> decltype(particle_detail::call_f(f, typename PC::ParticleTileType::ConstParticleTileDataType(), int())) |
| A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates only on the specified level. More...
|
|
template<class PC , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0> |
auto | ReduceSum (PC const &pc, int lev_min, int lev_max, F const &f) -> decltype(particle_detail::call_f(f, typename PC::ParticleTileType::ConstParticleTileDataType(), int())) |
| A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates from the specified lev_min to lev_max. More...
|
|
template<class PC , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0> |
auto | ReduceMax (PC const &pc, F &&f) -> decltype(particle_detail::call_f(f, typename PC::ParticleTileType::ConstParticleTileDataType(), int())) |
| A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates over all particles on all levels. More...
|
|
template<class PC , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0> |
auto | ReduceMax (PC const &pc, int lev, F &&f) -> decltype(particle_detail::call_f(f, typename PC::ParticleTileType::ConstParticleTileDataType(), int())) |
| A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates only on the specified level. More...
|
|
template<class PC , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0> |
auto | ReduceMax (PC const &pc, int lev_min, int lev_max, F const &f) -> decltype(particle_detail::call_f(f, typename PC::ParticleTileType::ConstParticleTileDataType(), int())) |
| A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates from the specified lev_min to lev_max. More...
|
|
template<class PC , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0> |
auto | ReduceMin (PC const &pc, F &&f) -> decltype(particle_detail::call_f(f, typename PC::ParticleTileType::ConstParticleTileDataType(), int())) |
| A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates over all particles on all levels. More...
|
|
template<class PC , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0> |
auto | ReduceMin (PC const &pc, int lev, F &&f) -> decltype(particle_detail::call_f(f, typename PC::ParticleTileType::ConstParticleTileDataType(), int())) |
| A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates only on the specified level. More...
|
|
template<class PC , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0> |
auto | ReduceMin (PC const &pc, int lev_min, int lev_max, F const &f) -> decltype(particle_detail::call_f(f, typename PC::ParticleTileType::ConstParticleTileDataType(), int())) |
| A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates from the specified lev_min to lev_max. More...
|
|
template<class PC , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0> |
bool | ReduceLogicalAnd (PC const &pc, F &&f) |
| A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates over all particles on all levels. More...
|
|
template<class PC , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0> |
bool | ReduceLogicalAnd (PC const &pc, int lev, F &&f) |
| A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates only on the specified level. More...
|
|
template<class PC , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0> |
bool | ReduceLogicalAnd (PC const &pc, int lev_min, int lev_max, F const &f) |
| A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates from the specified lev_min to lev_max. More...
|
|
template<class PC , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0> |
bool | ReduceLogicalOr (PC const &pc, F &&f) |
| A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates over all particles on all levels. More...
|
|
template<class PC , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0> |
bool | ReduceLogicalOr (PC const &pc, int lev, F &&f) |
| A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates only on the specified level. More...
|
|
template<class PC , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0> |
bool | ReduceLogicalOr (PC const &pc, int lev_min, int lev_max, F const &f) |
| A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates from the specified lev_min to lev_max. More...
|
|
template<class RD , class PC , class F , class ReduceOps , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0> |
RD::Type | ParticleReduce (PC const &pc, F &&f, ReduceOps &reduce_ops) |
| A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates over all particles on all levels. More...
|
|
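ParticleReduce above performs several reductions in a single sweep, driven by ReduceOps and ReduceData. A minimal sketch, assuming the quantity of interest is stored in real component 0 and MyPC/pc are defined in the calling code:

    // Compute a sum, a minimum and a maximum of rdata(0) in one pass (per rank).
    using PType = MyPC::SuperParticleType;
    amrex::ReduceOps<amrex::ReduceOpSum, amrex::ReduceOpMin, amrex::ReduceOpMax> reduce_ops;
    auto r = amrex::ParticleReduce<amrex::ReduceData<amrex::Real, amrex::Real, amrex::Real>>(
        pc,
        [=] AMREX_GPU_DEVICE (const PType& p) noexcept
            -> amrex::GpuTuple<amrex::Real, amrex::Real, amrex::Real>
        {
            return {p.rdata(0), p.rdata(0), p.rdata(0)};
        },
        reduce_ops);
    amrex::Real s  = amrex::get<0>(r);
    amrex::Real mn = amrex::get<1>(r);
    amrex::Real mx = amrex::get<2>(r);

As with ReduceSum, the result is local to the rank and must be combined across MPI ranks by the caller.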
template<class RD , class PC , class F , class ReduceOps , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0> |
RD::Type | ParticleReduce (PC const &pc, int lev, F &&f, ReduceOps &reduce_ops) |
| A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates only on the specified level. More...
|
|
template<class RD , class PC , class F , class ReduceOps , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0> |
RD::Type | ParticleReduce (PC const &pc, int lev_min, int lev_max, F const &f, ReduceOps &reduce_ops) |
| A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates from the specified lev_min to lev_max. More...
|
|
template<typename T_ParticleType , int NAR, int NAI> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | copyParticle (const ParticleTileData< T_ParticleType, NAR, NAI > &dst, const ConstParticleTileData< T_ParticleType, NAR, NAI > &src, int src_i, int dst_i) noexcept |
| A general single particle copying routine that can run on the GPU. More...
|
|
template<typename T_ParticleType , int NAR, int NAI> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | copyParticle (const ParticleTileData< T_ParticleType, NAR, NAI > &dst, const ParticleTileData< T_ParticleType, NAR, NAI > &src, int src_i, int dst_i) noexcept |
| A general single particle copying routine that can run on the GPU. More...
|
|
template<typename T_ParticleType , int NAR, int NAI> |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | swapParticle (const ParticleTileData< T_ParticleType, NAR, NAI > &dst, const ParticleTileData< T_ParticleType, NAR, NAI > &src, int src_i, int dst_i) noexcept |
| A general single particle swapping routine that can run on the GPU. More...
|
|
template<typename DstTile , typename SrcTile > |
void | copyParticles (DstTile &dst, const SrcTile &src) noexcept |
| Copy particles from src to dst. This version copies all the particles, writing them to the beginning of dst. More...
|
|
template<typename DstTile , typename SrcTile , typename Index , typename N , std::enable_if_t< std::is_integral_v< Index >, int > foo = 0> |
void | copyParticles (DstTile &dst, const SrcTile &src, Index src_start, Index dst_start, N n) noexcept |
| Copy particles from src to dst. This version copies n particles starting at index src_start, writing the result starting at dst_start. More...
|
|
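As a usage note for the two copyParticles overloads above (dst_tile and src_tile are assumed to be ParticleTile objects of the same container type):

    // Copy every particle in src_tile, writing to the beginning of dst_tile.
    amrex::copyParticles(dst_tile, src_tile);

    // Append the contents of src_tile to dst_tile with the ranged overload.
    const auto n_src = src_tile.numParticles();
    const auto n_dst = dst_tile.numParticles();
    dst_tile.resize(n_dst + n_src);   // assumption: the ranged overload writes into existing storage
    amrex::copyParticles(dst_tile, src_tile, 0, n_dst, n_src);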
template<typename DstTile , typename SrcTile , typename F > |
void | transformParticles (DstTile &dst, const SrcTile &src, F &&f) noexcept |
| Apply the function f to all the particles in src, writing the result to dst. This version does all the particles in src. More...
|
|
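transformParticles above visits src and writes a transformed copy into dst. In this sketch the callable is assumed to receive the destination and source tile data plus the source and destination indices, and m_aos is assumed to expose the AoS particle storage of a legacy (non-SoA) particle type:

    const amrex::ParticleReal shift = 0.1;   // hypothetical x offset
    amrex::transformParticles(dst_tile, src_tile,
        [=] AMREX_GPU_DEVICE (auto& dst, const auto& src, int src_i, int dst_i) noexcept
        {
            copyParticle(dst, src, src_i, dst_i);   // start from a verbatim copy
            dst.m_aos[dst_i].pos(0) += shift;       // then transform the copy in place
        });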
template<typename DstTile , typename SrcTile , typename Index , typename N , typename F , std::enable_if_t< std::is_integral_v< Index >, int > foo = 0> |
void | transformParticles (DstTile &dst, const SrcTile &src, Index src_start, Index dst_start, N n, F const &f) noexcept |
| Apply the function f to particles in src, writing the result to dst. This version applies the function to n particles starting at index src_start, writing the result starting at dst_start. More...
|
|
template<typename DstTile1 , typename DstTile2 , typename SrcTile , typename F > |
void | transformParticles (DstTile1 &dst1, DstTile2 &dst2, const SrcTile &src, F &&f) noexcept |
| Apply the function f to all the particles in src, writing the results to dst1 and dst2. This version does all the particles in src. More...
|
|
template<typename DstTile1 , typename DstTile2 , typename SrcTile , typename Index , typename N , typename F , std::enable_if_t< std::is_integral_v< Index >, int > foo = 0> |
void | transformParticles (DstTile1 &dst1, DstTile2 &dst2, const SrcTile &src, Index src_start, Index dst1_start, Index dst2_start, N n, F const &f) noexcept |
| Apply the function f to particles in src, writing the results to dst1 and dst2. This version applies the function to n particles starting at index src_start, writing the result starting at dst1_start and dst2_start. More...
|
|
template<typename DstTile , typename SrcTile , typename Index , typename N , std::enable_if_t< std::is_integral_v< Index >, int > foo = 0> |
Index | filterParticles (DstTile &dst, const SrcTile &src, const Index *mask) noexcept |
| Conditionally copy particles from src to dst based on the value of mask. More...
|
|
template<typename DstTile , typename SrcTile , typename Index , typename N , std::enable_if_t< std::is_integral_v< Index >, int > foo = 0> |
Index | filterParticles (DstTile &dst, const SrcTile &src, const Index *mask, Index src_start, Index dst_start, N n) noexcept |
| Conditionally copy particles from src to dst based on the value of mask. This version conditionally copies n particles starting at index src_start, writing the result starting at dst_start. More...
|
|
template<typename DstTile , typename SrcTile , typename Pred , std::enable_if_t<!std::is_pointer_v< std::decay_t< Pred >>, int > foo = 0> |
int | filterParticles (DstTile &dst, const SrcTile &src, Pred &&p) noexcept |
| Conditionally copy particles from src to dst based on a predicate. More...
|
|
template<typename DstTile , typename SrcTile , typename Pred , typename Index , typename N , std::enable_if_t<!std::is_pointer_v< std::decay_t< Pred >>, Index > nvccfoo = 0> |
Index | filterParticles (DstTile &dst, const SrcTile &src, Pred const &p, Index src_start, Index dst_start, N n) noexcept |
| Conditionally copy particles from src to dst based on a predicate. This version conditionally copies n particles starting at index src_start, writing the result starting at dst_start. More...
|
|
template<typename DstTile , typename SrcTile , typename Index , typename F , std::enable_if_t< std::is_integral_v< Index >, int > foo = 0> |
Index | filterAndTransformParticles (DstTile &dst, const SrcTile &src, Index *mask, F const &f, Index src_start, Index dst_start) noexcept |
| Conditionally copy particles from src to dst based on the value of mask. A transformation will also be applied to the particles on copy. More...
|
|
template<typename DstTile , typename SrcTile , typename Index , typename F , std::enable_if_t< std::is_integral_v< Index >, int > foo = 0> |
Index | filterAndTransformParticles (DstTile &dst, const SrcTile &src, Index *mask, F &&f) noexcept |
| Conditionally copy particles from src to dst based on the value of mask. A transformation will also be applied to the particles on copy. More...
|
|
template<typename DstTile , typename SrcTile , typename Pred , typename F , std::enable_if_t<!std::is_pointer_v< std::decay_t< Pred >>, int > foo = 0> |
int | filterAndTransformParticles (DstTile &dst, const SrcTile &src, Pred &&p, F &&f) noexcept |
| Conditionally copy particles from src to dst based on a predicate. A transformation will also be applied to the particles on copy. More...
|
|
template<typename DstTile1 , typename DstTile2 , typename SrcTile , typename Index , typename F , std::enable_if_t< std::is_integral_v< Index >, int > foo = 0> |
Index | filterAndTransformParticles (DstTile1 &dst1, DstTile2 &dst2, const SrcTile &src, Index *mask, F const &f) noexcept |
| Conditionally copy particles from src to dst1 and dst2 based on the value of mask. A transformation will also be applied to the particles on copy. More...
|
|
template<typename DstTile1 , typename DstTile2 , typename SrcTile , typename Pred , typename F , std::enable_if_t<!std::is_pointer_v< std::decay_t< Pred >>, int > foo = 0> |
int | filterAndTransformParticles (DstTile1 &dst1, DstTile2 &dst2, const SrcTile &src, Pred const &p, F &&f) noexcept |
| Conditionally copy particles from src to dst1 and dst2 based on a predicate. A transformation will also be applied to the particles on copy. More...
|
|
template<typename DstTile , typename SrcTile , typename Pred , typename F , typename Index , std::enable_if_t<!std::is_pointer_v< std::decay_t< Pred >>, Index > nvccfoo = 0> |
Index | filterAndTransformParticles (DstTile &dst, const SrcTile &src, Pred const &p, F &&f, Index src_start, Index dst_start) noexcept |
| Conditionally copy particles from src to dst based on a predicate, also applying a transformation to the particles on copy. This version reads starting at index src_start and writes the result starting at dst_start. More...
|
|
template<typename PTile , typename N , typename Index , std::enable_if_t< std::is_integral_v< Index >, int > foo = 0> |
void | gatherParticles (PTile &dst, const PTile &src, N np, const Index *inds) |
| Gathers particles from an arbitrary order into contiguous order: the particle at index inds[i] in src is copied to index i in dst. More...
|
|
template<typename PTile , typename N , typename Index , std::enable_if_t< std::is_integral_v< Index >, int > foo = 0> |
void | scatterParticles (PTile &dst, const PTile &src, N np, const Index *inds) |
| Scatters particles from contiguous order into an arbitrary order: the particle at index i in src is copied to index inds[i] in dst. More...
|
|
IntVect | computeRefFac (const ParGDBBase *a_gdb, int src_lev, int lev) |
|
Vector< int > | computeNeighborProcs (const ParGDBBase *a_gdb, int ngrow) |
|
template<class Iterator , std::enable_if_t< IsParticleIterator< Iterator >::value, int > foo = 0> |
int | numParticlesOutOfRange (Iterator const &pti, int nGrow) |
| Returns the number of particles that are more than nGrow cells from the box corresponding to the input iterator. More...
|
|
template<class Iterator , std::enable_if_t< IsParticleIterator< Iterator >::value &&!Iterator::ContainerType::ParticleType::is_soa_particle, int > foo = 0> |
int | numParticlesOutOfRange (Iterator const &pti, IntVect nGrow) |
| Returns the number of particles that are more than nGrow cells from the box corresponding to the input iterator. More...
|
|
template<class PC , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0> |
int | numParticlesOutOfRange (PC const &pc, int nGrow) |
| Returns the number of particles that are more than nGrow cells from their assigned box. More...
|
|
template<class PC , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0> |
int | numParticlesOutOfRange (PC const &pc, IntVect nGrow) |
| Returns the number of particles that are more than nGrow cells from their assigned box. More...
|
|
template<class PC , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0> |
int | numParticlesOutOfRange (PC const &pc, int lev_min, int lev_max, int nGrow) |
| Returns the number of particles that are more than nGrow cells from their assigned box. More...
|
|
template<class PC , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0> |
int | numParticlesOutOfRange (PC const &pc, int lev_min, int lev_max, IntVect nGrow) |
| Returns the number of particles that are more than nGrow cells from their assigned box. More...
|
|
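The numParticlesOutOfRange variants are mainly a debugging aid; a typical use is to assert, after a redistribute, that no particle sits outside its assigned box (pc is an assumed ParticleContainer):

    // Every particle should be within 0 ghost cells of its assigned box.
    AMREX_ALWAYS_ASSERT(amrex::numParticlesOutOfRange(pc, 0) == 0);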
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE int | getTileIndex (const IntVect &iv, const Box &box, const bool a_do_tiling, const IntVect &a_tile_size, Box &tbx) |
|
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE int | numTilesInBox (const Box &box, const bool a_do_tiling, const IntVect &a_tile_size) |
|
template<typename P > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE IntVect | getParticleCell (P const &p, amrex::GpuArray< amrex::Real, AMREX_SPACEDIM > const &plo, amrex::GpuArray< amrex::Real, AMREX_SPACEDIM > const &dxi) noexcept |
| Returns the cell index for a given particle using the provided lower bounds and cell sizes. More...
|
|
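getParticleCell maps a physical particle position to a cell index; the lower bounds and inverse cell sizes are usually taken from a Geometry object (geom and the particle p are assumptions of this sketch):

    // Which cell of this level does particle p fall in?
    const auto plo = geom.ProbLoArray();        // GpuArray<Real, AMREX_SPACEDIM>
    const auto dxi = geom.InvCellSizeArray();   // inverse cell sizes
    const amrex::IntVect iv = amrex::getParticleCell(p, plo, dxi);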
template<typename P > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE IntVect | getParticleCell (P const &p, amrex::GpuArray< amrex::Real, AMREX_SPACEDIM > const &plo, amrex::GpuArray< amrex::Real, AMREX_SPACEDIM > const &dxi, const Box &domain) noexcept |
| Returns the cell index for a given particle using the provided lower bounds, cell sizes and global domain offset. More...
|
|
template<typename PTD > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE IntVect | getParticleCell (PTD const &ptd, int i, amrex::GpuArray< amrex::Real, AMREX_SPACEDIM > const &plo, amrex::GpuArray< amrex::Real, AMREX_SPACEDIM > const &dxi, const Box &domain) noexcept |
|
template<typename P > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE int | getParticleGrid (P const &p, amrex::Array4< int > const &mask, amrex::GpuArray< amrex::Real, AMREX_SPACEDIM > const &plo, amrex::GpuArray< amrex::Real, AMREX_SPACEDIM > const &dxi, const Box &domain) noexcept |
|
template<typename P > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE bool | enforcePeriodic (P &p, amrex::GpuArray< amrex::Real, AMREX_SPACEDIM > const &plo, amrex::GpuArray< amrex::Real, AMREX_SPACEDIM > const &phi, amrex::GpuArray< amrex::ParticleReal, AMREX_SPACEDIM > const &rlo, amrex::GpuArray< amrex::ParticleReal, AMREX_SPACEDIM > const &rhi, amrex::GpuArray< int, AMREX_SPACEDIM > const &is_per) noexcept |
|
template<typename PTile , typename ParFunc > |
int | partitionParticles (PTile &ptile, ParFunc const &is_left) |
| Reorders the ParticleTile into two partitions, left [0, num_left-1] and right [num_left, ptile.numParticles()-1], and returns the number of particles in the left partition. More...
|
|
template<typename PTile > |
void | removeInvalidParticles (PTile &ptile) |
|
template<typename PTile , typename PLocator , typename CellAssignor > |
int | partitionParticlesByDest (PTile &ptile, const PLocator &ploc, CellAssignor const &assignor, const ParticleBufferMap &pmap, const GpuArray< Real, AMREX_SPACEDIM > &plo, const GpuArray< Real, AMREX_SPACEDIM > &phi, const GpuArray< ParticleReal, AMREX_SPACEDIM > &rlo, const GpuArray< ParticleReal, AMREX_SPACEDIM > &rhi, const GpuArray< int, AMREX_SPACEDIM > &is_per, int lev, int gid, int, int lev_min, int lev_max, int nGrow, bool remove_negative) |
|
template<class PC1 , class PC2 > |
bool | SameIteratorsOK (const PC1 &pc1, const PC2 &pc2) |
|
template<class PC > |
void | EnsureThreadSafeTiles (PC &pc) |
|
template<class index_type , typename F > |
void | PermutationForDeposition (Gpu::DeviceVector< index_type > &perm, index_type nitems, index_type nbins, F const &f) |
|
template<class index_type , class PTile > |
void | PermutationForDeposition (Gpu::DeviceVector< index_type > &perm, index_type nitems, const PTile &ptile, Box bx, Geometry geom, const IntVect idx_type) |
|
template<typename P > |
std::string | getDefaultCompNameReal (const int i) |
|
template<typename P > |
std::string | getDefaultCompNameInt (const int i) |
|
template<typename P > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | cic_interpolate (const P &p, amrex::GpuArray< amrex::Real, AMREX_SPACEDIM > const &plo, amrex::GpuArray< amrex::Real, AMREX_SPACEDIM > const &dxi, const amrex::Array4< amrex::Real const > &data_arr, amrex::ParticleReal *val, int M=AMREX_SPACEDIM) |
| Linearly interpolates the mesh data to the particle position from cell-centered data. More...
|
|
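cic_interpolate gathers cell-centered mesh values to the particle position with linear (cloud-in-cell) weights. A minimal per-particle sketch, where uarr is an assumed Array4<Real const> holding at least AMREX_SPACEDIM components and plo/dxi come from the Geometry as above:

    // Interpolate a cell-centered vector field to the position of particle p.
    amrex::ParticleReal v[AMREX_SPACEDIM];
    cic_interpolate(p, plo, dxi, uarr, v);   // fills v[0..AMREX_SPACEDIM-1]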
template<typename P > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | cic_interpolate_cc (const P &p, amrex::GpuArray< amrex::Real, AMREX_SPACEDIM > const &plo, amrex::GpuArray< amrex::Real, AMREX_SPACEDIM > const &dxi, const amrex::Array4< amrex::Real const > &data_arr, amrex::ParticleReal *val, int M=AMREX_SPACEDIM) |
|
template<typename P > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | cic_interpolate_nd (const P &p, amrex::GpuArray< amrex::Real, AMREX_SPACEDIM > const &plo, amrex::GpuArray< amrex::Real, AMREX_SPACEDIM > const &dxi, const amrex::Array4< amrex::Real const > &data_arr, amrex::ParticleReal *val, int M=AMREX_SPACEDIM) |
| Linearly interpolates the mesh data to the particle position from node-centered data. More...
|
|
template<typename P > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | mac_interpolate (const P &p, amrex::GpuArray< amrex::Real, AMREX_SPACEDIM > const &plo, amrex::GpuArray< amrex::Real, AMREX_SPACEDIM > const &dxi, amrex::GpuArray< amrex::Array4< amrex::Real const >, AMREX_SPACEDIM > const &data_arr, amrex::ParticleReal *val) |
| Linearly interpolates the mesh data to the particle position from face-centered data. The nth component of the data_arr array is nodal in the nth direction, and cell-centered in the others. More...
|
|
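mac_interpolate performs the same gather from face-centered (MAC) data, one Array4 per direction. A sketch assuming umac, vmac and wmac are the face-centered velocity MultiFabs and mfi is the current MFIter; the bundle is built outside the particle loop and captured into it:

    // Bundle the face-centered components, then interpolate to the particle position.
    amrex::GpuArray<amrex::Array4<amrex::Real const>, AMREX_SPACEDIM> umac_arr
        {AMREX_D_DECL(umac.const_array(mfi), vmac.const_array(mfi), wmac.const_array(mfi))};
    amrex::ParticleReal v[AMREX_SPACEDIM];
    mac_interpolate(p, plo, dxi, umac_arr, v);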
template<typename P > |
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void | linear_interpolate_to_particle (const P &p, amrex::GpuArray< amrex::Real, AMREX_SPACEDIM > const &plo, amrex::GpuArray< amrex::Real, AMREX_SPACEDIM > const &dxi, const Array4< amrex::Real const > *data_arr, amrex::ParticleReal *val, const IntVect *is_nodal, int start_comp, int ncomp, int num_arrays) |
| Linearly interpolates mesh data to the particle position. This general form can handle an arbitrary number of Array4s, each with its own staggering. More...
|
|