Block-Structured AMR Software Framework
amrex Namespace Reference

Namespaces

namespace  algoim
 
namespace  AsyncOut
 
namespace  BCType
 
namespace  BGColor
 
namespace  BinPolicy
 
namespace  Cuda
 
namespace  EB2
 
namespace  Extrapolater
 
namespace  FFT
 
namespace  FGColor
 
namespace  FileSystem
 
namespace  Font
 
namespace  Gpu
 
namespace  HostDevice
 
namespace  Lazy
 
namespace  literals
 
namespace  LongParticleIds
 
namespace  Machine
 
namespace  Math
 
namespace  MC
 
namespace  Morton
 
namespace  mpidatatypes
 
namespace  MPMD
 
namespace  NonLocalBC
 
namespace  openbc
 
namespace  OpenMP
 
namespace  ParallelAllGather
 
namespace  ParallelAllReduce
 
namespace  ParallelContext
 
namespace  ParallelDescriptor
 Parallel frontend that abstracts the functionality needed to spawn processes and handle communication.
 
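For illustration, a minimal sketch of querying rank information through ParallelDescriptor (assumes amrex::Initialize has already been called; report_ranks is a hypothetical helper):

```cpp
#include <iostream>
#include <AMReX_ParallelDescriptor.H>

void report_ranks ()   // hypothetical helper for illustration
{
    int myproc = amrex::ParallelDescriptor::MyProc();   // this rank's id
    int nprocs = amrex::ParallelDescriptor::NProcs();   // total number of ranks

    if (amrex::ParallelDescriptor::IOProcessor()) {
        // Only the designated I/O rank reaches this branch
        std::cout << "Rank " << myproc << " of " << nprocs << " is the I/O rank\n";
    }
    amrex::ParallelDescriptor::Barrier();   // synchronize all ranks
}
```
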
namespace  ParallelGather
 
namespace  ParallelReduce
 
namespace  particle_impl
 
namespace  ParticleIdCpus
 
namespace  ParticleInterpolator
 
namespace  PhysBCType
 
namespace  Reduce
 
namespace  RungeKutta
 Functions for Runge-Kutta methods.
 
namespace  Scan
 
namespace  simd
 
namespace  sundials
 
namespace  SundialsUserFun
 
namespace  system
 
namespace  VectorGrowthStrategy
 

Classes

class  AlgPartition
 
class  AlgVector
 
class  AllPrint
 Print on all processors of the default communicator. More...
 
class  AllPrintToFile
 Print on all processors of the default communicator. More...
 
class  Amr
 Manage hierarchy of levels for time-dependent AMR computations. More...
 
struct  AmrAssignGrid
 
class  AmrCore
 Provide basic functionalities to set up an AMR hierarchy. More...
 
class  AMRErrorTag
 
struct  AMRErrorTagInfo
 
class  AMReX
 
struct  AmrInfo
 
class  AmrLevel
 Virtual base class for managing individual levels. AmrLevel functions both as a container for state data on a level and also manages the advancement of data in time. More...
 
class  AmrMesh
 
class  AmrParGDB
 
class  AmrParticleContainer_impl
 
class  AmrParticleLocator
 
class  AmrTracerParticleContainer
 
class  Any
 
class  Arena
 A virtual base class for objects that manage their own dynamic memory allocation. More...
 
class  ArenaAllocator
 
struct  ArenaAllocatorBase
 
struct  ArenaInfo
 
struct  ArenaWrapper
 
struct  Array1D
 
struct  Array2D
 
struct  Array3D
 
struct  Array4BoxOffsetTag
 
struct  Array4BoxOrientationTag
 
struct  Array4BoxTag
 
struct  Array4BoxValTag
 
struct  Array4CopyTag
 
struct  Array4MaskCopyTag
 
struct  Array4PairTag
 
struct  Array4Tag
 
struct  ArrayND
 A multidimensional array accessor. More...
 
class  ArrayOfStructs
 
struct  AssignGrid
 
struct  AssignGridFilter
 
class  AsyncArenaAllocator
 
struct  AsyncArenaWrapper
 
class  AuxBoundaryData
 
class  BackgroundThread
 
class  BArena
 A Concrete Class for Dynamic Memory Management. This is the simplest dynamic memory management class derived from Arena. It makes calls to std::malloc and std::free. More...
 
class  BaseFab
 A FortranArrayBox(FAB)-like object. More...
 
class  BCRec
 Boundary Condition Records. Necessary information and functions for computing boundary conditions. More...
 
struct  BinIterator
 
struct  BinMapper
 
struct  BLBackTrace
 
class  BLBTer
 
struct  BlockMutex
 
class  BLProfiler
 
class  BndryDataT
 A BndryData stores and manipulates boundary data information on each side of each box in a BoxArray. More...
 
class  BndryFuncArray
 This version calls function working on array. More...
 
class  BndryRegisterT
 A BndryRegister organizes FabSets bounding each grid in a BoxArray. A FabSet is maintained for each boundary orientation, as well as the BoxArray domain of definition. More...
 
class  BoundCond
 Maintain an identifier for boundary condition types. More...
 
class  BoxArray
 A collection of Boxes stored in an Array. More...
 
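A minimal sketch of the usual construction pattern, assuming a 3D build (make_grids is a hypothetical helper):

```cpp
#include <AMReX_Box.H>
#include <AMReX_BoxArray.H>

amrex::BoxArray make_grids ()   // hypothetical helper for illustration
{
    // Cover the index space (0,0,0)-(127,127,127) and chop it into 32^3 grids
    amrex::Box domain(amrex::IntVect(0), amrex::IntVect(127));
    amrex::BoxArray ba(domain);
    ba.maxSize(32);   // ba now holds 64 disjoint boxes of up to 32^3 cells
    return ba;
}
```
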
class  BoxConverter
 
class  BoxDomain
 A List of Disjoint Boxes. More...
 
struct  BoxIndexerND
 
struct  BoxIndexerND< 1 >
 
class  BoxIteratorND
 Iterates through the IntVects of a Box. More...
 
class  BoxList
 A class for managing a List of Boxes that share a common IndexType. This class implements operations for sets of Boxes. This is a concrete class, not a polymorphic one. More...
 
class  BoxND
 A Rectangular Domain on an Integer Lattice. More...
 
class  CArena
 A Concrete Class for Dynamic Memory Management using first fit. This is a coalescing memory manager. It allocates (possibly) large chunks of heap space and apportions it out as requested. It merges together neighboring chunks on each free(). More...
 
class  CellBilinear
 Bilinear interpolation on cell centered data. More...
 
class  CellConservativeLinear
 Linear conservative interpolation on cell centered data. More...
 
class  CellConservativeProtected
 Linear conservative interpolation on cell-centered data with protection against under- and overshoots. More...
 
class  CellConservativeQuartic
 Conservative quartic interpolation on cell averaged data. More...
 
struct  CellData
 
struct  CellIndexEnum
 Type for defining CellIndex so that all IndexTypeND with different dimensions have the same CellIndex type. More...
 
class  CellQuadratic
 Quadratic interpolation on cell centered data. More...
 
class  CellQuartic
 Quartic interpolation on cell centered data. More...
 
class  Cluster
 A cluster of tagged cells. More...
 
class  ClusterList
 A list of Cluster objects. More...
 
struct  CommRecvBufTag
 
struct  CommSendBufTag
 
struct  CompileTimeOptions
 
struct  Conjunction
 Logical traits let us combine multiple type requirements in one enable_if_t clause. More...
 
struct  Conjunction< B1 >
 
struct  Conjunction< B1, Bn... >
 
struct  ConstParticleCPUWrapper
 
struct  ConstParticleIDWrapper
 
struct  ConstParticleTileData
 
struct  ConstSoAParticle
 
class  CoordSys
 Coordinate System. More...
 
class  CpuBndryFuncFab
 This cpu version calls function working on FArrayBox. More...
 
struct  CSR
 
struct  CsrSorted
 Sorted CSR means for each row the column indices are sorted. More...
 
struct  CsrValid
 Valid CSR means all entries are valid. It may be sorted or unsorted. More...
 
struct  CsrView
 
class  CutFab
 
struct  DataAllocator
 
struct  DataDeleter
 
struct  DataLayoutPolicy
 
struct  DataLayoutPolicy< ContainerType, ParticleType< Types... >, DataLayout::AoS >
 
struct  DataLayoutPolicy< ContainerType, ParticleType< Types... >, DataLayout::SoA >
 
struct  DataLayoutPolicyRaw
 
struct  DataLayoutPolicyRaw< ParticleType< Types... >, DataLayout::AoS >
 
struct  DataLayoutPolicyRaw< ParticleType< Types... >, DataLayout::SoA >
 
struct  DefaultAssignor
 
class  DefaultFabFactory
 
struct  DefinitelyNotHostRunnable
 
struct  DenseBinIteratorFactory
 
class  DenseBins
 A container for storing items in a set of bins. More...
 
class  DeriveList
 A list of DeriveRecs. More...
 
class  DeriveRec
 Derived Type Record. More...
 
class  DescriptorList
 
struct  DestComp
 
class  DeviceArenaAllocator
 
struct  DeviceArenaWrapper
 
struct  Dim3
 
struct  Disjunction
 
struct  Disjunction< B1 >
 
struct  Disjunction< B1, Bn... >
 
class  distFcnElement2d
 
class  DistributionMapping
 Calculates the distribution of FABs to MPI processes. More...
 
struct  Divides
 
struct  DynamicTiling
 
class  EBCellConservativeLinear
 
class  EBCellFlag
 
class  EBCellFlagFab
 
struct  EBData
 
class  EBDataCollection
 
class  EBFArrayBox
 
class  EBFArrayBoxFactory
 
class  EBFluxRegister
 
class  EBMFCellConsLinInterp
 
class  EBToPVD
 
class  EdgeFluxRegister
 
class  ErrorList
 A List of ErrorRecs. More...
 
class  ErrorRec
 Error Record. More...
 
class  expect
 
class  FabArray
 An Array of FortranArrayBox(FAB)-like Objects. More...
 
class  FabArrayBase
 Base class for FabArray. More...
 
class  FabArrayCopyDescriptor
 This class orchestrates filling a destination fab of size destFabBox from fabarray on the local processor (myProc). More...
 
class  FabArrayId
 
struct  FabCopyDescriptor
 
struct  FabCopyTag
 
struct  FabDataType
 
struct  FabDataType< T, std::enable_if_t< IsMultiFabLike_v< T > > >
 
struct  FabDataType< T, std::enable_if_t< IsMultiFabLike_v< typename T::value_type > > >
 
class  FabFactory
 
struct  FabFillNoOp
 
struct  FabInfo
 
class  FABio
 A Class Facilitating I/O for Fabs. More...
 
class  FABio_8bit
 
class  FABio_ascii
 
class  FABio_binary
 
class  FabSetIter
 
class  FabSetT
 A FabSet is a group of FArrayBox's. The grouping is designed specifically to represent regions along the boundary of Box's, and is used to implement boundary conditions for discretized partial differential equations. More...
 
class  FaceConservativeLinear
 Bilinear tangential interpolation / linear normal interpolation of face data. More...
 
class  FaceDivFree
 Divergence-preserving interpolation on face centered data. More...
 
class  FaceLinear
 Piecewise constant tangential interpolation / linear normal interpolation of face data. More...
 
class  FArrayBox
 A Fortran Array of REALs. More...
 
struct  FatPtr
 
struct  FBData
 
class  FEIntegrator
 
class  FillBoxId
 
class  FillPatcher
 FillPatcher is for filling a fine level MultiFab/FabArray. More...
 
class  FillPatchIterator
 
class  FillPatchIteratorHelper
 
struct  FilterPositiveID
 
struct  FilterVirt
 
class  FluxRegister
 Flux Register. More...
 
class  ForkJoin
 
class  FPC
 A Collection of Floating-Point Constants Supporting FAB I/O. More...
 
class  Geometry
 Rectangular problem domain geometry. More...
 
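A minimal sketch of constructing a Geometry for a unit cube, periodic in all directions (make_geometry is a hypothetical helper):

```cpp
#include <AMReX_Geometry.H>

amrex::Geometry make_geometry (const amrex::Box& domain)   // hypothetical helper
{
    // Physical extent [0,1] in each direction, Cartesian coordinates
    amrex::RealBox rb(AMREX_D_DECL(0.,0.,0.), AMREX_D_DECL(1.,1.,1.));
    amrex::Array<int,AMREX_SPACEDIM> is_periodic{AMREX_D_DECL(1,1,1)};
    return amrex::Geometry(domain, rb, amrex::CoordSys::cartesian, is_periodic);
}
```
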
struct  GeometryData
 
struct  GetBucket
 
struct  GetParticleBin
 
struct  GetPID
 
struct  GetSendBufferOffset
 
class  GMRES
 GMRES. More...
 
class  GMRES_MV
 
class  GMRESMLMGT
 Solve using GMRES with multigrid as preconditioner. More...
 
struct  GPUable
 
class  GpuArray
 Fixed-size array that can be used on GPU. More...
 
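Because GpuArray is trivially copyable, it can be used freely in host and device code; a minimal sketch (cell_volume is a hypothetical helper):

```cpp
#include <AMReX_Array.H>
#include <AMReX_GpuQualifiers.H>

// Usable in both host and device code
AMREX_GPU_HOST_DEVICE
amrex::Real cell_volume (amrex::GpuArray<amrex::Real,3> const& dx)
{
    return dx[0] * dx[1] * dx[2];
}
```
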
class  GpuBndryFuncFab
 
struct  GpuComplex
 A host / device complex number type, because std::complex doesn't work in device code with Cuda yet. More...
 
class  GpuTuple
 GPU-compatible tuple. More...
 
struct  GpuTupleElement
 
struct  GpuTupleElement< 0, GpuTuple< Head, Tail... > >
 
struct  GpuTupleElement< I, GpuTuple< Head, Tail... > >
 
struct  GpuTupleSize
 
struct  GpuTupleSize< GpuTuple< Ts... > >
 
struct  HasAtomicAdd
 
struct  HasAtomicAdd< double >
 
struct  HasAtomicAdd< float >
 
struct  HasAtomicAdd< int >
 
struct  HasAtomicAdd< long >
 
struct  HasAtomicAdd< unsigned int >
 
struct  HasAtomicAdd< unsigned long long >
 
struct  HasMultiComp
 
class  Hypre
 
class  HypreABecLap
 
class  HypreABecLap2
 
class  HypreABecLap3
 
class  HypreIJIface
 
class  HypreMLABecLap
 
class  HypreNodeLap
 
class  HypreSolver
 Solve Ax = b using HYPRE's generic IJ matrix format where A is a sparse matrix specified using the compressed sparse row (CSR) format. More...
 
class  IArrayBox
 A Fortran Array of ints. More...
 
class  IFABio
 
class  iMultiFab
 A Collection of IArrayBoxes. More...
 
class  IndexTypeND
 Cell-Based or Node-Based Indices. More...
 
class  IntDescriptor
 A Descriptor of the Long Integer type. More...
 
class  IntegratorBase
 
struct  IntegratorOps
 
struct  IntegratorOps< T, std::enable_if_t< std::is_base_of_v< amrex::ParticleContainerBase, T > > >
 
struct  IntegratorOps< T, std::enable_if_t< std::is_same_v< amrex::MultiFab, T > > >
 
struct  IntegratorOps< T, std::enable_if_t< std::is_same_v< amrex::Vector< amrex::MultiFab >, T > > >
 
class  InterpBase
 
class  InterpBndryDataT
 An InterpBndryData object adds to a BndryData object the ability to manipulate and set the data stored in the boundary cells. More...
 
class  InterpFaceRegister
 InterpFaceRegister is a coarse/fine boundary register for interpolation of face data at the coarse/fine boundary. More...
 
class  Interpolater
 Virtual base class for interpolaters. More...
 
class  InterpolaterBoxCoarsener
 
class  IntVectND
 An Integer Vector in dim-Dimensional Space. More...
 
class  IOFormatSaver
 
class  IParser
 
struct  IParserExecutor
 
struct  is_soa_particle
 
struct  IsAddAssignable
 
struct  IsAddAssignable< T, std::void_t< decltype(std::declval< T & >()+=std::declval< T >())> >
 
struct  IsAlgVector
 
struct  IsAlgVector< V, std::enable_if_t< std::is_same_v< AlgVector< typename V::value_type, typename V::allocator_type >, V > > >
 
struct  IsArenaAllocator
 
struct  IsArenaAllocator< T, std::enable_if_t< std::is_base_of_v< ArenaAllocatorBase< typename T::value_type, typename T::arena_wrapper_type >, T > > >
 
struct  IsBaseFab
 
struct  IsBaseFab< D, std::enable_if_t< std::is_base_of_v< BaseFab< typename D::value_type >, D > > >
 
struct  IsCallable
 Test if a given type T is callable with arguments of type Args... More...
 
struct  IsCallableR
 Test if a given type T is callable with arguments of type Args... More...
 
struct  IsConvertible
 Test if all the types Args... are automatically convertible to type T. More...
 
struct  IsFabArray
 
struct  IsFabArray< D, std::enable_if_t< std::is_base_of_v< FabArray< typename D::FABType::value_type >, D > > >
 
struct  IsMultiFabIterator
 
struct  IsMultiFabLike
 
struct  IsMultiFabLike< M, std::enable_if_t< IsFabArray_v< M > &&IsBaseFab_v< typename M::fab_type > > >
 
struct  IsNarrowingConversion
 
struct  IsParticleContainer
 
struct  IsParticleIterator
 
struct  IsPolymorphicArenaAllocator
 
struct  IsPolymorphicArenaAllocator< PolymorphicArenaAllocator< T > >
 
struct  IsStoreAtomic
 
struct  IsStoreAtomic< EBCellFlag >
 
class  JacobiSmoother
 
struct  KeepValidFilter
 
class  LayoutData
 A distributed object holding one item per box. More...
 
class  LevelBld
 Builds problem-specific AmrLevels. More...
 
class  LineDistFcnElement2d
 
struct  LinOpEnumType
 
struct  LogicalAnd
 
struct  LogicalOr
 
struct  LPInfo
 
class  LUSolver
 
struct  make_particle
 
struct  make_particle< T_ParticleType, std::enable_if_t< is_soa_particle< T_ParticleType >::value > >
 
class  ManagedArenaAllocator
 
struct  ManagedArenaWrapper
 
class  Mask
 
struct  Maximum
 
struct  MaybeDeviceRunnable
 
struct  MaybeHostDeviceRunnable
 
class  MemProfiler
 
struct  MemStat
 
class  MFCellBilinear
 [Bi|Tri]linear interpolation on cell centered data. More...
 
class  MFCellConsLinInterp
 Linear conservative interpolation on cell centered data. More...
 
class  MFCellConsLinMinmaxLimitInterp
 Linear conservative interpolation on cell centered data. More...
 
struct  MFInfo
 FabArray memory allocation information. More...
 
class  MFInterpolater
 
class  MFIter
 Iterator for looping over tiles and boxes of amrex::FabArray based containers. More...
 
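The canonical pattern is a tiled loop combined with ParallelFor; a minimal sketch (set_to_one is a hypothetical helper):

```cpp
#include <AMReX_MultiFab.H>
#include <AMReX_GpuLaunch.H>

void set_to_one (amrex::MultiFab& mf)   // hypothetical helper
{
    // Tiling is enabled on CPU; on GPU each iteration covers a whole box
    for (amrex::MFIter mfi(mf, amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi)
    {
        const amrex::Box& bx = mfi.tilebox();
        amrex::Array4<amrex::Real> const& a = mf.array(mfi);
        amrex::ParallelFor(bx, [=] AMREX_GPU_DEVICE (int i, int j, int k)
        {
            a(i,j,k) = 1.0;
        });
    }
}
```
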
struct  MFItInfo
 
class  MFNodeBilinear
 
class  MFPCInterp
 Piecewise constant interpolation on cell-centered data. More...
 
struct  Minimum
 
struct  Minus
 
class  MLABecLaplacianT
 
class  MLALaplacianT
 
class  MLCellABecLapT
 
class  MLCellLinOpT
 
class  MLCGSolverT
 
class  MLCurlCurl
 curl (alpha curl E) + beta E = rhs More...
 
class  MLEBABecLap
 
class  MLEBNodeFDLaplacian
 
class  MLEBTensorOp
 
class  MLLinOpT
 
struct  MLMGABCEBTag
 
struct  MLMGABCTag
 
class  MLMGBndryT
 
class  MLMGT
 
class  MLNodeABecLaplacian
 
class  MLNodeLaplacian
 
class  MLNodeLinOp
 
class  MLNodeTensorLaplacian
 
class  MLPoissonT
 
class  MLTensorOp
 
struct  MultiArray4
 
class  MultiCutFab
 
class  MultiFab
 A collection (stored as an array) of FArrayBox objects. More...
 
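A minimal construction sketch (ba is an assumed, already-built BoxArray; make_data is a hypothetical helper):

```cpp
#include <AMReX_MultiFab.H>

amrex::MultiFab make_data (amrex::BoxArray const& ba)   // hypothetical helper
{
    amrex::DistributionMapping dm(ba);   // assign each box to an MPI rank
    amrex::MultiFab mf(ba, dm, 2, 1);    // 2 components, 1 ghost cell
    mf.setVal(0.0);                      // initialize valid and ghost cells
    return mf;
}
```
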
class  MultiFabCopyDescriptor
 
class  MultiMask
 
class  MultiMaskIter
 
struct  Multiplies
 
struct  NeighborCode
 
struct  NeighborData
 
class  NeighborList
 
class  NeighborParticleContainer
 
struct  Neighbors
 
struct  NeighborUnpackPolicy
 
class  NFilesIter
 This class encapsulates writing to nfiles. More...
 
class  NodeBilinear
 Bilinear interpolation on node centered data. More...
 
struct  NullInterpHook
 
struct  NumComps
 
class  OpenBCSolver
 Open Boundary Poisson Solver. More...
 
class  Orientation
 Encapsulation of the Orientation of the Faces of a Box. More...
 
class  OrientationIter
 An Iterator over the Orientation of Faces of a Box. More...
 
class  ParConstIter_impl
 
struct  ParCsr
 GPU-ready non-owning CSR data container. More...
 
class  PArena
 This arena uses the CUDA stream-ordered memory allocator if available. If not, it falls back to The_Arena(). More...
 
class  ParGDB
 Used for non-Amr particle code. More...
 
class  ParGDBBase
 
class  ParIter_impl
 
class  ParIterBase_impl
 
class  ParmParse
 Parse Parameters From Command Line and Input Files. More...
 
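A minimal sketch of reading runtime parameters (the "prob" prefix and key names are made up for illustration):

```cpp
#include <AMReX_ParmParse.H>

void read_inputs ()   // hypothetical helper
{
    amrex::ParmParse pp("prob");    // reads keys prefixed with "prob."
    int n_cell = 64;                // default used if the key is absent
    pp.query("n_cell", n_cell);     // optional: inputs line "prob.n_cell = 128"
    amrex::Real cfl;
    pp.get("cfl", cfl);             // required: aborts if prob.cfl is missing
}
```
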
class  Parser
 
struct  ParserExecutor
 
struct  Particle
 The struct used to store particles. More...
 
struct  ParticleArray
 
struct  ParticleArrayAccessor
 
struct  ParticleBase
 
struct  ParticleBase< T, 0, 0 >
 
struct  ParticleBase< T, 0, NInt >
 
struct  ParticleBase< T, NReal, 0 >
 
class  ParticleBufferMap
 
struct  ParticleCommData
 A struct used for communicating particle data across processes during multi-level operations. More...
 
class  ParticleContainer_impl
 A distributed container for Particles sorted onto the levels, grids, and tiles of a block-structured AMR hierarchy. More...
 
class  ParticleContainerBase
 
struct  ParticleCopyOp
 
struct  ParticleCopyPlan
 
struct  ParticleCPUWrapper
 
struct  ParticleIDWrapper
 
struct  ParticleInitType
 A struct used to pass initial data into the various Init methods of the particle container. The data should be initialized in the order: real struct data, int struct data, real array data, int array data. If fewer components are specified than the template parameters allow for a given component type, the extra values are set to zero; if more components are specified, it is a compile-time error. More...
 
class  ParticleLocator
 
struct  ParticleLocData
 A struct used for storing a particle's position in the AMR hierarchy. More...
 
struct  ParticleTile
 
struct  ParticleTileData
 
struct  PCData
 
class  PCInterp
 Piecewise Constant interpolation on cell centered data. More...
 
class  Periodicity
 This provides the length of the period for periodic domains; 0 means not periodic in that direction. It is assumed that the periodic domain starts at index 0. More...
 
class  PETScABecLap
 
class  PhysBCFunct
 
class  PhysBCFunctNoOp
 
class  PhysBCFunctUseCoarseGhost
 
class  PinnedArenaAllocator
 
struct  PinnedArenaWrapper
 
class  PlotFileData
 
class  PlotFileDataImpl
 
struct  Plus
 
class  PODVector
 Dynamically allocated vector for trivially copyable data. More...
 
class  PolymorphicArenaAllocator
 
struct  PolymorphicArenaWrapper
 
struct  PolymorphicArray4
 
class  Print
 This class provides the user with a few print options. More...
 
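A minimal sketch contrasting Print with AllPrint (report is a hypothetical helper):

```cpp
#include <AMReX_Print.H>
#include <AMReX_ParallelDescriptor.H>

void report (int step, double dt)   // hypothetical helper
{
    // Print() writes only on the I/O processor of the default communicator
    amrex::Print() << "step " << step << ", dt = " << dt << "\n";
    // AllPrint() writes on every rank
    amrex::AllPrint() << "rank " << amrex::ParallelDescriptor::MyProc() << " done\n";
}
```
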
class  PrintToFile
 This class prints to a file with a given base name. More...
 
struct  RandomEngine
 
class  RealBox
 A Box with real dimensions. More...
 
class  RealDescriptor
 A Descriptor of the Real Type. More...
 
class  RealVectND
 A Real vector in dim-dimensional space. More...
 
struct  RedistributeUnpackPolicy
 
class  ReduceData
 
struct  ReduceOpLogicalAnd
 
struct  ReduceOpLogicalOr
 
struct  ReduceOpMax
 
struct  ReduceOpMin
 
class  ReduceOps
 
struct  ReduceOpSum
 
class  ref_wrapper
 
class  RKIntegrator
 
struct  RunOnGpu
 
struct  RunOnGpu< ArenaAllocator< T > >
 
struct  RunOnGpu< AsyncArenaAllocator< T > >
 
struct  RunOnGpu< DeviceArenaAllocator< T > >
 
struct  RunOnGpu< ManagedArenaAllocator< T > >
 
struct  Same
 
struct  Same< T, U >
 
class  SArena
 A STREAM-ordered memory arena. More...
 
struct  SIMDindex
 
struct  SmallMatrix
 Matrix class with compile-time size. More...
 
struct  SoAParticle
 
struct  SoAParticleBase
 
struct  SparseBinIteratorFactory
 
class  SparseBins
 A container for storing items in a set of bins using "sparse" storage. More...
 
class  SplineDistFcnElement2d
 
class  SpMatrix
 
struct  SrcComp
 
struct  Stack
 
class  StateData
 Current and previous level-time data. More...
 
class  StateDataPhysBCFunct
 
class  StateDescriptor
 Attributes of StateData. More...
 
class  STLtools
 
class  StreamRetry
 
struct  StructOfArrays
 
class  SundialsIntegrator
 
struct  SundialsUserData
 
struct  Table1D
 
struct  Table2D
 
struct  Table3D
 
struct  Table4D
 
class  TableData
 Multi-dimensional array class. More...
 
class  TagBox
 Tagged cells in a Box. More...
 
class  TagBoxArray
 An array of TagBoxes. More...
 
struct  TagVector
 
struct  TheFaArenaDeleter
 
struct  ThisParticleTileHasNoAoS
 
struct  ThisParticleTileHasNoParticleVector
 
struct  TileSize
 
class  TimeIntegrator
 
class  TinyProfiler
 A simple profiler that returns basic performance information (e.g. min, max, and average running time) More...
 
class  TinyProfileRegion
 
class  TracerParticleContainer
 
struct  TransformerGhost
 
struct  TransformerVirt
 
struct  TypeArray
 
struct  TypeList
 Struct for holding types. More...
 
struct  ValLocPair
 
class  Vector
 This class is a thin wrapper around std::vector. Unlike std::vector, Vector::operator[] provides bounds checking when compiled with DEBUG=TRUE. More...
 
struct  VectorTag
 
class  VisMF
 File I/O for FabArray<FArrayBox>. Wrapper class for reading/writing FabArray<FArrayBox> objects to disk in various "smart" ways. More...
 
class  VisMFBuffer
 
struct  VoidCopyTag
 
struct  XDim3
 
class  YAFluxRegisterT
 

Typedefs

using DeriveFunc = void(*)(amrex::Real *data, const int &, const int &, const int &, const int &, const int &, const int &, const int *nvar, const amrex::Real *compdat, const int &, const int &, const int &, const int &, const int &, const int &, const int *ncomp, const int *lo, const int *hi, const int *domain_lo, const int *domain_hi, const amrex::Real *delta, const amrex::Real *xlo, const amrex::Real *time, const amrex::Real *dt, const int *bcrec, const int *level, const int *grid_no)
 Type of extern "C" function called by DeriveRec to compute derived quantity.
 
using DeriveFunc3D = void(*)(amrex::Real *data, const int *dlo, const int *dhi, const int *nvar, const amrex::Real *compdat, const int *clo, const int *chi, const int *ncomp, const int *lo, const int *hi, const int *domain_lo, const int *domain_hi, const amrex::Real *delta, const amrex::Real *xlo, const amrex::Real *time, const amrex::Real *dt, const int *bcrec, const int *level, const int *grid_no)
 This is dimension agnostic. For example, dlo always has three elements.
 
using DeriveFuncFab = std::function< void(const amrex::Box &bx, amrex::FArrayBox &derfab, int dcomp, int ncomp, const amrex::FArrayBox &datafab, const amrex::Geometry &geomdata, amrex::Real time, const int *bcrec, int level)>
 
using DeriveFuncMF = std::function< void(amrex::MultiFab &der_mf, int dcomp, int ncomp, const amrex::MultiFab &data_mf, const amrex::Geometry &geomdata, amrex::Real time, const int *bcrec, int level)>
 
using BndryFuncFabDefault = std::function< void(Box const &bx, FArrayBox &data, int dcomp, int numcomp, Geometry const &geom, Real time, const Vector< BCRec > &bcr, int bcomp, int scomp)>
 
template<int T_NStructReal, int T_NStructInt = 0, int T_NArrayReal = 0, int T_NArrayInt = 0, template< class > class Allocator = DefaultAllocator, class CellAssignor = DefaultAssignor>
using AmrParticleContainer = AmrParticleContainer_impl< Particle< T_NStructReal, T_NStructInt >, T_NArrayReal, T_NArrayInt, Allocator, CellAssignor >
 
using ErrorFuncDefault = void(*)(int *tag, const int &, const int &, const int &, const int &, const int &, const int &, const int *tagval, const int *clearval, amrex::Real *data, const int &, const int &, const int &, const int &, const int &, const int &, const int *lo, const int *hi, const int *nvar, const int *domain_lo, const int *domain_hi, const amrex::Real *dx, const amrex::Real *xlo, const amrex::Real *prob_lo, const amrex::Real *time, const int *level)
 Type of extern "C" function called by ErrorRec to do tagging of cells for refinement.
 
using ErrorFunc2Default = void(*)(int *tag, const int &, const int &, const int &, const int &, const int &, const int &, const int *tagval, const int *clearval, amrex::Real *data, const int &, const int &, const int &, const int &, const int &, const int &, const int *lo, const int *hi, const int *nvar, const int *domain_lo, const int *domain_hi, const amrex::Real *dx, const int *level, const amrex::Real *avg)
 
using ErrorFunc3DDefault = void(*)(int *tag, const int *tlo, const int *thi, const int *tagval, const int *clearval, amrex::Real *data, const int *data_lo, const int *data_hi, const int *lo, const int *hi, const int *nvar, const int *domain_lo, const int *domain_hi, const amrex::Real *dx, const amrex::Real *xlo, const amrex::Real *prob_lo, const amrex::Real *time, const int *level)
 Dimension agnostic version that always has three elements. Note that this is only implemented for the ErrorFunc class, not ErrorFunc2.
 
using PTR_TO_VOID_FUNC = void(*)()
 
using ErrorHandler = void(*)(const char *)
 
template<class T , std::size_t N>
using Array = std::array< T, N >
 
using RealArray = Array< Real, 3 >
 
using IntArray = Array< int, 3 >
 
template<typename T >
using Array4 = ArrayND< T, 4, true >
 
using Box = BoxND< 3 >
 Box is an alias for amrex::BoxND instantiated with AMREX_SPACEDIM.
 
using IntVect = IntVectND< 3 >
 IntVect is an alias for amrex::IntVectND instantiated with AMREX_SPACEDIM.
 
using IndexType = IndexTypeND< 3 >
 IndexType is an alias for amrex::IndexTypeND instantiated with AMREX_SPACEDIM.
 
using BoxIndexer = BoxIndexerND< 3 >
 
using BndryBATransformer = BATransformer
 
using BoxIterator = BoxIteratorND< 3 >
 
using DMRef = DistributionMapping::Ref
 
using RuntimeError = std::runtime_error
 
using TheFaArenaPointer = std::unique_ptr< char, TheFaArenaDeleter >
 
using cMultiFab = FabArray< BaseFab< GpuComplex< Real > > >
 
using FArrayBoxFactory = DefaultFabFactory< FArrayBox >
 
template<class T >
using DefaultAllocator = amrex::ArenaAllocator< T >
 
using gpuStream_t = cudaStream_t
 
using gpuDeviceProp_t = cudaDeviceProp
 
using gpuError_t = cudaError_t
 
using Long = amrex_long
 
using ULong = amrex_ulong
 Unsigned integer type guaranteed to be wider than unsigned int.
 
using MultiFabId = FabArrayId
 
using fMultiFab = FabArray< BaseFab< float > >
 
using RealVect = RealVectND< 3 >
 
using BndryFuncDefault = void(*)(Real *data, const int &, const int &, const int &, const int &, const int &, const int &, const int *dom_lo, const int *dom_hi, const Real *dx, const Real *grd_lo, const Real *time, const int *bc)
 
using BndryFunc3DDefault = void(*)(Real *data, const int *lo, const int *hi, const int *dom_lo, const int *dom_hi, const Real *dx, const Real *grd_lo, const Real *time, const int *bc)
 
using UserFillBox = void(*)(Box const &bx, Array4< Real > const &dest, int dcomp, int numcomp, GeometryData const &geom, Real time, const BCRec *bcr, int bcomp, int orig_comp)
 
using randState_t = curandState_t
 
using randGenerator_t = curandGenerator_t
 
using Real = amrex_real
 Floating Point Type for Fields.
 
using ParticleReal = amrex_particle_real
 Floating Point Type for Particles.
 
template<class T , int N, int StartIndex = 0>
using SmallVector = SmallMatrix< T, N, 1, Order::F, StartIndex >
 
template<class T , int N, int StartIndex = 0>
using SmallRowVector = SmallMatrix< T, 1, N, Order::F, StartIndex >
 
template<class... Ts>
using Tuple = std::tuple< Ts... >
 
template<std::size_t I, typename T >
using TypeAt = typename detail::TypeListGet< I, T >::type
 Type at position I of a TypeList.
 
template<template< class... > class TParam, class... Types>
using TypeMultiplier = TypeAt< 0, decltype(detail::TApply< TParam >((TypeList<>{}+...+detail::SingleTypeMultiplier(std::declval< Types >()))))>
 Return the first template argument with the later arguments applied to it. Types of the form T[N] are expanded to T, T, T, T, ... (N times with N >= 1). Types of the form TypeArray<T,N> are expanded to T, T, T, T, ... (N times with N >= 0).
 
template<bool B, class T = void>
using EnableIf_t = std::enable_if_t< B, T >
 
template<template< class... > class Op, class... Args>
using IsDetected = typename detail::Detector< detail::Nonesuch, void, Op, Args... >::value_t
 
template<template< class... > class Op, class... Args>
using Detected_t = typename detail::Detector< detail::Nonesuch, void, Op, Args... >::type
 
template<class Default , template< class... > class Op, class... Args>
using DetectedOr = typename detail::Detector< Default, void, Op, Args... >::type
 
template<class Expected , template< typename... > class Op, class... Args>
using IsDetectedExact = std::is_same< Expected, Detected_t< Op, Args... > >
 
template<class B >
using Negation = std::integral_constant< bool, !bool(B::value)>
 
using MaxResSteadyClock = std::conditional_t< std::chrono::high_resolution_clock::is_steady, std::chrono::high_resolution_clock, std::chrono::steady_clock >
 
template<typename K , typename V >
using KeyValuePair = ValLocPair< K, V >
 
using BndryData = BndryDataT< MultiFab >
 
using fBndryData = BndryDataT< fMultiFab >
 
using BndryRegister = BndryRegisterT< MultiFab >
 
using fBndryRegister = BndryRegisterT< fMultiFab >
 
using FabSet = FabSetT< MultiFab >
 
using fFabSet = FabSetT< fMultiFab >
 
using InterpBndryData = InterpBndryDataT< MultiFab >
 
using fInterpBndryData = InterpBndryDataT< fMultiFab >
 
using YAFluxRegister = YAFluxRegisterT< MultiFab >
 
using GMRESMLMG = GMRESMLMGT< MultiFab >
 
using MLABecLaplacian = MLABecLaplacianT< MultiFab >
 
using MLALaplacian = MLALaplacianT< MultiFab >
 
using MLCellABecLap = MLCellABecLapT< MultiFab >
 
using MLCellLinOp = MLCellLinOpT< MultiFab >
 
using MLCGSolver = MLCGSolverT< MultiFab >
 
using MLLinOp = MLLinOpT< MultiFab >
 
using MLMG = MLMGT< MultiFab >
 
using MLMGBndry = MLMGBndryT< MultiFab >
 
using MLPoisson = MLPoissonT< MultiFab >
 
template<int T_NStructReal, int T_NStructInt = 0, int T_NArrayReal = 0, int T_NArrayInt = 0, template< class > class Allocator = DefaultAllocator, class CellAssignor = DefaultAssignor>
using ParticleContainer = ParticleContainer_impl< Particle< T_NStructReal, T_NStructInt >, T_NArrayReal, T_NArrayInt, Allocator, CellAssignor >
 
template<bool is_const, int T_NStructReal, int T_NStructInt, int T_NArrayReal = 0, int T_NArrayInt = 0, template< class > class Allocator = DefaultAllocator, class CellAssignor = DefaultAssignor>
using ParIterBase = ParIterBase_impl< is_const, Particle< T_NStructReal, T_NStructInt >, T_NArrayReal, T_NArrayInt, Allocator, CellAssignor >
 
template<bool is_const, int T_NArrayReal = 0, int T_NArrayInt = 0, template< class > class Allocator = DefaultAllocator, class CellAssignor = DefaultAssignor>
using ParIterBaseSoA = ParIterBase_impl< is_const, SoAParticle< T_NArrayReal, T_NArrayInt >, T_NArrayReal, T_NArrayInt, Allocator, CellAssignor >
 
template<int T_NStructReal, int T_NStructInt = 0, int T_NArrayReal = 0, int T_NArrayInt = 0, template< class > class Allocator = DefaultAllocator, class CellAssignor = DefaultAssignor>
using ParConstIter = ParConstIter_impl< Particle< T_NStructReal, T_NStructInt >, T_NArrayReal, T_NArrayInt, Allocator, CellAssignor >
 
template<int T_NArrayReal, int T_NArrayInt, template< class > class Allocator = DefaultAllocator, class CellAssignor = DefaultAssignor>
using ParConstIterSoA = ParConstIter_impl< SoAParticle< T_NArrayReal, T_NArrayInt >, T_NArrayReal, T_NArrayInt, Allocator, CellAssignor >
 
template<int T_NStructReal, int T_NStructInt = 0, int T_NArrayReal = 0, int T_NArrayInt = 0, template< class > class Allocator = DefaultAllocator, class CellAssignor = DefaultAssignor>
using ParIter = ParIter_impl< Particle< T_NStructReal, T_NStructInt >, T_NArrayReal, T_NArrayInt, Allocator, CellAssignor >
 
template<int T_NArrayReal, int T_NArrayInt, template< class > class Allocator = DefaultAllocator, class CellAssignor = DefaultAssignor>
using ParIterSoA = ParIter_impl< SoAParticle< T_NArrayReal, T_NArrayInt >, T_NArrayReal, T_NArrayInt, Allocator, CellAssignor >
 
template<int T_NArrayReal, int T_NArrayInt, template< class > class Allocator = DefaultAllocator, class CellAssignor = DefaultAssignor>
using ParticleContainerPureSoA = ParticleContainer_impl< SoAParticle< T_NArrayReal, T_NArrayInt >, T_NArrayReal, T_NArrayInt, Allocator, CellAssignor >
 
using TracerParIter = ParIter< 3 >
 

Enumerations

enum  InterpEM_t { InterpE , InterpB }
 
enum struct  FPExcept : std::uint8_t {
  none = 0B0000 , invalid = 0B0001 , zero = 0B0010 , overflow = 0B0100 ,
  all = 0B0111
}
 
enum class  FabType : int {
  covered = -1 , regular = 0 , singlevalued = 1 , multivalued = 2 ,
  undefined = 100
}
 
enum  FillType { FillLocally , FillRemotely , Unfillable }
 
enum struct  RunOn { Gpu , Cpu , Device =Gpu , Host =Cpu }
 
enum  MakeType { make_alias , make_deep_copy }
 
enum struct  Order { C , F , RowMajor =C , ColumnMajor =F }
 
enum class  Direction : int { x = 0 , y = 1 , z = 2 }
 
enum class  GrowthStrategy : int { Poisson , Exact , Geometric }
 
enum struct  ButcherTableauTypes {
  User = 0 , ForwardEuler , Trapezoid , SSPRK3 ,
  RK4 , NumTypes
}
 
enum struct  IntegratorTypes { ForwardEuler = 0 , ExplicitRungeKutta , Sundials }
 
enum struct  LinOpBCType : int {
  interior = 0 , Dirichlet = 101 , Neumann = 102 , reflect_odd = 103 ,
  Marshak = 104 , SanchezPomraning = 105 , inflow = 106 , inhomogNeumann = 107 ,
  Robin = 108 , symmetry = 109 , Periodic = 200 , bogus = 1729
}
 
enum struct  EBData_t : int {
  levelset , volfrac , centroid , bndrycent ,
  bndrynorm , bndryarea , apx , apy ,
  apz , fcx , fcy , fcz ,
  ecx , ecy , ecz , cellflag
}
 
enum struct  EBSupport : int { none = 0 , basic = 1 , volume = 2 , full = 3 }
 
enum struct  HypreSolverID { BoomerAMG , SSAMG }
 
enum class  BottomSolver : int {
  Default , smoother , bicgstab , cg ,
  bicgcg , cgbicg , hypre , petsc
}
 
enum class  MLMGNormType : int { greater , bnorm , resnorm }
 
enum class  DataLayout { AoS = 0 , SoA }
 

Functions

std::ostream & operator<< (std::ostream &os, AmrMesh const &amr_mesh)
 
template<class PC , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
void ParticleToMesh (PC const &pc, const Vector< MultiFab * > &mf, int lev_min, int lev_max, F &&f, bool zero_out_input=true, bool vol_weight=true)
 
std::ostream & operator<< (std::ostream &os, const ErrorList &elst)
 
void InterpCrseFineBndryEMfield (InterpEM_t interp_type, const Array< MultiFab, 3 > &crse, Array< MultiFab, 3 > &fine, const Geometry &cgeom, const Geometry &fgeom, int ref_ratio)
 
void InterpCrseFineBndryEMfield (InterpEM_t interp_type, const Array< MultiFab const *, 3 > &crse, const Array< MultiFab *, 3 > &fine, const Geometry &cgeom, const Geometry &fgeom, int ref_ratio)
 
void FillPatchInterp (MultiFab &mf_fine_patch, int fcomp, MultiFab const &mf_crse_patch, int ccomp, int ncomp, IntVect const &ng, const Geometry &cgeom, const Geometry &fgeom, Box const &dest_domain, const IntVect &ratio, MFInterpolater *mapper, const Vector< BCRec > &bcs, int bcscomp)
 
template<typename Interp >
bool ProperlyNested (const IntVect &ratio, const IntVect &blocking_factor, int ngrow, const IndexType &boxType, Interp *mapper)
 Test if AMR grids are properly nested.
 
template<typename MF , typename BC >
std::enable_if_t< IsFabArray< MF >::value > FillPatchSingleLevel (MF &mf, IntVect const &nghost, Real time, const Vector< MF * > &smf, const Vector< Real > &stime, int scomp, int dcomp, int ncomp, const Geometry &geom, BC &physbcf, int bcfcomp)
 FillPatch with data from the current level.
 
template<typename MF , typename BC >
std::enable_if_t< IsFabArray< MF >::value > FillPatchSingleLevel (MF &mf, Real time, const Vector< MF * > &smf, const Vector< Real > &stime, int scomp, int dcomp, int ncomp, const Geometry &geom, BC &physbcf, int bcfcomp)
 FillPatch with data from the current level.
 
template<typename MF , typename BC , typename Interp , typename PreInterpHook = NullInterpHook<typename MF::FABType::value_type>, typename PostInterpHook = NullInterpHook<typename MF::FABType::value_type>>
std::enable_if_t< IsFabArray< MF >::value > FillPatchTwoLevels (MF &mf, IntVect const &nghost, Real time, const Vector< MF * > &cmf, const Vector< Real > &ct, const Vector< MF * > &fmf, const Vector< Real > &ft, int scomp, int dcomp, int ncomp, const Geometry &cgeom, const Geometry &fgeom, BC &cbc, int cbccomp, BC &fbc, int fbccomp, const IntVect &ratio, Interp *mapper, const Vector< BCRec > &bcs, int bcscomp, const PreInterpHook &pre_interp={}, const PostInterpHook &post_interp={})
 FillPatch with data from the current level and the level below.
 
template<typename MF , typename BC , typename Interp , typename PreInterpHook = NullInterpHook<typename MF::FABType::value_type>, typename PostInterpHook = NullInterpHook<typename MF::FABType::value_type>>
std::enable_if_t< IsFabArray< MF >::value > FillPatchTwoLevels (MF &mf, Real time, const Vector< MF * > &cmf, const Vector< Real > &ct, const Vector< MF * > &fmf, const Vector< Real > &ft, int scomp, int dcomp, int ncomp, const Geometry &cgeom, const Geometry &fgeom, BC &cbc, int cbccomp, BC &fbc, int fbccomp, const IntVect &ratio, Interp *mapper, const Vector< BCRec > &bcs, int bcscomp, const PreInterpHook &pre_interp={}, const PostInterpHook &post_interp={})
 FillPatch with data from the current level and the level below.
 
template<typename MF , typename BC , typename Interp , typename PreInterpHook = NullInterpHook<typename MF::FABType::value_type>, typename PostInterpHook = NullInterpHook<typename MF::FABType::value_type>>
std::enable_if_t< IsFabArray< MF >::value > FillPatchTwoLevels (Array< MF *, 3 > const &mf, IntVect const &nghost, Real time, const Vector< Array< MF *, 3 > > &cmf, const Vector< Real > &ct, const Vector< Array< MF *, 3 > > &fmf, const Vector< Real > &ft, int scomp, int dcomp, int ncomp, const Geometry &cgeom, const Geometry &fgeom, Array< BC, 3 > &cbc, const Array< int, 3 > &cbccomp, Array< BC, 3 > &fbc, const Array< int, 3 > &fbccomp, const IntVect &ratio, Interp *mapper, const Array< Vector< BCRec >, 3 > &bcs, const Array< int, 3 > &bcscomp, const PreInterpHook &pre_interp={}, const PostInterpHook &post_interp={})
 FillPatch for face variables with data from the current level and the level below. Sometimes we need to fillpatch all AMREX_SPACEDIM face MultiFabs together to satisfy certain constraints such as divergence preservation.
 
template<typename MF , typename BC , typename Interp , typename PreInterpHook = NullInterpHook<typename MF::FABType::value_type>, typename PostInterpHook = NullInterpHook<typename MF::FABType::value_type>>
std::enable_if_t< IsFabArray< MF >::value > FillPatchTwoLevels (Array< MF *, 3 > const &mf, IntVect const &nghost, Real time, const Vector< Array< MF *, 3 > > &cmf, const Vector< Real > &ct, const Vector< Array< MF *, 3 > > &fmf, const Vector< Real > &ft, int scomp, int dcomp, int ncomp, const Geometry &cgeom, const Geometry &fgeom, Array< BC, 3 > &cbc, int cbccomp, Array< BC, 3 > &fbc, int fbccomp, const IntVect &ratio, Interp *mapper, const Array< Vector< BCRec >, 3 > &bcs, int bcscomp, const PreInterpHook &pre_interp={}, const PostInterpHook &post_interp={})
 FillPatch for face variables with data from the current level and the level below. Sometimes we need to fillpatch all AMREX_SPACEDIM face MultiFabs together to satisfy certain constraints such as divergence preservation.
 
template<typename MF , typename BC , typename Interp , typename PreInterpHook = NullInterpHook<typename MF::FABType::value_type>, typename PostInterpHook = NullInterpHook<typename MF::FABType::value_type>>
std::enable_if_t< IsFabArray< MF >::value > FillPatchTwoLevels (Array< MF *, 3 > const &mf, Real time, const Vector< Array< MF *, 3 > > &cmf, const Vector< Real > &ct, const Vector< Array< MF *, 3 > > &fmf, const Vector< Real > &ft, int scomp, int dcomp, int ncomp, const Geometry &cgeom, const Geometry &fgeom, Array< BC, 3 > &cbc, int cbccomp, Array< BC, 3 > &fbc, int fbccomp, const IntVect &ratio, Interp *mapper, const Array< Vector< BCRec >, 3 > &bcs, int bcscomp, const PreInterpHook &pre_interp={}, const PostInterpHook &post_interp={})
 FillPatch for face variables with data from the current level and the level below. Sometimes we need to fillpatch all AMREX_SPACEDIM face MultiFabs together to satisfy certain constraints such as divergence preservation.
 
template<typename MF , typename BC , typename Interp , typename PreInterpHook , typename PostInterpHook >
std::enable_if_t< IsFabArray< MF >::value > FillPatchTwoLevels (MF &mf, IntVect const &nghost, Real time, const EB2::IndexSpace &index_space, const Vector< MF * > &cmf, const Vector< Real > &ct, const Vector< MF * > &fmf, const Vector< Real > &ft, int scomp, int dcomp, int ncomp, const Geometry &cgeom, const Geometry &fgeom, BC &cbc, int cbccomp, BC &fbc, int fbccomp, const IntVect &ratio, Interp *mapper, const Vector< BCRec > &bcs, int bcscomp, const PreInterpHook &pre_interp, const PostInterpHook &post_interp)
 FillPatch with data from the current level and the level below.
 
template<typename MF , typename BC , typename Interp , typename PreInterpHook , typename PostInterpHook >
std::enable_if_t< IsFabArray< MF >::value > FillPatchTwoLevels (MF &mf, Real time, const EB2::IndexSpace &index_space, const Vector< MF * > &cmf, const Vector< Real > &ct, const Vector< MF * > &fmf, const Vector< Real > &ft, int scomp, int dcomp, int ncomp, const Geometry &cgeom, const Geometry &fgeom, BC &cbc, int cbccomp, BC &fbc, int fbccomp, const IntVect &ratio, Interp *mapper, const Vector< BCRec > &bcs, int bcscomp, const PreInterpHook &pre_interp, const PostInterpHook &post_interp)
 FillPatch with data from the current level and the level below.
 
template<typename MF , typename BC , typename Interp , typename PreInterpHook = NullInterpHook<typename MF::FABType::value_type>, typename PostInterpHook = NullInterpHook<typename MF::FABType::value_type>>
std::enable_if_t< IsFabArray< MF >::value > InterpFromCoarseLevel (MF &mf, Real time, const MF &cmf, int scomp, int dcomp, int ncomp, const Geometry &cgeom, const Geometry &fgeom, BC &cbc, int cbccomp, BC &fbc, int fbccomp, const IntVect &ratio, Interp *mapper, const Vector< BCRec > &bcs, int bcscomp, const PreInterpHook &pre_interp={}, const PostInterpHook &post_interp={})
 Fill with interpolation of coarse level data.
 
template<typename MF , typename BC , typename Interp , typename PreInterpHook = NullInterpHook<typename MF::FABType::value_type>, typename PostInterpHook = NullInterpHook<typename MF::FABType::value_type>>
std::enable_if_t< IsFabArray< MF >::value > InterpFromCoarseLevel (MF &mf, IntVect const &nghost, Real time, const MF &cmf, int scomp, int dcomp, int ncomp, const Geometry &cgeom, const Geometry &fgeom, BC &cbc, int cbccomp, BC &fbc, int fbccomp, const IntVect &ratio, Interp *mapper, const Vector< BCRec > &bcs, int bcscomp, const PreInterpHook &pre_interp={}, const PostInterpHook &post_interp={})
 Fill with interpolation of coarse level data.
 
template<typename MF , typename BC , typename Interp , typename PreInterpHook = NullInterpHook<typename MF::FABType::value_type>, typename PostInterpHook = NullInterpHook<typename MF::FABType::value_type>>
std::enable_if_t< IsFabArray< MF >::value > InterpFromCoarseLevel (MF &mf, IntVect const &nghost, Real time, const EB2::IndexSpace *index_space, const MF &cmf, int scomp, int dcomp, int ncomp, const Geometry &cgeom, const Geometry &fgeom, BC &cbc, int cbccomp, BC &fbc, int fbccomp, const IntVect &ratio, Interp *mapper, const Vector< BCRec > &bcs, int bcscomp, const PreInterpHook &pre_interp={}, const PostInterpHook &post_interp={})
 Fill with interpolation of coarse level data.
 
template<typename MF , typename BC , typename Interp , typename PreInterpHook = NullInterpHook<typename MF::FABType::value_type>, typename PostInterpHook = NullInterpHook<typename MF::FABType::value_type>>
std::enable_if_t< IsFabArray< MF >::value > InterpFromCoarseLevel (Array< MF *, 3 > const &mf, Real time, const Array< MF *, 3 > &cmf, int scomp, int dcomp, int ncomp, const Geometry &cgeom, const Geometry &fgeom, Array< BC, 3 > &cbc, int cbccomp, Array< BC, 3 > &fbc, int fbccomp, const IntVect &ratio, Interp *mapper, const Array< Vector< BCRec >, 3 > &bcs, int bcscomp, const PreInterpHook &pre_interp={}, const PostInterpHook &post_interp={})
 Fill face variables with data from the coarse level. Sometimes we need to fillpatch all AMREX_SPACEDIM face MultiFabs together to satisfy certain constraints such as divergence preservation.
 
template<typename MF , typename BC , typename Interp , typename PreInterpHook = NullInterpHook<typename MF::FABType::value_type>, typename PostInterpHook = NullInterpHook<typename MF::FABType::value_type>>
std::enable_if_t< IsFabArray< MF >::value > InterpFromCoarseLevel (Array< MF *, 3 > const &mf, IntVect const &nghost, Real time, const Array< MF *, 3 > &cmf, int scomp, int dcomp, int ncomp, const Geometry &cgeom, const Geometry &fgeom, Array< BC, 3 > &cbc, int cbccomp, Array< BC, 3 > &fbc, int fbccomp, const IntVect &ratio, Interp *mapper, const Array< Vector< BCRec >, 3 > &bcs, int bcscomp, const PreInterpHook &pre_interp={}, const PostInterpHook &post_interp={})
 Fill face variables with data from the coarse level. Sometimes we need to fillpatch all AMREX_SPACEDIM face MultiFabs together to satisfy certain constraints such as divergence preservation.
 
template<typename MF , typename Interp >
std::enable_if_t< IsFabArray< MF >::value > InterpFromCoarseLevel (MF &mf, IntVect const &nghost, IntVect const &nghost_outside_domain, const MF &cmf, int scomp, int dcomp, int ncomp, const Geometry &cgeom, const Geometry &fgeom, const IntVect &ratio, Interp *mapper, const Vector< BCRec > &bcs, int bcscomp)
 Fill with interpolation of coarse level data.
 
template<typename MF >
std::enable_if_t< IsFabArray< MF >::value > FillPatchSingleLevel (MF &mf, IntVect const &nghost, Real time, const Vector< MF * > &smf, IntVect const &snghost, const Vector< Real > &stime, int scomp, int dcomp, int ncomp, const Geometry &geom)
 FillPatch with data from the current level.
 
template<typename MF , typename Interp >
std::enable_if_t< IsFabArray< MF >::value > FillPatchTwoLevels (MF &mf, IntVect const &nghost, IntVect const &nghost_outside_domain, Real time, const Vector< MF * > &cmf, const Vector< Real > &ct, const Vector< MF * > &fmf, const Vector< Real > &ft, int scomp, int dcomp, int ncomp, const Geometry &cgeom, const Geometry &fgeom, const IntVect &ratio, Interp *mapper, const Vector< BCRec > &bcs, int bcscomp)
 FillPatch with data from the current level and the level below.
 
template<typename MF , typename BC , typename Interp >
std::enable_if_t< IsFabArray< MF >::value > FillPatchNLevels (MF &mf, int level, const IntVect &nghost, Real time, const Vector< Vector< MF * > > &smf, const Vector< Vector< Real > > &st, int scomp, int dcomp, int ncomp, const Vector< Geometry > &geom, Vector< BC > &bc, int bccomp, const Vector< IntVect > &ratio, Interp *mapper, const Vector< BCRec > &bcr, int bcrcomp)
 FillPatch with data from AMR levels.
 
template<typename MF , typename Interp >
std::enable_if_t< IsFabArray< MF >::value &&!std::is_same_v< Interp, MFInterpolater > > FillPatchInterp (MF &mf_fine_patch, int fcomp, MF const &mf_crse_patch, int ccomp, int ncomp, IntVect const &ng, const Geometry &cgeom, const Geometry &fgeom, Box const &dest_domain, const IntVect &ratio, Interp *mapper, const Vector< BCRec > &bcs, int bcscomp)
 
template<typename MF >
std::enable_if_t< IsFabArray< MF >::value > FillPatchInterp (MF &mf_fine_patch, int fcomp, MF const &mf_crse_patch, int ccomp, int ncomp, IntVect const &ng, const Geometry &cgeom, const Geometry &fgeom, Box const &dest_domain, const IntVect &ratio, InterpBase *mapper, const Vector< BCRec > &bcs, int bcscomp)
 
template<typename MF , typename iMF , typename Interp >
std::enable_if_t< IsFabArray< MF >::value &&!std::is_same_v< Interp, MFInterpolater > > InterpFace (Interp *interp, MF const &mf_crse_patch, int crse_comp, MF &mf_refined_patch, int fine_comp, int ncomp, const IntVect &ratio, const iMF &solve_mask, const Geometry &crse_geom, const Geometry &fine_geom, int bcscomp, RunOn gpu_or_cpu, const Vector< BCRec > &bcs)
 
template<typename MF , typename iMF >
std::enable_if_t< IsFabArray< MF >::value > InterpFace (InterpBase *interp, MF const &mf_crse_patch, int crse_comp, MF &mf_refined_patch, int fine_comp, int ncomp, const IntVect &ratio, const iMF &solve_mask, const Geometry &crse_geom, const Geometry &fine_geom, int bccomp, RunOn gpu_or_cpu, const Vector< BCRec > &bcs)
 
FPExcept getFPExcept ()
 Return currently enabled FP exceptions. Linux only.
 
FPExcept setFPExcept (FPExcept excepts)
 
FPExcept disableFPExcept (FPExcept excepts)
 Disable FP exceptions. Linux Only.
 
FPExcept enableFPExcept (FPExcept excepts)
 Enable FP exceptions. Linux Only.
 
void Init_minimal (MPI_Comm mpi_comm)
 
void Finalize_minimal ()
 
std::string Version ()
 
AMReX * Initialize (MPI_Comm mpi_comm, std::ostream &a_osout=std::cout, std::ostream &a_oserr=std::cerr, ErrorHandler a_errhandler=nullptr, int a_device_id=-1)
 
AMReX * Initialize (int &argc, char **&argv, const std::function< void()> &func_parm_parse, std::ostream &a_osout=std::cout, std::ostream &a_oserr=std::cerr, ErrorHandler a_errhandler=nullptr, int a_device_id=-1)
 
AMReX * Initialize (int &argc, char **&argv, bool build_parm_parse=true, MPI_Comm mpi_comm=MPI_COMM_WORLD, const std::function< void()> &func_parm_parse={}, std::ostream &a_osout=std::cout, std::ostream &a_oserr=std::cerr, ErrorHandler a_errhandler=nullptr, int a_device_id=-1)
 
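A minimal sketch of the standard initialization pattern; the inner scope ensures all AMReX objects are destroyed before Finalize runs:

```cpp
#include <AMReX.H>
#include <AMReX_Print.H>

int main (int argc, char* argv[])
{
    amrex::Initialize(argc, argv);
    {
        // All AMReX objects must live inside this scope
        amrex::Print() << "Hello from AMReX version " << amrex::Version() << "\n";
    }
    amrex::Finalize();
}
```
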
bool Initialized ()
 Returns true if there are any currently-active and initialized AMReX instances (i.e. one for which amrex::Initialize has been called, and amrex::Finalize has not). Otherwise false.
 
void Finalize (AMReX *pamrex)
 
void Finalize ()
 
void ExecOnFinalize (std::function< void()>)
 We maintain a stack of functions that need to be called in Finalize(). The functions are called in LIFO order. The idea here is to allow classes to clean up any "global" state that they maintain when we're exiting from AMReX.
 
void ExecOnInitialize (std::function< void()>)
 
template<class... Ts>
__host__ __device__ void ignore_unused (const Ts &...)
 Suppresses compiler warnings about unused variables.
 
void Error (const std::string &msg)
 Print out message to cerr and exit via amrex::Abort().
 
void Error_host (const char *type, const char *msg)
 
__host__ __device__ void Error (const char *msg=nullptr)
 
void Warning (const std::string &msg)
 Print out warning message to cerr.
 
void Warning_host (const char *msg)
 
__host__ __device__ void Warning (const char *msg)
 
void Abort (const std::string &msg)
 Print out message to cerr and exit via abort().
 
__host__ __device__ void Abort (const char *msg=nullptr)
 
void Assert_host (const char *EX, const char *file, int line, const char *msg, std::size_t msg_size=0)
 Prints assertion failed messages to cerr and exits via abort(). Intended for use by the BL_ASSERT() macro in <AMReX_BLassert.H>.
 
__host__ __device__ void Assert (const char *EX, const char *file, int line)
 
__host__ __device__ void Assert (const char *EX, const char *file, int line, const char *msg)
 
void Assert (const char *EX, const char *file, int line, const std::string &msg)
 
void write_to_stderr_without_buffering (const char *str)
 This is used by amrex::Error(), amrex::Abort(), and amrex::Assert() to ensure that when writing the message to stderr, that no additional heap-based memory is allocated.
 
void SetErrorHandler (ErrorHandler f)
 
std::ostream & OutStream ()
 
std::ostream & ErrorStream ()
 
int Verbose () noexcept
 
void SetVerbose (int v) noexcept
 
bool InitSNaN () noexcept
 
void SetInitSNaN (bool v) noexcept
 
std::string get_command ()
 
int command_argument_count ()
 
std::string get_command_argument (int number)
 Get command line arguments. The executable name is the zeroth argument. Returns an empty string if there are not that many arguments.
 
void GccPlacater ()
 
bool any (FPExcept a)
 
FPExcept operator| (FPExcept a, FPExcept b)
 
FPExcept operator& (FPExcept a, FPExcept b)
 
template<class T >
__host__ __device__ constexpr const T & min (const T &a, const T &b) noexcept
 
template<class T , class ... Ts>
__host__ __device__ constexpr const T & min (const T &a, const T &b, const Ts &... c) noexcept
 
template<class T >
__host__ __device__ constexpr const T & max (const T &a, const T &b) noexcept
 
template<class T , class ... Ts>
__host__ __device__ constexpr const T & max (const T &a, const T &b, const Ts &... c) noexcept
 
template<class T >
__host__ __device__ constexpr T elemwiseMin (T const &a, T const &b) noexcept
 Return the element-wise minimum of the given values for types like XDim3.
 
template<class T , class ... Ts>
__host__ __device__ constexpr T elemwiseMin (const T &a, const T &b, const Ts &... c) noexcept
 Return the element-wise minimum of the given values for types like XDim3.
 
template<class T >
__host__ __device__ constexpr T elemwiseMax (T const &a, T const &b) noexcept
 Return the element-wise maximum of the given values for types like XDim3.
 
template<class T , class ... Ts>
__host__ __device__ constexpr T elemwiseMax (const T &a, const T &b, const Ts &... c) noexcept
 Return the element-wise maximum of the given values for types like XDim3.
 
template<typename T >
__host__ __device__ void Swap (T &t1, T &t2) noexcept
 
template<typename T >
__host__ __device__ constexpr const T & Clamp (const T &v, const T &lo, const T &hi)
 
template<typename T >
__host__ __device__ std::enable_if_t< std::is_floating_point_v< T >, bool > almostEqual (T x, T y, int ulp=2)
 
template<class T , class F , std::enable_if_t< std::is_floating_point_v< T >, int > FOO = 0>
__host__ __device__ T bisect (T lo, T hi, F f, T tol=1e-12, int max_iter=100)
 Find a root of a scalar function on a bracketing interval using bisection.
 
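For example, a minimal sketch approximating sqrt(2) as the root of f(x) = x^2 - 2 on the bracketing interval [0, 2]:

```cpp
#include <AMReX_Algorithm.H>

// Uses the default tolerance (1e-12) and iteration limit (100)
double root = amrex::bisect(0.0, 2.0,
                            [] (double x) { return x*x - 2.0; });
```
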
template<typename T , typename I , std::enable_if_t< std::is_integral_v< I >, int > = 0>
__host__ __device__ I bisect (T const *d, I lo, I hi, T const &v)
 Find the index of the interval containing a value in a sorted array.
 
template<typename ItType , typename ValType >
__host__ __device__ ItType upper_bound (ItType first, ItType last, const ValType &val)
 Return an iterator to the first element greater than a given value.
 
template<typename ItType , typename ValType >
__host__ __device__ ItType lower_bound (ItType first, ItType last, const ValType &val)
 Return an iterator to the first element not less than a given value.
 
template<typename ItType , typename ValType , std::enable_if_t< std::is_floating_point_v< typename std::iterator_traits< ItType >::value_type > &&std::is_floating_point_v< ValType >, int > = 0>
__host__ __device__ void linspace (ItType first, const ItType &last, const ValType &start, const ValType &stop)
 Fill a range with linearly spaced values over a closed interval.
 
template<typename ItType , typename ValType , std::enable_if_t< std::is_floating_point_v< typename std::iterator_traits< ItType >::value_type > &&std::is_floating_point_v< ValType >, int > = 0>
__host__ __device__ void logspace (ItType first, const ItType &last, const ValType &start, const ValType &stop, const ValType &base)
 Fill a range with logarithmically spaced values over a closed interval.
 
template<class T , std::enable_if_t< std::is_same_v< std::decay_t< T >, std::uint8_t >||std::is_same_v< std::decay_t< T >, std::uint16_t >||std::is_same_v< std::decay_t< T >, std::uint32_t >||std::is_same_v< std::decay_t< T >, std::uint64_t >, int > = 0>
__host__ __device__ int clz (T x) noexcept
 Return the number of leading zeros of the given integer.
 
Arena * The_Arena ()
 
Arena * The_Async_Arena ()
 
Arena * The_Device_Arena ()
 
Arena * The_Managed_Arena ()
 
Arena * The_Pinned_Arena ()
 
Arena * The_Cpu_Arena ()
 
Arena * The_Comms_Arena ()
 
std::size_t aligned_size (std::size_t align_requirement, std::size_t size) noexcept
 Given a minimum required size in bytes, this returns the smallest size greater than or equal to size that is a multiple of align_requirement.
 
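For example:

```cpp
#include <AMReX_Arena.H>

// Round a 100-byte request up to a multiple of a 64-byte alignment requirement
std::size_t nbytes = amrex::aligned_size(64, 100);   // nbytes == 128
```
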
bool is_aligned (const void *p, std::size_t alignment) noexcept
 Return whether the address p is aligned to alignment bytes.
 
template<class T , typename = typename T::FABType>
std::array< T *, 3 > GetArrOfPtrs (std::array< T, 3 > &a) noexcept
 
template<class T >
std::array< T *, 3 > GetArrOfPtrs (const std::array< std::unique_ptr< T >, 3 > &a) noexcept
 
template<class T >
std::array< T const *, 3 > GetArrOfConstPtrs (const std::array< T, 3 > &a) noexcept
 
template<class T >
std::array< T const *, 3 > GetArrOfConstPtrs (const std::array< T *, 3 > &a) noexcept
 
template<class T >
std::array< T const *, 3 > GetArrOfConstPtrs (const std::array< std::unique_ptr< T >, 3 > &a) noexcept
 
XDim3 makeXDim3 (const Array< Real, 3 > &a) noexcept
 
template<typename T , int N>
 ArrayND (T *, BoxND< N > const &) -> ArrayND< T, N, false >
 
template<typename T , int N>
 ArrayND (T *, BoxND< N > const &, int) -> ArrayND< T, N+1, true >
 
template<typename T , int N>
 ArrayND (T *, IntVectND< N > const &, IntVectND< N > const &) -> ArrayND< T, N, false >
 
template<typename T , int N>
 ArrayND (T *, IntVectND< N > const &, IntVectND< N > const &, int) -> ArrayND< T, N+1, true >
 
template<typename T >
 ArrayND (T *, Dim3 const &, Dim3 const &, int) -> ArrayND< T, 4, true >
 
template<class T >
__host__ __device__ Dim3 lbound (Array4< T > const &a) noexcept
 
template<class T >
__host__ __device__ Dim3 ubound (Array4< T > const &a) noexcept
 
template<class T >
__host__ __device__ Dim3 length (Array4< T > const &a) noexcept
 
template<typename T , int N, bool C>
std::ostream & operator<< (std::ostream &os, const ArrayND< T, N, C > &a)
 
template<typename T >
PolymorphicArray4< T > makePolymorphic (Array4< T > const &a)
 
void BaseFab_Initialize ()
 
void BaseFab_Finalize ()
 
Long TotalBytesAllocatedInFabs () noexcept
 
Long TotalBytesAllocatedInFabsHWM () noexcept
 
Long TotalCellsAllocatedInFabs () noexcept
 
Long TotalCellsAllocatedInFabsHWM () noexcept
 
void ResetTotalBytesAllocatedInFabsHWM () noexcept
 
void update_fab_stats (Long n, Long s, size_t szt) noexcept
 
void update_fab_stats (Long n, Long s, std::size_t szt) noexcept
 
template<typename T >
__host__ __device__ Array4< T > makeArray4 (T *p, Box const &bx, int ncomp) noexcept
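 A sketch wrapping a flat buffer in an Array4 with makeArray4 and walking it with the lbound/ubound helpers above (AMReX_BaseFab.H as the header is an assumption):

    #include <AMReX_BaseFab.H>
    #include <vector>

    int main () {
        amrex::Box bx(amrex::IntVect(0), amrex::IntVect(7));
        std::vector<amrex::Real> buf(bx.numPts()*2);     // storage for 2 components
        auto a = amrex::makeArray4(buf.data(), bx, 2);
        const auto lo = amrex::lbound(a);
        const auto hi = amrex::ubound(a);
        for (int k = lo.z; k <= hi.z; ++k) {
        for (int j = lo.y; j <= hi.y; ++j) {
        for (int i = lo.x; i <= hi.x; ++i) {
            a(i,j,k,0) = 1.0;
            a(i,j,k,1) = 0.0;
        }}}
        return 0;
    }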
 
template<typename T >
std::enable_if_t< std::is_arithmetic_v< T > > placementNew (T *const, Long)
 
template<typename T >
std::enable_if_t< std::is_trivially_default_constructible_v< T > &&!std::is_arithmetic_v< T > > placementNew (T *const ptr, Long n)
 
template<typename T >
std::enable_if_t<!std::is_trivially_default_constructible_v< T > > placementNew (T *const ptr, Long n)
 
template<typename T >
std::enable_if_t< std::is_trivially_destructible_v< T > > placementDelete (T *const, Long)
 
template<typename T >
std::enable_if_t<!std::is_trivially_destructible_v< T > > placementDelete (T *const ptr, Long n)
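 These helpers construct or destroy n objects in raw storage, selecting a no-op for arithmetic (and trivially destructible) types and per-element placement-new/destructor calls otherwise. A host-only sketch (a CPU build is assumed; in GPU builds construction may run on the device, so the storage must be accessible there):

    #include <AMReX_BaseFab.H>

    struct Cell { double val = -1.0; };   // not trivially default-constructible

    int main () {
        const amrex::Long n = 8;
        auto* mem = static_cast<Cell*>(::operator new(n*sizeof(Cell)));
        amrex::placementNew(mem, n);      // runs Cell's default constructor n times
        bool ok = (mem[0].val == -1.0);
        amrex::placementDelete(mem, n);   // runs destructors (trivial for Cell)
        ::operator delete(mem);
        return ok ? 0 : 1;
    }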
 
template<class Tto , class Tfrom >
__host__ __device__ void cast (BaseFab< Tto > &tofab, BaseFab< Tfrom > const &fromfab, Box const &bx, SrcComp scomp, DestComp dcomp, NumComps ncomp) noexcept
 
template<typename STRUCT , typename F , std::enable_if_t<(sizeof(STRUCT)<=36 *8) &&std::is_trivially_copyable_v< STRUCT > &&std::is_trivially_destructible_v< STRUCT >, int > FOO = 0>
void fill (BaseFab< STRUCT > &aos_fab, F const &f)
 
template<typename T >
void transposeCtoF (T const *pi, T *po, int nx, int ny, int nz)
 
template<typename T >
void transposeCtoF (T const *pi, T *po, int nx, int ny)
 
void setBC (const Box &bx, const Box &domain, int src_comp, int dest_comp, int ncomp, const Vector< BCRec > &bc_dom, Vector< BCRec > &bcr) noexcept
 Function for setting an array of BCs.
 
std::ostream & operator<< (std::ostream &os, const BCRec &b)
 
__host__ __device__ void setBC (const Box &bx, const Box &domain, const BCRec &bc_dom, BCRec &bcr) noexcept
 Function for setting a BC.
 
void FillDomainBoundary (MultiFab &phi, const Geometry &geom, const Vector< BCRec > &bc)
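 FillDomainBoundary fills the ghost cells of phi that lie outside the physical domain according to bc; interior and periodic ghost cells remain the job of FillBoundary. A sketch (assumes AMReX is initialized and phi has ghost cells; AMReX_BCUtil.H as the header is an assumption):

    #include <AMReX_MultiFab.H>
    #include <AMReX_BCUtil.H>

    void fill_ghosts (amrex::MultiFab& phi, amrex::Geometry const& geom)
    {
        amrex::Vector<amrex::BCRec> bc(phi.nComp());
        for (int n = 0; n < phi.nComp(); ++n) {
            for (int d = 0; d < AMREX_SPACEDIM; ++d) {
                bc[n].setLo(d, amrex::BCType::foextrap);  // outflow on every face
                bc[n].setHi(d, amrex::BCType::foextrap);
            }
        }
        phi.FillBoundary(geom.periodicity());  // interior + periodic ghosts first
        amrex::FillDomainBoundary(phi, geom, bc);
    }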
 
void AllGatherBoxes (Vector< Box > &bxs, int n_extra_reserve)
 
template<int dim>
__host__ __device__ BoxND< dim > grow (const BoxND< dim > &b, int i) noexcept
 Grow BoxND in all directions by given amount.
 
template<int dim>
__host__ __device__ BoxND< dim > grow (const BoxND< dim > &b, const IntVectND< dim > &v) noexcept
 Grow BoxND in each direction by the corresponding component of v.
 
template<int dim>
__host__ __device__ BoxND< dim > grow (const BoxND< dim > &b, int idir, int n_cell) noexcept
 Grow BoxND in given direction by given amount.
 
template<int dim>
__host__ __device__ BoxND< dim > grow (const BoxND< dim > &b, Direction d, int n_cell) noexcept
 Grow BoxND in given direction by given amount.
 
template<int dim>
__host__ __device__ BoxND< dim > growLo (const BoxND< dim > &b, int idir, int n_cell) noexcept
 
template<int dim>
__host__ __device__ BoxND< dim > growLo (const BoxND< dim > &b, Direction d, int n_cell) noexcept
 
template<int dim>
__host__ __device__ BoxND< dim > growHi (const BoxND< dim > &b, int idir, int n_cell) noexcept
 
template<int dim>
__host__ __device__ BoxND< dim > growHi (const BoxND< dim > &b, Direction d, int n_cell) noexcept
 
template<int dim>
__host__ __device__ BoxND< dim > coarsen (const BoxND< dim > &b, int ref_ratio) noexcept
 Coarsen BoxND by given (positive) coarsening ratio.
 
template<int dim>
__host__ __device__ BoxND< dim > coarsen (const BoxND< dim > &b, const IntVectND< dim > &ref_ratio) noexcept
 Coarsen BoxND by given (positive) coarsening ratio.
 
template<int dim>
__host__ __device__ BoxND< dim > refine (const BoxND< dim > &b, int ref_ratio) noexcept
 Refine BoxND by given (positive) refinement ratio.
 
template<int dim>
__host__ __device__ BoxND< dim > refine (const BoxND< dim > &b, const IntVectND< dim > &ref_ratio) noexcept
 Refine BoxND by given (positive) refinement ratio.
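A sketch of how grow, coarsen, and refine compose on a cell-centered Box:

    #include <AMReX_Box.H>

    int main () {
        amrex::Box b(amrex::IntVect(0), amrex::IntVect(63));
        auto gb = amrex::grow(b, 2);       // [-2,65] in every direction
        auto g1 = amrex::growHi(b, 0, 1);  // one extra cell on the high x side
        auto cb = amrex::coarsen(b, 2);    // [0,31]
        auto rb = amrex::refine(cb, 2);    // [0,63] again
        return (rb == b && gb.contains(b) && g1.contains(b)) ? 0 : 1;
    }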
 
template<int dim>
__host__ __device__ BoxND< dim > shift (const BoxND< dim > &b, int dir, int nzones) noexcept
 Return a BoxND with indices shifted by nzones in dir direction.
 
template<int dim>
__host__ __device__ BoxND< dim > shift (const BoxND< dim > &b, const IntVectND< dim > &nzones) noexcept
 
template<int dim>
__host__ __device__ BoxND< dim > surroundingNodes (const BoxND< dim > &b, int dir) noexcept
 Return a BoxND with NODE based coordinates in direction dir that encloses BoxND b. NOTE: equivalent to b.convert(dir,NODE). NOTE: error if b.type(dir) == NODE.
 
template<int dim>
__host__ __device__ BoxND< dim > surroundingNodes (const BoxND< dim > &b, Direction d) noexcept
 
template<int dim>
__host__ __device__ BoxND< dim > surroundingNodes (const BoxND< dim > &b) noexcept
 Return a BoxND with NODE based coordinates in all directions that encloses BoxND b.
 
template<int dim>
__host__ __device__ BoxND< dim > convert (const BoxND< dim > &b, const IntVectND< dim > &typ) noexcept
 Return a BoxND with different type.
 
template<int dim>
__host__ __device__ BoxND< dim > convert (const BoxND< dim > &b, const IndexTypeND< dim > &typ) noexcept
 
template<int dim>
__host__ __device__ BoxND< dim > enclosedCells (const BoxND< dim > &b, int dir) noexcept
 Return a BoxND with CELL based coordinates in direction dir that is enclosed by b. NOTE: equivalent to b.convert(dir,CELL). NOTE: error if b.type(dir) == CELL.
 
template<int dim>
__host__ __device__ BoxND< dim > enclosedCells (const BoxND< dim > &b, Direction d) noexcept
 
template<int dim>
__host__ __device__ BoxND< dim > enclosedCells (const BoxND< dim > &b) noexcept
 Return a BoxND with CELL based coordinates in all directions that is enclosed by b.
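A sketch round-tripping between cell-centered and nodal index types:

    #include <AMReX_Box.H>

    int main () {
        amrex::Box cc(amrex::IntVect(0), amrex::IntVect(15));   // cell-centered
        auto nd   = amrex::surroundingNodes(cc);         // nodal; big end becomes 16
        auto fx   = amrex::surroundingNodes(cc, 0);      // x-face-centered box
        auto back = amrex::enclosedCells(nd);            // cell-centered again
        auto nd2  = amrex::convert(cc, amrex::IntVect(1)); // same result as nd
        return (back == cc && nd2 == nd && fx.ixType().nodeCentered(0)) ? 0 : 1;
    }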
 
template<int dim>
__host__ __device__ BoxND< dim > bdryLo (const BoxND< dim > &b, int dir, int len=1) noexcept
 Return the edge-centered BoxND (in direction dir) defining the low side of BoxND b.
 
template<int dim>
__host__ __device__ BoxND< dim > bdryHi (const BoxND< dim > &b, int dir, int len=1) noexcept
 Return the edge-centered BoxND (in direction dir) defining the high side of BoxND b.
 
template<int dim>
__host__ __device__ BoxND< dim > bdryNode (const BoxND< dim > &b, Orientation face, int len=1) noexcept
 Similar to bdryLo and bdryHi except that it operates on the given face of box b.
 
template<int dim>
__host__ __device__ BoxND< dim > adjCellLo (const BoxND< dim > &b, int dir, int len=1) noexcept
 Return the cell-centered BoxND of length len adjacent to b on the low end along the coordinate direction dir. The return BoxND is identical to b in the other directions; it and b have an empty intersection. NOTE: len >= 1. NOTE: BoxND retval = adjCellLo(b,dir,len) is equivalent to the following set of operations: BoxND retval(b); retval.convert(dir,BoxND::CELL); retval.setRange(dir,retval.smallEnd(dir)-len,len);
 
template<int dim>
__host__ __device__ BoxND< dim > adjCellHi (const BoxND< dim > &b, int dir, int len=1) noexcept
 Similar to adjCellLo but builds an adjacent BoxND on the high end.
 
template<int dim>
__host__ __device__ BoxND< dim > adjCell (const BoxND< dim > &b, Orientation face, int len=1) noexcept
 Similar to adjCellLo and adjCellHi; operates on given face.
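A sketch building ghost-cell bands outside a Box; note the results never intersect b (assumes AMREX_SPACEDIM >= 2):

    #include <AMReX_Box.H>

    int main () {
        amrex::Box b(amrex::IntVect(0), amrex::IntVect(31));
        auto lo_band = amrex::adjCellLo(b, 0, 2);  // x in [-2,-1], below the low face
        auto hi_band = amrex::adjCellHi(b, 1, 1);  // y == 32, above the high face
        return (!lo_band.intersects(b) && !hi_band.intersects(b)) ? 0 : 1;
    }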
 
template<int dim>
__host__ __device__ BoxND< dim > minBox (const BoxND< dim > &b1, const BoxND< dim > &b2) noexcept
 Return the minimum BoxND containing both b1 and b2. Both BoxNDs must have identical type.
 
template<int dim>
std::ostream & operator<< (std::ostream &os, const BoxND< dim > &bx)
 Write an ASCII representation to the ostream.
 
template<int dim>
std::istream & operator>> (std::istream &is, BoxND< dim > &bx)
 Read from istream.
 
template<int d, int... dims>
__host__ __device__ constexpr BoxND< detail::get_sum< d, dims... >()> BoxCat (const BoxND< d > &bx, const BoxND< dims > &...boxes) noexcept
 Return a BoxND obtained by concatenating the input BoxNDs. The dimension of the return value equals the sum of the dimensions of the input BoxNDs.
 
template<int d, int... dims>
__host__ __device__ constexpr GpuTuple< BoxND< d >, BoxND< dims >... > BoxSplit (const BoxND< detail::get_sum< d, dims... >()> &bx) noexcept
 Return a tuple of BoxNDs obtained by splitting the input BoxND according to the dimensions specified by the template arguments.
 
template<int new_dim, int old_dim>
__host__ __device__ constexpr BoxND< new_dim > BoxShrink (const BoxND< old_dim > &bx) noexcept
 Return a new BoxND of dimension new_dim containing the first new_dim dimensions of this BoxND.
 
template<int new_dim, int old_dim>
__host__ __device__ constexpr BoxND< new_dim > BoxExpand (const BoxND< old_dim > &bx) noexcept
 Return a new BoxND of dimension new_dim, copying all values of this BoxND into it and assigning (small=0, big=0, typ=CELL) to the remaining dimensions.
 
template<int new_dim, int old_dim>
__host__ __device__ constexpr BoxND< new_dim > BoxResize (const BoxND< old_dim > &bx) noexcept
 Return a new BoxND of dimension new_dim by either shrinking or expanding this BoxND.
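A sketch concatenating a 2-D BoxND with a 1-D BoxND and recovering the original (the IntVectND constructor forms used here are assumptions):

    #include <AMReX_Box.H>

    int main () {
        amrex::BoxND<2> bxy(amrex::IntVectND<2>(0,0), amrex::IntVectND<2>(7,7));
        amrex::BoxND<1> bz (amrex::IntVectND<1>(0),   amrex::IntVectND<1>(3));
        auto b3 = amrex::BoxCat(bxy, bz);     // BoxND<3>: [0,7] x [0,7] x [0,3]
        auto b2 = amrex::BoxShrink<2>(b3);    // keep the first two dimensions
        return (b2 == bxy) ? 0 : 1;
    }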
 
template<int dim>
__host__ __device__ IntVectND< dim > lbound_iv (BoxND< dim > const &box) noexcept
 
template<int dim>
__host__ __device__ IntVectND< dim > ubound_iv (BoxND< dim > const &box) noexcept
 
template<int dim>
__host__ __device__ IntVectND< dim > begin_iv (BoxND< dim > const &box) noexcept
 
template<int dim>
__host__ __device__ IntVectND< dim > end_iv (BoxND< dim > const &box) noexcept
 
template<int dim>
__host__ __device__ IntVectND< dim > length_iv (BoxND< dim > const &box) noexcept
 
template<int dim>
__host__ __device__ IntVectND< dim > max_lbound_iv (BoxND< dim > const &b1, BoxND< dim > const &b2) noexcept
 
template<int dim>
__host__ __device__ IntVectND< dim > max_lbound_iv (BoxND< dim > const &b1, IntVectND< dim > const &lo) noexcept
 
template<int dim>
__host__ __device__ IntVectND< dim > min_ubound_iv (BoxND< dim > const &b1, BoxND< dim > const &b2) noexcept
 
template<int dim>
__host__ __device__ IntVectND< dim > min_ubound_iv (BoxND< dim > const &b1, IntVectND< dim > const &hi) noexcept
 
template<int dim, std::enable_if_t<(1<=dim &&dim<=3), int > = 0>
__host__ __device__ Dim3 lbound (BoxND< dim > const &box) noexcept
 
template<int dim, std::enable_if_t<(1<=dim &&dim<=3), int > = 0>
__host__ __device__ Dim3 ubound (BoxND< dim > const &box) noexcept
 
template<int dim, std::enable_if_t<(1<=dim &&dim<=3), int > = 0>
__host__ __device__ Dim3 begin (BoxND< dim > const &box) noexcept
 
template<int dim, std::enable_if_t<(1<=dim &&dim<=3), int > = 0>
__host__ __device__ Dim3 end (BoxND< dim > const &box) noexcept
 
template<int dim, std::enable_if_t<(1<=dim &&dim<=3), int > = 0>
__host__ __device__ Dim3 length (BoxND< dim > const &box) noexcept
 
template<int dim, std::enable_if_t<(1<=dim &&dim<=3), int > = 0>
__host__ __device__ Dim3 max_lbound (BoxND< dim > const &b1, BoxND< dim > const &b2) noexcept
 
template<int dim, std::enable_if_t<(1<=dim &&dim<=3), int > = 0>
__host__ __device__ Dim3 max_lbound (BoxND< dim > const &b1, Dim3 const &lo) noexcept
 
template<int dim, std::enable_if_t<(1<=dim &&dim<=3), int > = 0>
__host__ __device__ Dim3 min_ubound (BoxND< dim > const &b1, BoxND< dim > const &b2) noexcept
 
template<int dim, std::enable_if_t<(1<=dim &&dim<=3), int > = 0>
__host__ __device__ Dim3 min_ubound (BoxND< dim > const &b1, Dim3 const &hi) noexcept
 
template<int dim>
BoxND< dim > getIndexBounds (BoxND< dim > const &b1) noexcept
 
template<int dim>
BoxND< dim > getIndexBounds (BoxND< dim > const &b1, BoxND< dim > const &b2) noexcept
 
template<class T , class ... Ts>
auto getIndexBounds (T const &b1, T const &b2, Ts const &... b3) noexcept
 
template<int dim>
__host__ __device__ IntVectND< dim > getCell (BoxND< dim > const *boxes, int nboxes, Long icell) noexcept
 
template<int dim>
__host__ __device__ BoxND< dim > makeSlab (BoxND< dim > const &b, int direction, int slab_index) noexcept
 
template<int dim = 3, std::enable_if_t<(1<=dim &&dim<=3), int > = 0>
__host__ __device__ BoxND< dim > makeSingleCellBox (int i, int j, int k, IndexTypeND< dim > typ=IndexTypeND< dim >::TheCellType())
 
template<int dim>
__host__ __device__ BoxND< dim > makeSingleCellBox (IntVectND< dim > const &vect, IndexTypeND< dim > typ=IndexTypeND< dim >::TheCellType())
 
std::ostream & operator<< (std::ostream &os, const BoxArray &ba)
 Write a BoxArray to an ostream in ASCII format.
 
BoxArray boxComplement (const Box &b1in, const Box &b2)
 Make a BoxArray from the complement of b2 in b1in.
 
BoxArray complementIn (const Box &b, const BoxArray &ba)
 Make a BoxArray from the complement of BoxArray ba in Box b.
 
BoxArray intersect (const BoxArray &ba, const Box &b, int ng=0)
 Make a BoxArray from the intersection of Box b and BoxArray(+ghostcells).
 
BoxArray intersect (const BoxArray &ba, const Box &b, const IntVect &ng)
 
BoxArray intersect (const BoxArray &lhs, const BoxArray &rhs)
 Make a BoxArray from the intersection of two BoxArrays.
 
BoxList intersect (const BoxArray &ba, const BoxList &bl)
 Make a BoxList from the intersection of BoxArray and BoxList.
 
BoxArray convert (const BoxArray &ba, IndexType typ)
 
BoxArray convert (const BoxArray &ba, const IntVect &typ)
 
BoxArray coarsen (const BoxArray &ba, int ratio)
 
BoxArray coarsen (const BoxArray &ba, const IntVect &ratio)
 
BoxArray refine (const BoxArray &ba, int ratio)
 
BoxArray refine (const BoxArray &ba, const IntVect &ratio)
 
BoxList GetBndryCells (const BoxArray &ba, int ngrow)
 Find the ghost cells of a given BoxArray.
 
void readBoxArray (BoxArray &ba, std::istream &s, bool b=false)
 Read a BoxArray from a stream. If b is true, read in a special way.
 
bool match (const BoxArray &x, const BoxArray &y)
 Note that two BoxArrays that match are not necessarily equal.
 
BoxArray decompose (Box const &domain, int nboxes, Array< bool, 3 > const &decomp={ true, true, true }, bool no_overlap=false)
 Decompose domain box into BoxArray.
 
std::ostream & operator<< (std::ostream &os, const BoxArray::RefID &id)
 
void intersect (BoxDomain &dest, const BoxDomain &fin, const Box &b)
 Compute the intersection of BoxDomain fin with Box b and place the result into BoxDomain dest.
 
void refine (BoxDomain &dest, const BoxDomain &fin, int ratio)
 Refine all Boxes in the domain by the refinement ratio and return the result in dest.
 
void accrete (BoxDomain &dest, const BoxDomain &fin, int sz=1)
 Grow each Box in BoxDomain fin by size sz and place the result into BoxDomain dest.
 
void coarsen (BoxDomain &dest, const BoxDomain &fin, int ratio)
 Coarsen all Boxes in the domain by the refinement ratio. The result is placed into BoxDomain dest.
 
BoxDomain complementIn (const Box &b, const BoxDomain &bl)
 Returns the complement of BoxDomain bl in Box b.
 
std::ostream & operator<< (std::ostream &os, const BoxDomain &bd)
 Output a BoxDomain to an ostream in ASCII format.
 
BoxList intersect (const BoxList &bl, const Box &b)
 Returns a BoxList defining the intersection of bl with b.
 
BoxList refine (const BoxList &bl, int ratio)
 Returns a new BoxList in which each Box is refined by the given ratio.
 
BoxList coarsen (const BoxList &bl, int ratio)
 Returns a new BoxList in which each Box is coarsened by the given ratio.
 
BoxList accrete (const BoxList &bl, int sz)
 Returns a new BoxList in which each Box is grown by the given size.
 
BoxList removeOverlap (const BoxList &bl)
 Return a BoxList that covers the same area but has no overlapping boxes.
 
BoxList complementIn (const Box &b, const BoxList &bl)
 Returns a BoxList defining the complement of BoxList bl in Box b.
 
BoxList boxDiff (const Box &b1in, const Box &b2)
 Returns a BoxList defining the complement of b2 in b1in.
 
void boxDiff (BoxList &bl_diff, const Box &b1in, const Box &b2)
 
std::ostream & operator<< (std::ostream &os, const BoxList &blist)
 Output a BoxList to an ostream in ASCII format.
 
std::ostream & operator<< (std::ostream &os, const CArena &arena)
 
template<auto I, auto N, class F >
__host__ __device__ constexpr void constexpr_for (F const &f)
 
std::ostream & operator<< (std::ostream &os, const CoordSys &c)
 
std::istream & operator>> (std::istream &is, CoordSys &c)
 
template<class L , class... Fs, typename... CTOs>
void AnyCTO (TypeList< CTOs... > list_of_compile_time_options, std::array< int, sizeof...(CTOs)> const &runtime_options, L &&l, Fs &&...cto_functs)
 Compile time optimization of kernels with run time options.
 
template<int MT, typename T , class F , typename... CTOs>
std::enable_if_t< std::is_integral_v< T > > ParallelFor (TypeList< CTOs... > ctos, std::array< int, sizeof...(CTOs)> const &runtime_options, T N, F &&f)
 
template<int MT, class F , int dim, typename... CTOs>
void ParallelFor (TypeList< CTOs... > ctos, std::array< int, sizeof...(CTOs)> const &runtime_options, BoxND< dim > const &box, F &&f)
 
template<int MT, typename T , class F , int dim, typename... CTOs>
std::enable_if_t< std::is_integral_v< T > > ParallelFor (TypeList< CTOs... > ctos, std::array< int, sizeof...(CTOs)> const &runtime_options, BoxND< dim > const &box, T ncomp, F &&f)
 
template<typename T , class F , typename... CTOs>
std::enable_if_t< std::is_integral_v< T > > ParallelFor (TypeList< CTOs... > ctos, std::array< int, sizeof...(CTOs)> const &option, T N, F &&f)
 ParallelFor with compile time optimization of kernels with run time options.
 
template<class F , int dim, typename... CTOs>
void ParallelFor (TypeList< CTOs... > ctos, std::array< int, sizeof...(CTOs)> const &option, BoxND< dim > const &box, F &&f)
 ParallelFor with compile time optimization of kernels with run time options.
 
template<typename T , class F , int dim, typename... CTOs>
std::enable_if_t< std::is_integral_v< T > > ParallelFor (TypeList< CTOs... > ctos, std::array< int, sizeof...(CTOs)> const &option, BoxND< dim > const &box, T ncomp, F &&f)
 ParallelFor with compile time optimization of kernels with run time options.
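With a TypeList of CompileTimeOptions, one kernel is compiled per option value and the runtime flag selects which instantiation launches, so the loop body can branch with if constexpr at no runtime cost. A sketch following the signatures above (the helper name and header choice are illustrative):

    #include <AMReX_GpuLaunch.H>

    void scale_or_zero (amrex::Box const& bx,
                        amrex::Array4<amrex::Real> const& a, int do_zero)
    {
        amrex::ParallelFor(amrex::TypeList<amrex::CompileTimeOptions<0,1>>{},
                           {do_zero},   // must be one of the listed option values
                           bx, [=] AMREX_GPU_DEVICE (int i, int j, int k, auto zero)
        {
            if constexpr (zero.value == 1) { a(i,j,k) = 0.0; }
            else                           { a(i,j,k) *= 2.0; }
        });
    }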
 
std::string demangle (const char *name)
 Demangle C++ name.
 
__host__ __device__ Real dot_product (XDim3 const &a, XDim3 const &b)
 
__host__ __device__ XDim3 cross_product (XDim3 const &a, XDim3 const &b)
 
__host__ __device__ XDim3 operator+ (XDim3 const &a, XDim3 const &b)
 
__host__ __device__ XDim3 operator- (XDim3 const &a, XDim3 const &b)
 
template<typename T , std::enable_if_t< std::is_same_v< T, Dim3 >||std::is_same_v< T, XDim3 > > * = nullptr>
std::ostream & operator<< (std::ostream &os, const T &d)
 
std::ostream & operator<< (std::ostream &os, const DistributionMapping &pmap)
 Output operator for DistributionMapping.
 
std::ostream & operator<< (std::ostream &os, const DistributionMapping::RefID &id)
 
DistributionMapping MakeSimilarDM (const BoxArray &ba, const MultiFab &mf, const IntVect &ng)
 Function that creates a DistributionMapping "similar" to that of a MultiFab.
 
DistributionMapping MakeSimilarDM (const BoxArray &ba, const BoxArray &src_ba, const DistributionMapping &src_dm, const IntVect &ng)
 Function that creates a DistributionMapping "similar" to that of a MultiFab.
 
template<typename T , typename ET = amrex_enum_traits<T>, std::enable_if_t< ET::value, int > = 0>
std::vector< std::pair< std::string, T > > const & getEnumNameValuePairs ()
 
template<typename T , typename ET = amrex_enum_traits<T>, std::enable_if_t< ET::value, int > = 0>
T getEnum (std::string_view const &s)
 
template<typename T , typename ET = amrex_enum_traits<T>, std::enable_if_t< ET::value, int > = 0>
T getEnumCaseInsensitive (std::string_view const &s)
 
template<typename T , typename ET = amrex_enum_traits<T>, std::enable_if_t< ET::value, int > = 0>
std::string getEnumNameString (T const &v)
 
template<typename T , typename ET = amrex_enum_traits<T>, std::enable_if_t< ET::value, int > = 0>
std::vector< std::string > getEnumNameStrings ()
 
template<typename T , typename ET = amrex_enum_traits<T>, std::enable_if_t< ET::value, int > = 0>
std::string getEnumClassName ()
 
template<typename T , typename ET = amrex_enum_traits<T>, std::enable_if_t< ET::value, int > = 0>
constexpr auto toUnderlying (T v) noexcept
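 These helpers convert between a reflected enum and its names; the AMREX_ENUM macro generates the required amrex_enum_traits. A sketch (the enum and its values are illustrative):

    #include <AMReX_Enum.H>
    #include <string>

    AMREX_ENUM(Flavor, vanilla, chocolate, mint);

    int main () {
        auto f = amrex::getEnum<Flavor>("chocolate");
        auto g = amrex::getEnumCaseInsensitive<Flavor>("MINT");
        std::string s = amrex::getEnumNameString(Flavor::vanilla);  // "vanilla"
        return (f == Flavor::chocolate && g == Flavor::mint && s == "vanilla") ? 0 : 1;
    }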
 
template<typename T , std::enable_if_t<!IsBaseFab< T >::value, int > = 0>
Long nBytesOwned (T const &) noexcept
 
template<typename T >
Long nBytesOwned (BaseFab< T > const &fab) noexcept
 
template<class DFAB , class SFAB , std::enable_if_t< std::conjunction_v< IsBaseFab< DFAB >, IsBaseFab< SFAB >, std::is_convertible< typename SFAB::value_type, typename DFAB::value_type > >, int > BAR = 0>
void Copy (FabArray< DFAB > &dst, FabArray< SFAB > const &src, int srccomp, int dstcomp, int numcomp, int nghost)
 
template<class DFAB , class SFAB , std::enable_if_t< std::conjunction_v< IsBaseFab< DFAB >, IsBaseFab< SFAB >, std::is_convertible< typename SFAB::value_type, typename DFAB::value_type > >, int > BAR = 0>
void Copy (FabArray< DFAB > &dst, FabArray< SFAB > const &src, int srccomp, int dstcomp, int numcomp, const IntVect &nghost)
 
template<class FAB , class bar = std::enable_if_t<IsBaseFab<FAB>::value>>
void Add (FabArray< FAB > &dst, FabArray< FAB > const &src, int srccomp, int dstcomp, int numcomp, int nghost)
 
template<class FAB , class bar = std::enable_if_t<IsBaseFab<FAB>::value>>
void Add (FabArray< FAB > &dst, FabArray< FAB > const &src, int srccomp, int dstcomp, int numcomp, const IntVect &nghost)
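 A sketch of component-wise copy and accumulate between two compatible FabArrays (dst and src are assumed to share a BoxArray and DistributionMapping):

    #include <AMReX_MultiFab.H>

    void copy_then_accumulate (amrex::MultiFab& dst, amrex::MultiFab const& src)
    {
        amrex::Copy(dst, src, 0, 0, 1, 0);  // dst[0] = src[0], valid cells only
        amrex::Add (dst, src, 0, 0, 1, 0);  // dst[0] += src[0]
    }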
 
std::ostream & operator<< (std::ostream &os, const FabArrayBase::BDKey &id)
 
int nComp (FabArrayBase const &fa)
 
IntVect nGrowVect (FabArrayBase const &fa)
 
BoxArray const & boxArray (FabArrayBase const &fa)
 
DistributionMapping const & DistributionMap (FabArrayBase const &fa)
 
template<class MF >
std::enable_if_t< IsFabArray< MF >::value > FillBoundary (Vector< MF * > const &mf, Vector< int > const &scomp, Vector< int > const &ncomp, Vector< IntVect > const &nghost, Vector< Periodicity > const &period, Vector< int > const &cross={})
 
template<class MF >
std::enable_if_t< IsFabArray< MF >::value > FillBoundary (Vector< MF * > const &mf, const Periodicity &a_period=Periodicity::NonPeriodic())
 
template<class FAB , class F , class bar = std::enable_if_t<IsBaseFab<FAB>::value>>
FAB::value_type ReduceSum (FabArray< FAB > const &fa, int nghost, F &&f)
 
template<class FAB , class F , class bar = std::enable_if_t<IsBaseFab<FAB>::value>>
FAB::value_type ReduceSum (FabArray< FAB > const &fa, IntVect const &nghost, F &&f)
 
template<class FAB1 , class FAB2 , class F , class bar = std::enable_if_t<IsBaseFab<FAB1>::value>>
FAB1::value_type ReduceSum (FabArray< FAB1 > const &fa1, FabArray< FAB2 > const &fa2, int nghost, F &&f)
 
template<class FAB1 , class FAB2 , class F , class bar = std::enable_if_t<IsBaseFab<FAB1>::value>>
FAB1::value_type ReduceSum (FabArray< FAB1 > const &fa1, FabArray< FAB2 > const &fa2, IntVect const &nghost, F &&f)
 
template<class FAB1 , class FAB2 , class FAB3 , class F , class bar = std::enable_if_t<IsBaseFab<FAB1>::value>>
FAB1::value_type ReduceSum (FabArray< FAB1 > const &fa1, FabArray< FAB2 > const &fa2, FabArray< FAB3 > const &fa3, int nghost, F &&f)
 
template<class FAB1 , class FAB2 , class FAB3 , class F , class bar = std::enable_if_t<IsBaseFab<FAB1>::value>>
FAB1::value_type ReduceSum (FabArray< FAB1 > const &fa1, FabArray< FAB2 > const &fa2, FabArray< FAB3 > const &fa3, IntVect const &nghost, F &&f)
 
template<class FAB , class F , class bar = std::enable_if_t<IsBaseFab<FAB>::value>>
FAB::value_type ReduceMin (FabArray< FAB > const &fa, int nghost, F &&f)
 
template<class FAB , class F , class bar = std::enable_if_t<IsBaseFab<FAB>::value>>
FAB::value_type ReduceMin (FabArray< FAB > const &fa, IntVect const &nghost, F &&f)
 
template<class FAB1 , class FAB2 , class F , class bar = std::enable_if_t<IsBaseFab<FAB1>::value>>
FAB1::value_type ReduceMin (FabArray< FAB1 > const &fa1, FabArray< FAB2 > const &fa2, int nghost, F &&f)
 
template<class FAB1 , class FAB2 , class F , class bar = std::enable_if_t<IsBaseFab<FAB1>::value>>
FAB1::value_type ReduceMin (FabArray< FAB1 > const &fa1, FabArray< FAB2 > const &fa2, IntVect const &nghost, F &&f)
 
template<class FAB1 , class FAB2 , class FAB3 , class F , class bar = std::enable_if_t<IsBaseFab<FAB1>::value>>
FAB1::value_type ReduceMin (FabArray< FAB1 > const &fa1, FabArray< FAB2 > const &fa2, FabArray< FAB3 > const &fa3, int nghost, F &&f)
 
template<class FAB1 , class FAB2 , class FAB3 , class F , class bar = std::enable_if_t<IsBaseFab<FAB1>::value>>
FAB1::value_type ReduceMin (FabArray< FAB1 > const &fa1, FabArray< FAB2 > const &fa2, FabArray< FAB3 > const &fa3, IntVect const &nghost, F &&f)
 
template<class FAB , class F , class bar = std::enable_if_t<IsBaseFab<FAB>::value>>
FAB::value_type ReduceMax (FabArray< FAB > const &fa, int nghost, F &&f)
 
template<class FAB , class F , class bar = std::enable_if_t<IsBaseFab<FAB>::value>>
FAB::value_type ReduceMax (FabArray< FAB > const &fa, IntVect const &nghost, F &&f)
 
template<class FAB1 , class FAB2 , class F , class bar = std::enable_if_t<IsBaseFab<FAB1>::value>>
FAB1::value_type ReduceMax (FabArray< FAB1 > const &fa1, FabArray< FAB2 > const &fa2, int nghost, F &&f)
 
template<class FAB1 , class FAB2 , class F , class bar = std::enable_if_t<IsBaseFab<FAB1>::value>>
FAB1::value_type ReduceMax (FabArray< FAB1 > const &fa1, FabArray< FAB2 > const &fa2, IntVect const &nghost, F &&f)
 
template<class FAB1 , class FAB2 , class FAB3 , class F , class bar = std::enable_if_t<IsBaseFab<FAB1>::value>>
FAB1::value_type ReduceMax (FabArray< FAB1 > const &fa1, FabArray< FAB2 > const &fa2, FabArray< FAB3 > const &fa3, int nghost, F &&f)
 
template<class FAB1 , class FAB2 , class FAB3 , class F , class bar = std::enable_if_t<IsBaseFab<FAB1>::value>>
FAB1::value_type ReduceMax (FabArray< FAB1 > const &fa1, FabArray< FAB2 > const &fa2, FabArray< FAB3 > const &fa3, IntVect const &nghost, F &&f)
 
template<class FAB , class F , class bar = std::enable_if_t<IsBaseFab<FAB>::value>>
bool ReduceLogicalAnd (FabArray< FAB > const &fa, int nghost, F &&f)
 
template<class FAB , class F , class bar = std::enable_if_t<IsBaseFab<FAB>::value>>
bool ReduceLogicalAnd (FabArray< FAB > const &fa, IntVect const &nghost, F &&f)
 
template<class FAB1 , class FAB2 , class F , class bar = std::enable_if_t<IsBaseFab<FAB1>::value>>
bool ReduceLogicalAnd (FabArray< FAB1 > const &fa1, FabArray< FAB2 > const &fa2, int nghost, F &&f)
 
template<class FAB1 , class FAB2 , class F , class bar = std::enable_if_t<IsBaseFab<FAB1>::value>>
bool ReduceLogicalAnd (FabArray< FAB1 > const &fa1, FabArray< FAB2 > const &fa2, IntVect const &nghost, F &&f)
 
template<class FAB , class F , class bar = std::enable_if_t<IsBaseFab<FAB>::value>>
bool ReduceLogicalOr (FabArray< FAB > const &fa, int nghost, F &&f)
 
template<class FAB , class F , class bar = std::enable_if_t<IsBaseFab<FAB>::value>>
bool ReduceLogicalOr (FabArray< FAB > const &fa, IntVect const &nghost, F &&f)
 
template<class FAB1 , class FAB2 , class F , class bar = std::enable_if_t<IsBaseFab<FAB1>::value>>
bool ReduceLogicalOr (FabArray< FAB1 > const &fa1, FabArray< FAB2 > const &fa2, int nghost, F &&f)
 
template<class FAB1 , class FAB2 , class F , class bar = std::enable_if_t<IsBaseFab<FAB1>::value>>
bool ReduceLogicalOr (FabArray< FAB1 > const &fa1, FabArray< FAB2 > const &fa2, IntVect const &nghost, F &&f)
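 The ReduceSum/ReduceMin/ReduceMax/ReduceLogical* helpers apply a user functor box-by-box and combine the results rank-locally; the caller performs the final MPI reduction. A sketch summing component 0 over valid cells (amrex::Loop from AMReX_Loop.H is an assumption):

    #include <AMReX_MultiFab.H>
    #include <AMReX_ParallelDescriptor.H>

    amrex::Real sum_comp0 (amrex::MultiFab const& mf)
    {
        amrex::Real s = amrex::ReduceSum(mf, 0,
            [=] AMREX_GPU_HOST_DEVICE (amrex::Box const& bx,
                                       amrex::Array4<amrex::Real const> const& a)
                -> amrex::Real
            {
                amrex::Real t = 0.;
                amrex::Loop(bx, [&] (int i, int j, int k) { t += a(i,j,k); });
                return t;
            });
        amrex::ParallelDescriptor::ReduceRealSum(s);  // rank-local -> global
        return s;
    }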
 
template<class FAB , class bar = std::enable_if_t<IsBaseFab<FAB>::value>>
void printCell (FabArray< FAB > const &mf, const IntVect &cell, int comp=-1, const IntVect &ng=IntVect::TheZeroVector())
 
template<class FAB , class bar = std::enable_if_t<IsBaseFab<FAB>::value>>
void Swap (FabArray< FAB > &dst, FabArray< FAB > &src, int srccomp, int dstcomp, int numcomp, int nghost)
 
template<class FAB , class bar = std::enable_if_t<IsBaseFab<FAB>::value>>
void Swap (FabArray< FAB > &dst, FabArray< FAB > &src, int srccomp, int dstcomp, int numcomp, const IntVect &nghost)
 
template<class FAB , class bar = std::enable_if_t<IsBaseFab<FAB>::value>>
void Subtract (FabArray< FAB > &dst, FabArray< FAB > const &src, int srccomp, int dstcomp, int numcomp, int nghost)
 
template<class FAB , class bar = std::enable_if_t<IsBaseFab<FAB>::value>>
void Subtract (FabArray< FAB > &dst, FabArray< FAB > const &src, int srccomp, int dstcomp, int numcomp, const IntVect &nghost)
 
template<class FAB , class bar = std::enable_if_t<IsBaseFab<FAB>::value>>
void Multiply (FabArray< FAB > &dst, FabArray< FAB > const &src, int srccomp, int dstcomp, int numcomp, int nghost)
 
template<class FAB , class bar = std::enable_if_t<IsBaseFab<FAB>::value>>
void Multiply (FabArray< FAB > &dst, FabArray< FAB > const &src, int srccomp, int dstcomp, int numcomp, const IntVect &nghost)
 
template<class FAB , class bar = std::enable_if_t<IsBaseFab<FAB>::value>>
void Divide (FabArray< FAB > &dst, FabArray< FAB > const &src, int srccomp, int dstcomp, int numcomp, int nghost)
 
template<class FAB , class bar = std::enable_if_t<IsBaseFab<FAB>::value>>
void Divide (FabArray< FAB > &dst, FabArray< FAB > const &src, int srccomp, int dstcomp, int numcomp, const IntVect &nghost)
 
template<class FAB , class bar = std::enable_if_t<IsBaseFab<FAB>::value>>
void Abs (FabArray< FAB > &fa, int icomp, int numcomp, int nghost)
 
template<class FAB , class bar = std::enable_if_t<IsBaseFab<FAB>::value>>
void Abs (FabArray< FAB > &fa, int icomp, int numcomp, const IntVect &nghost)
 
template<class FAB , class bar = std::enable_if_t<IsBaseFab<FAB>::value>>
void prefetchToHost (FabArray< FAB > const &fa, const bool synchronous=true)
 
template<class FAB , class bar = std::enable_if_t<IsBaseFab<FAB>::value>>
void prefetchToDevice (FabArray< FAB > const &fa, const bool synchronous=true)
 
template<class FAB , class IFAB , class bar = std::enable_if_t<IsBaseFab<FAB>::value && IsBaseFab<IFAB>::value>>
void OverrideSync (FabArray< FAB > &fa, FabArray< IFAB > const &msk, const Periodicity &period)
 
template<class FAB , class IFAB , class bar = std::enable_if_t<IsBaseFab<FAB>::value && IsBaseFab<IFAB>::value>>
void OverrideSync_nowait (FabArray< FAB > &fa, FabArray< IFAB > const &msk, const Periodicity &period)
 
template<class FAB , class bar = std::enable_if_t<IsBaseFab<FAB>::value>>
void OverrideSync_finish (FabArray< FAB > &fa)
 
template<class FAB , class foo = std::enable_if_t<IsBaseFab<FAB>::value>>
void dtoh_memcpy (FabArray< FAB > &dst, FabArray< FAB > const &src, int scomp, int dcomp, int ncomp)
 
template<class FAB , class foo = std::enable_if_t<IsBaseFab<FAB>::value>>
void dtoh_memcpy (FabArray< FAB > &dst, FabArray< FAB > const &src)
 
template<class FAB , class foo = std::enable_if_t<IsBaseFab<FAB>::value>>
void htod_memcpy (FabArray< FAB > &dst, FabArray< FAB > const &src, int scomp, int dcomp, int ncomp)
 
template<class FAB , class foo = std::enable_if_t<IsBaseFab<FAB>::value>>
void htod_memcpy (FabArray< FAB > &dst, FabArray< FAB > const &src)
 
template<class FAB , class foo = std::enable_if_t<IsBaseFab<FAB>::value>>
IntVect indexFromValue (FabArray< FAB > const &mf, int comp, IntVect const &nghost, typename FAB::value_type value)
 
template<typename FAB , std::enable_if_t< IsBaseFab< FAB >::value, int > FOO = 0>
FAB::value_type Dot (FabArray< FAB > const &x, int xcomp, FabArray< FAB > const &y, int ycomp, int ncomp, IntVect const &nghost, bool local=false)
 Compute dot products of two FabArrays.
 
template<typename FAB , std::enable_if_t< IsBaseFab< FAB >::value, int > FOO = 0>
FAB::value_type Dot (FabArray< FAB > const &x, int xcomp, int ncomp, IntVect const &nghost, bool local=false)
 Compute dot product of FabArray with itself.
 
template<typename IFAB , typename FAB , std::enable_if_t< IsBaseFab< FAB >::value &&IsBaseFab< IFAB >::value, int > FOO = 0>
FAB::value_type Dot (FabArray< IFAB > const &mask, FabArray< FAB > const &x, int xcomp, FabArray< FAB > const &y, int ycomp, int ncomp, IntVect const &nghost, bool local=false)
 Compute dot product of two FabArrays in region that mask is true.
 
template<typename IFAB , typename FAB , std::enable_if_t< IsBaseFab< FAB >::value &&IsBaseFab< IFAB >::value, int > FOO = 0>
FAB::value_type Dot (FabArray< IFAB > const &mask, FabArray< FAB > const &x, int xcomp, int ncomp, IntVect const &nghost, bool local=false)
 Compute dot product of FabArray with itself in region that mask is true.
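 A sketch of a global inner product over the valid region (Dot performs the MPI reduction itself unless local=true):

    #include <AMReX_MultiFab.H>

    amrex::Real inner_product (amrex::MultiFab const& x, amrex::MultiFab const& y)
    {
        // component 0 of each, 1 component, no ghost cells
        return amrex::Dot(x, 0, y, 0, 1, amrex::IntVect(0));
    }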
 
template<class MF , std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0>
void setVal (MF &dst, typename MF::value_type val)
 dst = val
 
template<class MF , std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0>
void setBndry (MF &dst, typename MF::value_type val, int scomp, int ncomp)
 dst = val in ghost cells.
 
template<class MF , std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0>
void Scale (MF &dst, typename MF::value_type val, int scomp, int ncomp, int nghost)
 dst *= val
 
template<class DMF , class SMF , std::enable_if_t< IsMultiFabLike_v< DMF > &&IsMultiFabLike_v< SMF >, int > = 0>
void LocalCopy (DMF &dst, SMF const &src, int scomp, int dcomp, int ncomp, IntVect const &nghost)
 dst = src
 
template<class MF , std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0>
void LocalAdd (MF &dst, MF const &src, int scomp, int dcomp, int ncomp, IntVect const &nghost)
 dst += src
 
template<class MF , std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0>
void Saxpy (MF &dst, typename MF::value_type a, MF const &src, int scomp, int dcomp, int ncomp, IntVect const &nghost)
 dst += a * src
 
template<class MF , std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0>
void Xpay (MF &dst, typename MF::value_type a, MF const &src, int scomp, int dcomp, int ncomp, IntVect const &nghost)
 dst = src + a * dst
 
template<class MF , std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0>
void Saxpy_Xpay (MF &dst, typename MF::value_type a_saxpy, MF const &src_saxpy, typename MF::value_type a_xpay, MF const &src_xpay, int scomp, int dcomp, int ncomp, IntVect const &nghost)
 dst += a_saxpy * src_saxpy followed by dst = src_xpay + a_xpay * dst
 
template<class MF , std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0>
void Saxpy_Saxpy (MF &dst1, typename MF::value_type a1, MF const &src1, MF &dst2, typename MF::value_type a2, MF const &src2, int scomp, int dcomp, int ncomp, IntVect const &nghost)
 dst1 += a1 * src1 followed by dst2 += a2 * src2
 
template<class MF , std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0>
void Saypy_Saxpy (MF &dst1, typename MF::value_type a1, MF &dst2, typename MF::value_type a2, MF const &src, int scomp, int dcomp, int ncomp, IntVect const &nghost)
 dst1 += a1 * dst2 followed by dst2 += a2 * src
 
template<class MF , std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0>
void LinComb (MF &dst, typename MF::value_type a, MF const &src_a, int acomp, typename MF::value_type b, MF const &src_b, int bcomp, int dcomp, int ncomp, IntVect const &nghost)
 dst = a*src_a + b*src_b
 
template<class MF , std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0>
void ParallelCopy (MF &dst, MF const &src, int scomp, int dcomp, int ncomp, IntVect const &ng_src=IntVect(0), IntVect const &ng_dst=IntVect(0), Periodicity const &period=Periodicity::NonPeriodic())
 dst = src w/ MPI communication
 
template<class MF , std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0>
MF::value_type norminf (MF const &mf, int scomp, int ncomp, IntVect const &nghost, bool local=false)
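 A sketch composing the vector-space helpers above on MultiFabs with matching layouts:

    #include <AMReX_MultiFab.H>

    void axpy_demo (amrex::MultiFab& y, amrex::MultiFab const& x,
                    amrex::MultiFab& z, amrex::Real a, amrex::Real b)
    {
        amrex::setVal(z, amrex::Real(0.0));                           // z = 0
        amrex::Saxpy(y, a, x, 0, 0, 1, amrex::IntVect(0));            // y += a*x
        amrex::LinComb(z, a, x, 0, b, y, 0, 0, 1, amrex::IntVect(0)); // z = a*x + b*y
    }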
 
template<class MF , std::size_t N, std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0>
void setVal (Array< MF, N > &dst, typename MF::value_type val)
 dst = val
 
template<class MF , std::size_t N, std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0>
void setBndry (Array< MF, N > &dst, typename MF::value_type val, int scomp, int ncomp)
 dst = val in ghost cells.
 
template<class MF , std::size_t N, std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0>
void Scale (Array< MF, N > &dst, typename MF::value_type val, int scomp, int ncomp, int nghost)
 dst *= val
 
template<class DMF , class SMF , std::size_t N, std::enable_if_t< IsMultiFabLike_v< DMF > &&IsMultiFabLike_v< SMF >, int > = 0>
void LocalCopy (Array< DMF, N > &dst, Array< SMF, N > const &src, int scomp, int dcomp, int ncomp, IntVect const &nghost)
 dst = src
 
template<class MF , std::size_t N, std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0>
void LocalAdd (Array< MF, N > &dst, Array< MF, N > const &src, int scomp, int dcomp, int ncomp, IntVect const &nghost)
 dst += src
 
template<class MF , std::size_t N, std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0>
void Saxpy (Array< MF, N > &dst, typename MF::value_type a, Array< MF, N > const &src, int scomp, int dcomp, int ncomp, IntVect const &nghost)
 dst += a * src
 
template<class MF , std::size_t N, std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0>
void Xpay (Array< MF, N > &dst, typename MF::value_type a, Array< MF, N > const &src, int scomp, int dcomp, int ncomp, IntVect const &nghost)
 dst = src + a * dst
 
template<class MF , std::size_t N, std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0>
void LinComb (Array< MF, N > &dst, typename MF::value_type a, Array< MF, N > const &src_a, int acomp, typename MF::value_type b, Array< MF, N > const &src_b, int bcomp, int dcomp, int ncomp, IntVect const &nghost)
 dst = a*src_a + b*src_b
 
template<class MF , std::size_t N, std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0>
void ParallelCopy (Array< MF, N > &dst, Array< MF, N > const &src, int scomp, int dcomp, int ncomp, IntVect const &ng_src=IntVect(0), IntVect const &ng_dst=IntVect(0), Periodicity const &period=Periodicity::NonPeriodic())
 dst = src w/ MPI communication
 
template<class MF , std::size_t N, std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0>
MF::value_type norminf (Array< MF, N > const &mf, int scomp, int ncomp, IntVect const &nghost, bool local=false)
 
template<class MF , std::size_t N, std::enable_if_t< IsMultiFabLike_v< MF > &&(N > 0), int > = 0>
int nComp (Array< MF, N > const &mf)
 
template<class MF , std::size_t N, std::enable_if_t< IsMultiFabLike_v< MF > &&(N > 0), int > = 0>
IntVect nGrowVect (Array< MF, N > const &mf)
 
template<class MF , std::size_t N, std::enable_if_t< IsMultiFabLike_v< MF > &&(N > 0), int > = 0>
BoxArray const & boxArray (Array< MF, N > const &mf)
 
template<class MF , std::size_t N, std::enable_if_t< IsMultiFabLike_v< MF > &&(N > 0), int > = 0>
DistributionMapping const & DistributionMap (Array< MF, N > const &mf)
 
template<class FAB >
FabArray< BaseFab< int > > OverlapMask (FabArray< FAB > const &fa, IntVect const &nghost, Periodicity const &period)
 
std::ostream & operator<< (std::ostream &os, const IntDescriptor &id)
 
std::istream & operator>> (std::istream &is, IntDescriptor &id)
 
std::ostream & operator<< (std::ostream &os, const RealDescriptor &rd)
 
std::istream & operator>> (std::istream &is, RealDescriptor &rd)
 
std::ostream & operator<< (std::ostream &os, const FArrayBox &f)
 
std::istream & operator>> (std::istream &is, FArrayBox &f)
 
void fab_filcc (Box const &bx, Array4< Real > const &qn, int ncomp, Box const &domain, Real const *, Real const *, BCRec const *bcn)
 
void fab_filfc (Box const &bx, Array4< Real > const &qn, int ncomp, Box const &domain, Real const *, Real const *, BCRec const *bcn)
 
void fab_filnd (Box const &bx, Array4< Real > const &qn, int ncomp, Box const &domain, Real const *, Real const *, BCRec const *bcn)
 
std::ostream & operator<< (std::ostream &, const Geometry &)
 Write a Geometry to an ostream in ASCII format.
 
std::istream & operator>> (std::istream &, Geometry &)
 Read a Geometry from an istream in ASCII format.
 
Geometry coarsen (Geometry const &fine, IntVect const &rr)
 
Geometry coarsen (Geometry const &fine, int rr)
 
Geometry refine (Geometry const &crse, IntVect const &rr)
 
Geometry refine (Geometry const &crse, int rr)
 
const Geometry & DefaultGeometry ()
 
template<typename A1 , typename A2 , std::enable_if_t< IsArenaAllocator< A1 >::value &&IsArenaAllocator< A2 >::value, int > = 0>
bool operator== (A1 const &a1, A2 const &a2)
 
template<typename A1 , typename A2 , std::enable_if_t< IsArenaAllocator< A1 >::value &&IsArenaAllocator< A2 >::value, int > = 0>
bool operator!= (A1 const &a1, A2 const &a2)
 
template<typename T >
__host__ __device__ T norm (const GpuComplex< T > &a_z) noexcept
 Return the norm (magnitude squared) of a complex number.
 
template<typename U >
std::ostream & operator<< (std::ostream &out, const GpuComplex< U > &c)
 
template<typename T >
__host__ __device__ GpuComplex< T > operator+ (const GpuComplex< T > &a_x)
 Identity operation on a complex number.
 
template<typename T >
__host__ __device__ GpuComplex< T > operator- (const GpuComplex< T > &a_x)
 Negate a complex number.
 
template<typename T >
__host__ __device__ GpuComplex< T > operator- (const GpuComplex< T > &a_x, const GpuComplex< T > &a_y) noexcept
 Subtract two complex numbers.
 
template<typename T >
__host__ __device__ GpuComplex< T > operator- (const GpuComplex< T > &a_x, const T &a_y) noexcept
 Subtract a real number from a complex one.
 
template<typename T >
__host__ __device__ GpuComplex< T > operator- (const T &a_x, const GpuComplex< T > &a_y) noexcept
 Subtract a complex number from a real one.
 
template<typename T >
__host__ __device__ GpuComplex< T > operator+ (const GpuComplex< T > &a_x, const GpuComplex< T > &a_y) noexcept
 Add two complex numbers.
 
template<typename T >
__host__ __device__ GpuComplex< T > operator+ (const GpuComplex< T > &a_x, const T &a_y) noexcept
 Add a real number to a complex one.
 
template<typename T >
__host__ __device__ GpuComplex< T > operator+ (const T &a_x, const GpuComplex< T > &a_y) noexcept
 Add a complex number to a real one.
 
template<typename T , typename U >
__host__ __device__ GpuComplex< T > operator* (const GpuComplex< T > &a_x, const GpuComplex< U > &a_y) noexcept
 Multiply two complex numbers.
 
template<typename T , typename U >
__host__ __device__ GpuComplex< T > operator* (const GpuComplex< T > &a_x, const U &a_y) noexcept
 Multiply a complex number by a real one.
 
template<typename T >
__host__ __device__ GpuComplex< T > operator* (const T &a_x, const GpuComplex< T > &a_y) noexcept
 Multiply a real number by a complex one.
 
template<typename T >
__host__ __device__ GpuComplex< T > operator/ (const GpuComplex< T > &a_x, const GpuComplex< T > &a_y) noexcept
 Divide a complex number by another one.
 
template<typename T >
__host__ __device__ GpuComplex< T > operator/ (const GpuComplex< T > &a_x, const T &a_y) noexcept
 Divide a complex number by a real.
 
template<typename T >
__host__ __device__ GpuComplex< T > operator/ (const T &a_x, const GpuComplex< T > &a_y) noexcept
 Divide a real number by a complex one.
 
template<typename T >
__host__ __device__ GpuComplex< T > polar (const T &a_r, const T &a_theta) noexcept
 Return a complex number given its polar representation.
 
template<typename T >
__host__ __device__ GpuComplex< T > exp (const GpuComplex< T > &a_z) noexcept
 Complex exponential function.
 
template<typename T >
__host__ __device__ T abs (const GpuComplex< T > &a_z) noexcept
 Return the absolute value of a complex number.
 
template<typename T >
__host__ __device__ GpuComplex< T > sqrt (const GpuComplex< T > &a_z) noexcept
 Return the square root of a complex number.
 
template<typename T >
__host__ __device__ T arg (const GpuComplex< T > &a_z) noexcept
 Return the angle of a complex number's polar representation.
 
template<typename T >
__host__ __device__ GpuComplex< T > log (const GpuComplex< T > &a_z) noexcept
 Complex natural logarithm function.
 
template<typename T >
__host__ __device__ GpuComplex< T > pow (const GpuComplex< T > &a_z, const T &a_y) noexcept
 Raise a complex number to a (real) power.
 
template<typename T >
__host__ __device__ GpuComplex< T > pow (const GpuComplex< T > &a_z, int a_n) noexcept
 Raise a complex number to an integer power.
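 A small host-side sketch of GpuComplex arithmetic and the free functions above:

    #include <AMReX_GpuComplex.H>

    int main () {
        using C = amrex::GpuComplex<amrex::Real>;
        C z(3.0, 4.0);
        amrex::Real m = amrex::abs(z);    // 5 (magnitude)
        amrex::Real n = amrex::norm(z);   // 25 (magnitude squared)
        C w = z*z + amrex::polar(amrex::Real(1.0), amrex::Real(0.5));
        return (m == amrex::Real(5.0) && n == amrex::Real(25.0) && w.real() != 0)
               ? 0 : 1;
    }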
 
gpuError_t gpuGetLastError ()
 
const char * gpuGetErrorString (gpuError_t error)
 
template<class L , class... Lambdas>
__global__ void launch_global (L f0, Lambdas... fs)
 
template<class L >
void launch_host (L &&f0) noexcept
 
template<class L , class... Lambdas>
void launch_host (L &&f0, Lambdas &&... fs) noexcept
 
template<typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>>
void ParallelForOMP (T n, L const &f) noexcept
 Performance-portable kernel launch function with optional OpenMP threading.
 
template<typename L >
void ParallelForOMP (Box const &box, L const &f) noexcept
 Performance-portable kernel launch function with optional OpenMP threading.
 
template<typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>>
void ParallelForOMP (Box const &box, T ncomp, L const &f) noexcept
 Performance-portable kernel launch function with optional OpenMP threading.
 
template<typename T , typename L >
void launch (T const &n, L &&f) noexcept
 
template<int MT, typename T , typename L >
void launch (T const &n, L &&f) noexcept
 
template<typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>>
AMREX_ATTRIBUTE_FLATTEN_FOR void For (T n, L const &f) noexcept
 
template<int MT, typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>>
void For (T n, L &&f) noexcept
 
template<typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>>
void For (Gpu::KernelInfo const &, T n, L &&f) noexcept
 
template<int MT, typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>>
void For (Gpu::KernelInfo const &, T n, L &&f) noexcept
 
template<typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>>
AMREX_ATTRIBUTE_FLATTEN_FOR void ParallelFor (T n, L const &f) noexcept
 
template<int MT, typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>>
void ParallelFor (T n, L &&f) noexcept
 Performance-portable kernel launch function.
 
template<int WIDTH, typename N , typename L , typename M = std::enable_if_t<std::is_integral_v<N>>>
AMREX_ATTRIBUTE_FLATTEN_FOR void ParallelForSIMD (N n, L const &f) noexcept
 
template<typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>>
void ParallelFor (Gpu::KernelInfo const &, T n, L &&f) noexcept
 
template<int MT, typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>>
void ParallelFor (Gpu::KernelInfo const &, T n, L &&f) noexcept
 
template<typename L , int dim>
AMREX_ATTRIBUTE_FLATTEN_FOR void For (BoxND< dim > const &box, L const &f) noexcept
 
template<int MT, typename L , int dim>
void For (BoxND< dim > const &box, L &&f) noexcept
 
template<typename L , int dim>
void For (Gpu::KernelInfo const &, BoxND< dim > const &box, L &&f) noexcept
 
template<int MT, typename L , int dim>
void For (Gpu::KernelInfo const &, BoxND< dim > const &box, L &&f) noexcept
 
template<typename L , int dim>
AMREX_ATTRIBUTE_FLATTEN_FOR void ParallelFor (BoxND< dim > const &box, L const &f) noexcept
 
template<int MT, typename L , int dim>
void ParallelFor (BoxND< dim > const &box, L &&f) noexcept
 Performance-portable kernel launch function.
 
template<typename L , int dim>
void ParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box, L &&f) noexcept
 
template<int MT, typename L , int dim>
void ParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box, L &&f) noexcept
 
template<typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>>
AMREX_ATTRIBUTE_FLATTEN_FOR void For (BoxND< dim > const &box, T ncomp, L const &f) noexcept
 
template<int MT, typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>>
void For (BoxND< dim > const &box, T ncomp, L &&f) noexcept
 
template<typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>>
void For (Gpu::KernelInfo const &, BoxND< dim > const &box, T ncomp, L &&f) noexcept
 
template<int MT, typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>>
void For (Gpu::KernelInfo const &, BoxND< dim > const &box, T ncomp, L &&f) noexcept
 
template<typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>>
AMREX_ATTRIBUTE_FLATTEN_FOR void ParallelFor (BoxND< dim > const &box, T ncomp, L const &f) noexcept
 
template<int MT, typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>>
void ParallelFor (BoxND< dim > const &box, T ncomp, L &&f) noexcept
 Performance-portable kernel launch function.
 
template<typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>>
void ParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box, T ncomp, L &&f) noexcept
 
template<int MT, typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>>
void ParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box, T ncomp, L &&f) noexcept
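 The canonical use combines MFIter with ParallelFor: in GPU builds the lambda becomes a device kernel over the box, while in CPU builds it lowers to (optionally tiled) loops. A sketch:

    #include <AMReX_MultiFab.H>

    void fill_with_component_index (amrex::MultiFab& mf)
    {
        for (amrex::MFIter mfi(mf, amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi) {
            const amrex::Box& bx = mfi.tilebox();
            auto const& a = mf.array(mfi);
            amrex::ParallelFor(bx, mf.nComp(),
                [=] AMREX_GPU_DEVICE (int i, int j, int k, int n)
                {
                    a(i,j,k,n) = static_cast<amrex::Real>(n);
                });
        }
    }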
 
template<typename L1 , typename L2 , int dim>
void For (BoxND< dim > const &box1, BoxND< dim > const &box2, L1 &&f1, L2 &&f2) noexcept
 
template<int MT, typename L1 , typename L2 , int dim>
void For (BoxND< dim > const &box1, BoxND< dim > const &box2, L1 &&f1, L2 &&f2) noexcept
 
template<typename L1 , typename L2 , int dim>
void For (Gpu::KernelInfo const &, BoxND< dim > const &box1, BoxND< dim > const &box2, L1 &&f1, L2 &&f2) noexcept
 
template<int MT, typename L1 , typename L2 , int dim>
void For (Gpu::KernelInfo const &, BoxND< dim > const &box1, BoxND< dim > const &box2, L1 &&f1, L2 &&f2) noexcept
 
template<typename L1 , typename L2 , typename L3 , int dim>
void For (BoxND< dim > const &box1, BoxND< dim > const &box2, BoxND< dim > const &box3, L1 &&f1, L2 &&f2, L3 &&f3) noexcept
 
template<int MT, typename L1 , typename L2 , typename L3 , int dim>
void For (BoxND< dim > const &box1, BoxND< dim > const &box2, BoxND< dim > const &box3, L1 &&f1, L2 &&f2, L3 &&f3) noexcept
 
template<typename L1 , typename L2 , typename L3 , int dim>
void For (Gpu::KernelInfo const &, BoxND< dim > const &box1, BoxND< dim > const &box2, BoxND< dim > const &box3, L1 &&f1, L2 &&f2, L3 &&f3) noexcept
 
template<int MT, typename L1 , typename L2 , typename L3 , int dim>
void For (Gpu::KernelInfo const &, BoxND< dim > const &box1, BoxND< dim > const &box2, BoxND< dim > const &box3, L1 &&f1, L2 &&f2, L3 &&f3) noexcept
 
template<typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>>
void For (BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2) noexcept
 
template<int MT, typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>>
void For (BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2) noexcept
 
template<typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>>
void For (Gpu::KernelInfo const &, BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2) noexcept
 
template<int MT, typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>>
void For (Gpu::KernelInfo const &, BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2) noexcept
 
template<typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>, typename M3 = std::enable_if_t<std::is_integral_v<T3>>>
void For (BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2, BoxND< dim > const &box3, T3 ncomp3, L3 &&f3) noexcept
 
template<int MT, typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>, typename M3 = std::enable_if_t<std::is_integral_v<T3>>>
void For (BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2, BoxND< dim > const &box3, T3 ncomp3, L3 &&f3) noexcept
 
template<typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>, typename M3 = std::enable_if_t<std::is_integral_v<T3>>>
void For (Gpu::KernelInfo const &, BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2, BoxND< dim > const &box3, T3 ncomp3, L3 &&f3) noexcept
 
template<int MT, typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>, typename M3 = std::enable_if_t<std::is_integral_v<T3>>>
void For (Gpu::KernelInfo const &, BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2, BoxND< dim > const &box3, T3 ncomp3, L3 &&f3) noexcept
 
template<typename L1 , typename L2 , int dim>
void ParallelFor (BoxND< dim > const &box1, BoxND< dim > const &box2, L1 &&f1, L2 &&f2) noexcept
 Performance-portable kernel launch function.
 
template<int MT, typename L1 , typename L2 , int dim>
void ParallelFor (BoxND< dim > const &box1, BoxND< dim > const &box2, L1 &&f1, L2 &&f2) noexcept
 Performance-portable kernel launch function.
 
template<typename L1 , typename L2 , int dim>
void ParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, BoxND< dim > const &box2, L1 &&f1, L2 &&f2) noexcept
 
template<int MT, typename L1 , typename L2 , int dim>
void ParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, BoxND< dim > const &box2, L1 &&f1, L2 &&f2) noexcept
 
template<typename L1 , typename L2 , typename L3 , int dim>
void ParallelFor (BoxND< dim > const &box1, BoxND< dim > const &box2, BoxND< dim > const &box3, L1 &&f1, L2 &&f2, L3 &&f3) noexcept
 Performance-portable kernel launch function.
 
template<int MT, typename L1 , typename L2 , typename L3 , int dim>
void ParallelFor (BoxND< dim > const &box1, BoxND< dim > const &box2, BoxND< dim > const &box3, L1 &&f1, L2 &&f2, L3 &&f3) noexcept
 Performance-portable kernel launch function.
 
template<typename L1 , typename L2 , typename L3 , int dim>
void ParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, BoxND< dim > const &box2, BoxND< dim > const &box3, L1 &&f1, L2 &&f2, L3 &&f3) noexcept
 
template<int MT, typename L1 , typename L2 , typename L3 , int dim>
void ParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, BoxND< dim > const &box2, BoxND< dim > const &box3, L1 &&f1, L2 &&f2, L3 &&f3) noexcept
 
template<typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>>
void ParallelFor (BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2) noexcept
 Performance-portable kernel launch function.
 
template<int MT, typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>>
void ParallelFor (BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2) noexcept
 Performance-portable kernel launch function.
 
template<typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>>
void ParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2) noexcept
 
template<int MT, typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>>
void ParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2) noexcept
 
template<typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>, typename M3 = std::enable_if_t<std::is_integral_v<T3>>>
void ParallelFor (BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2, BoxND< dim > const &box3, T3 ncomp3, L3 &&f3) noexcept
 Performance-portable kernel launch function.
 
template<int MT, typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>, typename M3 = std::enable_if_t<std::is_integral_v<T3>>>
void ParallelFor (BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2, BoxND< dim > const &box3, T3 ncomp3, L3 &&f3) noexcept
 Performance-portable kernel launch function.
 
template<typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>, typename M3 = std::enable_if_t<std::is_integral_v<T3>>>
void ParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2, BoxND< dim > const &box3, T3 ncomp3, L3 &&f3) noexcept
 
template<int MT, typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>, typename M3 = std::enable_if_t<std::is_integral_v<T3>>>
void ParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2, BoxND< dim > const &box3, T3 ncomp3, L3 &&f3) noexcept
 
template<typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>>
void HostDeviceParallelFor (T n, L &&f) noexcept
 
template<int MT, typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>>
void HostDeviceParallelFor (T n, L &&f) noexcept
 
template<typename L , int dim>
void HostDeviceParallelFor (BoxND< dim > const &box, L &&f) noexcept
 
template<int MT, typename L , int dim>
void HostDeviceParallelFor (BoxND< dim > const &box, L &&f) noexcept
 
template<typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>>
void HostDeviceParallelFor (BoxND< dim > const &box, T ncomp, L &&f) noexcept
 
template<int MT, typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>>
void HostDeviceParallelFor (BoxND< dim > const &box, T ncomp, L &&f) noexcept
 
template<typename L1 , typename L2 , int dim>
void HostDeviceParallelFor (BoxND< dim > const &box1, BoxND< dim > const &box2, L1 &&f1, L2 &&f2) noexcept
 
template<int MT, typename L1 , typename L2 , int dim>
void HostDeviceParallelFor (BoxND< dim > const &box1, BoxND< dim > const &box2, L1 &&f1, L2 &&f2) noexcept
 
template<typename L1 , typename L2 , typename L3 , int dim>
void HostDeviceParallelFor (BoxND< dim > const &box1, BoxND< dim > const &box2, BoxND< dim > const &box3, L1 &&f1, L2 &&f2, L3 &&f3) noexcept
 
template<int MT, typename L1 , typename L2 , typename L3 , int dim>
void HostDeviceParallelFor (BoxND< dim > const &box1, BoxND< dim > const &box2, BoxND< dim > const &box3, L1 &&f1, L2 &&f2, L3 &&f3) noexcept
 
template<typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>>
void HostDeviceParallelFor (BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2) noexcept
 
template<int MT, typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>>
void HostDeviceParallelFor (BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2) noexcept
 
template<typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>, typename M3 = std::enable_if_t<std::is_integral_v<T3>>>
void HostDeviceParallelFor (BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2, BoxND< dim > const &box3, T3 ncomp3, L3 &&f3) noexcept
 
template<int MT, typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>, typename M3 = std::enable_if_t<std::is_integral_v<T3>>>
void HostDeviceParallelFor (BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2, BoxND< dim > const &box3, T3 ncomp3, L3 &&f3) noexcept
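
The HostDevice variants expect lambdas that compile for both host and device, and AMReX chooses where to execute them. A minimal sketch, assuming p points to n reals accessible from either side:

    amrex::HostDeviceParallelFor(n,
        [=] AMREX_GPU_HOST_DEVICE (int i) noexcept
        {
            p[i] *= 2.0;  // must be valid on both host and device
        });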
 
template<typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>>
void HostDeviceFor (T n, L &&f) noexcept
 
template<int MT, typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>>
void HostDeviceFor (T n, L &&f) noexcept
 
template<typename L , int dim>
void HostDeviceFor (BoxND< dim > const &box, L &&f) noexcept
 
template<int MT, typename L , int dim>
void HostDeviceFor (BoxND< dim > const &box, L &&f) noexcept
 
template<typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>>
void HostDeviceFor (BoxND< dim > const &box, T ncomp, L &&f) noexcept
 
template<int MT, typename T , int dim, typename L , typename M = std::enable_if_t<std::is_integral_v<T>>>
void HostDeviceFor (BoxND< dim > const &box, T ncomp, L &&f) noexcept
 
template<typename L1 , typename L2 , int dim>
void HostDeviceFor (BoxND< dim > const &box1, BoxND< dim > const &box2, L1 &&f1, L2 &&f2) noexcept
 
template<int MT, typename L1 , typename L2 , int dim>
void HostDeviceFor (BoxND< dim > const &box1, BoxND< dim > const &box2, L1 &&f1, L2 &&f2) noexcept
 
template<typename L1 , typename L2 , typename L3 , int dim>
void HostDeviceFor (BoxND< dim > const &box1, BoxND< dim > const &box2, BoxND< dim > const &box3, L1 &&f1, L2 &&f2, L3 &&f3) noexcept
 
template<int MT, typename L1 , typename L2 , typename L3 , int dim>
void HostDeviceFor (BoxND< dim > const &box1, BoxND< dim > const &box2, BoxND< dim > const &box3, L1 &&f1, L2 &&f2, L3 &&f3) noexcept
 
template<typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>>
void HostDeviceFor (BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2) noexcept
 
template<int MT, typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>>
void HostDeviceFor (BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2) noexcept
 
template<typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>, typename M3 = std::enable_if_t<std::is_integral_v<T3>>>
void HostDeviceFor (BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2, BoxND< dim > const &box3, T3 ncomp3, L3 &&f3) noexcept
 
template<int MT, typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>, typename M3 = std::enable_if_t<std::is_integral_v<T3>>>
void HostDeviceFor (BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2, BoxND< dim > const &box3, T3 ncomp3, L3 &&f3) noexcept
 
template<typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>>
void HostDeviceParallelFor (Gpu::KernelInfo const &, T n, L &&f) noexcept
 
template<int MT, typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>>
void HostDeviceParallelFor (Gpu::KernelInfo const &, T n, L &&f) noexcept
 
template<typename L , int dim>
void HostDeviceParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box, L &&f) noexcept
 
template<int MT, typename L , int dim>
void HostDeviceParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box, L &&f) noexcept
 
template<typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>>
void HostDeviceParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box, T ncomp, L &&f) noexcept
 
template<int MT, typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>>
void HostDeviceParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box, T ncomp, L &&f) noexcept
 
template<typename L1 , typename L2 , int dim>
void HostDeviceParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, BoxND< dim > const &box2, L1 &&f1, L2 &&f2) noexcept
 
template<int MT, typename L1 , typename L2 , int dim>
void HostDeviceParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, BoxND< dim > const &box2, L1 &&f1, L2 &&f2) noexcept
 
template<typename L1 , typename L2 , typename L3 , int dim>
void HostDeviceParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, BoxND< dim > const &box2, BoxND< dim > const &box3, L1 &&f1, L2 &&f2, L3 &&f3) noexcept
 
template<int MT, typename L1 , typename L2 , typename L3 , int dim>
void HostDeviceParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, BoxND< dim > const &box2, BoxND< dim > const &box3, L1 &&f1, L2 &&f2, L3 &&f3) noexcept
 
template<typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>>
void HostDeviceParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2) noexcept
 
template<int MT, typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>>
void HostDeviceParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2) noexcept
 
template<typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>, typename M3 = std::enable_if_t<std::is_integral_v<T3>>>
void HostDeviceParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2, BoxND< dim > const &box3, T3 ncomp3, L3 &&f3) noexcept
 
template<int MT, typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>, typename M3 = std::enable_if_t<std::is_integral_v<T3>>>
void HostDeviceParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2, BoxND< dim > const &box3, T3 ncomp3, L3 &&f3) noexcept
 
template<typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>>
void HostDeviceFor (Gpu::KernelInfo const &, T n, L &&f) noexcept
 
template<int MT, typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>>
void HostDeviceFor (Gpu::KernelInfo const &, T n, L &&f) noexcept
 
template<typename L , int dim>
void HostDeviceFor (Gpu::KernelInfo const &, BoxND< dim > const &box, L &&f) noexcept
 
template<int MT, typename L , int dim>
void HostDeviceFor (Gpu::KernelInfo const &, BoxND< dim > const &box, L &&f) noexcept
 
template<typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>>
void HostDeviceFor (Gpu::KernelInfo const &, BoxND< dim > const &box, T ncomp, L &&f) noexcept
 
template<int MT, typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>>
void HostDeviceFor (Gpu::KernelInfo const &, BoxND< dim > const &box, T ncomp, L &&f) noexcept
 
template<typename L1 , typename L2 , int dim>
void HostDeviceFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, BoxND< dim > const &box2, L1 &&f1, L2 &&f2) noexcept
 
template<int MT, typename L1 , typename L2 , int dim>
void HostDeviceFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, BoxND< dim > const &box2, L1 &&f1, L2 &&f2) noexcept
 
template<typename L1 , typename L2 , typename L3 , int dim>
void HostDeviceFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, BoxND< dim > const &box2, BoxND< dim > const &box3, L1 &&f1, L2 &&f2, L3 &&f3) noexcept
 
template<int MT, typename L1 , typename L2 , typename L3 , int dim>
void HostDeviceFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, BoxND< dim > const &box2, BoxND< dim > const &box3, L1 &&f1, L2 &&f2, L3 &&f3) noexcept
 
template<typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>>
void HostDeviceFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2) noexcept
 
template<int MT, typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>>
void HostDeviceFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2) noexcept
 
template<typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>, typename M3 = std::enable_if_t<std::is_integral_v<T3>>>
void HostDeviceFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2, BoxND< dim > const &box3, T3 ncomp3, L3 &&f3) noexcept
 
template<int MT, typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>, typename M3 = std::enable_if_t<std::is_integral_v<T3>>>
void HostDeviceFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2, BoxND< dim > const &box3, T3 ncomp3, L3 &&f3) noexcept
 
template<typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>>
AMREX_ATTRIBUTE_FLATTEN_FOR void ParallelForRNG (T n, L const &f) noexcept
 
template<typename L , int dim>
AMREX_ATTRIBUTE_FLATTEN_FOR void ParallelForRNG (BoxND< dim > const &box, L const &f) noexcept
 
template<typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>>
AMREX_ATTRIBUTE_FLATTEN_FOR void ParallelForRNG (BoxND< dim > const &box, T ncomp, L const &f) noexcept
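
The RNG variants pass an extra amrex::RandomEngine argument to the lambda so random numbers can be drawn safely inside device code. A minimal sketch, assuming p is a device-accessible pointer:

    amrex::ParallelForRNG(n,
        [=] AMREX_GPU_DEVICE (int i, amrex::RandomEngine const& engine) noexcept
        {
            p[i] = amrex::Random(engine);  // uniform on the unit interval
        });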
 
template<typename L >
void single_task (L &&f) noexcept
 
template<typename L >
void single_task (gpuStream_t stream, L const &f) noexcept
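
single_task runs the given function exactly once, on the device when GPU support is enabled. A minimal sketch, assuming p is a device-accessible pointer:

    amrex::single_task([=] AMREX_GPU_DEVICE () noexcept
    {
        p[0] = 0.0;  // executed by a single thread
    });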
 
template<int MT, typename L >
void launch (int nblocks, std::size_t shared_mem_bytes, gpuStream_t stream, L const &f) noexcept
 
template<int MT, typename L >
void launch (int nblocks, gpuStream_t stream, L const &f) noexcept
 
template<typename L >
void launch (int nblocks, int nthreads_per_block, std::size_t shared_mem_bytes, gpuStream_t stream, L const &f) noexcept
 
template<typename L >
void launch (int nblocks, int nthreads_per_block, gpuStream_t stream, L &&f) noexcept
 
template<int MT, typename T , typename L , std::enable_if_t< std::is_integral_v< T >, int > FOO = 0>
void launch (T const &n, L const &f) noexcept
 
template<int MT, int dim, typename L >
void launch (BoxND< dim > const &box, L const &f) noexcept
 
template<int MT, typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>>
std::enable_if_t< MaybeDeviceRunnable< L >::value > ParallelFor (Gpu::KernelInfo const &, T n, L const &f) noexcept
 Performance-portable kernel launch function.
 
template<int MT, typename L , int dim>
std::enable_if_t< MaybeDeviceRunnable< L >::value > ParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box, L const &f) noexcept
 Performance-portable kernel launch function.
 
template<int MT, typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>>
std::enable_if_t< MaybeDeviceRunnable< L >::value > ParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box, T ncomp, L const &f) noexcept
 Performance-portable kernel launch function.
 
template<typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>>
std::enable_if_t< MaybeDeviceRunnable< L >::value > ParallelForRNG (T n, L const &f) noexcept
 Performance-portable kernel launch function with random number generation support.
 
template<typename L , int dim>
std::enable_if_t< MaybeDeviceRunnable< L >::value > ParallelForRNG (BoxND< dim > const &box, L const &f) noexcept
 Performance-portable kernel launch function with random number generation support.
 
template<typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>>
std::enable_if_t< MaybeDeviceRunnable< L >::value > ParallelForRNG (BoxND< dim > const &box, T ncomp, L const &f) noexcept
 Performance-portable kernel launch function with random number generation support.
 
template<int MT, typename L1 , typename L2 , int dim>
std::enable_if_t< MaybeDeviceRunnable< L1 >::value &&MaybeDeviceRunnable< L2 >::value > ParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, BoxND< dim > const &box2, L1 &&f1, L2 &&f2) noexcept
 Performance-portable kernel launch function.
 
template<int MT, typename L1 , typename L2 , typename L3 , int dim>
std::enable_if_t< MaybeDeviceRunnable< L1 >::value &&MaybeDeviceRunnable< L2 >::value &&MaybeDeviceRunnable< L3 >::value > ParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, BoxND< dim > const &box2, BoxND< dim > const &box3, L1 &&f1, L2 &&f2, L3 &&f3) noexcept
 Performance-portable kernel launch function.
 
template<int MT, typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>>
std::enable_if_t< MaybeDeviceRunnable< L1 >::value &&MaybeDeviceRunnable< L2 >::value > ParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2) noexcept
 Performance-portable kernel launch function.
 
template<int MT, typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>, typename M3 = std::enable_if_t<std::is_integral_v<T3>>>
std::enable_if_t< MaybeDeviceRunnable< L1 >::value &&MaybeDeviceRunnable< L2 >::value &&MaybeDeviceRunnable< L3 >::value > ParallelFor (Gpu::KernelInfo const &, BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2, BoxND< dim > const &box3, T3 ncomp3, L3 &&f3) noexcept
 Performance-portable kernel launch function.
 
template<typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>>
std::enable_if_t< MaybeDeviceRunnable< L >::value > ParallelFor (Gpu::KernelInfo const &info, T n, L &&f) noexcept
 Performance-portable kernel launch function.
 
template<typename L , int dim>
std::enable_if_t< MaybeDeviceRunnable< L >::value > ParallelFor (Gpu::KernelInfo const &info, BoxND< dim > const &box, L &&f) noexcept
 Performance-portable kernel launch function.
 
template<typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>>
std::enable_if_t< MaybeDeviceRunnable< L >::value > ParallelFor (Gpu::KernelInfo const &info, BoxND< dim > const &box, T ncomp, L &&f) noexcept
 Performance-portable kernel launch function.
 
template<typename L1 , typename L2 , int dim>
std::enable_if_t< MaybeDeviceRunnable< L1 >::value &&MaybeDeviceRunnable< L2 >::value > ParallelFor (Gpu::KernelInfo const &info, BoxND< dim > const &box1, BoxND< dim > const &box2, L1 &&f1, L2 &&f2) noexcept
 Performance-portable kernel launch function.
 
template<typename L1 , typename L2 , typename L3 , int dim>
std::enable_if_t< MaybeDeviceRunnable< L1 >::value &&MaybeDeviceRunnable< L2 >::value &&MaybeDeviceRunnable< L3 >::value > ParallelFor (Gpu::KernelInfo const &info, BoxND< dim > const &box1, BoxND< dim > const &box2, BoxND< dim > const &box3, L1 &&f1, L2 &&f2, L3 &&f3) noexcept
 Performance-portable kernel launch function.
 
template<typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>>
std::enable_if_t< MaybeDeviceRunnable< L1 >::value &&MaybeDeviceRunnable< L2 >::value > ParallelFor (Gpu::KernelInfo const &info, BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2) noexcept
 Performance-portable kernel launch function.
 
template<typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>, typename M3 = std::enable_if_t<std::is_integral_v<T3>>>
std::enable_if_t< MaybeDeviceRunnable< L1 >::value &&MaybeDeviceRunnable< L2 >::value &&MaybeDeviceRunnable< L3 >::value > ParallelFor (Gpu::KernelInfo const &info, BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2, BoxND< dim > const &box3, T3 ncomp3, L3 &&f3) noexcept
 Performance-portable kernel launch function.
 
template<typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>>
void ParallelFor (T n, L &&f) noexcept
 Performance-portable kernel launch function.
 
template<typename L , int dim>
void ParallelFor (BoxND< dim > const &box, L &&f) noexcept
 Performance-portable kernel launch function.
 
template<typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>>
void ParallelFor (BoxND< dim > const &box, T ncomp, L &&f) noexcept
 Performance-portable kernel launch function.
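
These three overloads are the most common entry points: a 1D loop over n elements, a 3D loop over a Box, and a 3D loop with an extra component index. A typical sketch, where bx is a Box and a is an Array4 obtained inside an MFIter loop:

    amrex::ParallelFor(bx, ncomp,
        [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept
        {
            a(i,j,k,n) = 0.0;  // runs on the GPU if enabled, otherwise on the host
        });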
 
template<typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>>
void For (T n, L &&f) noexcept
 
template<typename L , int dim>
void For (BoxND< dim > const &box, L &&f) noexcept
 
template<typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>>
void For (BoxND< dim > const &box, T ncomp, L &&f) noexcept
 
template<typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>>
std::enable_if_t< MaybeHostDeviceRunnable< L >::value > HostDeviceParallelFor (Gpu::KernelInfo const &info, T n, L &&f) noexcept
 
template<int MT, typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>>
std::enable_if_t< MaybeHostDeviceRunnable< L >::value > HostDeviceParallelFor (Gpu::KernelInfo const &info, T n, L &&f) noexcept
 
template<typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>>
std::enable_if_t< MaybeHostDeviceRunnable< L >::value > HostDeviceParallelFor (T n, L &&f) noexcept
 
template<int MT, typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>>
std::enable_if_t< MaybeHostDeviceRunnable< L >::value > HostDeviceParallelFor (T n, L &&f) noexcept
 
template<typename L , int dim>
std::enable_if_t< MaybeHostDeviceRunnable< L >::value > HostDeviceParallelFor (Gpu::KernelInfo const &info, BoxND< dim > const &box, L &&f) noexcept
 
template<int MT, typename L , int dim>
std::enable_if_t< MaybeHostDeviceRunnable< L >::value > HostDeviceParallelFor (Gpu::KernelInfo const &info, BoxND< dim > const &box, L &&f) noexcept
 
template<typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>>
std::enable_if_t< MaybeHostDeviceRunnable< L >::value > HostDeviceParallelFor (Gpu::KernelInfo const &info, BoxND< dim > const &box, T ncomp, L &&f) noexcept
 
template<int MT, typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>>
std::enable_if_t< MaybeHostDeviceRunnable< L >::value > HostDeviceParallelFor (Gpu::KernelInfo const &info, BoxND< dim > const &box, T ncomp, L &&f) noexcept
 
template<typename L1 , typename L2 , int dim>
std::enable_if_t< MaybeHostDeviceRunnable< L1 >::value &&MaybeHostDeviceRunnable< L2 >::value > HostDeviceParallelFor (Gpu::KernelInfo const &info, BoxND< dim > const &box1, BoxND< dim > const &box2, L1 &&f1, L2 &&f2) noexcept
 
template<int MT, typename L1 , typename L2 , int dim>
std::enable_if_t< MaybeHostDeviceRunnable< L1 >::value &&MaybeHostDeviceRunnable< L2 >::value > HostDeviceParallelFor (Gpu::KernelInfo const &info, BoxND< dim > const &box1, BoxND< dim > const &box2, L1 &&f1, L2 &&f2) noexcept
 
template<int MT, typename L1 , typename L2 , typename L3 , int dim>
std::enable_if_t< MaybeHostDeviceRunnable< L1 >::value &&MaybeHostDeviceRunnable< L2 >::value &&MaybeHostDeviceRunnable< L3 >::value > HostDeviceParallelFor (Gpu::KernelInfo const &info, BoxND< dim > const &box1, BoxND< dim > const &box2, BoxND< dim > const &box3, L1 &&f1, L2 &&f2, L3 &&f3) noexcept
 
template<typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>>
std::enable_if_t< MaybeHostDeviceRunnable< L1 >::value &&MaybeHostDeviceRunnable< L2 >::value > HostDeviceParallelFor (Gpu::KernelInfo const &info, BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2) noexcept
 
template<int MT, typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>>
std::enable_if_t< MaybeHostDeviceRunnable< L1 >::value &&MaybeHostDeviceRunnable< L2 >::value > HostDeviceParallelFor (Gpu::KernelInfo const &info, BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2) noexcept
 
template<typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>, typename M3 = std::enable_if_t<std::is_integral_v<T3>>>
std::enable_if_t< MaybeHostDeviceRunnable< L1 >::value &&MaybeHostDeviceRunnable< L2 >::value &&MaybeHostDeviceRunnable< L3 >::value > HostDeviceParallelFor (Gpu::KernelInfo const &info, BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2, BoxND< dim > const &box3, T3 ncomp3, L3 &&f3) noexcept
 
template<int MT, typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>, typename M3 = std::enable_if_t<std::is_integral_v<T3>>>
std::enable_if_t< MaybeHostDeviceRunnable< L1 >::value &&MaybeHostDeviceRunnable< L2 >::value &&MaybeHostDeviceRunnable< L3 >::value > HostDeviceParallelFor (Gpu::KernelInfo const &info, BoxND< dim > const &box1, T1 ncomp1, L1 &&f1, BoxND< dim > const &box2, T2 ncomp2, L2 &&f2, BoxND< dim > const &box3, T3 ncomp3, L3 &&f3) noexcept
 
template<class L >
__global__ void launch_global (L f0)
 
template<typename T , std::enable_if_t< std::is_integral_v< T >, int > = 0>
bool isEmpty (T n) noexcept
 
template<int dim>
bool isEmpty (BoxND< dim > const &b) noexcept
 
std::ostream & operator<< (std::ostream &os, const dim3 &d)
 
std::unique_ptr< iMultiFab > OwnerMask (FabArrayBase const &mf, const Periodicity &period, const IntVect &ngrow)
 
template<int dim>
__host__ __device__ IndexTypeND (const IntVectND< dim > &) -> IndexTypeND< dim >
 
template<class... Args, std::enable_if_t< IsConvertible_v< IndexType::CellIndex, Args... >, int > = 0>
__host__ __device__ IndexTypeND (IndexType::CellIndex, Args...) -> IndexTypeND< sizeof...(Args)+1 >
 
template<int dim>
std::ostream & operator<< (std::ostream &os, const IndexTypeND< dim > &it)
 Write an IndexTypeND to an ostream in ASCII.
 
template<int dim>
std::istream & operator>> (std::istream &is, IndexTypeND< dim > &it)
 Read an IndexTypeND from an istream.
 
template<int d, int... dims>
__host__ __device__ constexpr IndexTypeND< detail::get_sum< d, dims... >()> IndexTypeCat (const IndexTypeND< d > &v, const IndexTypeND< dims > &...vects) noexcept
 Returns an IndexTypeND obtained by concatenating the input IndexTypeNDs. The dimension of the return value equals the sum of the dimensions of the input IndexTypeNDs.
 
template<int d, int... dims>
__host__ __device__ constexpr GpuTuple< IndexTypeND< d >, IndexTypeND< dims >... > IndexTypeSplit (const IndexTypeND< detail::get_sum< d, dims... >()> &v) noexcept
 Returns a tuple of IndexTypeND obtained by splitting the input IndexTypeND according to the dimensions specified by the template arguments.
 
template<int new_dim, int old_dim>
__host__ __device__ constexpr IndexTypeND< new_dim > IndexTypeShrink (const IndexTypeND< old_dim > &v) noexcept
 Returns a new IndexTypeND of size new_dim and assigns the first new_dim values of v to it.
 
template<int new_dim, int old_dim>
__host__ __device__ constexpr IndexTypeND< new_dim > IndexTypeExpand (const IndexTypeND< old_dim > &v, IndexType::CellIndex fill_extra=IndexType::CellIndex::CELL) noexcept
 Returns a new IndexTypeND of size new_dim and assigns all values of v to it and fill_extra to the remaining elements.
 
template<int new_dim, int old_dim>
__host__ __device__ constexpr IndexTypeND< new_dim > IndexTypeResize (const IndexTypeND< old_dim > &v, IndexType::CellIndex fill_extra=IndexType::CellIndex::CELL) noexcept
 Returns a new IndexTypeND of size new_dim by either shrinking or expanding v.
 
std::int16_t swapBytes (std::int16_t val)
 
std::int32_t swapBytes (std::int32_t val)
 
std::int64_t swapBytes (std::int64_t val)
 
std::uint16_t swapBytes (std::uint16_t val)
 
std::uint32_t swapBytes (std::uint32_t val)
 
std::uint64_t swapBytes (std::uint64_t val)
 
template<typename To , typename From >
void writeIntData (const From *data, std::size_t size, std::ostream &os, const amrex::IntDescriptor &id)
 
template<typename To , typename From >
void readIntData (To *data, std::size_t size, std::istream &is, const amrex::IntDescriptor &id)
 
__host__ __device__ int coarsen (int i, int ratio) noexcept
 
template<int ratio>
__host__ __device__ int coarsen (int i) noexcept
 
template<std::size_t dim>
__host__ __device__ IntVectND (const Array< int, dim > &) -> IntVectND< dim >
 
template<class... Args, std::enable_if_t< IsConvertible_v< int, Args... >, int > = 0>
__host__ __device__ IntVectND (int, int, Args...) -> IntVectND< sizeof...(Args)+2 >
 
template<int dim>
__host__ __device__ constexpr IntVectND< dim > operator+ (int s, const IntVectND< dim > &p) noexcept
 Returns p + s.
 
template<int dim>
__host__ __device__ constexpr IntVectND< dim > operator- (int s, const IntVectND< dim > &p) noexcept
 Returns -p + s.
 
template<int dim>
__host__ __device__ constexpr IntVectND< dim > operator* (int s, const IntVectND< dim > &p) noexcept
 Returns p * s.
 
template<int dim>
__host__ __device__ constexpr IntVectND< dim > min (const IntVectND< dim > &p1, const IntVectND< dim > &p2) noexcept
 Returns the IntVectND that is the component-wise minimum of two argument IntVectNDs.
 
template<int dim>
__host__ __device__ constexpr IntVectND< dim > elemwiseMin (const IntVectND< dim > &p1, const IntVectND< dim > &p2) noexcept
 
template<int dim>
__host__ __device__ constexpr IntVectND< dim > max (const IntVectND< dim > &p1, const IntVectND< dim > &p2) noexcept
 Returns the IntVectND that is the component-wise maximum of two argument IntVectNDs.
 
template<int dim>
__host__ __device__ constexpr IntVectND< dim > elemwiseMax (const IntVectND< dim > &p1, const IntVectND< dim > &p2) noexcept
 
template<int dim = 3>
__host__ __device__ IntVectND< dim > BASISV (int dir) noexcept
 Returns a basis vector in the given coordinate direction, e.g., IntVectND<3> BASISV<3>(1) == (0,1,0). Note that the coordinate directions are zero-based.
 
template<int dim>
__host__ __device__ constexpr IntVectND< dim > scale (const IntVectND< dim > &p, int s) noexcept
 Returns an IntVectND obtained by multiplying each component of p by s.
 
template<int dim>
__host__ __device__ constexpr IntVectND< dim > reflect (const IntVectND< dim > &a, int ref_ix, int idir) noexcept
 Returns an IntVectND that is the reflection of a in the plane that passes through ref_ix and is normal to the coordinate direction idir.
 
template<int dim>
__host__ __device__ constexpr IntVectND< dim > diagShift (const IntVectND< dim > &p, int s) noexcept
 Returns the IntVectND obtained by adding s to each component of p.
 
template<int dim>
__host__ __device__ IntVectND< dim > coarsen (const IntVectND< dim > &p, int s) noexcept
 Returns an IntVectND that is the component-wise integer projection of p by s.
 
template<int dim>
__host__ __device__ IntVectND< dim > coarsen (const IntVectND< dim > &p1, const IntVectND< dim > &p2) noexcept
 Returns an IntVectND which is the component-wise integer projection of IntVectND p1 by IntVectND p2.
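
coarsen is integer division that rounds toward negative infinity, which keeps the coarse/fine index mapping consistent across zero. For example:

    amrex::IntVectND<3> fine(5, 7, -1);
    auto crse = amrex::coarsen(fine, 2);  // (2, 3, -1), not (2, 3, 0)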
 
template<int dim, std::enable_if_t<(1<=dim &&dim<=3), int > = 0>
__host__ __device__ constexpr Dim3 refine (Dim3 const &coarse, IntVectND< dim > const &ratio) noexcept
 
template<int dim, std::enable_if_t<(1<=dim &&dim<=3), int > = 0>
__host__ __device__ Dim3 coarsen (Dim3 const &fine, IntVectND< dim > const &ratio) noexcept
 
template<int dim>
std::ostream & operator<< (std::ostream &os, const IntVectND< dim > &iv)
 
template<int dim>
std::istream & operator>> (std::istream &is, IntVectND< dim > &iv)
 
template<int d, int... dims>
__host__ __device__ constexpr IntVectND< detail::get_sum< d, dims... >()> IntVectCat (const IntVectND< d > &v, const IntVectND< dims > &...vects) noexcept
 Returns an IntVectND obtained by concatenating the input IntVectNDs. The dimension of the return value equals the sum of the dimensions of the input IntVectNDs.
 
template<int d, int... dims>
__host__ __device__ constexpr GpuTuple< IntVectND< d >, IntVectND< dims >... > IntVectSplit (const IntVectND< detail::get_sum< d, dims... >()> &v) noexcept
 Returns a tuple of IntVectND obtained by splitting the input IntVectND according to the dimensions specified by the template arguments.
 
template<int new_dim, int old_dim>
__host__ __device__ constexpr IntVectND< new_dim > IntVectShrink (const IntVectND< old_dim > &iv) noexcept
 Returns a new IntVectND of size new_dim and assigns the first new_dim values of iv to it.
 
template<int new_dim, int old_dim>
__host__ __device__ constexpr IntVectND< new_dim > IntVectExpand (const IntVectND< old_dim > &iv, int fill_extra=0) noexcept
 Returns a new IntVectND of size new_dim and assigns all values of iv to it and fill_extra to the remaining elements.
 
template<int new_dim, int old_dim>
__host__ __device__ constexpr IntVectND< new_dim > IntVectResize (const IntVectND< old_dim > &iv, int fill_extra=0) noexcept
 Returns a new IntVectND of size new_dim by either shrinking or expanding iv.
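
A short sketch of the concatenation and splitting helpers:

    amrex::IntVectND<2> a(1, 2);
    amrex::IntVectND<1> b(3);
    auto c = amrex::IntVectCat(a, b);      // IntVectND<3>{1,2,3}
    auto t = amrex::IntVectSplit<2,1>(c);  // tuple of IntVectND<2>, IntVectND<1>
    auto a2 = amrex::get<0>(t);            // (1,2) again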
 
template<std::size_t I, int dim>
__host__ __device__ constexpr int get (IntVectND< dim > const &iv) noexcept
 Get I'th element of IntVectND<dim>
 
template<typename F , int dim>
__host__ __device__ constexpr auto Apply (F &&f, IntVectND< dim > const &iv)
 
template<class F >
__host__ __device__ void Loop (Dim3 lo, Dim3 hi, F const &f) noexcept
 
template<class F >
__host__ __device__ void Loop (Dim3 lo, Dim3 hi, int ncomp, F const &f) noexcept
 
template<class F >
__host__ __device__ void LoopConcurrent (Dim3 lo, Dim3 hi, F const &f) noexcept
 
template<class F >
__host__ __device__ void LoopConcurrent (Dim3 lo, Dim3 hi, int ncomp, F const &f) noexcept
 
template<class F , int dim>
__host__ __device__ void Loop (BoxND< dim > const &bx, F const &f) noexcept
 
template<class F , int dim>
__host__ __device__ void Loop (BoxND< dim > const &bx, int ncomp, F const &f) noexcept
 
template<class F , int dim>
__host__ __device__ void LoopConcurrent (BoxND< dim > const &bx, F const &f) noexcept
 
template<class F , int dim>
__host__ __device__ void LoopConcurrent (BoxND< dim > const &bx, int ncomp, F const &f) noexcept
 
template<class F >
void LoopOnCpu (Dim3 lo, Dim3 hi, F const &f) noexcept
 
template<class F >
void LoopOnCpu (Dim3 lo, Dim3 hi, int ncomp, F const &f) noexcept
 
template<class F >
void LoopConcurrentOnCpu (Dim3 lo, Dim3 hi, F const &f) noexcept
 
template<class F >
void LoopConcurrentOnCpu (Dim3 lo, Dim3 hi, int ncomp, F const &f) noexcept
 
template<class F , int dim>
void LoopOnCpu (BoxND< dim > const &bx, F const &f) noexcept
 
template<class F , int dim>
void LoopOnCpu (BoxND< dim > const &bx, int ncomp, F const &f) noexcept
 
template<class F , int dim>
void LoopConcurrentOnCpu (BoxND< dim > const &bx, F const &f) noexcept
 
template<class F , int dim>
void LoopConcurrentOnCpu (BoxND< dim > const &bx, int ncomp, F const &f) noexcept
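
Unlike the ParallelFor family, the Loop functions expand to plain nested for loops in the calling thread (the Concurrent variants additionally tell the compiler that iterations are independent). A minimal sketch, where bx is a Box and a is an Array4:

    amrex::LoopOnCpu(bx, [&] (int i, int j, int k)
    {
        a(i,j,k) += 1.0;  // ordinary nested loops, no kernel launch
    });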
 
template<RunOn run_on, typename T , std::enable_if_t< std::is_same_v< T, double >||std::is_same_v< T, float >, int > FOO = 0>
void fill_snan (T *p, std::size_t nelems)
 
std::ostream & operator<< (std::ostream &os, const MemProfiler::Bytes &bytes)
 
std::ostream & operator<< (std::ostream &os, const MemProfiler::Builds &builds)
 
void InterpAddBox (MultiFabCopyDescriptor &fabCopyDesc, BoxList *returnUnfilledBoxes, Vector< FillBoxId > &returnedFillBoxIds, const Box &subbox, MultiFabId faid1, MultiFabId faid2, Real t1, Real t2, Real t, int src_comp, int dest_comp, int num_comp, bool extrap)
 
void InterpFillFab (MultiFabCopyDescriptor &fabCopyDesc, const Vector< FillBoxId > &fillBoxIds, MultiFabId faid1, MultiFabId faid2, FArrayBox &dest, Real t1, Real t2, Real t, int src_comp, int dest_comp, int num_comp, bool extrap)
 
bool TilingIfNotGPU () noexcept
 
bool isMFIterSafe (const FabArrayBase &x, const FabArrayBase &y)
 
template<typename MF , typename F >
std::enable_if_t< IsFabArray< MF >::value > ParallelFor (MF const &mf, F &&f)
 ParallelFor for MultiFab/FabArray.
 
template<int MT, typename MF , typename F >
std::enable_if_t< IsFabArray< MF >::value > ParallelFor (MF const &mf, F &&f)
 ParallelFor for MultiFab/FabArray.
 
template<typename MF , typename F >
std::enable_if_t< IsFabArray< MF >::value > ParallelFor (MF const &mf, IntVect const &ng, F &&f)
 ParallelFor for MultiFab/FabArray.
 
template<int MT, typename MF , typename F >
std::enable_if_t< IsFabArray< MF >::value > ParallelFor (MF const &mf, IntVect const &ng, F &&f)
 ParallelFor for MultiFab/FabArray.
 
template<typename MF , typename F >
std::enable_if_t< IsFabArray< MF >::value > ParallelFor (MF const &mf, IntVect const &ng, int ncomp, F &&f)
 ParallelFor for MultiFab/FabArray.
 
template<int MT, typename MF , typename F >
std::enable_if_t< IsFabArray< MF >::value > ParallelFor (MF const &mf, IntVect const &ng, int ncomp, F &&f)
 ParallelFor for MultiFab/FabArray.
 
template<typename MF , typename F >
std::enable_if_t< IsFabArray< MF >::value > ParallelFor (MF const &mf, TileSize const &ts, F &&f)
 ParallelFor for MultiFab/FabArray.
 
template<int MT, typename MF , typename F >
std::enable_if_t< IsFabArray< MF >::value > ParallelFor (MF const &mf, TileSize const &ts, F &&f)
 ParallelFor for MultiFab/FabArray.
 
template<typename MF , typename F >
std::enable_if_t< IsFabArray< MF >::value > ParallelFor (MF const &mf, IntVect const &ng, TileSize const &ts, F &&f)
 ParallelFor for MultiFab/FabArray.
 
template<int MT, typename MF , typename F >
std::enable_if_t< IsFabArray< MF >::value > ParallelFor (MF const &mf, IntVect const &ng, TileSize const &ts, F &&f)
 ParallelFor for MultiFab/FabArray.
 
template<typename MF , typename F >
std::enable_if_t< IsFabArray< MF >::value > ParallelFor (MF const &mf, IntVect const &ng, int ncomp, TileSize const &ts, F &&f)
 ParallelFor for MultiFab/FabArray.
 
template<int MT, typename MF , typename F >
std::enable_if_t< IsFabArray< MF >::value > ParallelFor (MF const &mf, IntVect const &ng, int ncomp, TileSize const &ts, F &&f)
 ParallelFor for MultiFab/FabArray.
 
template<typename MF , typename F >
std::enable_if_t< IsFabArray< MF >::value > ParallelFor (MF const &mf, IntVect const &ng, TileSize const &ts, DynamicTiling dt, F &&f)
 ParallelFor for MultiFab/FabArray.
 
template<typename MF , typename F >
std::enable_if_t< IsFabArray< MF >::value > ParallelFor (MF const &mf, IntVect const &ng, int ncomp, TileSize const &ts, DynamicTiling dt, F &&f)
 ParallelFor for MultiFab/FabArray.
 
template<int MT, typename MF , typename F >
std::enable_if_t< IsFabArray< MF >::value > ParallelFor (MF const &mf, IntVect const &ng, TileSize const &ts, DynamicTiling dt, F &&f)
 ParallelFor for MultiFab/FabArray.
 
template<int MT, typename MF , typename F >
std::enable_if_t< IsFabArray< MF >::value > ParallelFor (MF const &mf, IntVect const &ng, int ncomp, TileSize const &ts, DynamicTiling dt, F &&f)
 ParallelFor for MultiFab/FabArray.
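
These overloads fuse the loop over boxes with the loop over cells, so the lambda receives the local box index first. A minimal sketch for a single-component MultiFab mf:

    auto const& ma = mf.arrays();
    amrex::ParallelFor(mf, mf.nGrowVect(),
        [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k) noexcept
        {
            ma[box_no](i,j,k) = 0.0;
        });
    amrex::Gpu::streamSynchronize();  // ensure kernels finish before ma goes out of scope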
 
void GccPlacaterMF ()
 
void average_node_to_cellcenter (MultiFab &cc, int dcomp, const MultiFab &nd, int scomp, int ncomp, int ngrow=0)
 Average nodal-based MultiFab onto cell-centered MultiFab.
 
void average_node_to_cellcenter (MultiFab &cc, int dcomp, const MultiFab &nd, int scomp, int ncomp, IntVect const &ng_vect)
 
void average_edge_to_cellcenter (MultiFab &cc, int dcomp, const Vector< const MultiFab * > &edge, int ngrow=0)
 Average edge-based MultiFab onto cell-centered MultiFab.
 
void average_edge_to_cellcenter (MultiFab &cc, int dcomp, const Vector< const MultiFab * > &edge, IntVect const &ng_vect)
 
void average_face_to_cellcenter (MultiFab &cc, int dcomp, const Vector< const MultiFab * > &fc, IntVect const &ng_vect)
 
void average_face_to_cellcenter (MultiFab &cc, int dcomp, const Vector< const MultiFab * > &fc, int ngrow=0)
 Average face-based MultiFab onto cell-centered MultiFab.
 
void average_face_to_cellcenter (MultiFab &cc, const Vector< const MultiFab * > &fc, const Geometry &geom)
 Average face-based MultiFab onto cell-centered MultiFab with geometric weighting.
 
void average_face_to_cellcenter (MultiFab &cc, const Array< const MultiFab *, 3 > &fc, const Geometry &geom)
 Average face-based MultiFab onto cell-centered MultiFab with geometric weighting.
 
void average_cellcenter_to_face (const Vector< MultiFab * > &fc, const MultiFab &cc, const Geometry &geom, int ncomp=1, bool use_harmonic_averaging=false)
 Average cell-centered MultiFab onto face-based MultiFab with geometric weighting.
 
void average_cellcenter_to_face (const Array< MultiFab *, 3 > &fc, const MultiFab &cc, const Geometry &geom, int ncomp=1, bool use_harmonic_averaging=false)
 Average cell-centered MultiFab onto face-based MultiFab with geometric weighting.
 
void average_down (const MultiFab &S_fine, MultiFab &S_crse, const Geometry &fgeom, const Geometry &cgeom, int scomp, int ncomp, int rr)
 
void average_down (const MultiFab &S_fine, MultiFab &S_crse, const Geometry &fgeom, const Geometry &cgeom, int scomp, int ncomp, const IntVect &ratio)
 Volume-weighted average of fine MultiFab onto coarse MultiFab.
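
A typical restriction step between two AMR levels might look like this, assuming S_fine, S_crse, fgeom, cgeom, and ref_ratio already exist:

    amrex::average_down(S_fine, S_crse, fgeom, cgeom,
                        0, S_fine.nComp(), ref_ratio);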
 
void sum_fine_to_coarse (const MultiFab &S_fine, MultiFab &S_crse, int scomp, int ncomp, const IntVect &ratio, const Geometry &cgeom, const Geometry &)
 
void average_down_edges (const Vector< const MultiFab * > &fine, const Vector< MultiFab * > &crse, const IntVect &ratio, int ngcrse=0)
 Average fine edge-based MultiFab onto crse edge-based MultiFab.
 
void average_down_edges (const Array< const MultiFab *, 3 > &fine, const Array< MultiFab *, 3 > &crse, const IntVect &ratio, int ngcrse)
 
void average_down_edges (const MultiFab &fine, MultiFab &crse, const IntVect &ratio, int ngcrse)
 
void print_state (const MultiFab &mf, const IntVect &cell, int n=-1, const IntVect &ng=IntVect::TheZeroVector())
 Output state data for a single zone.
 
void writeFabs (const MultiFab &mf, const std::string &name)
 Write each fab individually.
 
void writeFabs (const MultiFab &mf, int comp, int ncomp, const std::string &name)
 
MultiFab ToMultiFab (const iMultiFab &imf)
 Convert iMultiFab to MultiFab.
 
FabArray< BaseFab< Long > > ToLongMultiFab (const iMultiFab &imf)
 Convert iMultiFab to FabArray<BaseFab<Long>>.
 
std::unique_ptr< MultiFab > get_slice_data (int dir, Real coord, const MultiFab &cc, const Geometry &geom, int start_comp, int ncomp, bool interpolate, RealBox const &bnd_rbx)
 
iMultiFab makeFineMask (const BoxArray &cba, const DistributionMapping &cdm, const BoxArray &fba, const IntVect &ratio, int crse_value, int fine_value)
 
template<typename FAB >
void makeFineMask_doit (FabArray< FAB > &mask, const BoxArray &fba, const IntVect &ratio, Periodicity const &period, typename FAB::value_type crse_value, typename FAB::value_type fine_value)
 
iMultiFab makeFineMask (const BoxArray &cba, const DistributionMapping &cdm, const IntVect &cnghost, const BoxArray &fba, const IntVect &ratio, Periodicity const &period, int crse_value, int fine_value)
 
MultiFab makeFineMask (const BoxArray &cba, const DistributionMapping &cdm, const BoxArray &fba, const IntVect &ratio, Real crse_value, Real fine_value)
 
void computeDivergence (MultiFab &divu, const Array< MultiFab const *, 3 > &umac, const Geometry &geom)
 Computes divergence of face-data stored in the umac MultiFab.
 
void computeGradient (MultiFab &grad, const Array< MultiFab const *, 3 > &umac, const Geometry &geom)
 Computes gradient of face-data stored in the umac MultiFab.
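
A minimal sketch, assuming umac is an Array<MultiFab,3> of face-centered data, divu is a cell-centered MultiFab on the same grids, and geom is the corresponding Geometry:

    amrex::computeDivergence(divu, amrex::GetArrOfConstPtrs(umac), geom);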
 
MultiFab periodicShift (MultiFab const &mf, IntVect const &offset, Periodicity const &period)
 Periodically shift a MultiFab.
 
Gpu::HostVector< Real > sumToLine (MultiFab const &mf, int icomp, int ncomp, Box const &domain, int direction, bool local=false)
 Sum MultiFab data to line.
 
Real volumeWeightedSum (Vector< MultiFab const * > const &mf, int icomp, Vector< Geometry > const &geom, Vector< IntVect > const &ratio, bool local=false)
 Volume weighted sum for a vector of MultiFabs.
 
void FourthOrderInterpFromFineToCoarse (MultiFab &cmf, int scomp, int ncomp, MultiFab const &fmf, IntVect const &ratio)
 Fourth-order interpolation from fine to coarse level.
 
void FillRandom (MultiFab &mf, int scomp, int ncomp)
 Fill MultiFab with random numbers from uniform distribution.
 
void FillRandomNormal (MultiFab &mf, int scomp, int ncomp, Real mean, Real stddev)
 Fill MultiFab with random numbers from normal distribution.
 
Vector< MultiFab > convexify (Vector< MultiFab const * > const &mf, Vector< IntVect > const &refinement_ratio)
 Convexify AMR data.
 
template<typename CMF , typename FMF , std::enable_if_t< IsFabArray_v< CMF > &&IsFabArray_v< FMF >, int > = 0>
void average_face_to_cellcenter (CMF &cc, int dcomp, const Array< const FMF *, 3 > &fc, int ngrow=0)
 Average face-based FabArray onto cell-centered FabArray.
 
template<typename CMF , typename FMF , std::enable_if_t< IsFabArray_v< CMF > &&IsFabArray_v< FMF >, int > = 0>
void average_face_to_cellcenter (CMF &cc, int dcomp, const Array< const FMF *, 3 > &fc, IntVect const &ng_vect)
 
template<typename MF , std::enable_if_t< IsFabArray< MF >::value, int > = 0>
void average_down_faces (const Vector< const MF * > &fine, const Vector< MF * > &crse, const IntVect &ratio, int ngcrse=0)
 Average fine face-based FabArray onto crse face-based FabArray.
 
template<typename MF , std::enable_if_t< IsFabArray< MF >::value, int > = 0>
void average_down_faces (const Vector< const MF * > &fine, const Vector< MF * > &crse, int ratio, int ngcrse=0)
 Average fine face-based FabArray onto crse face-based FabArray.
 
template<typename MF , std::enable_if_t< IsFabArray< MF >::value, int > = 0>
void average_down_faces (const Array< const MF *, 3 > &fine, const Array< MF *, 3 > &crse, const IntVect &ratio, int ngcrse=0)
 Average fine face-based FabArray onto crse face-based FabArray.
 
template<typename MF , std::enable_if_t< IsFabArray< MF >::value, int > = 0>
void average_down_faces (const Array< const MF *, 3 > &fine, const Array< MF *, 3 > &crse, int ratio, int ngcrse=0)
 Average fine face-based FabArray onto crse face-based FabArray.
 
template<typename FAB >
void average_down_faces (const FabArray< FAB > &fine, FabArray< FAB > &crse, const IntVect &ratio, int ngcrse=0)
 This version averages down in a single face direction.
 
template<typename MF , std::enable_if_t< IsFabArray< MF >::value, int > = 0>
void average_down_faces (const Array< const MF *, 3 > &fine, const Array< MF *, 3 > &crse, const IntVect &ratio, const Geometry &crse_geom)
 
template<typename FAB >
void average_down_faces (const FabArray< FAB > &fine, FabArray< FAB > &crse, const IntVect &ratio, const Geometry &crse_geom)
 
template<typename FAB >
void average_down_nodal (const FabArray< FAB > &S_fine, FabArray< FAB > &S_crse, const IntVect &ratio, int ngcrse=0, bool mfiter_is_definitely_safe=false)
 Average fine node-based MultiFab onto crse node-centered MultiFab.
 
template<typename FAB >
void average_down (const FabArray< FAB > &S_fine, FabArray< FAB > &S_crse, int scomp, int ncomp, const IntVect &ratio)
 
template<typename FAB >
void average_down (const FabArray< FAB > &S_fine, FabArray< FAB > &S_crse, int scomp, int ncomp, int rr)
 
template<typename MF , std::enable_if_t< IsFabArray< MF >::value, int > FOO = 0>
Vector< typename MF::value_type > get_cell_data (MF const &mf, IntVect const &cell)
 Get data in a cell of MultiFab/FabArray.
 
template<typename MF , std::enable_if_t< IsFabArray< MF >::value, int > FOO = 0>
MF get_line_data (MF const &mf, int dir, IntVect const &cell, Box const &bnd_bx=Box())
 Get data in a line of MultiFab/FabArray.
 
template<typename FAB >
iMultiFab makeFineMask (const FabArray< FAB > &cmf, const BoxArray &fba, const IntVect &ratio, int crse_value=0, int fine_value=1)
 
template<typename FAB >
iMultiFab makeFineMask (const FabArray< FAB > &cmf, const BoxArray &fba, const IntVect &ratio, Periodicity const &period, int crse_value, int fine_value)
 
template<typename FAB >
iMultiFab makeFineMask (const FabArray< FAB > &cmf, const FabArray< FAB > &fmf, const IntVect &cnghost, const IntVect &ratio, Periodicity const &period, int crse_value, int fine_value)
 
template<typename FAB >
iMultiFab makeFineMask (const FabArray< FAB > &cmf, const FabArray< FAB > &fmf, const IntVect &cnghost, const IntVect &ratio, Periodicity const &period, int crse_value, int fine_value, LayoutData< int > &has_cf)
 
template<typename T , typename U >
T cast (U const &mf_in)
 Example: auto mf = amrex::cast<MultiFab>(imf);
 
template<typename Op , typename T , typename FAB , typename F , std::enable_if_t< IsBaseFab< FAB >::value, int > FOO = 0>
BaseFab< T > ReduceToPlane (int direction, Box const &domain, FabArray< FAB > const &mf, F const &f)
 Reduce FabArray/MultiFab data to a plane Fab.
 
template<typename Op , typename FA , typename F , std::enable_if_t< IsMultiFabLike_v< FA >, int > FOO = 0>
FA ReduceToPlaneMF (int direction, Box const &domain, FA const &mf, F const &f)
 Reduce FabArray/MultiFab data to plane FabArray.
 
template<typename Op , typename FA , typename F , std::enable_if_t< IsMultiFabLike_v< FA >, int > FOO = 0>
std::pair< FA, FA > ReduceToPlaneMF2 (int direction, Box const &domain, FA const &mf, F const &f)
 Reduce FabArray/MultiFab data to plane FabArray.
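
A sketch of the plane reduction, assuming the functor receives the local box index and the cell as in the declarations above; mf and domain are an existing MultiFab and its index domain:

    auto const& ma = mf.const_arrays();
    amrex::BaseFab<amrex::Real> plane =
        amrex::ReduceToPlane<amrex::ReduceOpSum, amrex::Real>(2, domain, mf,
            [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k) -> amrex::Real
            {
                return ma[box_no](i,j,k);  // summed along direction 2 (z)
            });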
 
template<typename F >
Real NormHelper (const MultiFab &x, int xcomp, const MultiFab &y, int ycomp, F const &f, int numcomp, IntVect nghost, bool local)
 Returns part of a norm based on two MultiFabs.
 
template<typename MMF , typename Pred , typename F >
Real NormHelper (const MMF &mask, const MultiFab &x, int xcomp, const MultiFab &y, int ycomp, Pred const &pf, F const &f, int numcomp, IntVect nghost, bool local)
 Returns part of a norm based on three MultiFabs.
 
int numUniquePhysicalCores ()
 
std::ostream & operator<< (std::ostream &os, const Orientation &o)
 Write to an ostream in ASCII format.
 
std::istream & operator>> (std::istream &is, Orientation &o)
 
template<typename... Ops, typename... Ts, typename FAB , typename F , typename foo = std::enable_if_t<IsBaseFab<FAB>::value>>
ReduceData< Ts... >::Type ParReduce (TypeList< Ops... > operation_list, TypeList< Ts... > type_list, FabArray< FAB > const &fa, IntVect const &nghost, F &&f)
 Parallel reduce for MultiFab/FabArray. The reduction result is local; performing any needed MPI communication is the user's responsibility.
 
template<typename Op , typename T , typename FAB , typename F , typename foo = std::enable_if_t<IsBaseFab<FAB>::value>>
T ParReduce (TypeList< Op > operation_list, TypeList< T > type_list, FabArray< FAB > const &fa, IntVect const &nghost, F &&f)
 Parallel reduce for MultiFab/FabArray. The reduction result is local; performing any needed MPI communication is the user's responsibility.
 
template<typename... Ops, typename... Ts, typename FAB , typename F , typename foo = std::enable_if_t<IsBaseFab<FAB>::value>>
ReduceData< Ts... >::Type ParReduce (TypeList< Ops... > operation_list, TypeList< Ts... > type_list, FabArray< FAB > const &fa, IntVect const &nghost, int ncomp, F &&f)
 Parallel reduce for MultiFab/FabArray. The reduction result is local; performing any needed MPI communication is the user's responsibility.
 
template<typename Op , typename T , typename FAB , typename F , typename foo = std::enable_if_t<IsBaseFab<FAB>::value>>
T ParReduce (TypeList< Op > operation_list, TypeList< T > type_list, FabArray< FAB > const &fa, IntVect const &nghost, int ncomp, F &&f)
 Parallel reduce for MultiFab/FabArray. The reduction result is local; performing any needed MPI communication is the user's responsibility.
 
template<typename... Ops, typename... Ts, typename FAB , typename F , typename foo = std::enable_if_t<IsBaseFab<FAB>::value>>
ReduceData< Ts... >::Type ParReduce (TypeList< Ops... > operation_list, TypeList< Ts... > type_list, FabArray< FAB > const &fa, F &&f)
 Parallel reduce for MultiFab/FabArray. The reduction result is local; performing any needed MPI communication is the user's responsibility.
 
template<typename Op , typename T , typename FAB , typename F , typename foo = std::enable_if_t<IsBaseFab<FAB>::value>>
T ParReduce (TypeList< Op > operation_list, TypeList< T > type_list, FabArray< FAB > const &fa, F &&f)
 Parallel reduce for MultiFab/FabArray. The reduction result is local; performing any needed MPI communication is the user's responsibility.
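
A sketch of a fused local sum-and-max reduction over the valid cells of a MultiFab mf (the MPI reduction, if a global result is wanted, is left to the caller):

    auto const& ma = mf.const_arrays();
    auto r = amrex::ParReduce(amrex::TypeList<amrex::ReduceOpSum, amrex::ReduceOpMax>{},
                              amrex::TypeList<amrex::Real, amrex::Real>{},
                              mf, amrex::IntVect(0),
        [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k)
            -> amrex::GpuTuple<amrex::Real, amrex::Real>
        {
            auto v = ma[box_no](i,j,k);
            return { v, v };
        });
    amrex::Real local_sum = amrex::get<0>(r);
    amrex::Real local_max = amrex::get<1>(r);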
 
std::ostream & pout ()
 The stream that all output except error messages should use.
 
void setPoutBaseName (const std::string &a_Name)
 Set the base name for the parallel output files used by pout().
 
const std::string & poutFileName ()
 return the current filename as used by pout()
 
template<typename T , typename F >
int Partition (T *data, int beg, int end, F &&f)
 A GPU-capable partition function for contiguous data.
 
template<typename T , typename F >
int Partition (T *data, int n, F &&f)
 A GPU-capable partition function for contiguous data.
 
template<typename T , typename F >
int Partition (Gpu::DeviceVector< T > &v, F &&f)
 A GPU-capable partition function for contiguous data.
 
template<typename T , typename F >
int StablePartition (T *data, int beg, int end, F &&f)
 A GPU-capable stable partition function for contiguous data; the relative order within each part is preserved.
 
template<typename T , typename F >
int StablePartition (T *data, int n, F &&f)
 A GPU-capable stable partition function for contiguous data; the relative order within each part is preserved.
 
template<typename T , typename F >
int StablePartition (Gpu::DeviceVector< T > &v, F &&f)
 A GPU-capable stable partition function for contiguous data; the relative order within each part is preserved.
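
A minimal sketch: move the elements satisfying the predicate to the front of a device vector and count them (n is hypothetical and v is assumed to be already filled):

    amrex::Gpu::DeviceVector<int> v(n);
    // ... fill v ...
    int num_neg = amrex::Partition(v,
        [=] AMREX_GPU_DEVICE (int x) noexcept { return x < 0; });
    // elements with x < 0 now occupy v[0] .. v[num_neg-1]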
 
std::string LevelPath (int level, const std::string &levelPrefix="Level_")
 return the name of the level directory, e.g., Level_5
 
std::string MultiFabHeaderPath (int level, const std::string &levelPrefix="Level_", const std::string &mfPrefix="Cell")
 return the path of the multifab to write to the header, e.g., Level_5/Cell
 
std::string LevelFullPath (int level, const std::string &plotfilename, const std::string &levelPrefix="Level_")
 return the full path of the level directory, e.g., plt00005/Level_5
 
std::string MultiFabFileFullPrefix (int level, const std::string &plotfilename, const std::string &levelPrefix="Level_", const std::string &mfPrefix="Cell")
 return the full path multifab prefix, e.g., plt00005/Level_5/Cell
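These helpers compose as follows (illustrative arguments, default prefixes):

    amrex::LevelPath(5);                          // "Level_5"
    amrex::MultiFabHeaderPath(5);                 // "Level_5/Cell"
    amrex::LevelFullPath(5, "plt00005");          // "plt00005/Level_5"
    amrex::MultiFabFileFullPrefix(5, "plt00005"); // "plt00005/Level_5/Cell"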
 
void PreBuildDirectorHierarchy (const std::string &dirName, const std::string &subDirPrefix, int nSubDirs, bool callBarrier)
 Prebuild a hierarchy of directories. dirName is built first; if dirName already exists, it is renamed. Then dirName/subDirPrefix_0 through dirName/subDirPrefix_<nSubDirs-1> are built. ParallelDescriptor::IOProcessor() creates the directories; if callBarrier is true, ParallelDescriptor::Barrier() is called after all directories are built.
 
void WriteGenericPlotfileHeader (std::ostream &HeaderFile, int nlevels, const Vector< BoxArray > &bArray, const Vector< std::string > &varnames, const Vector< Geometry > &geom, Real time, const Vector< int > &level_steps, const Vector< IntVect > &ref_ratio, const std::string &versionName, const std::string &levelPrefix, const std::string &mfPrefix)
 
void WriteMultiLevelPlotfile (const std::string &plotfilename, int nlevels, const Vector< const MultiFab * > &mf, const Vector< std::string > &varnames, const Vector< Geometry > &geom, Real time, const Vector< int > &level_steps, const Vector< IntVect > &ref_ratio, const std::string &versionName="HyperCLaw-V1.1", const std::string &levelPrefix="Level_", const std::string &mfPrefix="Cell", const Vector< std::string > &extra_dirs=Vector< std::string >())
 
void WriteMLMF (const std::string &plotfilename, const Vector< const MultiFab * > &mf, const Vector< Geometry > &geom)
 Write a plotfile to disk given a plotfile name, a vector of MultiFabs, and a vector of Geometrys. Variable names are written as "Var0", "Var1", etc.; the refinement ratio is computed from the Geometry vector; "time" and "level_steps" are set to zero.
 
void WriteMultiLevelPlotfileHeaders (const std::string &plotfilename, int nlevels, const Vector< const MultiFab * > &mf, const Vector< std::string > &varnames, const Vector< Geometry > &geom, Real time, const Vector< int > &level_steps, const Vector< IntVect > &ref_ratio, const std::string &versionName, const std::string &levelPrefix, const std::string &mfPrefix, const Vector< std::string > &extra_dirs)
 
void WriteSingleLevelPlotfile (const std::string &plotfilename, const MultiFab &mf, const Vector< std::string > &varnames, const Geometry &geom, Real time, int level_step, const std::string &versionName, const std::string &levelPrefix, const std::string &mfPrefix, const Vector< std::string > &extra_dirs)
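A single-level write, as a sketch (file and variable names are illustrative; mf must have one component per variable name):

    #include <AMReX_PlotFileUtil.H>

    void write_plot (amrex::MultiFab const& mf, amrex::Geometry const& geom,
                     amrex::Real time, int step)
    {
        const std::string name = amrex::Concatenate("plt", step); // e.g. "plt00005"
        amrex::WriteSingleLevelPlotfile(name, mf, {"density"}, geom, time, step,
                                        "HyperCLaw-V1.1", "Level_", "Cell", {});
    }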
 
void EB_WriteSingleLevelPlotfile (const std::string &plotfilename, const MultiFab &mf, const Vector< std::string > &varnames, const Geometry &geom, Real time, int level_step, const std::string &versionName, const std::string &levelPrefix, const std::string &mfPrefix, const Vector< std::string > &extra_dirs)
 
void EB_WriteMultiLevelPlotfile (const std::string &plotfilename, int nlevels, const Vector< const MultiFab * > &mf, const Vector< std::string > &varnames, const Vector< Geometry > &geom, Real time, const Vector< int > &level_steps, const Vector< IntVect > &ref_ratio, const std::string &versionName, const std::string &levelPrefix, const std::string &mfPrefix, const Vector< std::string > &extra_dirs)
 
GrowthStrategy_EnumTraits amrex_get_enum_traits (GrowthStrategy)
 
std::size_t grow_podvector_capacity (GrowthStrategy strategy, std::size_t new_size, std::size_t old_capacity, std::size_t sizeof_T)
 
template<typename T >
std::ostream & operator<< (std::ostream &os, Array< T, 3 > const &a)
 
template<typename T , typename S >
std::ostream & operator<< (std::ostream &os, const std::pair< T, S > &v)
 
void InitRandom (ULong cpu_seed, int nprocs=ParallelDescriptor::NProcs(), ULong gpu_seed=detail::DefaultGpuSeed())
 Set the seed of the random number generator.
 
Real RandomNormal (Real mean, Real stddev)
 Generate a pseudo-random real from a normal distribution.
 
Real Random ()
 Generate a pseudo-random real from a uniform distribution.
 
unsigned int RandomPoisson (Real lambda)
 Generate a pseudo-random integer from a Poisson distribution.
 
Real RandomGamma (Real alpha, Real beta)
 Generate a pseudo-random floating point number from the Gamma distribution.
 
unsigned int Random_int (unsigned int n)
 Generates one pseudo-random unsigned integer, uniformly distributed on the interval [0, n-1], for each call.
 
ULong Random_long (ULong n)
 Generates one pseudo-random unsigned long, uniformly distributed on the interval [0, n-1], for each call.
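Host-side usage, as a sketch (the seed is illustrative):

    #include <AMReX.H>
    #include <AMReX_Random.H>
    #include <AMReX_ParallelDescriptor.H>

    void seed_and_draw ()
    {
        amrex::InitRandom(12345ULL + amrex::ParallelDescriptor::MyProc());
        amrex::Real u = amrex::Random();               // uniform draw in the unit interval
        amrex::Real g = amrex::RandomNormal(0.0, 1.0); // mean 0, stddev 1
        unsigned int k = amrex::Random_int(10u);       // uniform on {0,...,9}
        amrex::ignore_unused(u, g, k);
    }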
 
void SaveRandomState (std::ostream &os)
 Save the random state to an output stream.
 
void RestoreRandomState (std::istream &is, int nthreads_old, int nstep_old)
 Restore the random state from an input stream.
void UniqueRandomSubset (Vector< int > &uSet, int setSize, int poolSize, bool printSet=false)
 Create a unique subset of random numbers from a pool of integers in the range [0, poolSize - 1]. The set is stored in the order the numbers are found; setSize must be <= poolSize, and uSet is resized to setSize. If all processors should have the same set, call this on one processor and broadcast the array.
 
void ResetRandomSeed (ULong cpu_seed, ULong gpu_seed=detail::DefaultGpuSeed())
 
void DeallocateRandomSeedDevArray ()
 
void FillRandom (Real *p, Long N)
 
void FillRandomNormal (Real *p, Long N, Real mean, Real stddev)
 
__host__ __device__ Real Random (RandomEngine const &random_engine)
 
__host__ __device__ Real RandomNormal (Real mean, Real stddev, RandomEngine const &random_engine)
 
__host__ __device__ unsigned int RandomPoisson (Real lambda, RandomEngine const &random_engine)
 
__host__ __device__ Real RandomGamma (Real alpha, Real beta, RandomEngine const &random_engine)
 
__host__ __device__ unsigned int Random_int (unsigned int n, RandomEngine const &random_engine)
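The RandomEngine overloads are for device code; amrex::ParallelForRNG hands each thread its own engine. A sketch, assuming a Box bx and an Array4<Real> arr:

    #include <AMReX_Random.H>
    #include <AMReX_GpuLaunch.H>

    void fill_normal (amrex::Box const& bx, amrex::Array4<amrex::Real> const& arr)
    {
        amrex::ParallelForRNG(bx,
            [=] AMREX_GPU_DEVICE (int i, int j, int k,
                                  amrex::RandomEngine const& engine) noexcept
            {
                arr(i,j,k) = amrex::RandomNormal(0.0, 1.0, engine);
            });
    }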
 
randState_t * getRandState ()
 
RandomEngine getInvalidRandomEngine ()
 
std::ostream & operator<< (std::ostream &, const RealBox &)
 Nice ASCII output.
 
std::istream & operator>> (std::istream &, RealBox &)
 Nice ASCII input.
 
bool AlmostEqual (const RealBox &box1, const RealBox &box2, Real eps=0.0) noexcept
 Check for equality of real boxes within a certain tolerance.
 
template<class... Args, std::enable_if_t< IsConvertible_v< Real, Args... >, int > = 0>
__host__ __device__ RealVectND (Real, Real, Args...) -> RealVectND< sizeof...(Args)+2 >
 
template<int dim>
__host__ __device__ RealVectND (const IntVectND< dim > &) -> RealVectND< dim >
 
template<int dim>
__host__ __device__ RealVectND (const GpuArray< Real, dim > &) -> RealVectND< dim >
 
template<int dim>
__host__ __device__ RealVectND< dim > min (const RealVectND< dim > &p1, const RealVectND< dim > &p2) noexcept
 
template<int dim>
__host__ __device__ RealVectND< dim > max (const RealVectND< dim > &p1, const RealVectND< dim > &p2) noexcept
 
template<int dim = 3>
__host__ __device__ RealVectND< dim > BASISREALV (int dir) noexcept
 
template<typename... Ts, typename... Ps>
__host__ __device__ constexpr GpuTuple< Ts... > IdentityTuple (GpuTuple< Ts... >, ReduceOps< Ps... >) noexcept
 Return a GpuTuple containing the identity element for each operation in ReduceOps. For example 0, +inf and -inf for ReduceOpSum, ReduceOpMin and ReduceOpMax respectively.
 
template<typename... Ts, typename... Ps>
__host__ __device__ constexpr GpuTuple< Ts... > IdentityTuple (GpuTuple< Ts... >, TypeList< Ps... >) noexcept
 Return a GpuTuple containing the identity element for each ReduceOp in TypeList. For example 0, +inf and -inf for ReduceOpSum, ReduceOpMin and ReduceOpMax respectively.
 
template<class U , class V , int N1, int N2, int N3, Order Ord, int SI>
__host__ __device__ decltype(auto) operator* (SmallMatrix< U, N1, N2, Ord, SI > const &lhs, SmallMatrix< V, N2, N3, Ord, SI > const &rhs)
 
template<class T , int NRows, int NCols, Order ORDER, int SI>
std::ostream & operator<< (std::ostream &os, SmallMatrix< T, NRows, NCols, ORDER, SI > const &mat)
 
std::string toLower (std::string s)
 Converts all characters of the string into lower case based on std::locale.
 
std::string toUpper (std::string s)
 Converts all characters of the string into uppercase based on std::locale.
 
std::string trim (std::string s, std::string const &space)
 
std::string Concatenate (const std::string &root, int num, int mindigits=5)
 Returns rootNNNN where NNNN == num.
 
std::vector< std::string > split (std::string const &s, std::string const &sep=" \t")
 Split a string using given tokens in sep.
 
std::string join (std::vector< std::string > const &sv, char sep)
 Join a vector of strings with given char sep as delimiter.
 
std::string join (std::vector< std::string > const &sv)
 Join a vector of strings without delimiter.
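Illustrative calls (results in comments):

    amrex::toLower("AMReX");             // "amrex"
    amrex::Concatenate("plt", 5);        // "plt00005" (mindigits defaults to 5)
    amrex::split("a b\tc");              // {"a", "b", "c"}
    amrex::join({"a", "b", "c"}, ',');   // "a,b,c"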
 
template<class TagType , class F >
std::enable_if_t< std::is_same_v< std::decay_t< decltype(std::declval< TagType >().box())>, Box > > ParallelFor (TagVector< TagType > const &tv, int ncomp, F const &f)
 
template<class TagType , class F >
void ParallelFor (TagVector< TagType > const &tv, F const &f)
 
template<class TagType , class F >
std::enable_if_t< std::is_same_v< std::decay_t< decltype(std::declval< TagType >().box())>, Box > > ParallelFor (Vector< TagType > const &tags, int ncomp, F &&f)
 
template<class TagType , class F >
void ParallelFor (Vector< TagType > const &tags, F &&f)
 
template<std::size_t I, typename... Ts>
__host__ __device__ constexpr GpuTupleElement< I, GpuTuple< Ts... > >::type & get (GpuTuple< Ts... > &tup) noexcept
 
template<std::size_t I, typename... Ts>
__host__ __device__ constexpr GpuTupleElement< I, GpuTuple< Ts... > >::type const & get (GpuTuple< Ts... > const &tup) noexcept
 
template<std::size_t I, typename... Ts>
__host__ __device__ constexpr GpuTupleElement< I, GpuTuple< Ts... > >::type && get (GpuTuple< Ts... > &&tup) noexcept
 
template<typename... Ts>
__host__ __device__ constexpr GpuTuple< detail::tuple_decay_t< Ts >... > makeTuple (Ts &&... args)
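These mirror std::tuple but are usable in device code; a small sketch:

    auto t = amrex::makeTuple(1, 2.5);                 // GpuTuple<int, double>
    int  a = amrex::get<0>(t);
    amrex::get<1>(t) += 0.5;
    auto u = amrex::TupleCat(t, amrex::makeTuple(7));  // GpuTuple<int, double, int>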
 
template<typename TP >
__host__ __device__ constexpr auto TupleCat (TP &&a) -> typename detail::tuple_cat_result< detail::tuple_decay_t< TP > >::type
 
template<typename TP1 , typename TP2 >
__host__ __device__ constexpr auto TupleCat (TP1 &&a, TP2 &&b) -> typename detail::tuple_cat_result< detail::tuple_decay_t< TP1 >, detail::tuple_decay_t< TP2 > >::type
 
template<typename TP1 , typename TP2 , typename... TPs>
__host__ __device__ constexpr auto TupleCat (TP1 &&a, TP2 &&b, TPs &&... args) -> typename detail::tuple_cat_result< detail::tuple_decay_t< TP1 >, detail::tuple_decay_t< TP2 >, detail::tuple_decay_t< TPs >... >::type
 
template<std::size_t... Is, typename... Args>
__host__ __device__ constexpr auto TupleSplit (const GpuTuple< Args... > &tup) noexcept
 Returns a GpuTuple of GpuTuples obtained by splitting the input GpuTuple according to the sizes specified by the template arguments.
 
template<typename F , typename TP >
__host__ __device__ constexpr auto Apply (F &&f, TP &&t) -> typename detail::apply_result< F, detail::tuple_decay_t< TP > >::type
 
template<typename... Args>
__host__ __device__ constexpr GpuTuple< Args &... > Tie (Args &... args) noexcept
 
template<typename... Ts>
__host__ __device__ constexpr GpuTuple< Ts &&... > ForwardAsTuple (Ts &&... args) noexcept
 
template<typename... Ts>
__host__ __device__ constexpr GpuTuple< Ts... > MakeZeroTuple (GpuTuple< Ts... >) noexcept
 Return a GpuTuple containing all zeros. Note that a default-constructed GpuTuple can have uninitialized values.
 
template<typename T >
__host__ __device__ constexpr auto tupleToArray (GpuTuple< T > const &tup)
 
template<typename T , typename T2 , typename... Ts, std::enable_if_t< Same< T, T2, Ts... >::value, int > = 0>
__host__ __device__ constexpr auto tupleToArray (GpuTuple< T, T2, Ts... > const &tup)
 Convert GpuTuple<T,T2,Ts...> to GpuArray.
 
template<typename... Ts, typename F >
constexpr void ForEach (TypeList< Ts... >, F &&f)
 For each type t in TypeList, call f(t)
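A sketch of iterating over a list of types; this assumes f receives a default-constructed value of each listed type (so each type must be default-constructible):

    #include <AMReX_TypeList.H>
    #include <AMReX_Print.H>

    amrex::ForEach(amrex::TypeList<int, float, double>{},
        [] (auto t)
        {
            // decltype(t) is the current type in the list.
            amrex::Print() << sizeof(decltype(t)) << ' ';
        });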
 
template<typename... Ts, typename F >
constexpr bool ForEachUntil (TypeList< Ts... >, F &&f)
 For each type t in TypeList, call f(t) until true is returned.
 
template<typename... As, typename... Bs>
constexpr auto operator+ (TypeList< As... >, TypeList< Bs... >)
 Concatenate two TypeLists.
 
template<typename... Ls, typename A >
constexpr auto single_product (TypeList< Ls... >, A)
 
template<typename LLs , typename... As>
constexpr auto operator* (LLs, TypeList< As... >)
 
template<typename... Ls>
constexpr auto CartesianProduct (Ls...)
 Cartesian Product of TypeLists.
 
bool is_integer (const char *str)
 Return true if the string str represents an integer.
 
template<typename T >
bool is_it (std::string const &s, T &v)
 Return true and store the parsed value in v if the string s can be parsed as type T.
 
const std::vector< std::string > & Tokenize (const std::string &instr, const std::string &separators)
 Splits "instr" into separate pieces based on "separators".
 
bool UtilCreateDirectory (const std::string &path, mode_t mode, bool verbose=false)
 Creates the specified directories. path may be either a full pathname or a relative pathname. It will create all the directories in the pathname, if they don't already exist, so that on successful return the pathname refers to an existing directory. Returns true or false depending upon whether or not it was successful. Also returns true if path is NULL or "/". mode is the mode passed to mkdir() for any directories that must be created (for example: 0755). verbose will print out the directory creation steps.
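The usual idiom pairs this with CreateDirectoryFailed (the directory name is illustrative):

    #include <AMReX_Utility.H>

    void make_dir ()
    {
        const std::string dir = "chk00100/Level_0";
        if (!amrex::UtilCreateDirectory(dir, 0755)) {
            amrex::CreateDirectoryFailed(dir);
        }
    }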
 
void CreateDirectoryFailed (const std::string &dir)
 Output a message and abort when couldn't create the directory.
 
void FileOpenFailed (const std::string &file)
 Output a message and abort when couldn't open the file.
 
bool FileExists (const std::string &filename)
 Check if a file already exists. Return true if the filename is an existing file, directory, or link. For links, this operates on the link and not what the link points to.
 
std::string UniqueString ()
 Create a (probably) unique string.
 
void UtilCreateCleanDirectory (const std::string &path, bool callbarrier=true)
 Create a new directory, renaming the old one if it exists.
 
void UtilCreateDirectoryDestructive (const std::string &path, bool callbarrier=true)
 Create a new directory, removing old one if it exists.
 
void UtilRenameDirectoryToOld (const std::string &path, bool callbarrier=true)
 Rename a current directory if it exists.
 
void OutOfMemory ()
 Aborts after printing message indicating out-of-memory; i.e. operator new has failed. This is the "supported" set_new_handler() function for AMReX applications.
 
double InvNormDist (double p)
 This function returns an approximation of the inverse cumulative standard normal distribution function. I.e., given P, it returns an approximation to the X satisfying P = Pr{Z <= X} where Z is a random variable from the standard normal distribution.
 
double InvNormDistBest (double p)
 Like InvNormDist, this returns an approximation of the inverse cumulative standard normal distribution function, but using a higher-accuracy (and more expensive) algorithm. Given P, it returns an approximation to the X satisfying P = Pr{Z <= X} where Z is a random variable from the standard normal distribution.
 
int CRRBetweenLevels (int fromlevel, int tolevel, const Vector< int > &refratios)
 
std::istream & operator>> (std::istream &, const expect &exp)
 
Vector< char > SerializeStringArray (const Vector< std::string > &stringArray)
 
Vector< std::string > UnSerializeStringArray (const Vector< char > &charArray)
 
void SyncStrings (const Vector< std::string > &localStrings, Vector< std::string > &syncedStrings, bool &alreadySynced)
 
template<typename T >
amrex::Long bytesOf (const std::vector< T > &v)
 
template<typename Key , typename T , class Compare >
amrex::Long bytesOf (const std::map< Key, T, Compare > &m)
 
void BroadcastBool (bool &bBool, int myLocalId, int rootId, const MPI_Comm &localComm)
 
void BroadcastString (std::string &bStr, int myLocalId, int rootId, const MPI_Comm &localComm)
 
void BroadcastStringArray (Vector< std::string > &bSA, int myLocalId, int rootId, const MPI_Comm &localComm)
 
template<class T >
void BroadcastArray (Vector< T > &aT, int myLocalId, int rootId, const MPI_Comm &localComm)
 
void Sleep (double sleepsec)
 
double second () noexcept
 
template<typename T >
void hash_combine (uint64_t &seed, const T &val) noexcept
 
template<typename T >
uint64_t hash_vector (const Vector< T > &vec, uint64_t seed=0xDEADBEEFDEADBEEF) noexcept
 
template<class T >
std::ostream & ToString (std::ostream &os, const T &t, const char *symbol_begin="[", const char *symbol_delim=", ", const char *symbol_end="]", const char *symbol_str="\"", int limit=100)
 
template<class T >
std::string ToString (const T &t, const char *symbol_begin="[", const char *symbol_delim=", ", const char *symbol_end="]", const char *symbol_str="\"", int limit=100, std::ostringstream ss=std::ostringstream{})
 
template<typename F , typename... T>
__host__ __device__ auto callNoinline (F const &f, T &&... arg) -> decltype(std::declval< F >()(std::declval< T >()...))
 Call the given function without inlining it.
 
template<class T , typename = typename T::FABType>
Vector< T * > GetVecOfPtrs (Vector< T > &a)
 
template<class T , std::size_t N, typename = typename T::FABType>
Vector< Array< T, N > * > GetVecOfPtrs (Vector< Array< T, N > > &a)
 
template<class T >
Vector< T * > GetVecOfPtrs (const Vector< std::unique_ptr< T > > &a)
 
template<class T , typename = typename T::FABType>
Vector< const T * > GetVecOfConstPtrs (const Vector< T > &a)
 
template<class T , std::size_t N, typename = typename T::FABType>
Vector< Array< T, N > const * > GetVecOfConstPtrs (Vector< Array< T, N > > const &a)
 
template<class T >
Vector< const T * > GetVecOfConstPtrs (const Vector< std::unique_ptr< T > > &a)
 
template<class T , typename = typename T::FABType>
Vector< const T * > GetVecOfConstPtrs (const Vector< T * > &a)
 
template<class T >
Vector< Vector< T * > > GetVecOfVecOfPtrs (const Vector< Vector< std::unique_ptr< T > > > &a)
 
template<class T >
Vector< std::array< T *, 3 > > GetVecOfArrOfPtrs (const Vector< std::array< std::unique_ptr< T >, 3 > > &a)
 
template<class T >
Vector< std::array< T const *, 3 > > GetVecOfArrOfPtrsConst (const Vector< std::array< std::unique_ptr< T >, 3 > > &a)
 
template<class T >
Vector< std::array< T const *, 3 > > GetVecOfArrOfConstPtrs (const Vector< std::array< std::unique_ptr< T >, 3 > > &a)
 
template<class T , std::enable_if_t< IsFabArray< T >::value||IsBaseFab< T >::value, int > = 0>
Vector< std::array< T const *, 3 > > GetVecOfArrOfConstPtrs (const Vector< std::array< T, 3 > > &a)
 
template<class T , std::enable_if_t< IsFabArray< T >::value||IsBaseFab< T >::value, int > = 0>
Vector< std::array< T *, 3 > > GetVecOfArrOfPtrs (Vector< std::array< T, 3 > > &a)
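These helpers adapt owning containers to the pointer-based interfaces used throughout AMReX; a sketch with MultiFabs:

    #include <AMReX.H>
    #include <AMReX_MultiFab.H>
    #include <AMReX_Vector.H>

    void use_levels (amrex::Vector<std::unique_ptr<amrex::MultiFab>>& phi)
    {
        // Non-owning views over the same objects:
        amrex::Vector<amrex::MultiFab*>       p  = amrex::GetVecOfPtrs(phi);
        amrex::Vector<const amrex::MultiFab*> cp = amrex::GetVecOfConstPtrs(phi);
        amrex::ignore_unused(p, cp);
    }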
 
template<class T >
void FillNull (Vector< T * > &a)
 
template<class T >
void FillNull (Vector< std::unique_ptr< T > > &a)
 
template<class T >
void RemoveDuplicates (Vector< T > &vec)
 
template<class T , class H >
void RemoveDuplicates (Vector< T > &vec)
 
void writeIntData (const int *data, std::size_t size, std::ostream &os, const IntDescriptor &id=FPC::NativeIntDescriptor())
 Write integer data to disk in a portable, self-describing manner.
 
void readIntData (int *data, std::size_t size, std::istream &is, const IntDescriptor &id)
 
void writeLongData (const Long *data, std::size_t size, std::ostream &os, const IntDescriptor &id=FPC::NativeLongDescriptor())
 
void readLongData (Long *data, std::size_t size, std::istream &is, const IntDescriptor &id)
 
void writeRealData (const Real *data, std::size_t size, std::ostream &os, const RealDescriptor &rd=FPC::NativeRealDescriptor())
 
void readRealData (Real *data, std::size_t size, std::istream &is, const RealDescriptor &rd)
 
void writeFloatData (const float *data, std::size_t size, std::ostream &os, const RealDescriptor &rd=FPC::Native32RealDescriptor())
 
void readFloatData (float *data, std::size_t size, std::istream &is, const RealDescriptor &rd)
 
void writeDoubleData (const double *data, std::size_t size, std::ostream &os, const RealDescriptor &rd=FPC::Native64RealDescriptor())
 
void readDoubleData (double *data, std::size_t size, std::istream &is, const RealDescriptor &rd)
 
void writeData (int const *data, std::size_t size, std::ostream &os)
 
void writeData (Long const *data, std::size_t size, std::ostream &os)
 
void writeData (float const *data, std::size_t size, std::ostream &os)
 
void writeData (double const *data, std::size_t size, std::ostream &os)
 
void readData (int *data, std::size_t size, std::istream &is)
 
void readData (Long *data, std::size_t size, std::istream &is)
 
void readData (float *data, std::size_t size, std::istream &is)
 
void readData (double *data, std::size_t size, std::istream &is)
 
std::ostream & operator<< (std::ostream &os, const VisMF::FabOnDisk &fod)
 Write a FabOnDisk to an ostream in ASCII.
 
std::istream & operator>> (std::istream &is, VisMF::FabOnDisk &fod)
 Read a FabOnDisk from an istream.
 
std::ostream & operator<< (std::ostream &os, const Vector< VisMF::FabOnDisk > &fa)
 Write an Vector<FabOnDisk> to an ostream in ASCII.
 
std::istream & operator>> (std::istream &is, Vector< VisMF::FabOnDisk > &fa)
 Read an Vector<FabOnDisk> from an istream.
 
std::ostream & operator<< (std::ostream &os, const VisMF::Header &hd)
 Write a VisMF::Header to an ostream in ASCII.
 
std::istream & operator>> (std::istream &is, VisMF::Header &hd)
 Read a VisMF::Header from an istream.
 
template<typename FAB >
std::enable_if_t< std::is_same_v< FAB, IArrayBox > > Write (const FabArray< FAB > &fa, const std::string &name)
 Write iMultiFab/FabArray<IArrayBox>
 
template<typename FAB >
std::enable_if_t< std::is_same_v< FAB, IArrayBox > > Read (FabArray< FAB > &fa, const std::string &name)
 Read iMultiFab/FabArray<IArrayBox>
 
std::ostream & operator<< (std::ostream &os, const LinOpBCType &t)
 
std::ostream & operator<< (std::ostream &os, const Mask &m)
 
std::istream & operator>> (std::istream &is, Mask &m)
 
void amrex_flux_redistribute (const Box &bx, Array4< Real > const &dqdt, Array4< Real const > const &divc, Array4< Real const > const &wt, Array4< Real const > const &vfrac, Array4< EBCellFlag const > const &flag, int as_crse, Array4< Real > const &rr_drho_crse, Array4< int const > const &rr_flag_crse, int as_fine, Array4< Real > const &dm_as_fine, Array4< int const > const &levmsk, const Geometry &geom, bool use_wts_in_divnc, int level_mask_not_covered, int icomp, int ncomp, Real dt)
 
void apply_flux_redistribution (const Box &bx, Array4< Real > const &div, Array4< Real const > const &divc, Array4< Real const > const &wt, int icomp, int ncomp, Array4< EBCellFlag const > const &flag_arr, Array4< Real const > const &vfrac, const Geometry &geom, bool use_wts_in_divnc)
 
void apply_eb_redistribution (const Box &bx, MultiFab &div_mf, MultiFab &divc_mf, const MultiFab &weights, MFIter *mfi, int icomp, int ncomp, const EBCellFlagFab &flags_fab, const MultiFab *volfrac, Box &, const Geometry &geom, bool use_wts_in_divnc)
 
void single_level_weighted_redistribute (MultiFab &div_tmp_in, MultiFab &div_out, const MultiFab &weights, int div_comp, int ncomp, const Geometry &geom, bool use_wts_in_divnc)
 
void single_level_redistribute (MultiFab &div_tmp_in, MultiFab &div_out, int div_comp, int ncomp, const Geometry &geom)
 
void apply_flux_redistribution (const amrex::Box &bx, amrex::Array4< amrex::Real > const &div, amrex::Array4< amrex::Real const > const &divc, amrex::Array4< amrex::Real const > const &wt, int icomp, int ncomp, amrex::Array4< amrex::EBCellFlag const > const &flag_arr, amrex::Array4< amrex::Real const > const &vfrac, const amrex::Geometry &geom, bool use_wts_in_divnc)
 
void amrex_flux_redistribute (const amrex::Box &bx, amrex::Array4< amrex::Real > const &dqdt, amrex::Array4< amrex::Real const > const &divc, amrex::Array4< amrex::Real const > const &wt, amrex::Array4< amrex::Real const > const &vfrac, amrex::Array4< amrex::EBCellFlag const > const &flag, int as_crse, amrex::Array4< amrex::Real > const &rr_drho_crse, amrex::Array4< int const > const &rr_flag_crse, int as_fine, amrex::Array4< amrex::Real > const &dm_as_fine, amrex::Array4< int const > const &levmsk, const amrex::Geometry &geom, bool use_wts_in_divnc, int level_mask_not_covered, int icomp, int ncomp, amrex::Real dt)
 
void ApplyRedistribution (amrex::Box const &bx, int ncomp, amrex::Array4< amrex::Real > const &dUdt_out, amrex::Array4< amrex::Real > const &dUdt_in, amrex::Array4< amrex::Real const > const &U_in, amrex::Array4< amrex::Real > const &scratch, amrex::Array4< amrex::EBCellFlag const > const &flag, amrex::Array4< amrex::Real const > const &apx, amrex::Array4< amrex::Real const > const &apy, amrex::Array4< amrex::Real const > const &apz, amrex::Array4< amrex::Real const > const &vfrac, amrex::Array4< amrex::Real const > const &fcx, amrex::Array4< amrex::Real const > const &fcy, amrex::Array4< amrex::Real const > const &fcz, amrex::Array4< amrex::Real const > const &ccc, amrex::BCRec const *d_bcrec_ptr, amrex::Geometry const &lev_geom, amrex::Real dt, std::string const &redistribution_type, bool use_wts_in_divnc=false, int srd_max_order=2, amrex::Real target_volfrac=0.5_rt, amrex::Array4< amrex::Real const > const &update_scale={})
 
void ApplyMLRedistribution (amrex::Box const &bx, int ncomp, amrex::Array4< amrex::Real > const &dUdt_out, amrex::Array4< amrex::Real > const &dUdt_in, amrex::Array4< amrex::Real const > const &U_in, amrex::Array4< amrex::Real > const &scratch, amrex::Array4< amrex::EBCellFlag const > const &flag, amrex::Array4< amrex::Real const > const &apx, amrex::Array4< amrex::Real const > const &apy, amrex::Array4< amrex::Real const > const &apz, amrex::Array4< amrex::Real const > const &vfrac, amrex::Array4< amrex::Real const > const &fcx, amrex::Array4< amrex::Real const > const &fcy, amrex::Array4< amrex::Real const > const &fcz, amrex::Array4< amrex::Real const > const &ccc, amrex::BCRec const *d_bcrec_ptr, amrex::Geometry const &lev_geom, amrex::Real dt, std::string const &redistribution_type, int as_crse, amrex::Array4< amrex::Real > const &rr_drho_crse, amrex::Array4< int const > const &rr_flag_crse, int as_fine, amrex::Array4< amrex::Real > const &dm_as_fine, amrex::Array4< int const > const &levmsk, int level_mask_not_covered, amrex::Real fac_for_deltaR=1.0_rt, bool use_wts_in_divnc=false, int icomp=0, int srd_max_order=2, amrex::Real target_volfrac=0.5_rt, amrex::Array4< amrex::Real const > const &update_scale={})
 
void ApplyInitialRedistribution (amrex::Box const &bx, int ncomp, amrex::Array4< amrex::Real > const &U_out, amrex::Array4< amrex::Real > const &U_in, amrex::Array4< amrex::EBCellFlag const > const &flag, amrex::Array4< amrex::Real const > const &apx, amrex::Array4< amrex::Real const > const &apy, amrex::Array4< amrex::Real const > const &apz, amrex::Array4< amrex::Real const > const &vfrac, amrex::Array4< amrex::Real const > const &fcx, amrex::Array4< amrex::Real const > const &fcy, amrex::Array4< amrex::Real const > const &fcz, amrex::Array4< amrex::Real const > const &ccc, amrex::BCRec const *d_bcrec_ptr, amrex::Geometry const &geom, std::string const &redistribution_type, int srd_max_order=2, amrex::Real target_volfrac=0.5_rt)
 
void StateRedistribute (amrex::Box const &bx, int ncomp, amrex::Array4< amrex::Real > const &U_out, amrex::Array4< amrex::Real > const &U_in, amrex::Array4< amrex::EBCellFlag const > const &flag, amrex::Array4< amrex::Real const > const &vfrac, amrex::Array4< amrex::Real const > const &fcx, amrex::Array4< amrex::Real const > const &fcy, amrex::Array4< amrex::Real const > const &fcz, amrex::Array4< amrex::Real const > const &ccent, amrex::BCRec const *d_bcrec_ptr, amrex::Array4< int const > const &itracker, amrex::Array4< amrex::Real const > const &nrs, amrex::Array4< amrex::Real const > const &alpha, amrex::Array4< amrex::Real const > const &nbhd_vol, amrex::Array4< amrex::Real const > const &cent_hat, amrex::Geometry const &geom, int max_order=2)
 
void MLStateRedistribute (amrex::Box const &bx, int ncomp, amrex::Array4< amrex::Real > const &U_out, amrex::Array4< amrex::Real > const &U_in, amrex::Array4< amrex::EBCellFlag const > const &flag, amrex::Array4< amrex::Real const > const &vfrac, amrex::Array4< amrex::Real const > const &fcx, amrex::Array4< amrex::Real const > const &fcy, amrex::Array4< amrex::Real const > const &fcz, amrex::Array4< amrex::Real const > const &ccent, amrex::BCRec const *d_bcrec_ptr, amrex::Array4< int const > const &itracker, amrex::Array4< amrex::Real const > const &nrs, amrex::Array4< amrex::Real const > const &alpha, amrex::Array4< amrex::Real const > const &nbhd_vol, amrex::Array4< amrex::Real const > const &cent_hat, amrex::Geometry const &geom, int as_crse, Array4< Real > const &drho_as_crse, Array4< int const > const &flag_as_crse, int as_fine, Array4< Real > const &dm_as_fine, Array4< int const > const &levmsk, int is_ghost_cell, amrex::Real fac_for_deltaR, int max_order=2)
 
void MakeITracker (amrex::Box const &bx, amrex::Array4< amrex::Real const > const &apx, amrex::Array4< amrex::Real const > const &apy, amrex::Array4< amrex::Real const > const &apz, amrex::Array4< amrex::Real const > const &vfrac, amrex::Array4< int > const &itracker, amrex::Geometry const &geom, amrex::Real target_volfrac)
 
void MakeStateRedistUtils (amrex::Box const &bx, amrex::Array4< amrex::EBCellFlag const > const &flag, amrex::Array4< amrex::Real const > const &vfrac, amrex::Array4< amrex::Real const > const &ccent, amrex::Array4< int const > const &itracker, amrex::Array4< amrex::Real > const &nrs, amrex::Array4< amrex::Real > const &alpha, amrex::Array4< amrex::Real > const &nbhd_vol, amrex::Array4< amrex::Real > const &cent_hat, amrex::Geometry const &geom, amrex::Real target_volfrac)
 
void ApplyRedistribution (Box const &bx, int ncomp, Array4< Real > const &dUdt_out, Array4< Real > const &dUdt_in, Array4< Real const > const &U_in, Array4< Real > const &scratch, Array4< EBCellFlag const > const &flag, Array4< Real const > const &apx, Array4< Real const > const &apy, Array4< Real const > const &apz, Array4< amrex::Real const > const &vfrac, Array4< Real const > const &fcx, Array4< Real const > const &fcy, Array4< Real const > const &fcz, Array4< Real const > const &ccc, amrex::BCRec const *d_bcrec_ptr, Geometry const &lev_geom, Real dt, std::string const &redistribution_type, bool use_wts_in_divnc, int srd_max_order, amrex::Real target_volfrac, Array4< Real const > const &srd_update_scale)
 
void ApplyMLRedistribution (Box const &bx, int ncomp, Array4< Real > const &dUdt_out, Array4< Real > const &dUdt_in, Array4< Real const > const &U_in, Array4< Real > const &scratch, Array4< EBCellFlag const > const &flag, Array4< Real const > const &apx, Array4< Real const > const &apy, Array4< Real const > const &apz, Array4< amrex::Real const > const &vfrac, Array4< Real const > const &fcx, Array4< Real const > const &fcy, Array4< Real const > const &fcz, Array4< Real const > const &ccc, amrex::BCRec const *d_bcrec_ptr, Geometry const &lev_geom, Real dt, std::string const &redistribution_type, int as_crse, Array4< Real > const &rr_drho_crse, Array4< int const > const &rr_flag_crse, int as_fine, Array4< Real > const &dm_as_fine, Array4< int const > const &levmsk, int level_mask_not_covered, Real fac_for_deltaR, bool use_wts_in_divnc, int icomp, int srd_max_order, amrex::Real target_volfrac, Array4< Real const > const &srd_update_scale)
 
void ApplyInitialRedistribution (Box const &bx, int ncomp, Array4< Real > const &U_out, Array4< Real > const &U_in, Array4< EBCellFlag const > const &flag, amrex::Array4< amrex::Real const > const &apx, amrex::Array4< amrex::Real const > const &apy, amrex::Array4< amrex::Real const > const &apz, amrex::Array4< amrex::Real const > const &vfrac, amrex::Array4< amrex::Real const > const &fcx, amrex::Array4< amrex::Real const > const &fcy, amrex::Array4< amrex::Real const > const &fcz, amrex::Array4< amrex::Real const > const &ccc, amrex::BCRec const *d_bcrec_ptr, Geometry const &lev_geom, std::string const &redistribution_type, int srd_max_order, amrex::Real target_volfrac)
 
void MakeITracker (Box const &bx, Array4< Real const > const &apx, Array4< Real const > const &apy, Array4< Real const > const &apz, Array4< Real const > const &vfrac, Array4< int > const &itracker, Geometry const &lev_geom, Real target_volfrac)
 
void MLStateRedistribute (Box const &bx, int ncomp, Array4< Real > const &U_out, Array4< Real > const &U_in, Array4< EBCellFlag const > const &flag, Array4< Real const > const &vfrac, Array4< Real const > const &fcx, Array4< Real const > const &fcy, Array4< Real const > const &fcz, Array4< Real const > const &ccent, amrex::BCRec const *d_bcrec_ptr, Array4< int const > const &itracker, Array4< Real const > const &nrs, Array4< Real const > const &alpha, Array4< Real const > const &nbhd_vol, Array4< Real const > const &cent_hat, Geometry const &lev_geom, int as_crse, Array4< Real > const &drho_as_crse, Array4< int const > const &flag_as_crse, int as_fine, Array4< Real > const &dm_as_fine, Array4< int const > const &levmsk, int is_ghost_cell, Real fac_for_deltaR, int max_order)
 
void StateRedistribute (Box const &bx, int ncomp, Array4< Real > const &U_out, Array4< Real > const &U_in, Array4< EBCellFlag const > const &flag, Array4< Real const > const &vfrac, Array4< Real const > const &fcx, Array4< Real const > const &fcy, Array4< Real const > const &fcz, Array4< Real const > const &ccent, amrex::BCRec const *d_bcrec_ptr, Array4< int const > const &itracker, Array4< Real const > const &nrs, Array4< Real const > const &alpha, Array4< Real const > const &nbhd_vol, Array4< Real const > const &cent_hat, Geometry const &lev_geom, int max_order)
 
void MakeStateRedistUtils (Box const &bx, Array4< EBCellFlag const > const &flag, Array4< Real const > const &vfrac, Array4< Real const > const &ccent, Array4< int const > const &itracker, Array4< Real > const &nrs, Array4< Real > const &alpha, Array4< Real > const &nbhd_vol, Array4< Real > const &cent_hat, Geometry const &lev_geom, Real target_vol)
 
void FillSignedDistance (MultiFab &mf, bool fluid_has_positive_sign=true)
 Fill MultiFab with signed distance.
 
void FillSignedDistance (MultiFab &mf, EB2::Level const &ls_lev, EBFArrayBoxFactory const &eb_fac, int refratio, bool fluid_has_positive_sign=true)
 Fill MultiFab with signed distance.
 
template<typename G >
void FillImpFunc (MultiFab &mf, G const &gshop, Geometry const &geom)
 Fill MultiFab with implicit function.
 
void TagCutCells (TagBoxArray &tags, const MultiFab &state)
 
void TagVolfrac (TagBoxArray &tags, const MultiFab &volfrac, Real tol)
 
std::ostream & operator<< (std::ostream &os, const EBCellFlag &flag)
 
std::unique_ptr< EBFArrayBoxFactory > makeEBFabFactory (const Geometry &a_geom, const BoxArray &a_ba, const DistributionMapping &a_dm, const Vector< int > &a_ngrow, EBSupport a_support)
 
std::unique_ptr< EBFArrayBoxFactory > makeEBFabFactory (const EB2::Level *, const BoxArray &a_ba, const DistributionMapping &a_dm, const Vector< int > &a_ngrow, EBSupport a_support)
 
std::unique_ptr< EBFArrayBoxFactory > makeEBFabFactory (const EB2::IndexSpace *, const Geometry &a_geom, const BoxArray &a_ba, const DistributionMapping &a_dm, const Vector< int > &a_ngrow, EBSupport a_support)
 
const EBCellFlagFab & getEBCellFlagFab (const FArrayBox &fab)
 
void EB_set_covered (MultiFab &mf, Real val)
 
void EB_set_covered (MultiFab &mf, int icomp, int ncomp, int ngrow, Real val)
 
void EB_set_covered (MultiFab &mf, int icomp, int ncomp, const Vector< Real > &vals)
 
void EB_set_covered (MultiFab &mf, int icomp, int ncomp, int ngrow, const Vector< Real > &a_vals)
 
void EB_set_covered_faces (const Array< MultiFab *, 3 > &umac, Real val)
 
void EB_set_covered_faces (const Array< MultiFab *, 3 > &umac, const int scomp, const int ncomp, const Vector< Real > &a_vals)
 
void EB_average_down (const MultiFab &S_fine, MultiFab &S_crse, const MultiFab &vol_fine, const MultiFab &vfrac_fine, int scomp, int ncomp, const IntVect &ratio)
 
void EB_average_down (const MultiFab &S_fine, MultiFab &S_crse, int scomp, int ncomp, int ratio)
 
void EB_average_down (const MultiFab &S_fine, MultiFab &S_crse, int scomp, int ncomp, const IntVect &ratio)
 
void EB_average_down_faces (const Array< const MultiFab *, 3 > &fine, const Array< MultiFab *, 3 > &crse, int ratio, int ngcrse)
 
void EB_average_down_faces (const Array< const MultiFab *, 3 > &fine, const Array< MultiFab *, 3 > &crse, const IntVect &ratio, int ngcrse)
 
void EB_average_down_faces (const Array< const MultiFab *, 3 > &fine, const Array< MultiFab *, 3 > &crse, const IntVect &ratio, const Geometry &crse_geom)
 
void EB_average_down_boundaries (const MultiFab &fine, MultiFab &crse, int ratio, int ngcrse)
 
void EB_average_down_boundaries (const MultiFab &fine, MultiFab &crse, const IntVect &ratio, int ngcrse)
 
void EB_computeDivergence (MultiFab &divu, const Array< MultiFab const *, 3 > &umac, const Geometry &geom, bool already_on_centroids)
 
void EB_computeDivergence (MultiFab &divu, const Array< MultiFab const *, 3 > &umac, const Geometry &geom, bool already_on_centroids, const MultiFab &vel_eb)
 
void EB_average_face_to_cellcenter (MultiFab &ccmf, int dcomp, const Array< MultiFab const *, 3 > &fmf)
 
void EB_interp_CC_to_Centroid (MultiFab &cent, const MultiFab &cc, int scomp, int dcomp, int ncomp, const Geometry &geom)
 
void EB_interp_CC_to_FaceCentroid (const MultiFab &cc, MultiFab &fc_x, MultiFab &fc_y, MultiFab &fc_z, int scomp, int dcomp, int ncomp, const Geometry &a_geom, const Vector< BCRec > &a_bcs)
 
void EB_interp_CellCentroid_to_FaceCentroid (const MultiFab &phi_centroid, const Array< MultiFab *, 3 > &phi_faces, int scomp, int dcomp, int nc, const Geometry &geom, const amrex::Vector< amrex::BCRec > &a_bcs)
 
void EB_interp_CellCentroid_to_FaceCentroid (const MultiFab &phi_centroid, const Vector< MultiFab * > &phi_faces, int scomp, int dcomp, int nc, const Geometry &geom, const amrex::Vector< amrex::BCRec > &a_bcs)
 
void EB_interp_CellCentroid_to_FaceCentroid (const MultiFab &phi_centroid, MultiFab &phi_xface, MultiFab &phi_yface, MultiFab &phi_zface, int scomp, int dcomp, int ncomp, const Geometry &a_geom, const Vector< BCRec > &a_bcs)
 
void WriteEBSurface (const BoxArray &ba, const DistributionMapping &dmap, const Geometry &geom, const EBFArrayBoxFactory *ebf)
 
static int CreateWriteHDF5AttrDouble (hid_t loc, const char *name, hsize_t n, const double *data)
 
static int CreateWriteHDF5AttrInt (hid_t loc, const char *name, hsize_t n, const int *data)
 
static int CreateWriteHDF5AttrString (hid_t loc, const char *name, const char *str)
 
static void SetHDF5fapl (hid_t fapl, MPI_Comm comm)
 
static void WriteGenericPlotfileHeaderHDF5 (hid_t fid, int nlevels, const Vector< const MultiFab * > &mf, const Vector< BoxArray > &bArray, const Vector< std::string > &varnames, const Vector< Geometry > &geom, Real time, const Vector< int > &level_steps, const Vector< IntVect > &ref_ratio, const std::string &versionName, const std::string &levelPrefix, const std::string &mfPrefix, const Vector< std::string > &extra_dirs)
 
void WriteMultiLevelPlotfileHDF5SingleDset (const std::string &plotfilename, int nlevels, const Vector< const MultiFab * > &mf, const Vector< std::string > &varnames, const Vector< Geometry > &geom, Real time, const Vector< int > &level_steps, const Vector< IntVect > &ref_ratio, const std::string &compression, const std::string &versionName, const std::string &levelPrefix, const std::string &mfPrefix, const Vector< std::string > &extra_dirs)
 
void WriteMultiLevelPlotfileHDF5MultiDset (const std::string &plotfilename, int nlevels, const Vector< const MultiFab * > &mf, const Vector< std::string > &varnames, const Vector< Geometry > &geom, Real time, const Vector< int > &level_steps, const Vector< IntVect > &ref_ratio, const std::string &compression, const std::string &versionName, const std::string &levelPrefix, const std::string &mfPrefix, const Vector< std::string > &extra_dirs)
 
void WriteSingleLevelPlotfileHDF5 (const std::string &plotfilename, const MultiFab &mf, const Vector< std::string > &varnames, const Geometry &geom, Real time, int level_step, const std::string &compression, const std::string &versionName, const std::string &levelPrefix, const std::string &mfPrefix, const Vector< std::string > &extra_dirs)
 
void WriteSingleLevelPlotfileHDF5SingleDset (const std::string &plotfilename, const MultiFab &mf, const Vector< std::string > &varnames, const Geometry &geom, Real time, int level_step, const std::string &compression, const std::string &versionName, const std::string &levelPrefix, const std::string &mfPrefix, const Vector< std::string > &extra_dirs)
 
void WriteSingleLevelPlotfileHDF5MultiDset (const std::string &plotfilename, const MultiFab &mf, const Vector< std::string > &varnames, const Geometry &geom, Real time, int level_step, const std::string &compression, const std::string &versionName, const std::string &levelPrefix, const std::string &mfPrefix, const Vector< std::string > &extra_dirs)
 
void WriteMultiLevelPlotfileHDF5 (const std::string &plotfilename, int nlevels, const Vector< const MultiFab * > &mf, const Vector< std::string > &varnames, const Vector< Geometry > &geom, Real time, const Vector< int > &level_steps, const Vector< IntVect > &ref_ratio, const std::string &compression, const std::string &versionName, const std::string &levelPrefix, const std::string &mfPrefix, const Vector< std::string > &extra_dirs)
 
static int CreateWriteHDF5Attr (hid_t loc, const char *name, hsize_t n, void *data, hid_t dtype)
 
static int CreateWriteHDF5AttrString (hid_t loc, const char *name, const char *str)
 
static int ReadHDF5Attr (hid_t loc, const char *name, void *data, hid_t dtype)
 
static void SetHDF5fapl (hid_t fapl, MPI_Comm comm)
 
template<class PC , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
void WriteHDF5ParticleDataSync (PC const &pc, const std::string &dir, const std::string &name, const Vector< int > &write_real_comp, const Vector< int > &write_int_comp, const Vector< std::string > &real_comp_names, const Vector< std::string > &int_comp_names, const std::string &compression, F &&f, bool is_checkpoint)
 
std::unique_ptr< Hypre > makeHypre (const BoxArray &grids, const DistributionMapping &dmap, const Geometry &geom, MPI_Comm comm_, Hypre::Interface interface, const iMultiFab *overset_mask)
 
std::unique_ptr< PETScABecLap > makePetsc (const BoxArray &grids, const DistributionMapping &dmap, const Geometry &geom, MPI_Comm comm_)
 
void Init_FFT (MPI_Comm comm)
 Initialize FFT.
 
void Finalize_FFT ()
 
template<typename V1 , typename F >
std::enable_if_t< IsAlgVector< std::decay_t< V1 > >::value > ForEach (V1 &x, F const &f)
 
template<typename V1 , typename V2 , typename F >
std::enable_if_t< IsAlgVector< std::decay_t< V1 > >::value &&IsAlgVector< std::decay_t< V2 > >::value > ForEach (V1 &x, V2 &y, F const &f)
 
template<typename V1 , typename V2 , typename V3 , typename F >
std::enable_if_t< IsAlgVector< std::decay_t< V1 > >::value &&IsAlgVector< std::decay_t< V2 > >::value &&IsAlgVector< std::decay_t< V3 > >::value > ForEach (V1 &x, V2 &y, V3 &z, F const &f)
 
template<typename V1 , typename V2 , typename V3 , typename V4 , typename F >
std::enable_if_t< IsAlgVector< std::decay_t< V1 > >::value &&IsAlgVector< std::decay_t< V2 > >::value &&IsAlgVector< std::decay_t< V3 > >::value &&IsAlgVector< std::decay_t< V4 > >::value > ForEach (V1 &x, V2 &y, V3 &z, V4 &a, F const &f)
 
template<typename V1 , typename V2 , typename V3 , typename V4 , typename V5 , typename F >
std::enable_if_t< IsAlgVector< std::decay_t< V1 > >::value &&IsAlgVector< std::decay_t< V2 > >::value &&IsAlgVector< std::decay_t< V3 > >::value &&IsAlgVector< std::decay_t< V4 > >::value &&IsAlgVector< std::decay_t< V5 > >::value > ForEach (V1 &x, V2 &y, V3 &z, V4 &a, V5 &b, F const &f)
 
template<typename T , typename Allocator >
T Dot (AlgVector< T, Allocator > const &x, AlgVector< T, Allocator > const &y, bool local=false)
 
template<typename T , typename Allocator >
void Axpy (AlgVector< T, Allocator > &y, T a, AlgVector< T, Allocator > const &x)
 
template<typename T , typename Allocator >
void Xpay (AlgVector< T, Allocator > &y, T a, AlgVector< T, Allocator > const &x)
 
template<typename T , typename Allocator >
void LinComb (AlgVector< T, Allocator > &y, T a, AlgVector< T, Allocator > const &xa, T b, AlgVector< T, Allocator > const &xb)
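These are BLAS-1-style operations on AlgVectors; a sketch, where x, y, z are assumed compatible AlgVector<Real>s and a, b are scalars (the comments give the conventional semantics of these names):

    amrex::Axpy(y, a, x);             // y += a*x
    amrex::Xpay(y, a, x);             // y  = x + a*y
    amrex::LinComb(z, a, x, b, y);    // z  = a*x + b*y
    amrex::Real d = amrex::Dot(x, y); // dot product; pass local=true to skip the MPI reduction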
 
template<typename C , typename T , template< typename > class AD, template< typename > class AS, std::enable_if_t< std::is_same_v< C, Gpu::HostToDevice >||std::is_same_v< C, Gpu::DeviceToHost >||std::is_same_v< C, Gpu::DeviceToDevice >, int > = 0>
void duplicateCSR (C c, CSR< T, AD > &dst, CSR< T, AS > const &src)
 
template<typename T , template< typename > class V>
CSR< T, V > transpose (CSR< T, V > const &csr, Long ncols)
 
template<typename T , template< typename > class Allocator>
SpMatrix< T, Allocator > transpose (SpMatrix< T, Allocator > const &A, AlgPartition col_partition)
 
template<typename T >
void SpMV (Long nrows, Long ncols, T *__restrict__ py, CsrView< T const > const &A, T const *__restrict__ px)
 
template<typename T , template< typename > class AllocM, typename AllocV >
void SpMV (AlgVector< T, AllocV > &y, SpMatrix< T, AllocM > const &A, AlgVector< T, AllocV > const &x)
 
template<typename T , template< typename > class AllocM, typename AllocV >
void computeResidual (AlgVector< T, AllocV > &res, SpMatrix< T, AllocM > const &A, AlgVector< T, AllocV > const &x, AlgVector< T, AllocV > const &b)
 res = b - A*x
 
MLMGNormType_EnumTraits amrex_get_enum_traits (MLMGNormType)
 
template<int N, typename T , typename M , typename P >
__host__ __device__ int pcg_solve (T *__restrict__ x, T *__restrict__ r, M const &mat, P const &precond, int maxiter, T rel_tol)
 Preconditioned conjugate gradient solver.
 
template<class T >
constexpr decltype(T::is_particle_tile_data) IsParticleTileData ()
 
template<class T , class... Args>
constexpr bool IsParticleTileData (Args...)
 
template<typename A , typename B , std::enable_if_t< std::is_same_v< std::remove_cv_t< A >, std::remove_cv_t< B > >, int > = 0>
bool isSame (A const *pa, B const *pb)
 
__host__ __device__ std::uint64_t SetParticleIDandCPU (Long id, int cpu) noexcept
 
template<int NReal, int NInt>
std::ostream & operator<< (std::ostream &os, const Particle< NReal, NInt > &p)
 
template<int NReal>
std::ostream & operator<< (std::ostream &os, const Particle< NReal, 0 > &p)
 
template<int NInt>
std::ostream & operator<< (std::ostream &os, const Particle< 0, NInt > &p)
 
template<int NReal = 0, int NInt = 0>
std::ostream & operator<< (std::ostream &os, const Particle< 0, 0 > &p)
 
void communicateParticlesFinish (const ParticleCopyPlan &plan)
 
template<class PC , class Buffer , std::enable_if_t< IsParticleContainer< PC >::value &&std::is_base_of_v< PolymorphicArenaAllocator< typename Buffer::value_type >, Buffer >, int > foo = 0>
void packBuffer (const PC &pc, const ParticleCopyOp &op, const ParticleCopyPlan &plan, Buffer &snd_buffer)
 
template<class PC , class Buffer , class UnpackPolicy , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
void unpackBuffer (PC &pc, const ParticleCopyPlan &plan, const Buffer &snd_buffer, UnpackPolicy const &policy)
 
template<class PC , class SndBuffer , class RcvBuffer , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
void communicateParticlesStart (const PC &pc, ParticleCopyPlan &plan, const SndBuffer &snd_buffer, RcvBuffer &rcv_buffer)
 
template<class PC , class Buffer , class UnpackPolicy , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
void unpackRemotes (PC &pc, const ParticleCopyPlan &plan, Buffer &rcv_buffer, UnpackPolicy const &policy)
 
template<class PC , class MF , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
void ParticleToMesh (PC const &pc, MF &mf, int lev, F const &f, bool zero_out_input=true)
 
template<class PC , class MF , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
void MeshToParticle (PC &pc, MF const &mf, int lev, F const &f)
 
Long CountSnds (const std::map< int, Vector< char > > &not_ours, Vector< Long > &Snds)
 
Long doHandShake (const std::map< int, Vector< char > > &not_ours, Vector< Long > &Snds, Vector< Long > &Rcvs)
 
Long doHandShakeLocal (const std::map< int, Vector< char > > &not_ours, const Vector< int > &neighbor_procs, Vector< Long > &Snds, Vector< Long > &Rcvs)
 
template<class PC , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
auto ReduceSum (PC const &pc, F &&f) -> decltype(particle_detail::call_f(f, typename PC::ConstPTDType(), int()))
 A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates over all particles on all levels.
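A sketch of an all-level reduction, assuming a container type MyPC whose tile data exposes an id(i) accessor (the f(ptd, i) calling convention follows the signature above):

    // Count valid particles (id > 0) on this rank.
    auto n = amrex::ReduceSum(pc,
        [=] AMREX_GPU_DEVICE (typename MyPC::ConstPTDType const& ptd, int i)
            -> amrex::Long
        {
            return (ptd.id(i) > 0) ? 1 : 0;
        });
    // The result is rank-local; reduce across MPI if a global count is needed.
    amrex::ParallelDescriptor::ReduceLongSum(n);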
 
template<class PC , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
auto ReduceSum (PC const &pc, int lev, F &&f) -> decltype(particle_detail::call_f(f, typename PC::ConstPTDType(), int()))
 A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates only on the specified level.
 
template<class PC , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
auto ReduceSum (PC const &pc, int lev_min, int lev_max, F const &f) -> decltype(particle_detail::call_f(f, typename PC::ConstPTDType(), int()))
 A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates from the specified lev_min to lev_max.
 
template<class PC , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
auto ReduceMax (PC const &pc, F &&f) -> decltype(particle_detail::call_f(f, typename PC::ConstPTDType(), int()))
 A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates over all particles on all levels.
 
template<class PC , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
auto ReduceMax (PC const &pc, int lev, F &&f) -> decltype(particle_detail::call_f(f, typename PC::ConstPTDType(), int()))
 A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates only on the specified level.
 
template<class PC , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
auto ReduceMax (PC const &pc, int lev_min, int lev_max, F const &f) -> decltype(particle_detail::call_f(f, typename PC::ConstPTDType(), int()))
 A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates from the specified lev_min to lev_max.
 
template<class PC , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
auto ReduceMin (PC const &pc, F &&f) -> decltype(particle_detail::call_f(f, typename PC::ConstPTDType(), int()))
 A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates over all particles on all levels.
 
template<class PC , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
auto ReduceMin (PC const &pc, int lev, F &&f) -> decltype(particle_detail::call_f(f, typename PC::ConstPTDType(), int()))
 A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates only on the specified level.
 
template<class PC , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
auto ReduceMin (PC const &pc, int lev_min, int lev_max, F const &f) -> decltype(particle_detail::call_f(f, typename PC::ConstPTDType(), int()))
 A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates from the specified lev_min to lev_max.
 
template<class PC , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
bool ReduceLogicalAnd (PC const &pc, F &&f)
 A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates over all particles on all levels.
 
template<class PC , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
bool ReduceLogicalAnd (PC const &pc, int lev, F &&f)
 A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates only on the specified level.
 
template<class PC , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
bool ReduceLogicalAnd (PC const &pc, int lev_min, int lev_max, F const &f)
 A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates from the specified lev_min to lev_max.
 
template<class PC , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
bool ReduceLogicalOr (PC const &pc, F &&f)
 A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates over all particles on all levels.
 
template<class PC , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
bool ReduceLogicalOr (PC const &pc, int lev, F &&f)
 A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates only on the specified level.
 
template<class PC , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
bool ReduceLogicalOr (PC const &pc, int lev_min, int lev_max, F const &f)
 A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates from the specified lev_min to lev_max.
 
template<class RD , class PC , class F , class ReduceOps , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
RD::Type ParticleReduce (PC const &pc, F &&f, ReduceOps &reduce_ops)
 A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates over all particles on all levels.
 
template<class RD , class PC , class F , class ReduceOps , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
RD::Type ParticleReduce (PC const &pc, int lev, F &&f, ReduceOps &reduce_ops)
 A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates only on the specified level.
 
template<class RD , class PC , class F , class ReduceOps , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
RD::Type ParticleReduce (PC const &pc, int lev_min, int lev_max, F const &f, ReduceOps &reduce_ops)
 A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates from the specified lev_min to lev_max.
 
template<typename T_ParticleType , int NAR, int NAI>
__host__ __device__ void copyParticle (const ParticleTileData< T_ParticleType, NAR, NAI > &dst, const ConstParticleTileData< T_ParticleType, NAR, NAI > &src, int src_i, int dst_i) noexcept
 A general single particle copying routine that can run on the GPU.
 
template<typename T_ParticleType , int NAR, int NAI>
__host__ __device__ void copyParticle (const ParticleTileData< T_ParticleType, NAR, NAI > &dst, const ParticleTileData< T_ParticleType, NAR, NAI > &src, int src_i, int dst_i) noexcept
 A general single particle copying routine that can run on the GPU.
 
template<typename T_ParticleType , int NAR, int NAI>
__host__ __device__ void swapParticle (const ParticleTileData< T_ParticleType, NAR, NAI > &dst, const ParticleTileData< T_ParticleType, NAR, NAI > &src, int src_i, int dst_i) noexcept
 A general single particle swapping routine that can run on the GPU.
 
template<typename DstTile , typename SrcTile >
void copyParticles (DstTile &dst, const SrcTile &src) noexcept
 Copy particles from src to dst. This version copies all the particles, writing them to the beginning of dst.
 
template<typename DstTile , typename SrcTile , typename Index , typename N , std::enable_if_t< std::is_integral_v< Index >, int > foo = 0>
void copyParticles (DstTile &dst, const SrcTile &src, Index src_start, Index dst_start, N n) noexcept
 Copy particles from src to dst. This version copies n particles starting at index src_start, writing the result starting at dst_start.
 
template<typename DstTile , typename SrcTile , typename F >
void transformParticles (DstTile &dst, const SrcTile &src, F &&f) noexcept
 Apply the function f to all the particles in src, writing the result to dst. This version does all the particles in src.
 
template<typename DstTile , typename SrcTile , typename Index , typename N , typename F , std::enable_if_t< std::is_integral_v< Index >, int > foo = 0>
void transformParticles (DstTile &dst, const SrcTile &src, Index src_start, Index dst_start, N n, F const &f) noexcept
 Apply the function f to particles in src, writing the result to dst. This version applies the function to n particles starting at index src_start, writing the result starting at dst_start.
 
template<typename DstTile1 , typename DstTile2 , typename SrcTile , typename F >
void transformParticles (DstTile1 &dst1, DstTile2 &dst2, const SrcTile &src, F &&f) noexcept
 Apply the function f to all the particles in src, writing the results to dst1 and dst2. This version does all the particles in src.
 
template<typename DstTile1 , typename DstTile2 , typename SrcTile , typename Index , typename N , typename F , std::enable_if_t< std::is_integral_v< Index >, int > foo = 0>
void transformParticles (DstTile1 &dst1, DstTile2 &dst2, const SrcTile &src, Index src_start, Index dst1_start, Index dst2_start, N n, F const &f) noexcept
 Apply the function f to particles in src, writing the results to dst1 and dst2. This version applies the function to n particles starting at index src_start, writing the result starting at dst1_start and dst2_start.
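
A minimal sketch (an illustration, assuming dst has already been resized to src.numParticles() and that the tile has at least one real component): copy each particle with copyParticle, then double its first real component:

transformParticles(dst, src,
    [=] AMREX_GPU_DEVICE (const auto& dst_data, const auto& src_data,
                          int src_i, int dst_i) noexcept
    {
        copyParticle(dst_data, src_data, src_i, dst_i);
        dst_data.rdata(0)[dst_i] *= 2.0; // component 0 is an assumption
    });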
 
template<typename DstTile , typename SrcTile , typename Index , typename N , std::enable_if_t< std::is_integral_v< Index >, int > foo = 0>
Index filterParticles (DstTile &dst, const SrcTile &src, const Index *mask) noexcept
 Conditionally copy particles from src to dst based on the value of mask.
 
template<typename DstTile , typename SrcTile , typename Index , typename N , std::enable_if_t< std::is_integral_v< Index >, int > foo = 0>
Index filterParticles (DstTile &dst, const SrcTile &src, const Index *mask, Index src_start, Index dst_start, N n) noexcept
 Conditionally copy particles from src to dst based on the value of mask. This version conditionally copies n particles starting at index src_start, writing the result starting at dst_start.
 
template<typename DstTile , typename SrcTile , typename Pred , std::enable_if_t<!std::is_pointer_v< std::decay_t< Pred > >, int > foo = 0>
int filterParticles (DstTile &dst, const SrcTile &src, Pred &&p) noexcept
 Conditionally copy particles from src to dst based on a predicate.
 
template<typename DstTile , typename SrcTile , typename Pred , typename Index , typename N , std::enable_if_t<!std::is_pointer_v< std::decay_t< Pred > >, Index > nvccfoo = 0>
Index filterParticles (DstTile &dst, const SrcTile &src, Pred const &p, Index src_start, Index dst_start, N n) noexcept
 Conditionally copy particles from src to dst based on a predicate. This version conditionally copies n particles starting at index src_start, writing the result starting at dst_start.
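
A minimal sketch (assuming dst has been resized to src.numParticles() and that the tile has at least one int component): keep only particles whose first int component is positive:

int n_kept = filterParticles(dst, src,
    [=] AMREX_GPU_DEVICE (const auto& ptd, int i) noexcept
    {
        return ptd.idata(0)[i] > 0; // component 0 is an assumption
    });
dst.resize(n_kept); // shrink dst to the number of particles kept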
 
template<typename DstTile , typename SrcTile , typename Index , typename F , std::enable_if_t< std::is_integral_v< Index >, int > foo = 0>
Index filterAndTransformParticles (DstTile &dst, const SrcTile &src, Index *mask, F const &f, Index src_start, Index dst_start) noexcept
 Conditionally copy particles from src to dst based on the value of mask. A transformation will also be applied to the particles on copy.
 
template<typename DstTile , typename SrcTile , typename Index , typename F , std::enable_if_t< std::is_integral_v< Index >, int > foo = 0>
Index filterAndTransformParticles (DstTile &dst, const SrcTile &src, Index *mask, F &&f) noexcept
 Conditionally copy particles from src to dst based on the value of mask. A transformation will also be applied to the particles on copy.
 
template<typename DstTile , typename SrcTile , typename Pred , typename F , std::enable_if_t<!std::is_pointer_v< std::decay_t< Pred > >, int > foo = 0>
int filterAndTransformParticles (DstTile &dst, const SrcTile &src, Pred &&p, F &&f) noexcept
 Conditionally copy particles from src to dst based on a predicate. A transformation will also be applied to the particles on copy.
 
template<typename DstTile1 , typename DstTile2 , typename SrcTile , typename Index , typename F , std::enable_if_t< std::is_integral_v< Index >, int > foo = 0>
Index filterAndTransformParticles (DstTile1 &dst1, DstTile2 &dst2, const SrcTile &src, Index *mask, F const &f) noexcept
 Conditionally copy particles from src to dst1 and dst2 based on the value of mask. A transformation will also be applied to the particles on copy.
 
template<typename DstTile1 , typename DstTile2 , typename SrcTile , typename Pred , typename F , std::enable_if_t<!std::is_pointer_v< std::decay_t< Pred > >, int > foo = 0>
int filterAndTransformParticles (DstTile1 &dst1, DstTile2 &dst2, const SrcTile &src, Pred const &p, F &&f) noexcept
 Conditionally copy particles from src to dst1 and dst2 based on a predicate. A transformation will also be applied to the particles on copy.
 
template<typename DstTile , typename SrcTile , typename Pred , typename F , typename Index , std::enable_if_t<!std::is_pointer_v< std::decay_t< Pred > >, Index > nvccfoo = 0>
Index filterAndTransformParticles (DstTile &dst, const SrcTile &src, Pred const &p, F &&f, Index src_start, Index dst_start) noexcept
 Conditionally copy particles from src to dst based on a predicate. This version conditionally copies particles starting at index src_start, writing the result starting at dst_start.
 
template<typename PTile , typename N , typename Index , std::enable_if_t< std::is_integral_v< Index >, int > foo = 0>
void gatherParticles (PTile &dst, const PTile &src, N np, const Index *inds)
 gatherParticles copies particles from an arbitrary order into contiguous order. Specifically, the particle at index inds[i] in src will be copied to index i in dst.
 
template<typename PTile , typename N , typename Index , std::enable_if_t< std::is_integral_v< Index >, int > foo = 0>
void scatterParticles (PTile &dst, const PTile &src, N np, const Index *inds)
 scatterParticles copies particles from contiguous order into an arbitrary order. Specifically, the particle at index i in src will be copied to index inds[i] in dst.
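
A typical gather/work/scatter round trip might look like the following sketch, where tmp, ptile, np and the device index array inds are assumptions for illustration:

gatherParticles(tmp, ptile, np, inds);   // tmp[i] = ptile[inds[i]]
// ... operate on the now-contiguous particles in tmp ...
scatterParticles(ptile, tmp, np, inds);  // ptile[inds[i]] = tmp[i]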
 
IntVect computeRefFac (const ParGDBBase *a_gdb, int src_lev, int lev)
 
Vector< int > computeNeighborProcs (const ParGDBBase *a_gdb, int ngrow)
 
template<class Iterator , std::enable_if_t< IsParticleIterator< Iterator >::value, int > foo = 0>
int numParticlesOutOfRange (Iterator const &pti, int nGrow)
 Returns the number of particles that are more than nGrow cells from the box corresponding to the input iterator.
 
template<class Iterator , std::enable_if_t< IsParticleIterator< Iterator >::value, int > foo = 0>
int numParticlesOutOfRange (Iterator const &pti, IntVect nGrow)
 Returns the number of particles that are more than nGrow cells from the box corresponding to the input iterator.
 
template<class PC , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
int numParticlesOutOfRange (PC const &pc, int nGrow)
 Returns the number of particles that are more than nGrow cells from their assigned box.
 
template<class PC , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
int numParticlesOutOfRange (PC const &pc, IntVect nGrow)
 Returns the number of particles that are more than nGrow cells from their assigned box.
 
template<class PC , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
int numParticlesOutOfRange (PC const &pc, int lev_min, int lev_max, int nGrow)
 Returns the number of particles that are more than nGrow cells from their assigned box.
 
template<class PC , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
int numParticlesOutOfRange (PC const &pc, int lev_min, int lev_max, IntVect nGrow)
 Returns the number of particles that are more than nGrow cells from their assigned box.
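
These checks are convenient in debugging assertions; for example (a sketch, assuming a container pc that has just been redistributed):

// All particles should be within one ghost cell of their assigned box.
AMREX_ALWAYS_ASSERT(numParticlesOutOfRange(pc, 1) == 0);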
 
__host__ __device__ int getTileIndex (const IntVect &iv, const Box &box, const bool a_do_tiling, const IntVect &a_tile_size, Box &tbx)
 
__host__ __device__ int numTilesInBox (const Box &box, const bool a_do_tiling, const IntVect &a_tile_size)
 
template<typename P >
__host__ __device__ IntVect getParticleCell (P const &p, amrex::GpuArray< amrex::Real, 3 > const &plo, amrex::GpuArray< amrex::Real, 3 > const &dxi) noexcept
 Returns the cell index for a given particle using the provided lower bounds and cell sizes.
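
The returned index is floor((pos - plo)*dxi) in each coordinate direction. A sketch, assuming a Geometry object geom for the level:

const auto plo = geom.ProbLoArray();      // lower bounds of the domain
const auto dxi = geom.InvCellSizeArray(); // inverse cell sizes
IntVect iv = getParticleCell(p, plo, dxi);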
 
template<typename P >
__host__ __device__ IntVect getParticleCell (P const &p, amrex::GpuArray< amrex::Real, 3 > const &plo, amrex::GpuArray< amrex::Real, 3 > const &dxi, const Box &domain) noexcept
 Returns the cell index for a given particle using the provided lower bounds, cell sizes and global domain offset.
 
template<typename PTD >
__host__ __device__ IntVect getParticleCell (PTD const &ptd, int i, amrex::GpuArray< amrex::Real, 3 > const &plo, amrex::GpuArray< amrex::Real, 3 > const &dxi, const Box &domain) noexcept
 
template<typename P >
__host__ __device__ int getParticleGrid (P const &p, amrex::Array4< int > const &mask, amrex::GpuArray< amrex::Real, 3 > const &plo, amrex::GpuArray< amrex::Real, 3 > const &dxi, const Box &domain) noexcept
 
template<typename P >
__host__ __device__ bool enforcePeriodic (P &p, amrex::GpuArray< amrex::Real, 3 > const &plo, amrex::GpuArray< amrex::Real, 3 > const &phi, amrex::GpuArray< amrex::ParticleReal, 3 > const &rlo, amrex::GpuArray< amrex::ParticleReal, 3 > const &rhi, amrex::GpuArray< int, 3 > const &is_per) noexcept
 
template<typename PTile , typename ParFunc >
int partitionParticles (PTile &ptile, ParFunc const &is_left)
 Reorders the ParticleTile into two partitions left [0, num_left-1] and right [num_left, ptile.numParticles()-1] and returns the number of particles in the left partition.
 
template<typename PTile , typename ParFunc >
void partitionParticles (PTile &ptile, int num_left, ParFunc const &is_left)
 Reorders the ParticleTile into two partitions left [0, num_left-1] and right [num_left, ptile.numParticles()-1]. This version of the function requires the correct value of num_left to be passed as an input, which allows it to skip a reduction.
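
A minimal sketch of the first overload (assuming a recent AMReX in which ParticleTileData exposes id(i)): move invalid particles to the right partition and keep the valid ones on the left:

int num_valid = partitionParticles(ptile,
    [=] AMREX_GPU_DEVICE (const auto& ptd, int i) noexcept
    {
        return ptd.id(i).is_valid(); // left partition = valid particles
    });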
 
template<typename PTile >
void removeInvalidParticles (PTile &ptile)
 
template<typename PTile , typename PLocator , typename CellAssignor >
int partitionParticlesByDest (PTile &ptile, const PLocator &ploc, CellAssignor const &assignor, const ParticleBufferMap &pmap, const GpuArray< Real, 3 > &plo, const GpuArray< Real, 3 > &phi, const GpuArray< ParticleReal, 3 > &rlo, const GpuArray< ParticleReal, 3 > &rhi, const GpuArray< int, 3 > &is_per, int lev, int gid, int, int lev_min, int lev_max, int nGrow, bool remove_negative)
 
template<class PC1 , class PC2 >
bool SameIteratorsOK (const PC1 &pc1, const PC2 &pc2)
 
template<class PC >
void EnsureThreadSafeTiles (PC &pc)
 
template<class index_type , typename F >
void PermutationForDeposition (Gpu::DeviceVector< index_type > &perm, index_type nitems, index_type nbins, F const &f)
 
template<class index_type , class PTile >
void PermutationForDeposition (Gpu::DeviceVector< index_type > &perm, index_type nitems, const PTile &ptile, Box bx, Geometry geom, const IntVect idx_type)
 
template<typename P >
std::string getDefaultCompNameReal (const int i)
 
template<typename P >
std::string getDefaultCompNameInt (const int i)
 
template<class PC , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
void WriteBinaryParticleDataSync (PC const &pc, const std::string &dir, const std::string &name, const Vector< int > &write_real_comp, const Vector< int > &write_int_comp, const Vector< std::string > &real_comp_names, const Vector< std::string > &int_comp_names, F const &f, bool is_checkpoint)
 
template<class PC , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
void WriteBinaryParticleDataAsync (PC const &pc, const std::string &dir, const std::string &name, const Vector< int > &write_real_comp, const Vector< int > &write_int_comp, const Vector< std::string > &real_comp_names, const Vector< std::string > &int_comp_names, bool is_checkpoint)
 
Arithmetic functions
template<int dim>
__host__ __device__ RealVectND< dim > operator/ (Real s, const RealVectND< dim > &p) noexcept
 
template<int dim>
__host__ __device__ RealVectND< dim > operator+ (Real s, const RealVectND< dim > &p) noexcept
 
template<int dim>
__host__ __device__ RealVectND< dim > operator- (Real s, const RealVectND< dim > &p) noexcept
 
template<int dim>
__host__ __device__ RealVectND< dim > operator* (Real s, const RealVectND< dim > &p) noexcept
 
template<int dim>
__host__ __device__ RealVectND< dim > operator/ (const RealVectND< dim > &s, const RealVectND< dim > &p) noexcept
 
template<int dim>
__host__ __device__ RealVectND< dim > operator+ (const RealVectND< dim > &s, const RealVectND< dim > &p) noexcept
 
template<int dim>
__host__ __device__ RealVectND< dim > operator- (const RealVectND< dim > &s, const RealVectND< dim > &p) noexcept
 
template<int dim>
__host__ __device__ RealVectND< dim > operator* (const RealVectND< dim > &s, const RealVectND< dim > &p) noexcept
 
template<int dim>
__host__ __device__ RealVectND< dim > scale (const RealVectND< dim > &p, Real s) noexcept
 
template<int dim>
std::ostream & operator<< (std::ostream &os, const RealVectND< dim > &p)
 
template<int dim>
std::istream & operator>> (std::istream &is, RealVectND< dim > &p)
 
template<int d, int... dims>
__host__ __device__ constexpr RealVectND< detail::get_sum< d, dims... >()> RealVectCat (const RealVectND< d > &v, const RealVectND< dims > &...vects) noexcept
 Returns a RealVectND obtained by concatenating the input RealVectNDs. The dimension of the return value equals the sum of the dimensions of the input RealVectNDs.
 
template<int d, int... dims>
__host__ __device__ constexpr GpuTuple< RealVectND< d >, RealVectND< dims >... > RealVectSplit (const RealVectND< detail::get_sum< d, dims... >()> &v) noexcept
 Returns a tuple of RealVectND obtained by splitting the input RealVectND according to the dimensions specified by the template arguments.
 
template<int new_dim, int old_dim>
__host__ __device__ constexpr RealVectND< new_dim > RealVectShrink (const RealVectND< old_dim > &iv) noexcept
 Returns a new RealVectND of size new_dim and assigns the first new_dim values of iv to it.
 
template<int new_dim, int old_dim>
__host__ __device__ constexpr RealVectND< new_dim > RealVectExpand (const RealVectND< old_dim > &iv, Real fill_extra=0) noexcept
 Returns a new RealVectND of size new_dim and assigns all values of iv to it and fill_extra to the remaining elements.
 
template<int new_dim, int old_dim>
__host__ __device__ constexpr RealVectND< new_dim > RealVectResize (const RealVectND< old_dim > &iv, Real fill_extra=0) noexcept
 Returns a new RealVectND of size new_dim by either shrinking or expanding iv.
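
A brief sketch of concatenating and splitting (the element-wise RealVectND constructors shown here are assumptions for illustration):

RealVectND<2> a(1._rt, 2._rt);
RealVectND<1> b(3._rt);
auto c = RealVectCat(a, b);            // RealVectND<3>: (1., 2., 3.)
auto parts = RealVectSplit<2,1>(c);    // GpuTuple<RealVectND<2>, RealVectND<1>>
auto a2 = amrex::get<0>(parts);        // equal to a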
 

Variables

static constexpr Real INVALID_TIME = -1.0e200_rt
 
static constexpr int MFNEWDATA = 0
 
static constexpr int MFOLDDATA = 1
 
PCInterp pc_interp
 Construct a global object of each version.
 
NodeBilinear node_bilinear_interp
 
FaceLinear face_linear_interp
 
FaceConservativeLinear face_cons_linear_interp
 
FaceDivFree face_divfree_interp
 
CellConservativeLinear lincc_interp
 
CellConservativeLinear cell_cons_interp (false)
 
CellConservativeProtected protected_interp
 
CellConservativeQuartic quartic_interp
 
CellBilinear cell_bilinear_interp
 
CellQuadratic quadratic_interp
 
CellQuartic cell_quartic_interp
 
MFPCInterp mf_pc_interp
 
MFCellConsLinInterp mf_cell_cons_interp (false)
 
MFCellConsLinInterp mf_lincc_interp (true)
 
MFCellConsLinMinmaxLimitInterp mf_linear_slope_minmax_interp
 
MFCellBilinear mf_cell_bilinear_interp
 
MFNodeBilinear mf_node_bilinear_interp
 
constexpr char ResetDisplay [] = "\033[0m"
 
const int []
 
static const char sys_name [] = "IEEE"
 
constexpr gpuError_t gpuSuccess = cudaSuccess
 
amrex::randState_t * gpu_rand_state = nullptr
 
constexpr int SpaceDim = 3
 
template<class A >
constexpr bool IsBaseFab_v = IsBaseFab<A>::value
 
template<class A >
constexpr bool IsFabArray_v = IsFabArray<A>::value
 
template<class M >
constexpr bool IsMultiFabLike_v = IsMultiFabLike<M>::value
 
template<typename T , typename... Args>
constexpr bool IsConvertible_v = IsConvertible<T, Args...>::value
 
template<typename From , typename To >
constexpr bool IsNarrowingConversion_v = IsNarrowingConversion<From, To>::value
 
template<typename From , typename To >
constexpr bool IsNonNarrowingConversion_v = !IsNarrowingConversion<From, To>::value
 
static constexpr std::array< IntVect, 3 > E_ixtype {IntVect(0,1,1),IntVect(1,0,1),IntVect(1,1,0)}
 
static constexpr amrex::Real eb_covered_val = amrex::Real(1.e40)
 
EBCellConservativeLinear eb_lincc_interp
 
EBCellConservativeLinear eb_cell_cons_interp (false)
 
EBMFCellConsLinInterp eb_mf_cell_cons_interp (false)
 
EBMFCellConsLinInterp eb_mf_lincc_interp (true)
 

Typedef Documentation

◆ AmrParticleContainer

template<int T_NStructReal, int T_NStructInt = 0, int T_NArrayReal = 0, int T_NArrayInt = 0, template< class > class Allocator = DefaultAllocator, class CellAssignor = DefaultAssignor>
using amrex::AmrParticleContainer = typedef AmrParticleContainer_impl<Particle<T_NStructReal, T_NStructInt>, T_NArrayReal, T_NArrayInt, Allocator, CellAssignor>

◆ Array4

template<typename T >
using amrex::Array4 = typedef ArrayND<T,4, true>

◆ BndryBATransformer

using amrex::BndryBATransformer = typedef BATransformer

◆ BndryData

◆ BndryFunc3DDefault

using amrex::BndryFunc3DDefault = typedef void (*)(Real* data, const int* lo, const int* hi, const int* dom_lo, const int* dom_hi, const Real* dx, const Real* grd_lo, const Real* time, const int* bc)

◆ BndryFuncDefault

using amrex::BndryFuncDefault = typedef void (*)(Real* data, const int&,const int&,const int&, const int&,const int&,const int&, const int* dom_lo, const int* dom_hi, const Real* dx, const Real* grd_lo, const Real* time, const int* bc)

◆ BndryFuncFabDefault

using amrex::BndryFuncFabDefault = typedef std::function<void(Box const& bx, FArrayBox& data, int dcomp, int numcomp, Geometry const& geom, Real time, const Vector<BCRec>& bcr, int bcomp, int scomp)>

◆ BndryRegister

◆ Box

typedef BoxND< 3 > amrex::Box

Box is an alias for amrex::BoxND instantiated with AMREX_SPACEDIM.

◆ BoxIndexer

using amrex::BoxIndexer = typedef BoxIndexerND<3>

◆ BoxIterator

using amrex::BoxIterator = typedef BoxIteratorND<3>

◆ cMultiFab

◆ DefaultAllocator

template<class T >
using amrex::DefaultAllocator = typedef amrex::ArenaAllocator<T>

◆ DeriveFunc

using amrex::DeriveFunc = typedef void (*)(amrex::Real* data, const int&,const int&,const int&, const int&,const int&,const int&, const int* nvar, const amrex::Real* compdat, const int&,const int&,const int&, const int&,const int&,const int&, const int* ncomp, const int* lo, const int* hi, const int* domain_lo, const int* domain_hi, const amrex::Real* delta, const amrex::Real* xlo, const amrex::Real* time, const amrex::Real* dt, const int* bcrec, const int* level, const int* grid_no)

Type of extern "C" function called by DeriveRec to compute derived quantity.

Note that AMREX_ARLIM_P will be preprocessed into DIM const int&'s.

Parameters
data
AMREX_ARLIM_P(dlo)
AMREX_ARLIM_P(dhi)
nvar
compdat
AMREX_ARLIM_P(compdat_lo)
AMREX_ARLIM_P(compdat_hi)
ncomp
lo
hi
domain_lo
domain_hi
delta
xlo
time
dt
bcrec
level
grid_no

◆ DeriveFunc3D

using amrex::DeriveFunc3D = typedef void (*)(amrex::Real* data, const int* dlo, const int* dhi, const int* nvar, const amrex::Real* compdat, const int* clo, const int* chi, const int* ncomp, const int* lo, const int* hi, const int* domain_lo, const int* domain_hi, const amrex::Real* delta, const amrex::Real* xlo, const amrex::Real* time, const amrex::Real* dt, const int* bcrec, const int* level, const int* grid_no)

This is dimension agnostic. For example, dlo always has three elements.

Parameters
data
dlo
dhi
nvar
compdat
clo
chi
ncomp
lo
hi
domain_lo
domain_hi
delta
xlo
time
dt
bcrec
level
grid_no

◆ DeriveFuncFab

using amrex::DeriveFuncFab = typedef std::function<void(const amrex::Box& bx, amrex::FArrayBox& derfab, int dcomp, int ncomp, const amrex::FArrayBox& datafab, const amrex::Geometry& geomdata, amrex::Real time, const int* bcrec, int level)>

◆ DeriveFuncMF

using amrex::DeriveFuncMF = typedef std::function<void(amrex::MultiFab& der_mf, int dcomp, int ncomp, const amrex::MultiFab& data_mf, const amrex::Geometry& geomdata, amrex::Real time, const int* bcrec, int level)>

◆ Detected_t

template<template< class... > class Op, class... Args>
using amrex::Detected_t = typedef typename detail::Detector<detail::Nonesuch, void, Op, Args...>::type

◆ DetectedOr

template<class Default , template< class... > class Op, class... Args>
using amrex::DetectedOr = typedef typename detail::Detector<Default, void, Op, Args...>::type

◆ DMRef

◆ EnableIf_t

template<bool B, class T = void>
using amrex::EnableIf_t = typedef std::enable_if_t<B,T>

◆ ErrorFunc2Default

using amrex::ErrorFunc2Default = typedef void (*)(int* tag, const int&,const int&,const int&, const int&,const int&,const int&, const int* tagval, const int* clearval, amrex::Real* data, const int&,const int&,const int&, const int&,const int&,const int&, const int* lo, const int * hi, const int* nvar, const int* domain_lo, const int* domain_hi, const amrex::Real* dx, const int* level, const amrex::Real* avg)

◆ ErrorFunc3DDefault

using amrex::ErrorFunc3DDefault = typedef void (*)(int* tag, const int* tlo, const int* thi, const int* tagval, const int* clearval, amrex::Real* data, const int* data_lo, const int* data_hi, const int* lo, const int * hi, const int* nvar, const int* domain_lo, const int* domain_hi, const amrex::Real* dx, const amrex::Real* xlo, const amrex::Real* prob_lo, const amrex::Real* time, const int* level)

Dimension agnostic version that always has three elements. Note that this is only implemented for the ErrorFunc class, not ErrorFunc2.

Parameters
tag
tlo
thi
tagval
clearval
data
data_lo
data_hi
lo
hi
nvar
domain_lo
domain_hi
dx
xlo
prob_lo
time
level

◆ ErrorFuncDefault

using amrex::ErrorFuncDefault = typedef void (*)(int* tag, const int&,const int&,const int&, const int&,const int&,const int&, const int* tagval, const int* clearval, amrex::Real* data, const int&,const int&,const int&, const int&,const int&,const int&, const int* lo, const int * hi, const int* nvar, const int* domain_lo, const int* domain_hi, const amrex::Real* dx, const amrex::Real* xlo, const amrex::Real* prob_lo, const amrex::Real* time, const int* level)

Type of extern "C" function called by ErrorRec to do tagging of cells for refinement.

◆ ErrorHandler

using amrex::ErrorHandler = typedef void (*)(const char*)

◆ FabSet

using amrex::FabSet = typedef FabSetT<MultiFab>

◆ FArrayBoxFactory

◆ fBndryData

◆ fBndryRegister

◆ fFabSet

using amrex::fFabSet = typedef FabSetT<fMultiFab>

◆ fInterpBndryData

◆ fMultiFab

using amrex::fMultiFab = typedef FabArray<BaseFab<float> >

◆ gpuDeviceProp_t

using amrex::gpuDeviceProp_t = typedef cudaDeviceProp

◆ gpuError_t

using amrex::gpuError_t = typedef cudaError_t

◆ gpuStream_t

using amrex::gpuStream_t = typedef cudaStream_t

◆ IndexType

IndexType is an alias for amrex::IndexTypeND instantiated with AMREX_SPACEDIM.

◆ IntArray

using amrex::IntArray = typedef Array<int , 3>

◆ InterpBndryData

◆ IntVect

typedef IntVectND< 3 > amrex::IntVect

IntVect is an alias for amrex::IntVectND instantiated with AMREX_SPACEDIM.

◆ IsDetected

template<template< class... > class Op, class... Args>
using amrex::IsDetected = typedef typename detail::Detector<detail::Nonesuch, void, Op, Args...>::value_t

◆ IsDetectedExact

template<class Expected , template< typename... > class Op, class... Args>
using amrex::IsDetectedExact = typedef std::is_same<Expected, Detected_t<Op, Args...> >

◆ KeyValuePair

template<typename K , typename V >
using amrex::KeyValuePair = typedef ValLocPair<K,V>

◆ MaxResSteadyClock

using amrex::MaxResSteadyClock = typedef std::conditional_t<std::chrono::high_resolution_clock::is_steady, std::chrono::high_resolution_clock, std::chrono::steady_clock>

◆ MLALaplacian

◆ MLCellLinOp

◆ MLCGSolver

◆ MLMGBndry

◆ MultiFabId

using amrex::MultiFabId = typedef FabArrayId

◆ Negation

template<class B >
using amrex::Negation = typedef std::integral_constant<bool, !bool(B::value)>

◆ ParConstIter

template<int T_NStructReal, int T_NStructInt = 0, int T_NArrayReal = 0, int T_NArrayInt = 0, template< class > class Allocator = DefaultAllocator, class CellAssignor = DefaultAssignor>
using amrex::ParConstIter = typedef ParConstIter_impl<Particle<T_NStructReal, T_NStructInt>, T_NArrayReal, T_NArrayInt, Allocator, CellAssignor>

◆ ParConstIterSoA

template<int T_NArrayReal, int T_NArrayInt, template< class > class Allocator = DefaultAllocator, class CellAssignor = DefaultAssignor>
using amrex::ParConstIterSoA = typedef ParConstIter_impl<SoAParticle<T_NArrayReal, T_NArrayInt>, T_NArrayReal, T_NArrayInt, Allocator, CellAssignor>

◆ ParIterBase

template<bool is_const, int T_NStructReal, int T_NStructInt, int T_NArrayReal = 0, int T_NArrayInt = 0, template< class > class Allocator = DefaultAllocator, class CellAssignor = DefaultAssignor>
using amrex::ParIterBase = typedef ParIterBase_impl<is_const, Particle<T_NStructReal, T_NStructInt>, T_NArrayReal, T_NArrayInt, Allocator, CellAssignor>

◆ ParIterBaseSoA

template<bool is_const, int T_NArrayReal = 0, int T_NArrayInt = 0, template< class > class Allocator = DefaultAllocator, class CellAssignor = DefaultAssignor>
using amrex::ParIterBaseSoA = typedef ParIterBase_impl<is_const,SoAParticle<T_NArrayReal, T_NArrayInt>, T_NArrayReal, T_NArrayInt, Allocator, CellAssignor>

◆ ParIterSoA

template<int T_NArrayReal, int T_NArrayInt, template< class > class Allocator = DefaultAllocator, class CellAssignor = DefaultAssignor>
using amrex::ParIterSoA = typedef ParIter_impl<SoAParticle<T_NArrayReal, T_NArrayInt>, T_NArrayReal, T_NArrayInt, Allocator, CellAssignor>

◆ ParticleContainerPureSoA

template<int T_NArrayReal, int T_NArrayInt, template< class > class Allocator = DefaultAllocator, class CellAssignor = DefaultAssignor>
using amrex::ParticleContainerPureSoA = typedef ParticleContainer_impl<SoAParticle<T_NArrayReal, T_NArrayInt>, T_NArrayReal, T_NArrayInt, Allocator, CellAssignor>

◆ PTR_TO_VOID_FUNC

using amrex::PTR_TO_VOID_FUNC = typedef void (*)()

◆ randGenerator_t

using amrex::randGenerator_t = typedef curandGenerator_t

◆ randState_t

using amrex::randState_t = typedef curandState_t

◆ RealArray

using amrex::RealArray = typedef Array<Real, 3>

◆ RealVect

◆ RuntimeError

using amrex::RuntimeError = typedef std::runtime_error

◆ SmallRowVector

template<class T , int N, int StartIndex = 0>
using amrex::SmallRowVector = typedef SmallMatrix<T,1,N,Order::F,StartIndex>

◆ SmallVector

template<class T , int N, int StartIndex = 0>
using amrex::SmallVector = typedef SmallMatrix<T,N,1,Order::F,StartIndex>

◆ TheFaArenaPointer

using amrex::TheFaArenaPointer = typedef std::unique_ptr<char, TheFaArenaDeleter>

◆ TracerParIter

using amrex::TracerParIter = typedef ParIter<3>

◆ Tuple

template<class... Ts>
using amrex::Tuple = typedef std::tuple<Ts...>

◆ TypeAt

template<std::size_t I, typename T >
using amrex::TypeAt = typedef typename detail::TypeListGet<I,T>::type

Type at position I of a TypeList.

◆ TypeMultiplier

template<template< class... > class TParam, class... Types>
using amrex::TypeMultiplier = typedef TypeAt<0, decltype(detail::TApply<TParam>( (TypeList<>{} + ... + detail::SingleTypeMultiplier(std::declval<Types>())) ))>

Return the first template argument with the later arguments applied to it. Types of the form T[N] are expanded to T, T, T, T, ... (N times with N >= 1). Types of the form TypeArray<T,N> are expanded to T, T, T, T, ... (N times with N >= 0).

For example, TypeMultiplier<ReduceData, Real[4], int[2], Long> is an alias to the type ReduceData<Real, Real, Real, Real, int, int, Long>.
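
That expansion can be checked at compile time; a small sketch:

static_assert(std::is_same_v<
    TypeMultiplier<ReduceData, Real[4], int[2], Long>,
    ReduceData<Real, Real, Real, Real, int, int, Long>>);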

◆ UserFillBox

using amrex::UserFillBox = typedef void (*)(Box const& bx, Array4<Real> const& dest, int dcomp, int numcomp, GeometryData const& geom, Real time, const BCRec* bcr, int bcomp, int orig_comp)

◆ YAFluxRegister

Enumeration Type Documentation

◆ BottomSolver

enum class amrex::BottomSolver : int
strong
Enumerator
Default 
smoother 
bicgstab 
cg 
bicgcg 
cgbicg 
hypre 
petsc 

◆ ButcherTableauTypes

enum struct amrex::ButcherTableauTypes
strong
Enumerator
User 
ForwardEuler 
Trapezoid 
SSPRK3 
RK4 
NumTypes 

◆ DataLayout

enum class amrex::DataLayout
strong

A tag that defines the data layout policy used by particle tiles.

Enumerator
AoS 
SoA 

◆ Direction

enum class amrex::Direction : int
strong
Enumerator

◆ EBData_t

enum struct amrex::EBData_t : int
strong
Enumerator
levelset 
volfrac 
centroid 
bndrycent 
bndrynorm 
bndryarea 
apx 
apy 
apz 
fcx 
fcy 
fcz 
ecx 
ecy 
ecz 
cellflag 

◆ EBSupport

enum struct amrex::EBSupport : int
strong
Enumerator
none 
basic 
  • EBCellFlag
volume 
  • volume fraction
full 
  • area fraction, boundary centroids and face centroids

◆ FabType

enum class amrex::FabType : int
strong
Enumerator
covered 
regular 
singlevalued 
multivalued 
undefined 

◆ FillType

This enum and the FabCopyDescriptor class should really be nested in FabArrayCopyDescriptor (not done for portability reasons).

Enumerator
FillLocally 
FillRemotely 
Unfillable 

◆ FPExcept

enum struct amrex::FPExcept : std::uint8_t
strong
Enumerator
none 
invalid 
zero 
overflow 
all 

◆ GrowthStrategy

enum class amrex::GrowthStrategy : int
strong
Enumerator
Poisson 
Exact 
Geometric 

◆ HypreSolverID

enum struct amrex::HypreSolverID
strong
Enumerator
BoomerAMG 
SSAMG 

◆ IntegratorTypes

enum struct amrex::IntegratorTypes
strong
Enumerator
ForwardEuler 
ExplicitRungeKutta 
Sundials 

◆ InterpEM_t

Enumerator
InterpE 
InterpB 

◆ LinOpBCType

enum struct amrex::LinOpBCType : int
strong
Enumerator
interior 
Dirichlet 
Neumann 
reflect_odd 
Marshak 
SanchezPomraning 
inflow 
inhomogNeumann 
Robin 
symmetry 
Periodic 
bogus 

◆ MakeType

Enumerator
make_alias 
make_deep_copy 

◆ MLMGNormType

enum class amrex::MLMGNormType : int
strong
Enumerator
greater 
bnorm 
resnorm 

◆ Order

enum struct amrex::Order
strong
Enumerator
RowMajor 
ColumnMajor 

◆ RunOn

enum struct amrex::RunOn
strong
Enumerator
Gpu 
Cpu 
Device 
Host 

Function Documentation

◆ Abort() [1/2]

__host__ __device__ void amrex::Abort ( const char *  msg = nullptr)
inline

◆ Abort() [2/2]

void amrex::Abort ( const std::string &  msg)

Print out message to cerr and exit via abort().

◆ abs()

template<typename T >
__host__ __device__ T amrex::abs ( const GpuComplex< T > &  a_z)
inline noexcept

Return the absolute value of a complex number.

◆ Abs() [1/2]

template<class FAB , class bar = std::enable_if_t<IsBaseFab<FAB>::value>>
void amrex::Abs ( FabArray< FAB > &  fa,
int  icomp,
int  numcomp,
const IntVect nghost 
)

◆ Abs() [2/2]

template<class FAB , class bar = std::enable_if_t<IsBaseFab<FAB>::value>>
void amrex::Abs ( FabArray< FAB > &  fa,
int  icomp,
int  numcomp,
int  nghost 
)

◆ accrete() [1/2]

void amrex::accrete ( BoxDomain dest,
const BoxDomain fin,
int  sz 
)

Grow each Box in BoxDomain fin by size sz and place the result into BoxDomain dest.

◆ accrete() [2/2]

BoxList amrex::accrete ( const BoxList bl,
int  sz 
)

Returns a new BoxList in which each Box is grown by the given size.

◆ Add() [1/2]

template<class FAB , class bar = std::enable_if_t<IsBaseFab<FAB>::value>>
void amrex::Add ( FabArray< FAB > &  dst,
FabArray< FAB > const &  src,
int  srccomp,
int  dstcomp,
int  numcomp,
const IntVect nghost 
)

◆ Add() [2/2]

template<class FAB , class bar = std::enable_if_t<IsBaseFab<FAB>::value>>
void amrex::Add ( FabArray< FAB > &  dst,
FabArray< FAB > const &  src,
int  srccomp,
int  dstcomp,
int  numcomp,
int  nghost 
)

◆ adjCell()

template<int dim>
__host__ __device__ BoxND< dim > amrex::adjCell ( const BoxND< dim > &  b,
Orientation  face,
int  len = 1 
)
inline noexcept

Similar to adjCellLo and adjCellHi; operates on given face.

◆ adjCellHi()

template<int dim>
__host__ __device__ BoxND< dim > amrex::adjCellHi ( const BoxND< dim > &  b,
int  dir,
int  len = 1 
)
inline noexcept

Similar to adjCellLo but builds an adjacent BoxND on the high end.

◆ adjCellLo()

template<int dim>
__host__ __device__ BoxND< dim > amrex::adjCellLo ( const BoxND< dim > &  b,
int  dir,
int  len = 1 
)
inline noexcept

Return the cell centered BoxND of length len adjacent to b on the low end along the coordinate direction dir. The return BoxND is identical to b in the other directions. The return BoxND and b have an empty intersection. NOTE: len >= 1. NOTE: BoxND retval = adjCellLo(b,dir,len) is equivalent to the following set of operations: BoxND retval(b); retval.convert(dir,BoxND::CELL); retval.setrange(dir,retval.smallEnd(dir)-len,len);.
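
For example (a sketch):

Box b(IntVect(0), IntVect(15));   // cells (0,0,0) ... (15,15,15)
Box lo = adjCellLo(b, 0, 2);      // cells (-2,0,0) ... (-1,15,15)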

◆ aligned_size()

std::size_t amrex::aligned_size ( std::size_t  align_requirement,
std::size_t  size 
)
inline noexcept

Given a minimum required size in bytes, this returns the smallest size greater than or equal to size that is a multiple of align_requirement.
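
For example (a sketch):

std::size_t n = aligned_size(64, 100); // n == 128, the next multiple of 64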

◆ AllGatherBoxes()

void amrex::AllGatherBoxes ( Vector< Box > &  bxs,
int  n_extra_reserve 
)

◆ AlmostEqual()

bool amrex::AlmostEqual ( const RealBox box1,
const RealBox box2,
Real  eps 
)
noexcept

Check for equality of real boxes within a certain tolerance.

◆ almostEqual()

template<typename T >
__host__ __device__ std::enable_if_t< std::is_floating_point_v< T >, bool > amrex::almostEqual ( T  x,
T  y,
int  ulp = 2 
)
inline

Return whether the difference between two values is small, as measured by the given ulp (units in the last place).
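
For example (a sketch): 0.1 + 0.2 differs from 0.3 by one double ulp, which is within the default tolerance of 2:

bool ok = almostEqual(0.1 + 0.2, 0.3); // true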

◆ amrex_flux_redistribute() [1/2]

void amrex::amrex_flux_redistribute ( const amrex::Box bx,
amrex::Array4< amrex::Real > const &  dqdt,
amrex::Array4< amrex::Real const > const &  divc,
amrex::Array4< amrex::Real const > const &  wt,
amrex::Array4< amrex::Real const > const &  vfrac,
amrex::Array4< amrex::EBCellFlag const > const &  flag,
int  as_crse,
amrex::Array4< amrex::Real > const &  rr_drho_crse,
amrex::Array4< int const > const &  rr_flag_crse,
int  as_fine,
amrex::Array4< amrex::Real > const &  dm_as_fine,
amrex::Array4< int const > const &  levmsk,
const amrex::Geometry geom,
bool  use_wts_in_divnc,
int  level_mask_not_covered,
int  icomp,
int  ncomp,
amrex::Real  dt 
)

◆ amrex_flux_redistribute() [2/2]

void amrex::amrex_flux_redistribute ( const Box bx,
Array4< Real > const &  dqdt,
Array4< Real const > const &  divc,
Array4< Real const > const &  wt,
Array4< Real const > const &  vfrac,
Array4< EBCellFlag const > const &  flag,
int  as_crse,
Array4< Real > const &  rr_drho_crse,
Array4< int const > const &  rr_flag_crse,
int  as_fine,
Array4< Real > const &  dm_as_fine,
Array4< int const > const &  levmsk,
const Geometry geom,
bool  use_wts_in_divnc,
int  level_mask_not_covered,
int  icomp,
int  ncomp,
Real  dt 
)

◆ amrex_get_enum_traits() [1/2]

GrowthStrategy_EnumTraits amrex::amrex_get_enum_traits ( GrowthStrategy  )

◆ amrex_get_enum_traits() [2/2]

MLMGNormType_EnumTraits amrex::amrex_get_enum_traits ( MLMGNormType  )

◆ any()

bool amrex::any ( FPExcept  a)
inline

◆ AnyCTO()

template<class L , class... Fs, typename... CTOs>
void amrex::AnyCTO ( TypeList< CTOs... >  list_of_compile_time_options,
std::array< int, sizeof...(CTOs)> const &  runtime_options,
L &&  l,
Fs &&...  cto_functs 
)

Compile time optimization of kernels with run time options.

This is a generalized version of ParallelFor with CTOs that can support any function that takes in one lambda to launch a GPU kernel such as ParallelFor, ParallelForRNG, launch, etc. It uses fold expression to generate kernel launches for all combinations of the run time options. The kernel function can use constexpr if to discard unused code blocks for better run time performance. In the example below, the code will be expanded into 4*2=8 normal ParallelForRNGs for all combinations of the run time parameters.

int A_runtime_option = ...;
int B_runtime_option = ...;
enum A_options : int { A0, A1, A2, A3 };
enum B_options : int { B0, B1 };
AnyCTO(TypeList<CompileTimeOptions<A0,A1,A2,A3>,
CompileTimeOptions<B0,B1>>{},
{A_runtime_option, B_runtime_option},
[&](auto cto_func){
ParallelForRNG(N, cto_func);
},
[=] AMREX_GPU_DEVICE (int i, const RandomEngine& engine,
auto A_control, auto B_control)
{
...
if constexpr (A_control.value == A0) {
...
} else if constexpr (A_control.value == A1) {
...
} else if constexpr (A_control.value == A2) {
...
} else {
...
}
if constexpr (A_control.value != A3 && B_control.value == B1) {
...
}
...
}
);
constexpr int nthreads_per_block = ...;
int nblocks = ...;
AnyCTO(TypeList<CompileTimeOptions<A0,A1,A2,A3>,
CompileTimeOptions<B0,B1>>{},
{A_runtime_option, B_runtime_option},
[&](auto cto_func){
launch<nthreads_per_block>(nblocks, Gpu::gpuStream(), cto_func);
},
[=] AMREX_GPU_DEVICE (auto A_control, auto B_control){
...
}
);

The static member function cto_func.GetOptions() can be used to obtain the runtime_options passed into AnyCTO, but at compile time. This enables some advanced use cases, such as changing the number of threads per block or the dimensionality of ParallelFor at runtime. For the second example, the trailing return type -> decltype(void(intvect.size())) is necessary to disambiguate IntVectND<1> and int for the first argument of the kernel function.

int nthreads_per_block = ...;
AnyCTO(TypeList<CompileTimeOptions<128,256,512,1024>>{},
{nthreads_per_block},
[&](auto cto_func){
constexpr std::array<int, 1> ctos = cto_func.GetOptions();
constexpr int c_nthreads_per_block = ctos[0];
ParallelFor<c_nthreads_per_block>(N, cto_func);
},
[=] AMREX_GPU_DEVICE (int i, auto){
...
}
);
BoxND<6> box6D = ...;
int dims_needed = ...;
AnyCTO(TypeList<CompileTimeOptions<1,2,3,4,5,6>>{},
{dims_needed},
[&](auto cto_func){
constexpr std::array<int, 1> ctos = cto_func.GetOptions();
constexpr int c_dims_needed = ctos[0];
const auto box = BoxShrink<c_dims_needed>(box6D);
ParallelFor(box, cto_func);
},
[=] AMREX_GPU_DEVICE (auto intvect, auto) -> decltype(void(intvect.size())) {
...
}
);

Note that due to a limitation of CUDA's extended device lambda, the constexpr if block cannot be the one that captures a variable first. If nvcc complains about it, you will have to manually capture it outside constexpr if. Alternatively, the constexpr if can be replaced with a regular if. Compilers can still perform the same optimizations since the condition is known at compile time. The data type for the parameters is int.

Parameters
list_of_compile_time_options: list of all possible values of the parameters.
runtime_options: the run time parameters.
l: a callable object containing a CPU function that launches the provided GPU kernel.
cto_functs: a callable object containing the GPU kernel with optimizations.

◆ Apply() [1/2]

template<typename F , int dim>
__host__ __device__ constexpr auto amrex::Apply ( F &&  f,
IntVectND< dim > const &  iv 
)
constexpr

Invoke the callable f with the elements of the given IntVectND object as arguments.

◆ Apply() [2/2]

template<typename F , typename TP >
__host__ __device__ constexpr auto amrex::Apply ( F &&  f,
TP &&  t 
) -> typename detail::apply_result<F,detail::tuple_decay_t<TP> >::type
constexpr

◆ apply_eb_redistribution()

void amrex::apply_eb_redistribution ( const Box bx,
MultiFab div_mf,
MultiFab divc_mf,
const MultiFab weights,
MFIter mfi,
int  icomp,
int  ncomp,
const EBCellFlagFab flags_fab,
const MultiFab volfrac,
Box ,
const Geometry geom,
bool  use_wts_in_divnc 
)

◆ apply_flux_redistribution() [1/2]

void amrex::apply_flux_redistribution ( const amrex::Box bx,
amrex::Array4< amrex::Real > const &  div,
amrex::Array4< amrex::Real const > const &  divc,
amrex::Array4< amrex::Real const > const &  wt,
int  icomp,
int  ncomp,
amrex::Array4< amrex::EBCellFlag const > const &  flag_arr,
amrex::Array4< amrex::Real const > const &  vfrac,
const amrex::Geometry geom,
bool  use_wts_in_divnc 
)

◆ apply_flux_redistribution() [2/2]

void amrex::apply_flux_redistribution ( const Box bx,
Array4< Real > const &  div,
Array4< Real const > const &  divc,
Array4< Real const > const &  wt,
int  icomp,
int  ncomp,
Array4< EBCellFlag const > const &  flag_arr,
Array4< Real const > const &  vfrac,
const Geometry geom,
bool  use_wts_in_divnc 
)

◆ ApplyInitialRedistribution() [1/2]

void amrex::ApplyInitialRedistribution ( amrex::Box const &  bx,
int  ncomp,
amrex::Array4< amrex::Real > const &  U_out,
amrex::Array4< amrex::Real > const &  U_in,
amrex::Array4< amrex::EBCellFlag const > const &  flag,
amrex::Array4< amrex::Real const > const &  apx,
amrex::Array4< amrex::Real const > const &  apy,
amrex::Array4< amrex::Real const > const &  apz,
amrex::Array4< amrex::Real const > const &  vfrac,
amrex::Array4< amrex::Real const > const &  fcx,
amrex::Array4< amrex::Real const > const &  fcy,
amrex::Array4< amrex::Real const > const &  fcz,
amrex::Array4< amrex::Real const > const &  ccc,
amrex::BCRec const *  d_bcrec_ptr,
amrex::Geometry const &  geom,
std::string const &  redistribution_type,
int  srd_max_order = 2,
amrex::Real  target_volfrac = 0.5_rt 
)

◆ ApplyInitialRedistribution() [2/2]

void amrex::ApplyInitialRedistribution ( Box const &  bx,
int  ncomp,
Array4< Real > const &  U_out,
Array4< Real > const &  U_in,
Array4< EBCellFlag const > const &  flag,
amrex::Array4< amrex::Real const > const &  apx,
amrex::Array4< amrex::Real const > const &  apy,
amrex::Array4< amrex::Real const > const &  apz,
amrex::Array4< amrex::Real const > const &  vfrac,
amrex::Array4< amrex::Real const > const &  fcx,
amrex::Array4< amrex::Real const > const &  fcy,
amrex::Array4< amrex::Real const > const &  fcz,
amrex::Array4< amrex::Real const > const &  ccc,
amrex::BCRec const *  d_bcrec_ptr,
Geometry const &  lev_geom,
std::string const &  redistribution_type,
int  srd_max_order,
amrex::Real  target_volfrac 
)

◆ ApplyMLRedistribution() [1/2]

void amrex::ApplyMLRedistribution ( amrex::Box const &  bx,
int  ncomp,
amrex::Array4< amrex::Real > const &  dUdt_out,
amrex::Array4< amrex::Real > const &  dUdt_in,
amrex::Array4< amrex::Real const > const &  U_in,
amrex::Array4< amrex::Real > const &  scratch,
amrex::Array4< amrex::EBCellFlag const > const &  flag,
amrex::Array4< amrex::Real const > const &  apx,
amrex::Array4< amrex::Real const > const &  apy,
amrex::Array4< amrex::Real const > const &  apz,
amrex::Array4< amrex::Real const > const &  vfrac,
amrex::Array4< amrex::Real const > const &  fcx,
amrex::Array4< amrex::Real const > const &  fcy,
amrex::Array4< amrex::Real const > const &  fcz,
amrex::Array4< amrex::Real const > const &  ccc,
amrex::BCRec const *  d_bcrec_ptr,
amrex::Geometry const &  lev_geom,
amrex::Real  dt,
std::string const &  redistribution_type,
int  as_crse,
amrex::Array4< amrex::Real > const &  rr_drho_crse,
amrex::Array4< int const > const &  rr_flag_crse,
int  as_fine,
amrex::Array4< amrex::Real > const &  dm_as_fine,
amrex::Array4< int const > const &  levmsk,
int  level_mask_not_covered,
amrex::Real  fac_for_deltaR = 1.0_rt,
bool  use_wts_in_divnc = false,
int  icomp = 0,
int  srd_max_order = 2,
amrex::Real  target_volfrac = 0.5_rt,
amrex::Array4< amrex::Real const > const &  update_scale = {} 
)

◆ ApplyMLRedistribution() [2/2]

void amrex::ApplyMLRedistribution ( Box const &  bx,
int  ncomp,
Array4< Real > const &  dUdt_out,
Array4< Real > const &  dUdt_in,
Array4< Real const > const &  U_in,
Array4< Real > const &  scratch,
Array4< EBCellFlag const > const &  flag,
Array4< Real const > const &  apx,
Array4< Real const > const &  apy,
Array4< Real const > const &  apz,
Array4< amrex::Real const > const &  vfrac,
Array4< Real const > const &  fcx,
Array4< Real const > const &  fcy,
Array4< Real const > const &  fcz,
Array4< Real const > const &  ccc,
amrex::BCRec const *  d_bcrec_ptr,
Geometry const &  lev_geom,
Real  dt,
std::string const &  redistribution_type,
int  as_crse,
Array4< Real > const &  rr_drho_crse,
Array4< int const > const &  rr_flag_crse,
int  as_fine,
Array4< Real > const &  dm_as_fine,
Array4< int const > const &  levmsk,
int  level_mask_not_covered,
Real  fac_for_deltaR,
bool  use_wts_in_divnc,
int  icomp,
int  srd_max_order,
amrex::Real  target_volfrac,
Array4< Real const > const &  srd_update_scale 
)

◆ ApplyRedistribution() [1/2]

void amrex::ApplyRedistribution ( amrex::Box const &  bx,
int  ncomp,
amrex::Array4< amrex::Real > const &  dUdt_out,
amrex::Array4< amrex::Real > const &  dUdt_in,
amrex::Array4< amrex::Real const > const &  U_in,
amrex::Array4< amrex::Real > const &  scratch,
amrex::Array4< amrex::EBCellFlag const > const &  flag,
amrex::Array4< amrex::Real const > const &  apx,
amrex::Array4< amrex::Real const > const &  apy,
amrex::Array4< amrex::Real const > const &  apz,
amrex::Array4< amrex::Real const > const &  vfrac,
amrex::Array4< amrex::Real const > const &  fcx,
amrex::Array4< amrex::Real const > const &  fcy,
amrex::Array4< amrex::Real const > const &  fcz,
amrex::Array4< amrex::Real const > const &  ccc,
amrex::BCRec const *  d_bcrec_ptr,
amrex::Geometry const &  lev_geom,
amrex::Real  dt,
std::string const &  redistribution_type,
bool  use_wts_in_divnc = false,
int  srd_max_order = 2,
amrex::Real  target_volfrac = 0.5_rt,
amrex::Array4< amrex::Real const > const &  update_scale = {} 
)

◆ ApplyRedistribution() [2/2]

void amrex::ApplyRedistribution ( Box const &  bx,
int  ncomp,
Array4< Real > const &  dUdt_out,
Array4< Real > const &  dUdt_in,
Array4< Real const > const &  U_in,
Array4< Real > const &  scratch,
Array4< EBCellFlag const > const &  flag,
Array4< Real const > const &  apx,
Array4< Real const > const &  apy,
Array4< Real const > const &  apz,
Array4< amrex::Real const > const &  vfrac,
Array4< Real const > const &  fcx,
Array4< Real const > const &  fcy,
Array4< Real const > const &  fcz,
Array4< Real const > const &  ccc,
amrex::BCRec const *  d_bcrec_ptr,
Geometry const &  lev_geom,
Real  dt,
std::string const &  redistribution_type,
bool  use_wts_in_divnc,
int  srd_max_order,
amrex::Real  target_volfrac,
Array4< Real const > const &  srd_update_scale 
)

◆ arg()

template<typename T >
__host__ __device__ T amrex::arg ( const GpuComplex< T > &  a_z)
inlinenoexcept

Return the angle of a complex number's polar representation.

◆ ArrayND() [1/5]

template<typename T , int N>
amrex::ArrayND ( T *  ,
BoxND< N > const &   
) -> ArrayND< T, N, false >

◆ ArrayND() [2/5]

template<typename T , int N>
amrex::ArrayND ( T *  ,
BoxND< N > const &  ,
int   
) -> ArrayND< T, N+1, true >

◆ ArrayND() [3/5]

template<typename T >
amrex::ArrayND ( T *  ,
Dim3 const &  ,
Dim3 const &  ,
int   
) -> ArrayND< T, 4, true >

◆ ArrayND() [4/5]

template<typename T , int N>
amrex::ArrayND ( T *  ,
IntVectND< N > const &  ,
IntVectND< N > const &   
) -> ArrayND< T, N, false >

◆ ArrayND() [5/5]

template<typename T , int N>
amrex::ArrayND ( T *  ,
IntVectND< N > const &  ,
IntVectND< N > const &  ,
int   
) -> ArrayND< T, N+1, true >

◆ Assert() [1/3]

__host__ __device__ void amrex::Assert ( const char *  EX,
const char *  file,
int  line 
)
inline

◆ Assert() [2/3]

__host__ __device__ void amrex::Assert ( const char *  EX,
const char *  file,
int  line,
const char *  msg 
)
inline

◆ Assert() [3/3]

void amrex::Assert ( const char *  EX,
const char *  file,
int  line,
const std::string &  msg 
)
inline

◆ Assert_host()

void amrex::Assert_host ( const char *  EX,
const char *  file,
int  line,
const char *  msg,
std::size_t  msg_size = 0 
)

Prints assertion failed messages to cerr and exits via abort(). Intended for use by the BL_ASSERT() macro in <AMReX_BLassert.H>.

◆ average_cellcenter_to_face() [1/2]

void amrex::average_cellcenter_to_face ( const Array< MultiFab *, 3 > &  fc,
const MultiFab cc,
const Geometry geom,
int  ncomp,
bool  use_harmonic_averaging 
)

Average cell-centered MultiFab onto face-based MultiFab with geometric weighting.

◆ average_cellcenter_to_face() [2/2]

void amrex::average_cellcenter_to_face ( const Vector< MultiFab * > &  fc,
const MultiFab cc,
const Geometry geom,
int  ncomp,
bool  use_harmonic_averaging 
)

Average cell-centered MultiFab onto face-based MultiFab with geometric weighting.

◆ average_down() [1/4]

template<typename FAB >
void amrex::average_down ( const FabArray< FAB > &  S_fine,
FabArray< FAB > &  S_crse,
int  scomp,
int  ncomp,
const IntVect ratio 
)

Average fine MultiFab onto crse MultiFab without volume weighting. This routine DOES NOT assume that the crse BoxArray is a coarsened version of the fine BoxArray. Works for both cell-centered and nodal MultiFabs.

◆ average_down() [2/4]

template<typename FAB >
void amrex::average_down ( const FabArray< FAB > &  S_fine,
FabArray< FAB > &  S_crse,
int  scomp,
int  ncomp,
int  rr 
)

◆ average_down() [3/4]

void amrex::average_down ( const MultiFab S_fine,
MultiFab S_crse,
const Geometry fgeom,
const Geometry cgeom,
int  scomp,
int  ncomp,
const IntVect ratio 
)

Volume weighted average of fine MultiFab onto coarse MultiFab.

Both MultiFabs are assumed to be cell-centered. This routine DOES NOT assume that the crse BoxArray is a coarsened version of the fine BoxArray.
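
A typical call might look like the following sketch, where S_fine, S_crse, fgeom and cgeom are assumptions for illustration:

average_down(S_fine, S_crse, fgeom, cgeom,
             0, S_fine.nComp(), IntVect(2)); // refinement ratio 2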

◆ average_down() [4/4]

void amrex::average_down ( const MultiFab S_fine,
MultiFab S_crse,
const Geometry fgeom,
const Geometry cgeom,
int  scomp,
int  ncomp,
int  rr 
)

◆ average_down_edges() [1/3]

void amrex::average_down_edges ( const Array< const MultiFab *, 3 > &  fine,
const Array< MultiFab *, 3 > &  crse,
const IntVect ratio,
int  ngcrse 
)

◆ average_down_edges() [2/3]

void amrex::average_down_edges ( const MultiFab fine,
MultiFab crse,
const IntVect ratio,
int  ngcrse = 0 
)

This version does average down for one direction. It uses the IndexType of MultiFabs to determine the direction. It is expected that one direction is cell-centered and the rest are nodal.

◆ average_down_edges() [3/3]

void amrex::average_down_edges ( const Vector< const MultiFab * > &  fine,
const Vector< MultiFab * > &  crse,
const IntVect ratio,
int  ngcrse 
)

Average fine edge-based MultiFab onto crse edge-based MultiFab.

Average fine edge-based MultiFab onto crse edge-based MultiFab. This routine does NOT assume that the crse BoxArray is a coarsened version of the fine BoxArray.

◆ average_down_faces() [1/7]

template<typename MF , std::enable_if_t< IsFabArray< MF >::value, int > = 0>
void amrex::average_down_faces ( const Array< const MF *, 3 > &  fine,
const Array< MF *, 3 > &  crse,
const IntVect ratio,
const Geometry crse_geom 
)

◆ average_down_faces() [2/7]

template<typename MF , std::enable_if_t< IsFabArray< MF >::value, int > = 0>
void amrex::average_down_faces ( const Array< const MF *, 3 > &  fine,
const Array< MF *, 3 > &  crse,
const IntVect ratio,
int  ngcrse = 0 
)

Average fine face-based FabArray onto crse face-based FabArray.

◆ average_down_faces() [3/7]

template<typename MF , std::enable_if_t< IsFabArray< MF >::value, int > = 0>
void amrex::average_down_faces ( const Array< const MF *, 3 > &  fine,
const Array< MF *, 3 > &  crse,
int  ratio,
int  ngcrse = 0 
)

Average fine face-based FabArray onto crse face-based FabArray.

◆ average_down_faces() [4/7]

template<typename FAB >
void amrex::average_down_faces ( const FabArray< FAB > &  fine,
FabArray< FAB > &  crse,
const IntVect ratio,
const Geometry crse_geom 
)

◆ average_down_faces() [5/7]

template<typename FAB >
void amrex::average_down_faces ( const FabArray< FAB > &  fine,
FabArray< FAB > &  crse,
const IntVect ratio,
int  ngcrse = 0 
)

This version does average down for one face direction.

It uses the IndexType of MultiFabs to determine the direction. It is expected that one direction is nodal and the rest are cell-centered.

◆ average_down_faces() [6/7]

template<typename MF , std::enable_if_t< IsFabArray< MF >::value, int > = 0>
void amrex::average_down_faces ( const Vector< const MF * > &  fine,
const Vector< MF * > &  crse,
const IntVect ratio,
int  ngcrse = 0 
)

Average fine face-based FabArray onto crse face-based FabArray.

◆ average_down_faces() [7/7]

template<typename MF , std::enable_if_t< IsFabArray< MF >::value, int > = 0>
void amrex::average_down_faces ( const Vector< const MF * > &  fine,
const Vector< MF * > &  crse,
int  ratio,
int  ngcrse = 0 
)

Average fine face-based FabArray onto crse face-based FabArray.

◆ average_down_nodal()

template<typename FAB >
void amrex::average_down_nodal ( const FabArray< FAB > &  fine,
FabArray< FAB > &  crse,
const IntVect ratio,
int  ngcrse,
bool  mfiter_is_definitely_safe 
)

Average fine node-based MultiFab onto crse node-centered MultiFab.

Average fine node-based MultiFab onto crse node-based MultiFab. This routine assumes that the crse BoxArray is a coarsened version of the fine BoxArray.

◆ average_edge_to_cellcenter() [1/2]

void amrex::average_edge_to_cellcenter ( MultiFab cc,
int  dcomp,
const Vector< const MultiFab * > &  edge,
int  ngrow = 0 
)

Average edge-based MultiFab onto cell-centered MultiFab.

This fills in ngrow ghost cells in the cell-centered MultiFab. Both cell-centered and edge-centered MultiFabs need to have ngrow ghost values.

◆ average_edge_to_cellcenter() [2/2]

void amrex::average_edge_to_cellcenter ( MultiFab cc,
int  dcomp,
const Vector< const MultiFab * > &  edge,
IntVect const &  ng_vect 
)

◆ average_face_to_cellcenter() [1/6]

template<typename CMF , typename FMF , std::enable_if_t< IsFabArray_v< CMF > &&IsFabArray_v< FMF >, int > = 0>
void amrex::average_face_to_cellcenter ( CMF &  cc,
int  dcomp,
const Array< const FMF *, 3 > &  fc,
int  ngrow = 0 
)

Average face-based FabArray onto cell-centered FabArray.

◆ average_face_to_cellcenter() [2/6]

template<typename CMF , typename FMF , std::enable_if_t< IsFabArray_v< CMF > &&IsFabArray_v< FMF >, int > = 0>
void amrex::average_face_to_cellcenter ( CMF &  cc,
int  dcomp,
const Array< const FMF *, 3 > &  fc,
IntVect const &  ng_vect 
)

◆ average_face_to_cellcenter() [3/6]

void amrex::average_face_to_cellcenter ( MultiFab cc,
const Array< const MultiFab *, 3 > &  fc,
const Geometry geom 
)

Average face-based MultiFab onto cell-centered MultiFab with geometric weighting.

◆ average_face_to_cellcenter() [4/6]

void amrex::average_face_to_cellcenter ( MultiFab cc,
const Vector< const MultiFab * > &  fc,
const Geometry geom 
)

Average face-based MultiFab onto cell-centered MultiFab with geometric weighting.

◆ average_face_to_cellcenter() [5/6]

void amrex::average_face_to_cellcenter ( MultiFab cc,
int  dcomp,
const Vector< const MultiFab * > &  fc,
int  ngrow = 0 
)

Average face-based MultiFab onto cell-centered MultiFab.

This fills in ngrow ghost cells in the cell-centered MultiFab. Both cell-centered and face-centered MultiFabs need to have ngrow ghost values.

◆ average_face_to_cellcenter() [6/6]

void amrex::average_face_to_cellcenter ( MultiFab cc,
int  dcomp,
const Vector< const MultiFab * > &  fc,
IntVect const &  ng_vect 
)

◆ average_node_to_cellcenter() [1/2]

void amrex::average_node_to_cellcenter ( MultiFab cc,
int  dcomp,
const MultiFab nd,
int  scomp,
int  ncomp,
int  ngrow 
)

Average nodal-based MultiFab onto cell-centered MultiFab.

◆ average_node_to_cellcenter() [2/2]

void amrex::average_node_to_cellcenter ( MultiFab cc,
int  dcomp,
const MultiFab nd,
int  scomp,
int  ncomp,
IntVect const &  ng_vect 
)

◆ Axpy()

template<typename T , typename Allocator >
void amrex::Axpy ( AlgVector< T, Allocator > &  y,
a,
AlgVector< T, Allocator > const &  x 
)

y = ax + y. For GPU builds, this function is asynchronous with respect to the host.

◆ BaseFab_Finalize()

void amrex::BaseFab_Finalize ( )

◆ BaseFab_Initialize()

void amrex::BaseFab_Initialize ( )

◆ BASISREALV()

template<int dim = 3>
__host__ __device__ RealVectND< dim > amrex::BASISREALV ( int  dir)
inline noexcept

Returns a basis vector in the given coordinate direction.
In 2-D:
BASISREALV(0) == (1.,0.); BASISREALV(1) == (0.,1.).
In 3-D:
BASISREALV(0) == (1.,0.,0.); BASISREALV(1) == (0.,1.,0.); BASISREALV(2) == (0.,0.,1.).
Note that the coordinate directions are based at zero.

◆ BASISV()

template<int dim = 3>
__host__ __device__ IntVectND< dim > amrex::BASISV ( int  dir)
inline noexcept

Returns a basis vector in the given coordinate direction; e.g., IntVectND<3> BASISV<3>(1) == (0,1,0). Note that the coordinate directions are zero based.

◆ bdryHi()

template<int dim>
__host__ __device__ BoxND< dim > amrex::bdryHi ( const BoxND< dim > &  b,
int  dir,
int  len = 1 
)
inline noexcept

Return the edge-centered BoxND (in direction dir) defining the high side of BoxND b.

◆ bdryLo()

template<int dim>
__host__ __device__ BoxND< dim > amrex::bdryLo ( const BoxND< dim > &  b,
int  dir,
int  len = 1 
)
inlinenoexcept

Return the edge-centered BoxND (in direction dir) defining the low side of BoxND b.

◆ bdryNode()

template<int dim>
__host__ __device__ BoxND< dim > amrex::bdryNode ( const BoxND< dim > &  b,
Orientation  face,
int  len = 1 
)
inlinenoexcept

Similar to bdryLo and bdryHi except that it operates on the given face of box b.

◆ begin()

template<int dim, std::enable_if_t<(1<=dim &&dim<=3), int > = 0>
__host__ __device__ Dim3 amrex::begin ( BoxND< dim > const &  box)
inlinenoexcept

◆ begin_iv()

template<int dim>
__host__ __device__ IntVectND< dim > amrex::begin_iv ( BoxND< dim > const &  box)
inlinenoexcept

◆ bisect() [1/2]

template<typename T , typename I , std::enable_if_t< std::is_integral_v< I >, int > = 0>
__host__ __device__ I amrex::bisect ( T const *  d,
I  lo,
I  hi,
T const &  v 
)
inline

Find the index of the interval containing a value in a sorted array.

Find index i in the range [lo,hi) such that d[i] <= v < d[i+1]. It is assumed that the input data are sorted and d[lo] <= v < d[hi]. Note that this is different from std::lower_bound, which searches for the first element that is not less than v.

Template Parameters
T: value type.
I: integral index type.
Parameters
d: pointer to a sorted array of values.
lo: inclusive lower bound of the search range.
hi: exclusive upper bound of the search range.
v: value to be located.
Returns
an index i such that d[i] <= v < d[i+1].
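
For example, a minimal sketch with hypothetical data:

double d[] = {0.0, 1.0, 2.0, 4.0, 8.0};
int i = amrex::bisect(d, 0, 4, 3.0); // i == 2, since d[2] <= 3.0 < d[3]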

◆ bisect() [2/2]

template<class T , class F , std::enable_if_t< std::is_floating_point_v< T >, int > FOO = 0>
__host__ __device__ T amrex::bisect ( T  lo,
T  hi,
F  f,
T  tol = 1e-12,
int  max_iter = 100 
)
inline

Find a root of a scalar function on a bracketing interval using bisection.

It's a runtime error if the root finding fails.

Template Parameters
T: floating-point type
F: callable type of the scalar function
Parameters
lo: lower bound
hi: upper bound
f: scalar function
tol: absolute tolerance. Iteration stops when hi-lo < tol or when almostEqual(lo,hi) returns true.
max_iter: maximum number of bisection iterations allowed.
Returns
an approximate root found using bisection.
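
For example, a minimal sketch that brackets sqrt(2) on [1,2]:

double r = amrex::bisect(1.0, 2.0, [] (double x) { return x*x - 2.0; });
// r is approximately 1.4142135623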

◆ boxArray() [1/2]

template<class MF , std::size_t N, std::enable_if_t< IsMultiFabLike_v< MF > &&(N > 0), int > = 0>
BoxArray const & amrex::boxArray ( Array< MF, N > const &  mf)

◆ boxArray() [2/2]

BoxArray const & amrex::boxArray ( FabArrayBase const &  fa)

◆ BoxCat()

template<int d, int... dims>
__host__ __device__ constexpr BoxND< detail::get_sum< d, dims... >()> amrex::BoxCat ( const BoxND< d > &  bx,
const BoxND< dims > &...  boxes 
)
inlineconstexprnoexcept

Return a BoxND obtained by concatenating the input BoxNDs. The dimension of the return value equals the sum of the dimensions of the input BoxNDs.

◆ boxComplement()

BoxArray amrex::boxComplement ( const Box b1in,
const Box b2 
)

Make a BoxArray from the complement of b2 in b1in.

◆ boxDiff() [1/2]

void amrex::boxDiff ( BoxList bl_diff,
const Box b1in,
const Box b2 
)

◆ boxDiff() [2/2]

BoxList amrex::boxDiff ( const Box b1in,
const Box b2 
)

Returns a BoxList defining the complement of b2 in b1in.
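
For example, a minimal sketch with hypothetical boxes:

amrex::Box b1(amrex::IntVect(0), amrex::IntVect(7));
amrex::Box b2(amrex::IntVect(2), amrex::IntVect(5));
amrex::BoxList bl = amrex::boxDiff(b1, b2); // Boxes covering the part of b1 outside b2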

◆ BoxExpand()

template<int new_dim, int old_dim>
__host__ __device__ constexpr BoxND< new_dim > amrex::BoxExpand ( const BoxND< old_dim > &  bx)
inlineconstexprnoexcept

Return a new BoxND of dimension new_dim, assigning all values of the input BoxND to it and (small=0, big=0, typ=CELL) to the remaining elements.

◆ BoxResize()

template<int new_dim, int old_dim>
__host__ __device__ constexpr BoxND< new_dim > amrex::BoxResize ( const BoxND< old_dim > &  bx)
inlineconstexprnoexcept

Return a new BoxND of dimension new_dim by either shrinking or expanding the input BoxND.

◆ BoxShrink()

template<int new_dim, int old_dim>
__host__ __device__ constexpr BoxND< new_dim > amrex::BoxShrink ( const BoxND< old_dim > &  bx)
inlineconstexprnoexcept

Return a new BoxND of dimension new_dim, assigning the first new_dim dimensions of the input BoxND to it.

◆ BoxSplit()

template<int d, int... dims>
__host__ __device__ constexpr GpuTuple< BoxND< d >, BoxND< dims >... > amrex::BoxSplit ( const BoxND< detail::get_sum< d, dims... >()> &  bx)
inlineconstexprnoexcept

Return a tuple of BoxNDs obtained by splitting the input BoxND according to the dimensions specified by the template arguments.

◆ BroadcastArray()

template<class T >
void amrex::BroadcastArray ( Vector< T > &  aT,
int  myLocalId,
int  rootId,
const MPI_Comm localComm 
)

◆ BroadcastBool()

void amrex::BroadcastBool ( bool &  bBool,
int  myLocalId,
int  rootId,
const MPI_Comm localComm 
)

◆ BroadcastString()

void amrex::BroadcastString ( std::string &  bStr,
int  myLocalId,
int  rootId,
const MPI_Comm localComm 
)

◆ BroadcastStringArray()

void amrex::BroadcastStringArray ( Vector< std::string > &  bSA,
int  myLocalId,
int  rootId,
const MPI_Comm localComm 
)

◆ bytesOf() [1/2]

template<typename Key , typename T , class Compare >
amrex::Long amrex::bytesOf ( const std::map< Key, T, Compare > &  m)

◆ bytesOf() [2/2]

template<typename T >
amrex::Long amrex::bytesOf ( const std::vector< T > &  v)

◆ callNoinline()

template<typename F , typename... T>
__host__ __device__ auto amrex::callNoinline ( F const &  f,
T &&...  arg 
) -> decltype(std::declval<F>()(std::declval<T>()...))

Call given function without inline.

This works on lambdas, functors, and normal functions, but it does not work with overloaded functions like std::sin. If needed, one can wrap a function like std::sin inside a lambda. It also does not work with normal functions for SYCL; there, too, the function has to be wrapped inside a lambda.
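
For example, a minimal sketch wrapping std::sin in a lambda as described above:

double s = amrex::callNoinline([] (double x) { return std::sin(x); }, 1.0);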

◆ CartesianProduct()

template<typename... Ls>
constexpr auto amrex::CartesianProduct ( Ls...  )
constexpr

Cartesian Product of TypeLists.

For example,

CartesianProduct(TypeList<std::integral_constant<int,0>,
std::integral_constant<int,1>>{},
TypeList<std::integral_constant<int,2>,
std::integral_constant<int,3>>{});

returns TypeList of TypeList of integral_constants {{0,2},{1,2},{0,3},{1,3}}.

◆ cast() [1/2]

template<class Tto , class Tfrom >
__host__ __device__ void amrex::cast ( BaseFab< Tto > &  tofab,
BaseFab< Tfrom > const &  fromfab,
Box const &  bx,
SrcComp  scomp,
DestComp  dcomp,
NumComps  ncomp 
)
noexcept

◆ cast() [2/2]

template<typename T , typename U >
T amrex::cast ( U const &  mf_in)

example: auto mf = amrex::cast<MultiFab>(imf);

◆ Clamp()

template<typename T >
__host__ __device__ constexpr const T & amrex::Clamp ( const T &  v,
const T &  lo,
const T &  hi 
)
inlineconstexpr

Return the reference to lo if v < lo; return the reference to hi if hi < v; otherwise return the reference to v. This function was added to AMReX before the switch to C++17; std::clamp can now be used directly instead.

◆ clz()

template<class T , std::enable_if_t< std::is_same_v< std::decay_t< T >, std::uint8_t >||std::is_same_v< std::decay_t< T >, std::uint16_t >||std::is_same_v< std::decay_t< T >, std::uint32_t >||std::is_same_v< std::decay_t< T >, std::uint64_t >, int > = 0>
__host__ __device__ int amrex::clz ( T  x)
inlinenoexcept

Return the number of leading zeros of the given integer.
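
For example, a minimal sketch:

int n = amrex::clz(std::uint32_t(1)); // n == 31 for a 32-bit value with only the lowest bit set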

◆ coarsen() [1/11]

void amrex::coarsen ( BoxDomain dest,
const BoxDomain fin,
int  ratio 
)

Coarsen all Boxes in the domain by the refinement ratio. The result is placed into a new BoxDomain.

◆ coarsen() [2/11]

BoxArray amrex::coarsen ( const BoxArray ba,
const IntVect ratio 
)

◆ coarsen() [3/11]

BoxArray amrex::coarsen ( const BoxArray ba,
int  ratio 
)

◆ coarsen() [4/11]

BoxList amrex::coarsen ( const BoxList bl,
int  ratio 
)

Returns a new BoxList in which each Box is coarsened by the given ratio.

◆ coarsen() [5/11]

template<int dim>
__host__ __device__ IntVectND< dim > amrex::coarsen ( const IntVectND< dim > &  p,
int  s 
)
inlinenoexcept

Returns an IntVectND that is the component-wise integer projection of p by s.

◆ coarsen() [6/11]

template<int dim>
__host__ __device__ IntVectND< dim > amrex::coarsen ( const IntVectND< dim > &  p1,
const IntVectND< dim > &  p2 
)
inlinenoexcept

Returns an IntVectND which is the component-wise integer projection of IntVectND p1 by IntVectND p2.

◆ coarsen() [7/11]

template<int dim, std::enable_if_t<(1<=dim &&dim<=3), int > = 0>
__host__ __device__ Dim3 amrex::coarsen ( Dim3 const &  fine,
IntVectND< dim > const &  ratio 
)
inlinenoexcept

◆ coarsen() [8/11]

Geometry amrex::coarsen ( Geometry const &  fine,
int  rr 
)
inline

◆ coarsen() [9/11]

Geometry amrex::coarsen ( Geometry const &  fine,
IntVect const &  rr 
)
inline

◆ coarsen() [10/11]

template<int ratio>
__host__ __device__ int amrex::coarsen ( int  i)
inlinenoexcept

◆ coarsen() [11/11]

__host__ __device__ int amrex::coarsen ( int  i,
int  ratio 
)
inlinenoexcept
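
Note that coarsening rounds toward minus infinity; a minimal sketch:

int a = amrex::coarsen( 3, 2); // a == 1
int b = amrex::coarsen(-1, 2); // b == -1, not 0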

◆ command_argument_count()

int amrex::command_argument_count ( )

◆ communicateParticlesFinish()

void amrex::communicateParticlesFinish ( const ParticleCopyPlan plan)

◆ communicateParticlesStart()

template<class PC , class SndBuffer , class RcvBuffer , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
void amrex::communicateParticlesStart ( const PC &  pc,
ParticleCopyPlan plan,
const SndBuffer &  snd_buffer,
RcvBuffer &  rcv_buffer 
)

◆ complementIn() [1/3]

BoxArray amrex::complementIn ( const Box b,
const BoxArray ba 
)

Make a BoxArray from the complement of BoxArray ba in Box b.

◆ complementIn() [2/3]

BoxDomain amrex::complementIn ( const Box b,
const BoxDomain bl 
)

Returns the complement of BoxDomain bl in Box b.

◆ complementIn() [3/3]

BoxList amrex::complementIn ( const Box b,
const BoxList bl 
)

Returns a BoxList defining the complement of BoxList bl in Box b.

◆ computeDivergence()

void amrex::computeDivergence ( MultiFab divu,
const Array< MultiFab const *, 3 > &  umac,
const Geometry geom 
)

Computes the divergence of the face-based data stored in the umac MultiFabs.
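
A minimal usage sketch, assuming divu, umac (an Array<MultiFab,3> of face-centered data), and geom already exist:

amrex::computeDivergence(divu, amrex::GetArrOfConstPtrs(umac), geom);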

◆ computeGradient()

void amrex::computeGradient ( MultiFab grad,
const Array< MultiFab const *, 3 > &  umac,
const Geometry geom 
)

Computes the gradient of the face-based data stored in the umac MultiFabs.

◆ computeNeighborProcs()

Vector< int > amrex::computeNeighborProcs ( const ParGDBBase a_gdb,
int  ngrow 
)

◆ computeRefFac()

IntVect amrex::computeRefFac ( const ParGDBBase a_gdb,
int  src_lev,
int  lev 
)

◆ computeResidual()

template<typename T , template< typename > class AllocM, typename AllocV >
void amrex::computeResidual ( AlgVector< T, AllocV > &  res,
SpMatrix< T, AllocM > const &  A,
AlgVector< T, AllocV > const &  x,
AlgVector< T, AllocV > const &  b 
)

res = b - A*x

◆ Concatenate()

std::string amrex::Concatenate ( const std::string &  root,
int  num,
int  mindigits 
)

Returns rootNNNN where NNNN == num, zero-padded to at least mindigits digits.
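
For example, a minimal sketch:

std::string name = amrex::Concatenate("plt", 42, 5); // name == "plt00042"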

◆ constexpr_for()

template<auto I, auto N, class F >
__host__ __device__ constexpr void amrex::constexpr_for ( F const &  f)
inlineconstexpr

◆ convert() [1/4]

BoxArray amrex::convert ( const BoxArray ba,
const IntVect typ 
)

◆ convert() [2/4]

BoxArray amrex::convert ( const BoxArray ba,
IndexType  typ 
)

◆ convert() [3/4]

template<int dim>
__host__ __device__ BoxND< dim > amrex::convert ( const BoxND< dim > &  b,
const IndexTypeND< dim > &  typ 
)
inlinenoexcept

◆ convert() [4/4]

template<int dim>
__host__ __device__ BoxND< dim > amrex::convert ( const BoxND< dim > &  b,
const IntVectND< dim > &  typ 
)
inlinenoexcept

Return a BoxND with different type.
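
For example, a minimal sketch converting a cell-centered Box to an all-nodal Box, assuming b is an existing Box:

amrex::Box nodal = amrex::convert(b, amrex::IntVect::TheNodeVector());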

◆ convexify()

Vector< MultiFab > amrex::convexify ( Vector< MultiFab const * > const &  mf,
Vector< IntVect > const &  refinement_ratio 
)

Convexify AMR data.

This function "convexifies" the AMR data by removing cells that are covered by fine levels from coarse level MultiFabs. This could be useful for visualization. The returned MultiFabs on coarse levels have different BoxArrays from the original BoxArrays. For the finest level, the data is simply copied to the returned object. The returned MultiFabs have no ghost cells. For nodal data, the nodes on the coarse/fine interface exist on both levels.

◆ Copy() [1/2]

template<class DFAB , class SFAB , std::enable_if_t< std::conjunction_v< IsBaseFab< DFAB >, IsBaseFab< SFAB >, std::is_convertible< typename SFAB::value_type, typename DFAB::value_type > >, int > BAR = 0>
void amrex::Copy ( FabArray< DFAB > &  dst,
FabArray< SFAB > const &  src,
int  srccomp,
int  dstcomp,
int  numcomp,
const IntVect nghost 
)

◆ Copy() [2/2]

template<class DFAB , class SFAB , std::enable_if_t< std::conjunction_v< IsBaseFab< DFAB >, IsBaseFab< SFAB >, std::is_convertible< typename SFAB::value_type, typename DFAB::value_type > >, int > BAR = 0>
void amrex::Copy ( FabArray< DFAB > &  dst,
FabArray< SFAB > const &  src,
int  srccomp,
int  dstcomp,
int  numcomp,
int  nghost 
)

◆ copyParticle() [1/2]

template<typename T_ParticleType , int NAR, int NAI>
__host__ __device__ void amrex::copyParticle ( const ParticleTileData< T_ParticleType, NAR, NAI > &  dst,
const ConstParticleTileData< T_ParticleType, NAR, NAI > &  src,
int  src_i,
int  dst_i 
)
inlinenoexcept

A general single particle copying routine that can run on the GPU.

Template Parameters
NSR: number of extra reals in the particle struct
NSI: number of extra ints in the particle struct
NAR: number of reals in the struct-of-arrays
NAI: number of ints in the struct-of-arrays
Parameters
dst: the destination tile
src: the source tile
src_i: the index in the source to read from
dst_i: the index in the destination to write to

◆ copyParticle() [2/2]

template<typename T_ParticleType , int NAR, int NAI>
__host__ __device__ void amrex::copyParticle ( const ParticleTileData< T_ParticleType, NAR, NAI > &  dst,
const ParticleTileData< T_ParticleType, NAR, NAI > &  src,
int  src_i,
int  dst_i 
)
inlinenoexcept

A general single particle copying routine that can run on the GPU.

Template Parameters
NSR: number of extra reals in the particle struct
NSI: number of extra ints in the particle struct
NAR: number of reals in the struct-of-arrays
NAI: number of ints in the struct-of-arrays
Parameters
dst: the destination tile
src: the source tile
src_i: the index in the source to read from
dst_i: the index in the destination to write to

◆ copyParticles() [1/2]

template<typename DstTile , typename SrcTile >
void amrex::copyParticles ( DstTile &  dst,
const SrcTile &  src 
)
noexcept

Copy particles from src to dst. This version copies all the particles, writing them to the beginning of dst.

Template Parameters
DstTile: the dst particle tile type
SrcTile: the src particle tile type
Parameters
dst: the destination tile
src: the source tile

◆ copyParticles() [2/2]

template<typename DstTile , typename SrcTile , typename Index , typename N , std::enable_if_t< std::is_integral_v< Index >, int > foo = 0>
void amrex::copyParticles ( DstTile &  dst,
const SrcTile &  src,
Index  src_start,
Index  dst_start,
N  n
)
noexcept

Copy particles from src to dst. This version copies n particles starting at index src_start, writing the result starting at dst_start.

Template Parameters
DstTile: the dst particle tile type
SrcTile: the src particle tile type
Index: the index type, e.g. unsigned int
N: the size type, e.g. Long
Parameters
dst: the destination tile
src: the source tile
src_start: the offset at which to start reading particles from src
dst_start: the offset at which to start writing particles to dst
n: the number of particles to write

◆ CountSnds()

Long amrex::CountSnds ( const std::map< int, Vector< char > > &  not_ours,
Vector< Long > &  Snds 
)

◆ CreateDirectoryFailed()

void amrex::CreateDirectoryFailed ( const std::string &  dir)

Output a message and abort when the directory could not be created.

◆ CreateWriteHDF5Attr()

static int amrex::CreateWriteHDF5Attr ( hid_t  loc,
const char *  name,
hsize_t  n,
void *  data,
hid_t  dtype 
)
static

◆ CreateWriteHDF5AttrDouble()

static int amrex::CreateWriteHDF5AttrDouble ( hid_t  loc,
const char *  name,
hsize_t  n,
const double *  data 
)
static

◆ CreateWriteHDF5AttrInt()

static int amrex::CreateWriteHDF5AttrInt ( hid_t  loc,
const char *  name,
hsize_t  n,
const int *  data
)
static

◆ CreateWriteHDF5AttrString() [1/2]

static int amrex::CreateWriteHDF5AttrString ( hid_t  loc,
const char *  name,
const char *  str 
)
static

◆ CreateWriteHDF5AttrString() [2/2]

static int amrex::CreateWriteHDF5AttrString ( hid_t  loc,
const char *  name,
const char *  str 
)
static

◆ cross_product()

__host__ __device__ XDim3 amrex::cross_product ( XDim3 const &  a,
XDim3 const &  b 
)
inline

◆ CRRBetweenLevels()

int amrex::CRRBetweenLevels ( int  fromlevel,
int  tolevel,
const Vector< int > &  refratios 
)

◆ DeallocateRandomSeedDevArray()

void amrex::DeallocateRandomSeedDevArray ( )

◆ decompose()

BoxArray amrex::decompose ( Box const &  domain,
int  nboxes,
Array< bool, 3 > const &  decomp = { true, true, true },
bool  no_overlap = false 
)

Decompose domain box into BoxArray.

The returned BoxArray has nboxes Boxes, unless the domain is too small. We aim to decompose the domain into subdomains that are as cubic as possible, even if this results in Boxes with odd numbers of cells. Thus, this function is generally not suited for applications with multiple AMR levels or for multigrid solvers.

Parameters
domain: Domain Box
nboxes: the target number of Boxes
decomp: controls whether domain decomposition should be done in that direction.
no_overlap: optional argument specifying whether nodal boxes can overlap
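
A minimal sketch splitting a hypothetical 128^3 domain into 8 Boxes:

amrex::Box domain(amrex::IntVect(0), amrex::IntVect(127));
amrex::BoxArray ba = amrex::decompose(domain, 8);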

◆ DefaultGeometry()

const Geometry & amrex::DefaultGeometry ( )
inline

◆ demangle()

std::string amrex::demangle ( const char *  name)
inline

Demangle C++ name.

Demangle C++ name if possible. For example

std::cout << amrex::demangle(typeid(box).name());

Demangling turns "N5amrex3BoxE" into "amrex::Box".

◆ diagShift()

template<int dim>
__host__ __device__ constexpr IntVectND< dim > amrex::diagShift ( const IntVectND< dim > &  p,
int  s 
)
inlineconstexprnoexcept

Returns IntVectND obtained by adding s to each of the components of this IntVectND.

◆ disableFPExcept()

FPExcept amrex::disableFPExcept ( FPExcept  excepts)

Disable FP exceptions. Linux Only.

This function disables given exception traps and keeps the status of the others. The example below disables FPE invalid and divide-by-zero, and later restores the previous settings.

auto prev_excepts = disableFPExcept(FPExcept::invalid | FPExcept::zero);
// ....
setFPExcept(prev_excepts); // restore previous settings

◆ DistributionMap() [1/2]

template<class MF , std::size_t N, std::enable_if_t< IsMultiFabLike_v< MF > &&(N > 0), int > = 0>
DistributionMapping const & amrex::DistributionMap ( Array< MF, N > const &  mf)

◆ DistributionMap() [2/2]

DistributionMapping const & amrex::DistributionMap ( FabArrayBase const &  fa)

◆ Divide() [1/2]

template<class FAB , class bar = std::enable_if_t<IsBaseFab<FAB>::value>>
void amrex::Divide ( FabArray< FAB > &  dst,
FabArray< FAB > const &  src,
int  srccomp,
int  dstcomp,
int  numcomp,
const IntVect nghost 
)

◆ Divide() [2/2]

template<class FAB , class bar = std::enable_if_t<IsBaseFab<FAB>::value>>
void amrex::Divide ( FabArray< FAB > &  dst,
FabArray< FAB > const &  src,
int  srccomp,
int  dstcomp,
int  numcomp,
int  nghost 
)

◆ doHandShake()

Long amrex::doHandShake ( const std::map< int, Vector< char > > &  not_ours,
Vector< Long > &  Snds,
Vector< Long > &  Rcvs 
)

◆ doHandShakeLocal()

Long amrex::doHandShakeLocal ( const std::map< int, Vector< char > > &  not_ours,
const Vector< int > &  neighbor_procs,
Vector< Long > &  Snds,
Vector< Long > &  Rcvs 
)

◆ Dot() [1/5]

template<typename T , typename Allocator >
T amrex::Dot ( AlgVector< T, Allocator > const &  x,
AlgVector< T, Allocator > const &  y,
bool  local = false 
)

Return dot product of two vectors. By default, this returns the global result over all MPI ranks. If local is true, it returns the result over the locally stored entries only.

◆ Dot() [2/5]

template<typename FAB , std::enable_if_t< IsBaseFab< FAB >::value, int > FOO = 0>
FAB::value_type amrex::Dot ( FabArray< FAB > const &  x,
int  xcomp,
FabArray< FAB > const &  y,
int  ycomp,
int  ncomp,
IntVect const &  nghost,
bool  local = false 
)

Compute dot products of two FabArrays.

Parameters
x: first FabArray
xcomp: starting component of x
y: second FabArray
ycomp: starting component of y
ncomp: number of components
nghost: number of ghost cells
local: If true, MPI communication is skipped.
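
A minimal usage sketch, assuming x and y are existing single-component MultiFabs on the same BoxArray:

amrex::Real r = amrex::Dot(x, 0, y, 0, 1, amrex::IntVect(0));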

◆ Dot() [3/5]

template<typename FAB , std::enable_if_t< IsBaseFab< FAB >::value, int > FOO = 0>
FAB::value_type amrex::Dot ( FabArray< FAB > const &  x,
int  xcomp,
int  ncomp,
IntVect const &  nghost,
bool  local = false 
)

Compute dot product of FabArray with itself.

Parameters
x: FabArray
xcomp: starting component of x
ncomp: number of components
nghost: number of ghost cells
local: If true, MPI communication is skipped.

◆ Dot() [4/5]

template<typename IFAB , typename FAB , std::enable_if_t< IsBaseFab< FAB >::value &&IsBaseFab< IFAB >::value, int > FOO = 0>
FAB::value_type amrex::Dot ( FabArray< IFAB > const &  mask,
FabArray< FAB > const &  x,
int  xcomp,
FabArray< FAB > const &  y,
int  ycomp,
int  ncomp,
IntVect const &  nghost,
bool  local = false 
)

Compute dot product of two FabArrays in region that mask is true.

Parameters
mask: mask
x: first FabArray
xcomp: starting component of x
y: second FabArray
ycomp: starting component of y
ncomp: number of components
nghost: number of ghost cells
local: If true, MPI communication is skipped.

◆ Dot() [5/5]

template<typename IFAB , typename FAB , std::enable_if_t< IsBaseFab< FAB >::value &&IsBaseFab< IFAB >::value, int > FOO = 0>
FAB::value_type amrex::Dot ( FabArray< IFAB > const &  mask,
FabArray< FAB > const &  x,
int  xcomp,
int  ncomp,
IntVect const &  nghost,
bool  local = false 
)

Compute dot product of FabArray with itself in region that mask is true.

Parameters
mask: mask
x: FabArray
xcomp: starting component of x
ncomp: number of components
nghost: number of ghost cells
local: If true, MPI communication is skipped.

◆ dot_product()

__host__ __device__ Real amrex::dot_product ( XDim3 const &  a,
XDim3 const &  b 
)
inline

◆ dtoh_memcpy() [1/2]

template<class FAB , class foo = std::enable_if_t<IsBaseFab<FAB>::value>>
void amrex::dtoh_memcpy ( FabArray< FAB > &  dst,
FabArray< FAB > const &  src 
)

◆ dtoh_memcpy() [2/2]

template<class FAB , class foo = std::enable_if_t<IsBaseFab<FAB>::value>>
void amrex::dtoh_memcpy ( FabArray< FAB > &  dst,
FabArray< FAB > const &  src,
int  scomp,
int  dcomp,
int  ncomp 
)

◆ duplicateCSR()

template<typename C , typename T , template< typename > class AD, template< typename > class AS, std::enable_if_t< std::is_same_v< C, Gpu::HostToDevice >||std::is_same_v< C, Gpu::DeviceToHost >||std::is_same_v< C, Gpu::DeviceToDevice >, int > = 0>
void amrex::duplicateCSR ( C  c,
CSR< T, AD > &  dst,
CSR< T, AS > const &  src 
)

◆ EB_average_down() [1/3]

void amrex::EB_average_down ( const MultiFab S_fine,
MultiFab S_crse,
const MultiFab vol_fine,
const MultiFab vfrac_fine,
int  scomp,
int  ncomp,
const IntVect ratio 
)

◆ EB_average_down() [2/3]

void amrex::EB_average_down ( const MultiFab S_fine,
MultiFab S_crse,
int  scomp,
int  ncomp,
const IntVect ratio 
)

◆ EB_average_down() [3/3]

void amrex::EB_average_down ( const MultiFab S_fine,
MultiFab S_crse,
int  scomp,
int  ncomp,
int  ratio 
)

◆ EB_average_down_boundaries() [1/2]

void amrex::EB_average_down_boundaries ( const MultiFab fine,
MultiFab crse,
const IntVect ratio,
int  ngcrse 
)

◆ EB_average_down_boundaries() [2/2]

void amrex::EB_average_down_boundaries ( const MultiFab fine,
MultiFab crse,
int  ratio,
int  ngcrse 
)

◆ EB_average_down_faces() [1/3]

void amrex::EB_average_down_faces ( const Array< const MultiFab *, 3 > &  fine,
const Array< MultiFab *, 3 > &  crse,
const IntVect ratio,
const Geometry crse_geom 
)

◆ EB_average_down_faces() [2/3]

void amrex::EB_average_down_faces ( const Array< const MultiFab *, 3 > &  fine,
const Array< MultiFab *, 3 > &  crse,
const IntVect ratio,
int  ngcrse 
)

◆ EB_average_down_faces() [3/3]

void amrex::EB_average_down_faces ( const Array< const MultiFab *, 3 > &  fine,
const Array< MultiFab *, 3 > &  crse,
int  ratio,
int  ngcrse 
)

◆ EB_average_face_to_cellcenter()

void amrex::EB_average_face_to_cellcenter ( MultiFab ccmf,
int  dcomp,
const Array< MultiFab const *, 3 > &  fmf 
)

◆ EB_computeDivergence() [1/2]

void amrex::EB_computeDivergence ( MultiFab divu,
const Array< MultiFab const *, 3 > &  umac,
const Geometry geom,
bool  already_on_centroids 
)

◆ EB_computeDivergence() [2/2]

void amrex::EB_computeDivergence ( MultiFab divu,
const Array< MultiFab const *, 3 > &  umac,
const Geometry geom,
bool  already_on_centroids,
const MultiFab vel_eb 
)

◆ EB_interp_CC_to_Centroid()

void amrex::EB_interp_CC_to_Centroid ( MultiFab cent,
const MultiFab cc,
int  scomp,
int  dcomp,
int  ncomp,
const Geometry geom 
)

◆ EB_interp_CC_to_FaceCentroid()

void amrex::EB_interp_CC_to_FaceCentroid ( const MultiFab cc,
MultiFab fc_x,
MultiFab fc_y,
MultiFab fc_z,
int  scomp,
int  dcomp,
int  ncomp,
const Geometry a_geom,
const Vector< BCRec > &  a_bcs 
)

◆ EB_interp_CellCentroid_to_FaceCentroid() [1/3]

void amrex::EB_interp_CellCentroid_to_FaceCentroid ( const MultiFab phi_centroid,
const Array< MultiFab *, 3 > &  phi_faces,
int  scomp,
int  dcomp,
int  nc,
const Geometry geom,
const amrex::Vector< amrex::BCRec > &  a_bcs 
)

◆ EB_interp_CellCentroid_to_FaceCentroid() [2/3]

void amrex::EB_interp_CellCentroid_to_FaceCentroid ( const MultiFab phi_centroid,
const Vector< MultiFab * > &  phi_faces,
int  scomp,
int  dcomp,
int  nc,
const Geometry geom,
const amrex::Vector< amrex::BCRec > &  a_bcs 
)

◆ EB_interp_CellCentroid_to_FaceCentroid() [3/3]

void amrex::EB_interp_CellCentroid_to_FaceCentroid ( const MultiFab phi_centroid,
MultiFab phi_xface,
MultiFab phi_yface,
MultiFab phi_zface,
int  scomp,
int  dcomp,
int  ncomp,
const Geometry a_geom,
const Vector< BCRec > &  a_bcs 
)

◆ EB_set_covered() [1/4]

void amrex::EB_set_covered ( MultiFab mf,
int  icomp,
int  ncomp,
const Vector< Real > &  vals 
)

◆ EB_set_covered() [2/4]

void amrex::EB_set_covered ( MultiFab mf,
int  icomp,
int  ncomp,
int  ngrow,
const Vector< Real > &  a_vals 
)

◆ EB_set_covered() [3/4]

void amrex::EB_set_covered ( MultiFab mf,
int  icomp,
int  ncomp,
int  ngrow,
Real  val 
)

◆ EB_set_covered() [4/4]

void amrex::EB_set_covered ( MultiFab mf,
Real  val 
)

◆ EB_set_covered_faces() [1/2]

void amrex::EB_set_covered_faces ( const Array< MultiFab *, 3 > &  umac,
const int  scomp,
const int  ncomp,
const Vector< Real > &  a_vals 
)

◆ EB_set_covered_faces() [2/2]

void amrex::EB_set_covered_faces ( const Array< MultiFab *, 3 > &  umac,
Real  val 
)

◆ EB_WriteMultiLevelPlotfile()

void amrex::EB_WriteMultiLevelPlotfile ( const std::string &  plotfilename,
int  nlevels,
const Vector< const MultiFab * > &  mf,
const Vector< std::string > &  varnames,
const Vector< Geometry > &  geom,
Real  time,
const Vector< int > &  level_steps,
const Vector< IntVect > &  ref_ratio,
const std::string &  versionName,
const std::string &  levelPrefix,
const std::string &  mfPrefix,
const Vector< std::string > &  extra_dirs 
)

◆ EB_WriteSingleLevelPlotfile()

void amrex::EB_WriteSingleLevelPlotfile ( const std::string &  plotfilename,
const MultiFab mf,
const Vector< std::string > &  varnames,
const Geometry geom,
Real  time,
int  level_step,
const std::string &  versionName,
const std::string &  levelPrefix,
const std::string &  mfPrefix,
const Vector< std::string > &  extra_dirs 
)

◆ elemwiseMax() [1/3]

template<int dim>
__host__ __device__ constexpr IntVectND< dim > amrex::elemwiseMax ( const IntVectND< dim > &  p1,
const IntVectND< dim > &  p2 
)
inlineconstexprnoexcept

◆ elemwiseMax() [2/3]

template<class T , class ... Ts>
__host__ __device__ constexpr T amrex::elemwiseMax ( const T &  a,
const T &  b,
const Ts &...  c 
)
inlineconstexprnoexcept

Return the element-wise maximum of the given values for types like XDim3.

◆ elemwiseMax() [3/3]

template<class T >
__host__ __device__ constexpr T amrex::elemwiseMax ( T const &  a,
T const &  b 
)
inlineconstexprnoexcept

Return the element-wise maximum of the given values for types like XDim3.

◆ elemwiseMin() [1/3]

template<int dim>
__host__ __device__ constexpr IntVectND< dim > amrex::elemwiseMin ( const IntVectND< dim > &  p1,
const IntVectND< dim > &  p2 
)
inlineconstexprnoexcept

◆ elemwiseMin() [2/3]

template<class T , class ... Ts>
__host__ __device__ constexpr T amrex::elemwiseMin ( const T &  a,
const T &  b,
const Ts &...  c 
)
inlineconstexprnoexcept

Return the element-wise minimum of the given values for types like XDim3.

◆ elemwiseMin() [3/3]

template<class T >
__host__ __device__ constexpr T amrex::elemwiseMin ( T const &  a,
T const &  b 
)
inlineconstexprnoexcept

Return the element-wise minimum of the given values for types like XDim3.

◆ enableFPExcept()

FPExcept amrex::enableFPExcept ( FPExcept  excepts)

Enable FP exceptions. Linux Only.

This function enables given exception traps and keeps the status of the others. The example below enables all FPE traps, and later restores the previous settings.

auto prev_excepts = enableFPExcept(FPExcept::all);
// ....
setFPExcept(prev_excepts); // restore previous settings

◆ enclosedCells() [1/3]

template<int dim>
__host__ __device__ BoxND< dim > amrex::enclosedCells ( const BoxND< dim > &  b)
inlinenoexcept

Return a BoxND with CELL based coordinates in all directions that is enclosed by b.

◆ enclosedCells() [2/3]

template<int dim>
__host__ __device__ BoxND< dim > amrex::enclosedCells ( const BoxND< dim > &  b,
Direction  d 
)
inlinenoexcept

◆ enclosedCells() [3/3]

template<int dim>
__host__ __device__ BoxND< dim > amrex::enclosedCells ( const BoxND< dim > &  b,
int  dir 
)
inlinenoexcept

Return a BoxND with CELL based coordinates in direction dir that is enclosed by b. NOTE: equivalent to b.convert(dir,CELL) NOTE: error if b.type(dir) == CELL.

◆ end()

template<int dim, std::enable_if_t<(1<=dim &&dim<=3), int > = 0>
__host__ __device__ Dim3 amrex::end ( BoxND< dim > const &  box)
inlinenoexcept

◆ end_iv()

template<int dim>
__host__ __device__ IntVectND< dim > amrex::end_iv ( BoxND< dim > const &  box)
inlinenoexcept

◆ enforcePeriodic()

template<typename P >
__host__ __device__ bool amrex::enforcePeriodic ( P &  p,
amrex::GpuArray< amrex::Real, 3 > const &  plo,
amrex::GpuArray< amrex::Real, 3 > const &  phi,
amrex::GpuArray< amrex::ParticleReal, 3 > const &  rlo,
amrex::GpuArray< amrex::ParticleReal, 3 > const &  rhi,
amrex::GpuArray< int, 3 > const &  is_per 
)
inlinenoexcept

◆ EnsureThreadSafeTiles()

template<class PC >
void amrex::EnsureThreadSafeTiles ( PC &  pc)

◆ Error() [1/2]

__host__ __device__ void amrex::Error ( const char *  msg = nullptr)
inline

◆ Error() [2/2]

void amrex::Error ( const std::string &  msg)

Print out message to cerr and exit via amrex::Abort().

◆ Error_host()

void amrex::Error_host ( const char *  type,
const char *  msg 
)

◆ ErrorStream()

std::ostream & amrex::ErrorStream ( )

◆ ExecOnFinalize()

void amrex::ExecOnFinalize ( std::function< void()>  f)

We maintain a stack of functions that need to be called in Finalize(). The functions are called in LIFO order. The idea here is to allow classes to clean up any "global" state that they maintain when we're exiting from AMReX.
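
For example, a minimal sketch:

amrex::ExecOnFinalize([] () {
    // release any global state owned by this module
});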

◆ ExecOnInitialize()

void amrex::ExecOnInitialize ( std::function< void()>  f)

◆ exp()

template<typename T >
__host__ __device__ GpuComplex< T > amrex::exp ( const GpuComplex< T > &  a_z)
inlinenoexcept

Complex exponential function.
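
For example, a minimal sketch evaluating exp(i*pi):

amrex::GpuComplex<amrex::Real> z(0.0, 3.14159265358979);
auto w = amrex::exp(z); // approximately (-1, 0)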

◆ fab_filcc()

void amrex::fab_filcc ( Box const &  bx,
Array4< Real > const &  qn,
int  ncomp,
Box const &  domain,
Real const *  ,
Real const *  ,
BCRec const *  bcn 
)

◆ fab_filfc()

void amrex::fab_filfc ( Box const &  bx,
Array4< Real > const &  qn,
int  ncomp,
Box const &  domain,
Real const *  ,
Real const *  ,
BCRec const *  bcn 
)

◆ fab_filnd()

void amrex::fab_filnd ( Box const &  bx,
Array4< Real > const &  qn,
int  ncomp,
Box const &  domain,
Real const *  ,
Real const *  ,
BCRec const *  bcn 
)

◆ FileExists()

bool amrex::FileExists ( const std::string &  filename)

Check if a file already exists. Return true if the filename is an existing file, directory, or link. For links, this operates on the link and not what the link points to.

◆ FileOpenFailed()

void amrex::FileOpenFailed ( const std::string &  file)

Output a message and abort when the file could not be opened.

◆ fill()

template<typename STRUCT , typename F , std::enable_if_t<(sizeof(STRUCT)<=36 *8) &&std::is_trivially_copyable_v< STRUCT > &&std::is_trivially_destructible_v< STRUCT >, int > FOO = 0>
void amrex::fill ( BaseFab< STRUCT > &  aos_fab,
F const &  f 
)

◆ fill_snan()

template<RunOn run_on, typename T , std::enable_if_t< std::is_same_v< T, double >||std::is_same_v< T, float >, int > FOO = 0>
void amrex::fill_snan ( T *  p,
std::size_t  nelems 
)

◆ FillBoundary() [1/2]

template<class MF >
std::enable_if_t< IsFabArray< MF >::value > amrex::FillBoundary ( Vector< MF * > const &  mf,
const Periodicity a_period = Periodicity::NonPeriodic() 
)

◆ FillBoundary() [2/2]

template<class MF >
std::enable_if_t< IsFabArray< MF >::value > amrex::FillBoundary ( Vector< MF * > const &  mf,
Vector< int > const &  scomp,
Vector< int > const &  ncomp,
Vector< IntVect > const &  nghost,
Vector< Periodicity > const &  period,
Vector< int > const &  cross = {} 
)

◆ FillDomainBoundary()

void amrex::FillDomainBoundary ( MultiFab phi,
const Geometry geom,
const Vector< BCRec > &  bc 
)

◆ FillImpFunc()

template<typename G >
void amrex::FillImpFunc ( MultiFab mf,
G const &  gshop,
Geometry const &  geom 
)

Fill MultiFab with implicit function.

This function fills the nodal MultiFab with the implicit function in GeometryShop. Note that an implicit function is not necessarily a signed distance function.

Template Parameters
G: the GeometryShop type
Parameters
mf: a nodal MultiFab.
gshop: a GeometryShop object.
geom: a Geometry object.

◆ FillNull() [1/2]

template<class T >
void amrex::FillNull ( Vector< std::unique_ptr< T > > &  a)

◆ FillNull() [2/2]

template<class T >
void amrex::FillNull ( Vector< T * > &  a)

◆ FillPatchInterp() [1/3]

template<typename MF , typename Interp >
std::enable_if_t< IsFabArray< MF >::value &&!std::is_same_v< Interp, MFInterpolater > > amrex::FillPatchInterp ( MF &  mf_fine_patch,
int  fcomp,
MF const &  mf_crse_patch,
int  ccomp,
int  ncomp,
IntVect const &  ng,
const Geometry cgeom,
const Geometry fgeom,
Box const &  dest_domain,
const IntVect ratio,
Interp *  mapper,
const Vector< BCRec > &  bcs,
int  bcscomp 
)

◆ FillPatchInterp() [2/3]

template<typename MF >
std::enable_if_t< IsFabArray< MF >::value > amrex::FillPatchInterp ( MF &  mf_fine_patch,
int  fcomp,
MF const &  mf_crse_patch,
int  ccomp,
int  ncomp,
IntVect const &  ng,
const Geometry cgeom,
const Geometry fgeom,
Box const &  dest_domain,
const IntVect ratio,
InterpBase mapper,
const Vector< BCRec > &  bcs,
int  bcscomp 
)

◆ FillPatchInterp() [3/3]

void amrex::FillPatchInterp ( MultiFab mf_fine_patch,
int  fcomp,
MultiFab const &  mf_crse_patch,
int  ccomp,
int  ncomp,
IntVect const &  ng,
const Geometry cgeom,
const Geometry fgeom,
Box const &  dest_domain,
const IntVect ratio,
MFInterpolater mapper,
const Vector< BCRec > &  bcs,
int  bcscomp 
)

◆ FillPatchNLevels()

template<typename MF , typename BC , typename Interp >
std::enable_if_t< IsFabArray< MF >::value > amrex::FillPatchNLevels ( MF &  mf,
int  level,
const IntVect nghost,
Real  time,
const Vector< Vector< MF * > > &  smf,
const Vector< Vector< Real > > &  st,
int  scomp,
int  dcomp,
int  ncomp,
const Vector< Geometry > &  geom,
Vector< BC > &  bc,
int  bccomp,
const Vector< IntVect > &  ratio,
Interp *  mapper,
const Vector< BCRec > &  bcr,
int  bcrcomp 
)

FillPatch with data from AMR levels.

First, we try to fill the destination MultiFab/FabArray with this level's data if it's available. For the unfilled region, we try to fill with the coarse level below if it's available. Even coarser levels will be used if necessary until all regions are filled. This function is more expensive than FillPatchTwoLevels, so if one knows that FillPatchTwoLevels can do the job because the grids are properly nested, that function should be preferred.

Template Parameters
MF: the MultiFab/FabArray type
BC: functor for filling physical boundaries
Interp: spatial interpolater
Parameters
mf: destination MF
level: AMR level associated with mf
nghost: number of ghost cells of mf needed to be filled
time: time associated with mf
smf: source MFs. The outer Vector is for AMR levels, whereas the inner Vector is for data at various times. It is not an error if the level for the destination MF is finer than the data in smf (i.e., level >= smf.size()).
st: times associated with smf
scomp: starting component of the source MFs
dcomp: starting component of the destination MF
ncomp: number of components
geom: Geometry objects for AMR levels. The size must be big enough such that level < geom.size().
bc: functors for physical boundaries on AMR levels. The size must be big enough such that level < bc.size().
bccomp: starting component for bc
ratio: refinement ratio for AMR levels. The size must be big enough such that level < bc.size()-1.
mapper: spatial interpolater
bcr: boundary types for each component. We need this because some interpolaters need it.
bcrcomp: starting component for bcr

◆ FillPatchSingleLevel() [1/3]

template<typename MF , typename BC >
std::enable_if_t< IsFabArray< MF >::value > amrex::FillPatchSingleLevel ( MF &  mf,
IntVect const &  nghost,
Real  time,
const Vector< MF * > &  smf,
const Vector< Real > &  stime,
int  scomp,
int  dcomp,
int  ncomp,
const Geometry geom,
BC &  physbcf,
int  bcfcomp 
)

FillPatch with data from the current level.

The destination MultiFab/FabArray is on the same AMR level as the source MultiFab/FabArray. Usually this can only be used on AMR level 0, because filling fine level MF usually requires coarse level data. If needed, interpolation in time is performed.

Template Parameters
MF: the MultiFab/FabArray type
BC: functor for filling physical boundaries
Parameters
mf: destination MF
nghost: number of ghost cells of mf needed to be filled
time: time associated with mf
smf: source MFs
stime: times associated with smf
scomp: starting component of the source MFs
dcomp: starting component of the destination MF
ncomp: number of components
geom: Geometry for this level
physbcf: functor for physical boundaries
bcfcomp: starting component for physbcf
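
A minimal usage sketch, assuming mf and smf are existing MultiFabs on the same level and no physical boundary handling is needed:

amrex::PhysBCFunctNoOp physbc;
amrex::FillPatchSingleLevel(mf, amrex::IntVect(2), time,
                            {&smf}, {time}, 0, 0, mf.nComp(),
                            geom, physbc, 0);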

◆ FillPatchSingleLevel() [2/3]

template<typename MF >
std::enable_if_t< IsFabArray< MF >::value > amrex::FillPatchSingleLevel ( MF &  mf,
IntVect const &  nghost,
Real  time,
const Vector< MF * > &  smf,
IntVect const &  snghost,
const Vector< Real > &  stime,
int  scomp,
int  dcomp,
int  ncomp,
const Geometry geom 
)

FillPatch with data from the current level.

In this version of FillPatchSingleLevel, it's the CALLER's responsibility to make sure that smf has snghost ghost cells already filled before calling this function. The destination MultiFab/FabArray is on the same AMR level as the source MultiFab/FabArray. If needed, interpolation in time is performed.

Template Parameters
MF: the MultiFab/FabArray type
Parameters
mf: destination MF
nghost: number of ghost cells of mf needed to be filled
time: time associated with mf
smf: source MFs
snghost: number of ghost cells in smf with valid data
stime: times associated with smf
scomp: starting component of the source MFs
dcomp: starting component of the destination MF
ncomp: number of components
geom: Geometry for this level

◆ FillPatchSingleLevel() [3/3]

template<typename MF , typename BC >
std::enable_if_t< IsFabArray< MF >::value > amrex::FillPatchSingleLevel ( MF &  mf,
Real  time,
const Vector< MF * > &  smf,
const Vector< Real > &  stime,
int  scomp,
int  dcomp,
int  ncomp,
const Geometry geom,
BC &  physbcf,
int  bcfcomp 
)

FillPatch with data from the current level.

The destination MultiFab/FabArray is on the same AMR level as the source MultiFab/FabArray. Usually this can only be used on AMR level 0, because filling fine level MF usually requires coarse level data. If needed, interpolation in time is performed. All ghost cells of the destination MF are filled.

Template Parameters
MF: the MultiFab/FabArray type
BC: functor for filling physical boundaries
Parameters
mf: destination MF
time: time associated with mf
smf: source MFs
stime: times associated with smf
scomp: starting component of the source MFs
dcomp: starting component of the destination MF
ncomp: number of components
geom: Geometry for this level
physbcf: functor for physical boundaries
bcfcomp: starting component for physbcf

◆ FillPatchTwoLevels() [1/8]

template<typename MF , typename BC , typename Interp , typename PreInterpHook = NullInterpHook<typename MF::FABType::value_type>, typename PostInterpHook = NullInterpHook<typename MF::FABType::value_type>>
std::enable_if_t< IsFabArray< MF >::value > amrex::FillPatchTwoLevels ( Array< MF *, 3 > const &  mf,
IntVect const &  nghost,
Real  time,
const Vector< Array< MF *, 3 > > &  cmf,
const Vector< Real > &  ct,
const Vector< Array< MF *, 3 > > &  fmf,
const Vector< Real > &  ft,
int  scomp,
int  dcomp,
int  ncomp,
const Geometry cgeom,
const Geometry fgeom,
Array< BC, 3 > &  cbc,
const Array< int, 3 > &  cbccomp,
Array< BC, 3 > &  fbc,
const Array< int, 3 > &  fbccomp,
const IntVect ratio,
Interp *  mapper,
const Array< Vector< BCRec >, 3 > &  bcs,
const Array< int, 3 > &  bcscomp,
const PreInterpHook &  pre_interp = {},
const PostInterpHook &  post_interp = {} 
)

FillPatch for face variables with data from the current level and the level below. Sometimes we need to fillpatch all AMREX_SPACEDIM face MultiFabs together to satisfy constraints such as divergence preservation.

First, we fill the destination MultiFab/FabArray's with the current level data as much as possible. This may include interpolation in time. For the rest of the destination MFs, we fill them with the coarse level data using interpolation in space (and in time if needed).

Template Parameters
MF: the MultiFab/FabArray type
BC: functor for filling physical boundaries
Interp: spatial interpolater
PreInterpHook: pre-interpolation hook
PostInterpHook: post-interpolation hook
Parameters
mf: destination MFs on the fine level
nghost: number of ghost cells of mf needed to be filled
time: time associated with mf
cmf: source MFs on the coarse level
ct: times associated with cmf
fmf: source MFs on the fine level
ft: times associated with fmf
scomp: starting component of the source MFs
dcomp: starting component of the destination MFs
ncomp: number of components
cgeom: Geometry for the coarse level
fgeom: Geometry for the fine level
cbc: functor for physical boundaries on the coarse level
cbccomp: starting component for cbc
fbc: functor for physical boundaries on the fine level
fbccomp: starting component for fbc
ratio: refinement ratio
mapper: spatial interpolater
bcs: boundary types for each component. We need this because some interpolaters need it.
bcscomp: starting component for bcs
pre_interp: pre-interpolation hook
post_interp: post-interpolation hook

◆ FillPatchTwoLevels() [2/8]

template<typename MF , typename BC , typename Interp , typename PreInterpHook = NullInterpHook<typename MF::FABType::value_type>, typename PostInterpHook = NullInterpHook<typename MF::FABType::value_type>>
std::enable_if_t< IsFabArray< MF >::value > amrex::FillPatchTwoLevels ( Array< MF *, 3 > const &  mf,
IntVect const &  nghost,
Real  time,
const Vector< Array< MF *, 3 > > &  cmf,
const Vector< Real > &  ct,
const Vector< Array< MF *, 3 > > &  fmf,
const Vector< Real > &  ft,
int  scomp,
int  dcomp,
int  ncomp,
const Geometry cgeom,
const Geometry fgeom,
Array< BC, 3 > &  cbc,
int  cbccomp,
Array< BC, 3 > &  fbc,
int  fbccomp,
const IntVect ratio,
Interp *  mapper,
const Array< Vector< BCRec >, 3 > &  bcs,
int  bcscomp,
const PreInterpHook &  pre_interp = {},
const PostInterpHook &  post_interp = {} 
)

FillPatch for face variables with data from the current level and the level below. Sometimes we need to fillpatch all AMREX_SPACEDIM face MultiFabs together to satisfy constraints such as divergence preservation.

First, we fill the destination MultiFab/FabArray's with the current level data as much as possible. This may include interpolation in time. For the rest of the destination MFs, we fill them with the coarse level data using interpolation in space (and in time if needed).

Template Parameters
MF: the MultiFab/FabArray type
BC: functor for filling physical boundaries
Interp: spatial interpolater
PreInterpHook: pre-interpolation hook
PostInterpHook: post-interpolation hook
Parameters
mf: destination MFs on the fine level
nghost: number of ghost cells of mf needed to be filled
time: time associated with mf
cmf: source MFs on the coarse level
ct: times associated with cmf
fmf: source MFs on the fine level
ft: times associated with fmf
scomp: starting component of the source MFs
dcomp: starting component of the destination MFs
ncomp: number of components
cgeom: Geometry for the coarse level
fgeom: Geometry for the fine level
cbc: functor for physical boundaries on the coarse level
cbccomp: starting component for cbc
fbc: functor for physical boundaries on the fine level
fbccomp: starting component for fbc
ratio: refinement ratio
mapper: spatial interpolater
bcs: boundary types for each component. We need this because some interpolaters need it.
bcscomp: starting component for bcs
pre_interp: pre-interpolation hook
post_interp: post-interpolation hook

◆ FillPatchTwoLevels() [3/8]

template<typename MF , typename BC , typename Interp , typename PreInterpHook = NullInterpHook<typename MF::FABType::value_type>, typename PostInterpHook = NullInterpHook<typename MF::FABType::value_type>>
std::enable_if_t< IsFabArray< MF >::value > amrex::FillPatchTwoLevels ( Array< MF *, 3 > const &  mf,
Real  time,
const Vector< Array< MF *, 3 > > &  cmf,
const Vector< Real > &  ct,
const Vector< Array< MF *, 3 > > &  fmf,
const Vector< Real > &  ft,
int  scomp,
int  dcomp,
int  ncomp,
const Geometry cgeom,
const Geometry fgeom,
Array< BC, 3 > &  cbc,
int  cbccomp,
Array< BC, 3 > &  fbc,
int  fbccomp,
const IntVect ratio,
Interp *  mapper,
const Array< Vector< BCRec >, 3 > &  bcs,
int  bcscomp,
const PreInterpHook &  pre_interp = {},
const PostInterpHook &  post_interp = {} 
)

FillPatch for face variables with data from the current level and the level below. Sometimes we need to fillpatch all AMREX_SPACEDIM face MultiFabs together to satisfy constraints such as divergence preservation.

First, we fill the destination MultiFab/FabArray's with the current level data as much as possible. This may include interpolation in time. For the rest of the destination MFs, we fill them with the coarse level data using interpolation in space (and in time if needed). All ghost cells of the destination MFs are filled.

Template Parameters
MF: the MultiFab/FabArray type
BC: functor for filling physical boundaries
Interp: spatial interpolater
PreInterpHook: pre-interpolation hook
PostInterpHook: post-interpolation hook
Parameters
mf: destination MFs on the fine level
time: time associated with mf
cmf: source MFs on the coarse level
ct: times associated with cmf
fmf: source MFs on the fine level
ft: times associated with fmf
scomp: starting component of the source MFs
dcomp: starting component of the destination MFs
ncomp: number of components
cgeom: Geometry for the coarse level
fgeom: Geometry for the fine level
cbc: functor for physical boundaries on the coarse level
cbccomp: starting component for cbc
fbc: functor for physical boundaries on the fine level
fbccomp: starting component for fbc
ratio: refinement ratio
mapper: spatial interpolater
bcs: boundary types for each component. We need this because some interpolaters need it.
bcscomp: starting component for bcs
pre_interp: pre-interpolation hook
post_interp: post-interpolation hook

◆ FillPatchTwoLevels() [4/8]

template<typename MF , typename Interp >
std::enable_if_t< IsFabArray< MF >::value > amrex::FillPatchTwoLevels ( MF &  mf,
IntVect const &  nghost,
IntVect const &  nghost_outside_domain,
Real  time,
const Vector< MF * > &  cmf,
const Vector< Real > &  ct,
const Vector< MF * > &  fmf,
const Vector< Real > &  ft,
int  scomp,
int  dcomp,
int  ncomp,
const Geometry cgeom,
const Geometry fgeom,
const IntVect ratio,
Interp *  mapper,
const Vector< BCRec > &  bcs,
int  bcscomp 
)

FillPatch with data from the current level and the level below.

In this version of FillPatchTwoLevels, it's the CALLER's responsibility to make sure all ghost cells of the coarse MF needed for interpolation are filled already before calling this function. It's assumed that the fine level MultiFab mf's BoxArray is coarsenable by the refinement ratio. There is no support for EB.

Template Parameters
MF: the MultiFab/FabArray type
Interp: spatial interpolater
Parameters
mf: destination MF on the fine level
nghost: number of ghost cells of mf inside the domain needed to be filled
nghost_outside_domain: number of ghost cells of mf outside the domain needed to be filled
time: time associated with mf
cmf: source MFs on the coarse level
ct: times associated with cmf
fmf: source MFs on the fine level
ft: times associated with fmf
scomp: starting component of the source MFs
dcomp: starting component of the destination MF
ncomp: number of components
cgeom: Geometry for the coarse level
fgeom: Geometry for the fine level
ratio: refinement ratio
mapper: spatial interpolater
bcs: boundary types for each component.
bcscomp: starting component for bcs

◆ FillPatchTwoLevels() [5/8]

template<typename MF , typename BC , typename Interp , typename PreInterpHook , typename PostInterpHook >
std::enable_if_t< IsFabArray< MF >::value > amrex::FillPatchTwoLevels ( MF &  mf,
IntVect const &  nghost,
Real  time,
const EB2::IndexSpace index_space,
const Vector< MF * > &  cmf,
const Vector< Real > &  ct,
const Vector< MF * > &  fmf,
const Vector< Real > &  ft,
int  scomp,
int  dcomp,
int  ncomp,
const Geometry cgeom,
const Geometry fgeom,
BC &  cbc,
int  cbccomp,
BC &  fbc,
int  fbccomp,
const IntVect ratio,
Interp *  mapper,
const Vector< BCRec > &  bcs,
int  bcscomp,
const PreInterpHook &  pre_interp,
const PostInterpHook &  post_interp 
)

FillPatch with data from the current level and the level below.

First, we fill the destination MultiFab/FabArray with the current level data as much as possible. This may include interpolation in time. For the rest of the destination MF, we fill them with the coarse level data using interpolation in space (and in time if needed).

Template Parameters
MF: the MultiFab/FabArray type
BC: functor for filling physical boundaries
Interp: spatial interpolater
PreInterpHook: pre-interpolation hook
PostInterpHook: post-interpolation hook
Parameters
mf: destination MF on the fine level
nghost: number of ghost cells of mf needed to be filled
time: time associated with mf
index_space: EB IndexSpace
cmf: source MFs on the coarse level
ct: times associated with cmf
fmf: source MFs on the fine level
ft: times associated with fmf
scomp: starting component of the source MFs
dcomp: starting component of the destination MF
ncomp: number of components
cgeom: Geometry for the coarse level
fgeom: Geometry for the fine level
cbc: functor for physical boundaries on the coarse level
cbccomp: starting component for cbc
fbc: functor for physical boundaries on the fine level
fbccomp: starting component for fbc
ratio: refinement ratio
mapper: spatial interpolater
bcs: boundary types for each component. We need this because some interpolaters need it.
bcscomp: starting component for bcs
pre_interp: pre-interpolation hook
post_interp: post-interpolation hook

◆ FillPatchTwoLevels() [6/8]

template<typename MF , typename BC , typename Interp , typename PreInterpHook = NullInterpHook<typename MF::FABType::value_type>, typename PostInterpHook = NullInterpHook<typename MF::FABType::value_type>>
std::enable_if_t< IsFabArray< MF >::value > amrex::FillPatchTwoLevels ( MF &  mf,
IntVect const &  nghost,
Real  time,
const Vector< MF * > &  cmf,
const Vector< Real > &  ct,
const Vector< MF * > &  fmf,
const Vector< Real > &  ft,
int  scomp,
int  dcomp,
int  ncomp,
const Geometry cgeom,
const Geometry fgeom,
BC &  cbc,
int  cbccomp,
BC &  fbc,
int  fbccomp,
const IntVect ratio,
Interp *  mapper,
const Vector< BCRec > &  bcs,
int  bcscomp,
const PreInterpHook &  pre_interp = {},
const PostInterpHook &  post_interp = {} 
)

FillPatch with data from the current level and the level below.

First, we fill the destination MultiFab/FabArray with the current level data as much as possible. This may include interpolation in time. For the rest of the destination MF, we fill them with the coarse level data using interpolation in space (and in time if needed).

Template Parameters
MF: the MultiFab/FabArray type
BC: functor for filling physical boundaries
Interp: spatial interpolater
PreInterpHook: pre-interpolation hook
PostInterpHook: post-interpolation hook
Parameters
mf: destination MF on the fine level
nghost: number of ghost cells of mf needed to be filled
time: time associated with mf
cmf: source MFs on the coarse level
ct: times associated with cmf
fmf: source MFs on the fine level
ft: times associated with fmf
scomp: starting component of the source MFs
dcomp: starting component of the destination MF
ncomp: number of components
cgeom: Geometry for the coarse level
fgeom: Geometry for the fine level
cbc: functor for physical boundaries on the coarse level
cbccomp: starting component for cbc
fbc: functor for physical boundaries on the fine level
fbccomp: starting component for fbc
ratio: refinement ratio
mapper: spatial interpolater
bcs: boundary types for each component. We need this because some interpolaters need it.
bcscomp: starting component for bcs
pre_interp: pre-interpolation hook
post_interp: post-interpolation hook
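
A minimal usage sketch, assuming existing coarse/fine MultiFabs and Geometry objects, with no-op physical boundaries:

amrex::PhysBCFunctNoOp cbc, fbc;
amrex::Vector<amrex::BCRec> bcs(mf.nComp()); // normally filled with physical BC types
amrex::FillPatchTwoLevels(mf, amrex::IntVect(2), time,
                          {&cmf}, {time}, {&fmf}, {time},
                          0, 0, mf.nComp(), cgeom, fgeom,
                          cbc, 0, fbc, 0, amrex::IntVect(2),
                          &amrex::cell_cons_interp, bcs, 0);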

◆ FillPatchTwoLevels() [7/8]

template<typename MF , typename BC , typename Interp , typename PreInterpHook , typename PostInterpHook >
std::enable_if_t< IsFabArray< MF >::value > amrex::FillPatchTwoLevels ( MF &  mf,
Real  time,
const EB2::IndexSpace index_space,
const Vector< MF * > &  cmf,
const Vector< Real > &  ct,
const Vector< MF * > &  fmf,
const Vector< Real > &  ft,
int  scomp,
int  dcomp,
int  ncomp,
const Geometry cgeom,
const Geometry fgeom,
BC &  cbc,
int  cbccomp,
BC &  fbc,
int  fbccomp,
const IntVect ratio,
Interp *  mapper,
const Vector< BCRec > &  bcs,
int  bcscomp,
const PreInterpHook &  pre_interp,
const PostInterpHook &  post_interp 
)

FillPatch with data from the current level and the level below.

First, we fill the destination MultiFab/FabArray with the current level data as much as possible. This may include interpolation in time. For the rest of the destination MF, we fill them with the coarse level data using interpolation in space (and in time if needed). All ghost cells of the destination MF are filled.

Template Parameters
MF: the MultiFab/FabArray type
BC: functor for filling physical boundaries
Interp: spatial interpolater
PreInterpHook: pre-interpolation hook
PostInterpHook: post-interpolation hook
Parameters
mf: destination MF on the fine level
time: time associated with mf
index_space: EB IndexSpace
cmf: source MFs on the coarse level
ct: times associated with cmf
fmf: source MFs on the fine level
ft: times associated with fmf
scomp: starting component of the source MFs
dcomp: starting component of the destination MF
ncomp: number of components
cgeom: Geometry for the coarse level
fgeom: Geometry for the fine level
cbc: functor for physical boundaries on the coarse level
cbccomp: starting component for cbc
fbc: functor for physical boundaries on the fine level
fbccomp: starting component for fbc
ratio: refinement ratio
mapper: spatial interpolater
bcs: boundary types for each component. We need this because some interpolaters need it.
bcscomp: starting component for bcs
pre_interp: pre-interpolation hook
post_interp: post-interpolation hook

◆ FillPatchTwoLevels() [8/8]

template<typename MF , typename BC , typename Interp , typename PreInterpHook = NullInterpHook<typename MF::FABType::value_type>, typename PostInterpHook = NullInterpHook<typename MF::FABType::value_type>>
std::enable_if_t< IsFabArray< MF >::value > amrex::FillPatchTwoLevels ( MF &  mf,
Real  time,
const Vector< MF * > &  cmf,
const Vector< Real > &  ct,
const Vector< MF * > &  fmf,
const Vector< Real > &  ft,
int  scomp,
int  dcomp,
int  ncomp,
const Geometry cgeom,
const Geometry fgeom,
BC &  cbc,
int  cbccomp,
BC &  fbc,
int  fbccomp,
const IntVect ratio,
Interp *  mapper,
const Vector< BCRec > &  bcs,
int  bcscomp,
const PreInterpHook &  pre_interp = {},
const PostInterpHook &  post_interp = {} 
)

FillPatch with data from the current level and the level below.

First, we fill the destination MultiFab/FabArray with the current level data as much as possible. This may include interpolation in time. For the rest of the destination MF, we fill them with the coarse level data using interpolation in space (and in time if needed). All ghost cells of the destination MF are filled.

Template Parameters
MF : the MultiFab/FabArray type
BC : functor for filling physical boundaries
Interp : spatial interpolater
PreInterpHook : pre-interpolation hook
PostInterpHook : post-interpolation hook
Parameters
mf : destination MF on the fine level
time : time associated with mf
cmf : source MFs on the coarse level
ct : times associated with cmf
fmf : source MFs on the fine level
ft : times associated with fmf
scomp : starting component of the source MFs
dcomp : starting component of the destination MF
ncomp : number of components
cgeom : Geometry for the coarse level
fgeom : Geometry for the fine level
cbc : functor for physical boundaries on the coarse level
cbccomp : starting component for cbc
fbc : functor for physical boundaries on the fine level
fbccomp : starting component for fbc
ratio : refinement ratio
mapper : spatial interpolater
bcs : boundary types for each component. We need this because some interpolaters need it.
bcscomp : starting component for bcs
pre_interp : pre-interpolation hook
post_interp : post-interpolation hook
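
For orientation, a minimal usage sketch (ours, not part of the reference; the helper name fill_fine_patch and the periodic-only boundary assumption are illustrative):

#include <AMReX_FillPatchUtil.H>
#include <AMReX_PhysBCFunct.H>
#include <AMReX_Interpolater.H>

using namespace amrex;

// Fill fine-level mf at "time" from old/new coarse and fine snapshots,
// assuming purely periodic boundaries, cell-centered data, and ratio 2.
void fill_fine_patch (MultiFab& mf, Real time,
                      MultiFab& crse_old, MultiFab& crse_new,
                      MultiFab& fine_old, MultiFab& fine_new,
                      Real t_old, Real t_new,
                      Geometry const& cgeom, Geometry const& fgeom)
{
    PhysBCFunctNoOp cbc, fbc;           // no non-periodic physical boundaries
    Vector<BCRec> bcs(mf.nComp());
    for (auto& b : bcs) {
        for (int dir = 0; dir < AMREX_SPACEDIM; ++dir) {
            b.setLo(dir, BCType::int_dir);   // interior/periodic
            b.setHi(dir, BCType::int_dir);
        }
    }
    FillPatchTwoLevels(mf, time,
                       Vector<MultiFab*>{&crse_old, &crse_new},
                       Vector<Real>{t_old, t_new},
                       Vector<MultiFab*>{&fine_old, &fine_new},
                       Vector<Real>{t_old, t_new},
                       0, 0, mf.nComp(), cgeom, fgeom,
                       cbc, 0, fbc, 0,
                       IntVect(2), &cell_cons_interp, bcs, 0);
}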

◆ FillRandom()

void amrex::FillRandom ( MultiFab &  mf,
int  scomp,
int  ncomp 
)

Fill MultiFab with random numbers from uniform distribution.

The uniform distribution range is [0.0, 1.0) on CPU and SYCL; it is (0, 1] on CUDA and HIP. All cells, including ghost cells, are filled.

Parameters
mf : MultiFab
scomp : starting component
ncomp : number of components

◆ FillRandomNormal()

void amrex::FillRandomNormal ( MultiFab &  mf,
int  scomp,
int  ncomp,
Real  mean,
Real  stddev 
)

Fill MultiFab with random numbers from normal distribution.

All cells including ghost cells are filled.

Parameters
mf : MultiFab
scomp : starting component
ncomp : number of components
mean : mean of the normal distribution
stddev : standard deviation of the normal distribution
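
A short sketch (ours), filling two components of an existing MultiFab mf:

// Uniform noise in component 0; Gaussian noise (mean 0, stddev 1) in
// component 1. Ghost cells are filled as well.
amrex::FillRandom(mf, 0, 1);
amrex::FillRandomNormal(mf, 1, 1, 0.0, 1.0);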

◆ FillSignedDistance() [1/2]

void amrex::FillSignedDistance ( MultiFab &  mf,
bool  fluid_has_positive_sign = true 
)

Fill MultiFab with signed distance.

This function fills the nodal MultiFab with signed distance. Note that the distance is valid only within a few cells of the EB. The MultiFab must have been built with an EBFArrayBoxFactory.

Parameters
mf : a nodal MultiFab built with EBFArrayBoxFactory.
fluid_has_positive_sign : determines the sign of the fluid; if true, the fluid side carries the positive distance.
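
A sketch (ours), assuming an existing cell-centered BoxArray grids, DistributionMapping dmap, and EBFArrayBoxFactory ebfactory:

// Nodal MultiFab built on the EB factory; the stored distance is only
// meaningful within a few cells of the EB surface.
amrex::MultiFab dist(amrex::convert(grids, amrex::IntVect::TheNodeVector()),
                     dmap, 1, 0, amrex::MFInfo(), ebfactory);
amrex::FillSignedDistance(dist);   // fluid side is positive by default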

◆ FillSignedDistance() [2/2]

void amrex::FillSignedDistance ( MultiFab &  mf,
EB2::Level const &  ls_lev,
EBFArrayBoxFactory const &  eb_fac,
int  refratio,
bool  fluid_has_positive_sign = true 
)

Fill MultiFab with signed distance.

This function fills the nodal MultiFab with signed distance. Note that the distance is valid only within a few cells of the EB.

Parameters
mf : a nodal MultiFab.
ls_lev : an EB2::Level object with an implicit function. This is at the same level as mf.
eb_fac : an EBFArrayBoxFactory object containing EB information.
refratio : the refinement ratio of mf to eb_fac.
fluid_has_positive_sign : determines the sign of the fluid; if true, the fluid side carries the positive distance.

◆ filterAndTransformParticles() [1/6]

template<typename DstTile , typename SrcTile , typename Index , typename F , std::enable_if_t< std::is_integral_v< Index >, int > foo = 0>
Index amrex::filterAndTransformParticles ( DstTile &  dst,
const SrcTile &  src,
Index *  mask,
F &&  f 
)
noexcept

Conditionally copy particles from src to dst based on the value of mask. A transformation will also be applied to the particles on copy.

Template Parameters
DstTile : the dst particle tile type
SrcTile : the src particle tile type
Index : the index type, e.g. unsigned int
F : the transform function type
Parameters
dst : the destination tile
src : the source tile
mask : pointer to the mask - 1 means copy, 0 means don't copy
f : defines the transformation that will be applied to the particles on copy
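
A hedged sketch (ours): the functor signature below follows the convention in AMReX_ParticleTransformation.H, and pmask is a caller-built device array of 0/1 flags:

auto n = amrex::filterAndTransformParticles(dst, src, pmask,
    [=] AMREX_GPU_DEVICE (const auto& dst_data, const auto& src_data,
                          int src_i, int dst_i)
    {
        amrex::copyParticle(dst_data, src_data, src_i, dst_i);
        // ... then adjust components of dst_data at dst_i as desired
    });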

◆ filterAndTransformParticles() [2/6]

template<typename DstTile , typename SrcTile , typename Index , typename F , std::enable_if_t< std::is_integral_v< Index >, int > foo = 0>
Index amrex::filterAndTransformParticles ( DstTile &  dst,
const SrcTile &  src,
Index *  mask,
F const &  f,
Index  src_start,
Index  dst_start 
)
noexcept

Conditionally copy particles from src to dst based on the value of mask. A transformation will also be applied to the particles on copy.

Template Parameters
DstTile : the dst particle tile type
SrcTile : the src particle tile type
Index : the index type, e.g. unsigned int
F : the transform function type
Parameters
dst : the destination tile
src : the source tile
mask : pointer to the mask - 1 means copy, 0 means don't copy
f : defines the transformation that will be applied to the particles on copy
src_start : starting index of source
dst_start : starting index of destination

◆ filterAndTransformParticles() [3/6]

template<typename DstTile , typename SrcTile , typename Pred , typename F , std::enable_if_t<!std::is_pointer_v< std::decay_t< Pred > >, int > foo = 0>
int amrex::filterAndTransformParticles ( DstTile &  dst,
const SrcTile &  src,
Pred &&  p,
F &&  f 
)
noexcept

Conditionally copy particles from src to dst based on a predicate. A transformation will also be applied to the particles on copy.

Template Parameters
DstTile : the dst particle tile type
SrcTile : the src particle tile type
Pred : a function object
F : the transform function type
Parameters
dst : the destination tile
src : the source tile
p : predicate function - particles will be copied if p returns true
f : defines the transformation that will be applied to the particles on copy

◆ filterAndTransformParticles() [4/6]

template<typename DstTile , typename SrcTile , typename Pred , typename F , typename Index , std::enable_if_t<!std::is_pointer_v< std::decay_t< Pred > >, Index > nvccfoo = 0>
Index amrex::filterAndTransformParticles ( DstTile &  dst,
const SrcTile &  src,
Pred const &  p,
F &&  f,
Index  src_start,
Index  dst_start 
)
noexcept

Conditionally copy particles from src to dst based on a predicate. This version conditionally copies particles starting at index src_start, writing the result starting at dst_start.

Template Parameters
DstTile : the dst particle tile type
SrcTile : the src particle tile type
Pred : a function object
F : the transform function type
Parameters
dst : the destination tile
src : the source tile
p : predicate function - particles will be copied if p returns true
f : the function that will be applied to particles on copy
src_start : the offset at which to start reading particles from src
dst_start : the offset at which to start writing particles to dst

◆ filterAndTransformParticles() [5/6]

template<typename DstTile1 , typename DstTile2 , typename SrcTile , typename Index , typename F , std::enable_if_t< std::is_integral_v< Index >, int > foo = 0>
Index amrex::filterAndTransformParticles ( DstTile1 &  dst1,
DstTile2 &  dst2,
const SrcTile &  src,
Index *  mask,
F const &  f 
)
noexcept

Conditionally copy particles from src to dst1 and dst2 based on the value of mask. A transformation will also be applied to the particles on copy.

Template Parameters
DstTile1 : the dst1 particle tile type
DstTile2 : the dst2 particle tile type
SrcTile : the src particle tile type
Index : the index type, e.g. unsigned int
F : the transform function type
Parameters
dst1 : the first destination tile
dst2 : the second destination tile
src : the source tile
mask : pointer to the mask - 1 means copy, 0 means don't copy
f : defines the transformation that will be applied to the particles on copy

◆ filterAndTransformParticles() [6/6]

template<typename DstTile1 , typename DstTile2 , typename SrcTile , typename Pred , typename F , std::enable_if_t<!std::is_pointer_v< std::decay_t< Pred > >, int > foo = 0>
int amrex::filterAndTransformParticles ( DstTile1 &  dst1,
DstTile2 &  dst2,
const SrcTile &  src,
Pred const &  p,
F &&  f 
)
noexcept

Conditionally copy particles from src to dst1 and dst2 based on a predicate. A transformation will also be applied to the particles on copy.

Template Parameters
DstTile1 : the dst1 particle tile type
DstTile2 : the dst2 particle tile type
SrcTile : the src particle tile type
Pred : a function object
F : the transform function type
Parameters
dst1 : the first destination tile
dst2 : the second destination tile
src : the source tile
p : predicate function - particles will be copied if p returns true
f : defines the transformation that will be applied to the particles on copy

◆ filterParticles() [1/4]

template<typename DstTile , typename SrcTile , typename Index , typename N , std::enable_if_t< std::is_integral_v< Index >, int > foo = 0>
Index amrex::filterParticles ( DstTile &  dst,
const SrcTile &  src,
const Index *  mask 
)
noexcept

Conditionally copy particles from src to dst based on the value of mask.

Template Parameters
DstTile : the dst particle tile type
SrcTile : the src particle tile type
Index : the index type, e.g. unsigned int
Parameters
dst : the destination tile
src : the source tile
mask : pointer to the mask - 1 means copy, 0 means don't copy
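
A sketch (ours) using the mask form to keep every other particle of a source tile src:

int np = src.numParticles();
amrex::Gpu::DeviceVector<int> mask(np);
int* pmask = mask.data();
amrex::ParallelFor(np, [=] AMREX_GPU_DEVICE (int i) { pmask[i] = (i % 2 == 0); });
auto ncopied = amrex::filterParticles(dst, src, pmask);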

◆ filterParticles() [2/4]

template<typename DstTile , typename SrcTile , typename Index , typename N , std::enable_if_t< std::is_integral_v< Index >, int > foo = 0>
Index amrex::filterParticles ( DstTile &  dst,
const SrcTile &  src,
const Index *  mask,
Index  src_start,
Index  dst_start,
N  n 
)
noexcept

Conditionally copy particles from src to dst based on the value of mask. This version conditionally copies n particles starting at index src_start, writing the result starting at dst_start.

Template Parameters
DstTile : the dst particle tile type
SrcTile : the src particle tile type
Index : the index type, e.g. unsigned int
Parameters
dst : the destination tile
src : the source tile
mask : pointer to the mask - 1 means copy, 0 means don't copy
src_start : the offset at which to start reading particles from src
dst_start : the offset at which to start writing particles to dst
n : the number of particles to apply the operation to

◆ filterParticles() [3/4]

template<typename DstTile , typename SrcTile , typename Pred , std::enable_if_t<!std::is_pointer_v< std::decay_t< Pred > >, int > foo = 0>
int amrex::filterParticles ( DstTile &  dst,
const SrcTile &  src,
Pred &&  p 
)
noexcept

Conditionally copy particles from src to dst based on a predicate.

Template Parameters
DstTile : the dst particle tile type
SrcTile : the src particle tile type
Pred : a function object
Parameters
dst : the destination tile
src : the source tile
p : predicate function - particles will be copied if p returns true

◆ filterParticles() [4/4]

template<typename DstTile , typename SrcTile , typename Pred , typename Index , typename N , std::enable_if_t<!std::is_pointer_v< std::decay_t< Pred > >, Index > nvccfoo = 0>
Index amrex::filterParticles ( DstTile &  dst,
const SrcTile &  src,
Pred const &  p,
Index  src_start,
Index  dst_start,
N  n 
)
noexcept

Conditionally copy particles from src to dst based on a predicate. This version conditionally copies n particles starting at index src_start, writing the result starting at dst_start.

Template Parameters
DstTile : the dst particle tile type
SrcTile : the src particle tile type
Pred : a function object
Parameters
dst : the destination tile
src : the source tile
p : predicate function - particles will be copied if p returns true
src_start : the offset at which to start reading particles from src
dst_start : the offset at which to start writing particles to dst
n : the number of particles to apply the operation to

◆ Finalize() [1/2]

void amrex::Finalize ( )

◆ Finalize() [2/2]

void amrex::Finalize ( amrex::AMReX pamrex)

◆ Finalize_FFT()

void amrex::Finalize_FFT ( )
inline

If Init_FFT has been called, this should be called after all FFT work is done.

◆ Finalize_minimal()

void amrex::Finalize_minimal ( )

◆ For() [1/31]

template<int MT, typename L , int dim>
void amrex::For ( BoxND< dim > const &  box,
L &&  f 
)
noexcept
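
The For overloads in this group carry no individual descriptions. Broadly, amrex::For(box, f) executes f(i,j,k) once per cell of box (as a GPU kernel when GPU support is enabled, otherwise as ordinary nested loops), and the (box, ncomp, f) overloads add an outer component loop, f(i,j,k,n). A minimal sketch (ours; Box bx and Array4 a assumed):

amrex::For(bx, [=] AMREX_GPU_DEVICE (int i, int j, int k) noexcept
{
    a(i,j,k) += 1.0;
});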

◆ For() [2/31]

template<typename L , int dim>
void amrex::For ( BoxND< dim > const &  box,
L &&  f 
)
noexcept

◆ For() [3/31]

template<typename L , int dim>
AMREX_ATTRIBUTE_FLATTEN_FOR void amrex::For ( BoxND< dim > const &  box,
L const &  f 
)
noexcept

◆ For() [4/31]

template<int MT, typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>>
void amrex::For ( BoxND< dim > const &  box,
T  ncomp,
L &&  f 
)
noexcept

◆ For() [5/31]

template<typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>>
void amrex::For ( BoxND< dim > const &  box,
T  ncomp,
L &&  f 
)
noexcept

◆ For() [6/31]

template<typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>>
AMREX_ATTRIBUTE_FLATTEN_FOR void amrex::For ( BoxND< dim > const &  box,
T  ncomp,
L const &  f 
)
noexcept

◆ For() [7/31]

template<typename L1 , typename L2 , typename L3 , int dim>
void amrex::For ( BoxND< dim > const &  box1,
BoxND< dim > const &  box2,
BoxND< dim > const &  box3,
L1 &&  f1,
L2 &&  f2,
L3 &&  f3 
)
noexcept

◆ For() [8/31]

template<int MT, typename L1 , typename L2 , typename L3 , int dim>
void amrex::For ( BoxND< dim > const &  box1,
BoxND< dim > const &  box2,
BoxND< dim > const &  box3,
L1 &&  f1,
L2 &&  f2,
L3 &&  f3 
)
noexcept

◆ For() [9/31]

template<typename L1 , typename L2 , int dim>
void amrex::For ( BoxND< dim > const &  box1,
BoxND< dim > const &  box2,
L1 &&  f1,
L2 &&  f2 
)
noexcept

◆ For() [10/31]

template<int MT, typename L1 , typename L2 , int dim>
void amrex::For ( BoxND< dim > const &  box1,
BoxND< dim > const &  box2,
L1 &&  f1,
L2 &&  f2 
)
noexcept

◆ For() [11/31]

template<typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>>
void amrex::For ( BoxND< dim > const &  box1,
T1  ncomp1,
L1 &&  f1,
BoxND< dim > const &  box2,
T2  ncomp2,
L2 &&  f2 
)
noexcept

◆ For() [12/31]

template<int MT, typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>>
void amrex::For ( BoxND< dim > const &  box1,
T1  ncomp1,
L1 &&  f1,
BoxND< dim > const &  box2,
T2  ncomp2,
L2 &&  f2 
)
noexcept

◆ For() [13/31]

template<typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>, typename M3 = std::enable_if_t<std::is_integral_v<T3>>>
void amrex::For ( BoxND< dim > const &  box1,
T1  ncomp1,
L1 &&  f1,
BoxND< dim > const &  box2,
T2  ncomp2,
L2 &&  f2,
BoxND< dim > const &  box3,
T3  ncomp3,
L3 &&  f3 
)
noexcept

◆ For() [14/31]

template<int MT, typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>, typename M3 = std::enable_if_t<std::is_integral_v<T3>>>
void amrex::For ( BoxND< dim > const &  box1,
T1  ncomp1,
L1 &&  f1,
BoxND< dim > const &  box2,
T2  ncomp2,
L2 &&  f2,
BoxND< dim > const &  box3,
T3  ncomp3,
L3 &&  f3 
)
noexcept

◆ For() [15/31]

template<typename L , int dim>
void amrex::For ( Gpu::KernelInfo const &  info,
BoxND< dim > const &  box,
L &&  f 
)
noexcept

◆ For() [16/31]

template<int MT, typename L , int dim>
void amrex::For ( Gpu::KernelInfo const &  info,
BoxND< dim > const &  box,
L &&  f 
)
noexcept

◆ For() [17/31]

template<typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>>
void amrex::For ( Gpu::KernelInfo const &  info,
BoxND< dim > const &  box,
T  ncomp,
L &&  f 
)
noexcept

◆ For() [18/31]

template<int MT, typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>>
void amrex::For ( Gpu::KernelInfo const &  info,
BoxND< dim > const &  box,
T  ncomp,
L &&  f 
)
noexcept

◆ For() [19/31]

template<typename L1 , typename L2 , typename L3 , int dim>
void amrex::For ( Gpu::KernelInfo const &  info,
BoxND< dim > const &  box1,
BoxND< dim > const &  box2,
BoxND< dim > const &  box3,
L1 &&  f1,
L2 &&  f2,
L3 &&  f3 
)
noexcept

◆ For() [20/31]

template<int MT, typename L1 , typename L2 , typename L3 , int dim>
void amrex::For ( Gpu::KernelInfo const &  info,
BoxND< dim > const &  box1,
BoxND< dim > const &  box2,
BoxND< dim > const &  box3,
L1 &&  f1,
L2 &&  f2,
L3 &&  f3 
)
noexcept

◆ For() [21/31]

template<typename L1 , typename L2 , int dim>
void amrex::For ( Gpu::KernelInfo const &  info,
BoxND< dim > const &  box1,
BoxND< dim > const &  box2,
L1 &&  f1,
L2 &&  f2 
)
noexcept

◆ For() [22/31]

template<int MT, typename L1 , typename L2 , int dim>
void amrex::For ( Gpu::KernelInfo const &  info,
BoxND< dim > const &  box1,
BoxND< dim > const &  box2,
L1 &&  f1,
L2 &&  f2 
)
noexcept

◆ For() [23/31]

template<typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>>
void amrex::For ( Gpu::KernelInfo const &  info,
BoxND< dim > const &  box1,
T1  ncomp1,
L1 &&  f1,
BoxND< dim > const &  box2,
T2  ncomp2,
L2 &&  f2 
)
noexcept

◆ For() [24/31]

template<int MT, typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>>
void amrex::For ( Gpu::KernelInfo const &  info,
BoxND< dim > const &  box1,
T1  ncomp1,
L1 &&  f1,
BoxND< dim > const &  box2,
T2  ncomp2,
L2 &&  f2 
)
noexcept

◆ For() [25/31]

template<typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>, typename M3 = std::enable_if_t<std::is_integral_v<T3>>>
void amrex::For ( Gpu::KernelInfo const &  info,
BoxND< dim > const &  box1,
T1  ncomp1,
L1 &&  f1,
BoxND< dim > const &  box2,
T2  ncomp2,
L2 &&  f2,
BoxND< dim > const &  box3,
T3  ncomp3,
L3 &&  f3 
)
noexcept

◆ For() [26/31]

template<int MT, typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>, typename M3 = std::enable_if_t<std::is_integral_v<T3>>>
void amrex::For ( Gpu::KernelInfo const &  info,
BoxND< dim > const &  box1,
T1  ncomp1,
L1 &&  f1,
BoxND< dim > const &  box2,
T2  ncomp2,
L2 &&  f2,
BoxND< dim > const &  box3,
T3  ncomp3,
L3 &&  f3 
)
noexcept

◆ For() [27/31]

template<typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>>
void amrex::For ( Gpu::KernelInfo const &  info,
T  n,
L &&  f 
)
noexcept

◆ For() [28/31]

template<int MT, typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>>
void amrex::For ( Gpu::KernelInfo const &  info,
T  n,
L &&  f 
)
noexcept

◆ For() [29/31]

template<int MT, typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>>
void amrex::For ( T  n,
L &&  f 
)
noexcept

◆ For() [30/31]

template<typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>>
void amrex::For ( T  n,
L &&  f 
)
noexcept

◆ For() [31/31]

template<typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>>
AMREX_ATTRIBUTE_FLATTEN_FOR void amrex::For ( T  n,
L const &  f 
)
noexcept

◆ ForEach() [1/6]

template<typename... Ts, typename F >
constexpr void amrex::ForEach ( TypeList< Ts... >  ,
F &&  f 
)
constexpr

For each type t in TypeList, call f(t)

For example, instead of

int order = ...;
if (order == 1) {
interp<1>(...);
} else if (order == 2) {
interp<2>(...);
} else if (order == 4) {
interp<4>(...);
}

we could have

int order = ...;
ForEach(TypeList<std::integral_constant<int,1>,
std::integral_constant<int,2>,
std::integral_constant<int,4>>{},
[&] (auto order_const) {
if (order_const() == order) {
interp<order_const()>(...);
}
});

◆ ForEach() [2/6]

template<typename V1 , typename F >
std::enable_if_t< IsAlgVector< std::decay_t< V1 > >::value > amrex::ForEach ( V1 &  x,
F const &  f 
)

◆ ForEach() [3/6]

template<typename V1 , typename V2 , typename F >
std::enable_if_t< IsAlgVector< std::decay_t< V1 > >::value && IsAlgVector< std::decay_t< V2 > >::value > amrex::ForEach ( V1 &  x,
V2 &  y,
F const &  f 
)

◆ ForEach() [4/6]

template<typename V1 , typename V2 , typename V3 , typename F >
std::enable_if_t< IsAlgVector< std::decay_t< V1 > >::value && IsAlgVector< std::decay_t< V2 > >::value && IsAlgVector< std::decay_t< V3 > >::value > amrex::ForEach ( V1 &  x,
V2 &  y,
V3 &  z,
F const &  f 
)

◆ ForEach() [5/6]

template<typename V1 , typename V2 , typename V3 , typename V4 , typename F >
std::enable_if_t< IsAlgVector< std::decay_t< V1 > >::value && IsAlgVector< std::decay_t< V2 > >::value && IsAlgVector< std::decay_t< V3 > >::value && IsAlgVector< std::decay_t< V4 > >::value > amrex::ForEach ( V1 &  x,
V2 &  y,
V3 &  z,
V4 &  a,
F const &  f 
)

◆ ForEach() [6/6]

template<typename V1 , typename V2 , typename V3 , typename V4 , typename V5 , typename F >
std::enable_if_t< IsAlgVector< std::decay_t< V1 > >::value && IsAlgVector< std::decay_t< V2 > >::value && IsAlgVector< std::decay_t< V3 > >::value && IsAlgVector< std::decay_t< V4 > >::value && IsAlgVector< std::decay_t< V5 > >::value > amrex::ForEach ( V1 &  x,
V2 &  y,
V3 &  z,
V4 &  a,
V5 &  b,
F const &  f 
)

◆ ForEachUntil()

template<typename... Ts, typename F >
constexpr bool amrex::ForEachUntil ( TypeList< Ts... >  ,
F &&  f 
)
constexpr

For each type t in TypeList, call f(t) until true is returned.

This behaves like return (f(t0) || f(t1) || f(t2) || ...). Note that short-circuiting occurs for the || operators.

An example,

void AnyF (Any& dst, Any const& src) {
// dst and src are either MultiFab or fMultiFab
auto tt = CartesianProduct(TypeList<MultiFab,fMultiFab>{},
TypeList<MultiFab,fMultiFab>{});
bool r = ForEachUntil(tt, [&] (auto t) -> bool
{
using MF0 = TypeAt<0,decltype(t)>;
using MF1 = TypeAt<1,decltype(t)>;
if (dst.is<MF0>() && src.is<MF1>()) {
MF0 & dmf = dst.get<MF0>();
MF1 const& smf = src.get<MF1>();
f(dmf, smf);
return true;
} else {
return false;
}
});
if (!r) { amrex::Abort("Unsupported types"); }
}

◆ ForwardAsTuple()

template<typename... Ts>
__host__ __device__ constexpr GpuTuple< Ts &&... > amrex::ForwardAsTuple ( Ts &&...  args)
constexprnoexcept

◆ FourthOrderInterpFromFineToCoarse()

void amrex::FourthOrderInterpFromFineToCoarse ( MultiFab &  cmf,
int  scomp,
int  ncomp,
MultiFab const &  fmf,
IntVect const &  ratio 
)

Fourth-order interpolation from fine to coarse level.

This is for high-order "average-down" of finite-difference data. If ghost cell data are used, it's the caller's responsibility to fill the ghost cells before calling this function.

Parameters
cmf : coarse data
scomp : starting component
ncomp : number of components
fmf : fine data
ratio : refinement ratio
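
A one-line sketch (ours), averaging all components of fine data fmf down to cmf with refinement ratio 2:

amrex::FourthOrderInterpFromFineToCoarse(cmf, 0, cmf.nComp(), fmf, amrex::IntVect(2));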

◆ gatherParticles()

template<typename PTile , typename N , typename Index , std::enable_if_t< std::is_integral_v< Index >, int > foo = 0>
void amrex::gatherParticles ( PTile &  dst,
const PTile &  src,
N  np,
const Index *  inds 
)

gatherParticles copies particles from an arbitrary order into contiguous order. Specifically, the particle at index inds[i] in src is copied to index i in dst.

Template Parameters
PTile : the particle tile type
N : the size type, e.g. Long
Index : the index type, e.g. unsigned int
Parameters
dst : the destination tile
src : the source tile
np : the number of particles
inds : pointer to the permutation array
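
A sketch (ours), where perm is a caller-built device permutation array (e.g. from a sort):

amrex::Gpu::DeviceVector<unsigned int> perm(np);
// ... fill perm; afterwards particle perm[i] of src sits at position i of dst
amrex::gatherParticles(dst, src, np, perm.data());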

◆ GccPlacater()

void amrex::GccPlacater ( )
inline

◆ GccPlacaterMF()

void amrex::GccPlacaterMF ( )
inline

◆ get() [1/4]

template<std::size_t I, typename... Ts>
__host__ __device__ constexpr GpuTupleElement< I, GpuTuple< Ts... > >::type && amrex::get ( GpuTuple< Ts... > &&  tup)
constexprnoexcept

◆ get() [2/4]

template<std::size_t I, typename... Ts>
__host__ __device__ constexpr GpuTupleElement< I, GpuTuple< Ts... > >::type & amrex::get ( GpuTuple< Ts... > &  tup)
constexprnoexcept

◆ get() [3/4]

template<std::size_t I, typename... Ts>
__host__ __device__ constexpr GpuTupleElement< I, GpuTuple< Ts... > >::type const & amrex::get ( GpuTuple< Ts... > const &  tup)
constexprnoexcept

◆ get() [4/4]

template<std::size_t I, int dim>
__host__ __device__ constexpr int amrex::get ( IntVectND< dim > const &  iv)
inlineconstexprnoexcept

Get I'th element of IntVectND<dim>

◆ get_cell_data()

template<typename MF , std::enable_if_t< IsFabArray< MF >::value, int > FOO = 0>
Vector< typename MF::value_type > amrex::get_cell_data ( MF const &  mf,
IntVect const &  cell 
)

Get data in a cell of MultiFab/FabArray.

This returns a Vector containing the data in a given cell, if it's available on a process. The returned Vector is empty if a process does not have the given cell.
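
For example (cell index ours):

auto vals = amrex::get_cell_data(mf, amrex::IntVect(8));  // all components at cell (8,...)
if (!vals.empty()) {
    amrex::AllPrint() << "first component: " << vals[0] << "\n";
}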

◆ get_command()

std::string amrex::get_command ( )

◆ get_command_argument()

std::string amrex::get_command_argument ( int  number)

Get a command line argument as a std::string. The executable name is the zero-th argument. Returns an empty string if there are not that many arguments.

◆ get_line_data()

template<typename MF , std::enable_if_t< IsFabArray< MF >::value, int > FOO = 0>
MF amrex::get_line_data ( MF const &  mf,
int  dir,
IntVect const &  cell,
Box const &  bnd_bx = Box() 
)

Get data in a line of MultiFab/FabArray.

This returns a MultiFab/FabArray containing the data in a line specified by a direction and a cell.

◆ get_slice_data()

std::unique_ptr< MultiFab > amrex::get_slice_data ( int  dir,
Real  coord,
const MultiFab &  cc,
const Geometry &  geom,
int  start_comp,
int  ncomp,
bool  interpolate = false,
RealBox const &  bnd_rbx = RealBox() 
)

Extract a slice from the given cell-centered MultiFab at coordinate "coord" along direction "dir".
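
A sketch (ours): extract the x-y plane at z = 0.5 from a cell-centered MultiFab mf with Geometry geom:

std::unique_ptr<amrex::MultiFab> slice =
    amrex::get_slice_data(2, 0.5, mf, geom, 0, mf.nComp());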

◆ GetArrOfConstPtrs() [1/3]

template<class T >
std::array< T const *, 3 > amrex::GetArrOfConstPtrs ( const std::array< std::unique_ptr< T >, 3 > &  a)
noexcept

◆ GetArrOfConstPtrs() [2/3]

template<class T >
std::array< T const *, 3 > amrex::GetArrOfConstPtrs ( const std::array< T *, 3 > &  a)
noexcept

◆ GetArrOfConstPtrs() [3/3]

template<class T >
std::array< T const *, 3 > amrex::GetArrOfConstPtrs ( const std::array< T, 3 > &  a)
noexcept

◆ GetArrOfPtrs() [1/2]

template<class T >
std::array< T *, 3 > amrex::GetArrOfPtrs ( const std::array< std::unique_ptr< T >, 3 > &  a)
noexcept

◆ GetArrOfPtrs() [2/2]

template<class T , typename = typename T::FABType>
std::array< T *, 3 > amrex::GetArrOfPtrs ( std::array< T, 3 > &  a)
noexcept

◆ GetBndryCells()

BoxList amrex::GetBndryCells ( const BoxArray ba,
int  ngrow 
)

Find the ghost cells of a given BoxArray.

◆ getCell()

template<int dim>
__host__ __device__ IntVectND< dim > amrex::getCell ( BoxND< dim > const *  boxes,
int  nboxes,
Long  icell 
)
inlinenoexcept

◆ getDefaultCompNameInt()

template<typename P >
std::string amrex::getDefaultCompNameInt ( const int  i)

◆ getDefaultCompNameReal()

template<typename P >
std::string amrex::getDefaultCompNameReal ( const int  i)

◆ getEBCellFlagFab()

const EBCellFlagFab & amrex::getEBCellFlagFab ( const FArrayBox fab)

◆ getEnum()

template<typename T , typename ET = amrex_enum_traits<T>, std::enable_if_t< ET::value, int > = 0>
T amrex::getEnum ( std::string_view const &  s)

Convert a string to an enum value

Example:

AMREX_ENUM(Model,
linear,
nonlinear
);
std::string const model_str = "nonlinear";
Model const model = amrex::getEnum<Model>(model_str);
assert(model == Model::nonlinear);

◆ getEnumCaseInsensitive()

template<typename T , typename ET = amrex_enum_traits<T>, std::enable_if_t< ET::value, int > = 0>
T amrex::getEnumCaseInsensitive ( std::string_view const &  s)

Convert a string case insensitive to an enum value

Same as getEnum<T>, but case insensitive match to enum value.

Example:

AMREX_ENUM(Model,
linear,
nonlinear
);
std::string const model_str = "NonLinear";
Model const model = amrex::getEnumCaseInsensitive<Model>(model_str);
assert(model == Model::nonlinear);

◆ getEnumClassName()

template<typename T , typename ET = amrex_enum_traits<T>, std::enable_if_t< ET::value, int > = 0>
std::string amrex::getEnumClassName ( )

◆ getEnumNameString()

template<typename T , typename ET = amrex_enum_traits<T>, std::enable_if_t< ET::value, int > = 0>
std::string amrex::getEnumNameString ( T const &  v)

Get a string from an enum value

Example:

AMREX_ENUM(Model,
linear,
nonlinear
);
Model model = Model::linear;
std::string model_str = amrex::getEnumNameString(model);
assert(model_str == "linear");

◆ getEnumNameStrings()

template<typename T , typename ET = amrex_enum_traits<T>, std::enable_if_t< ET::value, int > = 0>
std::vector< std::string > amrex::getEnumNameStrings ( )

◆ getEnumNameValuePairs()

template<typename T , typename ET = amrex_enum_traits<T>, std::enable_if_t< ET::value, int > = 0>
std::vector< std::pair< std::string, T > > const & amrex::getEnumNameValuePairs ( )

◆ getFPExcept()

FPExcept amrex::getFPExcept ( )

Return currently enabled FP exceptions. Linux only.

◆ getIndexBounds() [1/3]

template<int dim>
BoxND< dim > amrex::getIndexBounds ( BoxND< dim > const &  b1)
inlinenoexcept

◆ getIndexBounds() [2/3]

template<int dim>
BoxND< dim > amrex::getIndexBounds ( BoxND< dim > const &  b1,
BoxND< dim > const &  b2 
)
inlinenoexcept

◆ getIndexBounds() [3/3]

template<class T , class ... Ts>
auto amrex::getIndexBounds ( T const &  b1,
T const &  b2,
Ts const &...  b3 
)
inlinenoexcept

◆ getInvalidRandomEngine()

RandomEngine amrex::getInvalidRandomEngine ( )
inline

◆ getParticleCell() [1/3]

template<typename P >
__host__ __device__ IntVect amrex::getParticleCell ( P const &  p,
amrex::GpuArray< amrex::Real, 3 > const &  plo,
amrex::GpuArray< amrex::Real, 3 > const &  dxi 
)
inlinenoexcept

Returns the cell index for a given particle using the provided lower bounds and cell sizes.

This version indexes cells starting from 0 at the lower left corner of the provided lower bounds, i.e., it returns a local index.

Template Parameters
P : a type of AMReX particle.
Parameters
p : the particle for which the cell index is calculated
plo : the low end of the domain
dxi : inverse cell sizes (1/dx) in each dimension
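
A sketch (ours), deriving plo and dxi from a Geometry object geom in a 3D build:

auto plo = geom.ProbLoArray();        // lower corner of the domain
auto dxi = geom.InvCellSizeArray();   // 1/dx in each direction
amrex::IntVect iv = amrex::getParticleCell(p, plo, dxi);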

◆ getParticleCell() [2/3]

template<typename P >
__host__ __device__ IntVect amrex::getParticleCell ( P const &  p,
amrex::GpuArray< amrex::Real, 3 > const &  plo,
amrex::GpuArray< amrex::Real, 3 > const &  dxi,
const Box &  domain 
)
inlinenoexcept

Returns the cell index for a given particle using the provided lower bounds, cell sizes and global domain offset.

This version indexes cells starting from 0 at the lower left corner of the simulation geometry, i.e., it returns a global index.

Template Parameters
P : a type of AMReX particle.
Parameters
p : the particle for which the cell index is calculated
plo : the low end of the domain
dxi : inverse cell sizes (1/dx) in each dimension
domain : AMReX box in which the given particle resides

◆ getParticleCell() [3/3]

template<typename PTD >
__host__ __device__ IntVect amrex::getParticleCell ( PTD const &  ptd,
int  i,
amrex::GpuArray< amrex::Real, 3 > const &  plo,
amrex::GpuArray< amrex::Real, 3 > const &  dxi,
const Box &  domain 
)
inlinenoexcept

◆ getParticleGrid()

template<typename P >
__host__ __device__ int amrex::getParticleGrid ( P const &  p,
amrex::Array4< int > const &  mask,
amrex::GpuArray< amrex::Real, 3 > const &  plo,
amrex::GpuArray< amrex::Real, 3 > const &  dxi,
const Box &  domain 
)
inlinenoexcept

◆ getRandState()

randState_t * amrex::getRandState ( )
inline

◆ getTileIndex()

__host__ __device__ int amrex::getTileIndex ( const IntVect &  iv,
const Box &  box,
const bool  a_do_tiling,
const IntVect &  a_tile_size,
Box &  tbx 
)
inline

◆ GetVecOfArrOfConstPtrs() [1/2]

template<class T >
Vector< std::array< T const *, 3 > > amrex::GetVecOfArrOfConstPtrs ( const Vector< std::array< std::unique_ptr< T >, 3 > > &  a)

◆ GetVecOfArrOfConstPtrs() [2/2]

template<class T , std::enable_if_t< IsFabArray< T >::value||IsBaseFab< T >::value, int > = 0>
Vector< std::array< T const *, 3 > > amrex::GetVecOfArrOfConstPtrs ( const Vector< std::array< T, 3 > > &  a)

◆ GetVecOfArrOfPtrs() [1/2]

template<class T >
Vector< std::array< T *, 3 > > amrex::GetVecOfArrOfPtrs ( const Vector< std::array< std::unique_ptr< T >, 3 > > &  a)

◆ GetVecOfArrOfPtrs() [2/2]

template<class T , std::enable_if_t< IsFabArray< T >::value||IsBaseFab< T >::value, int > = 0>
Vector< std::array< T *, 3 > > amrex::GetVecOfArrOfPtrs ( Vector< std::array< T, 3 > > &  a)

◆ GetVecOfArrOfPtrsConst()

template<class T >
Vector< std::array< T const *, 3 > > amrex::GetVecOfArrOfPtrsConst ( const Vector< std::array< std::unique_ptr< T >, 3 > > &  a)

◆ GetVecOfConstPtrs() [1/4]

template<class T >
Vector< const T * > amrex::GetVecOfConstPtrs ( const Vector< std::unique_ptr< T > > &  a)

◆ GetVecOfConstPtrs() [2/4]

template<class T , typename = typename T::FABType>
Vector< const T * > amrex::GetVecOfConstPtrs ( const Vector< T * > &  a)

◆ GetVecOfConstPtrs() [3/4]

template<class T , typename = typename T::FABType>
Vector< const T * > amrex::GetVecOfConstPtrs ( const Vector< T > &  a)

◆ GetVecOfConstPtrs() [4/4]

template<class T , std::size_t N, typename = typename T::FABType>
Vector< Array< T, N > const * > amrex::GetVecOfConstPtrs ( Vector< Array< T, N > > const &  a)

◆ GetVecOfPtrs() [1/3]

template<class T >
Vector< T * > amrex::GetVecOfPtrs ( const Vector< std::unique_ptr< T > > &  a)
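
These helpers adapt owning containers to the raw-pointer interfaces used throughout AMReX. For example (nlevs and the entries are assumed to be set up elsewhere):

amrex::Vector<std::unique_ptr<amrex::MultiFab>> phi(nlevs);
// ...
amrex::Vector<amrex::MultiFab*> p = amrex::GetVecOfPtrs(phi);
amrex::Vector<const amrex::MultiFab*> cp = amrex::GetVecOfConstPtrs(phi);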

◆ GetVecOfPtrs() [2/3]

template<class T , std::size_t N, typename = typename T::FABType>
Vector< Array< T, N > * > amrex::GetVecOfPtrs ( Vector< Array< T, N > > &  a)

◆ GetVecOfPtrs() [3/3]

template<class T , typename = typename T::FABType>
Vector< T * > amrex::GetVecOfPtrs ( Vector< T > &  a)

◆ GetVecOfVecOfPtrs()

template<class T >
Vector< Vector< T * > > amrex::GetVecOfVecOfPtrs ( const Vector< Vector< std::unique_ptr< T > > > &  a)

◆ gpuGetErrorString()

const char * amrex::gpuGetErrorString ( gpuError_t  error)
inline

◆ gpuGetLastError()

gpuError_t amrex::gpuGetLastError ( )
inline

◆ grow_podvector_capacity()

std::size_t amrex::grow_podvector_capacity ( GrowthStrategy  strategy,
std::size_t  new_size,
std::size_t  old_capacity,
std::size_t  sizeof_T 
)
inline

◆ growHi() [1/2]

template<int dim>
__host__ __device__ BoxND< dim > amrex::growHi ( const BoxND< dim > &  b,
Direction  d,
int  n_cell 
)
inlinenoexcept

◆ growHi() [2/2]

template<int dim>
__host__ __device__ BoxND< dim > amrex::growHi ( const BoxND< dim > &  b,
int  idir,
int  n_cell 
)
inlinenoexcept

◆ growLo() [1/2]

template<int dim>
__host__ __device__ BoxND< dim > amrex::growLo ( const BoxND< dim > &  b,
Direction  d,
int  n_cell 
)
inlinenoexcept

◆ growLo() [2/2]

template<int dim>
__host__ __device__ BoxND< dim > amrex::growLo ( const BoxND< dim > &  b,
int  idir,
int  n_cell 
)
inlinenoexcept

◆ hash_combine()

template<typename T >
void amrex::hash_combine ( uint64_t &  seed,
const T &  val 
)
noexcept

◆ hash_vector()

template<typename T >
uint64_t amrex::hash_vector ( const Vector< T > &  vec,
uint64_t  seed = 0xDEADBEEFDEADBEEF 
)
noexcept

◆ HostDeviceFor() [1/28]

template<typename L , int dim>
void amrex::HostDeviceFor ( BoxND< dim > const &  box,
L &&  f 
)
noexcept

◆ HostDeviceFor() [2/28]

template<int MT, typename L , int dim>
void amrex::HostDeviceFor ( BoxND< dim > const &  box,
L &&  f 
)
noexcept

◆ HostDeviceFor() [3/28]

template<typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>>
void amrex::HostDeviceFor ( BoxND< dim > const &  box,
T  ncomp,
L &&  f 
)
noexcept

◆ HostDeviceFor() [4/28]

template<int MT, typename T , int dim, typename L , typename M = std::enable_if_t<std::is_integral_v<T>>>
void amrex::HostDeviceFor ( BoxND< dim > const &  box,
T  ncomp,
L &&  f 
)
noexcept

◆ HostDeviceFor() [5/28]

template<typename L1 , typename L2 , typename L3 , int dim>
void amrex::HostDeviceFor ( BoxND< dim > const &  box1,
BoxND< dim > const &  box2,
BoxND< dim > const &  box3,
L1 &&  f1,
L2 &&  f2,
L3 &&  f3 
)
noexcept

◆ HostDeviceFor() [6/28]

template<int MT, typename L1 , typename L2 , typename L3 , int dim>
void amrex::HostDeviceFor ( BoxND< dim > const &  box1,
BoxND< dim > const &  box2,
BoxND< dim > const &  box3,
L1 &&  f1,
L2 &&  f2,
L3 &&  f3 
)
noexcept

◆ HostDeviceFor() [7/28]

template<typename L1 , typename L2 , int dim>
void amrex::HostDeviceFor ( BoxND< dim > const &  box1,
BoxND< dim > const &  box2,
L1 &&  f1,
L2 &&  f2 
)
noexcept

◆ HostDeviceFor() [8/28]

template<int MT, typename L1 , typename L2 , int dim>
void amrex::HostDeviceFor ( BoxND< dim > const &  box1,
BoxND< dim > const &  box2,
L1 &&  f1,
L2 &&  f2 
)
noexcept

◆ HostDeviceFor() [9/28]

template<typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>>
void amrex::HostDeviceFor ( BoxND< dim > const &  box1,
T1  ncomp1,
L1 &&  f1,
BoxND< dim > const &  box2,
T2  ncomp2,
L2 &&  f2 
)
noexcept

◆ HostDeviceFor() [10/28]

template<int MT, typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>>
void amrex::HostDeviceFor ( BoxND< dim > const &  box1,
T1  ncomp1,
L1 &&  f1,
BoxND< dim > const &  box2,
T2  ncomp2,
L2 &&  f2 
)
noexcept

◆ HostDeviceFor() [11/28]

template<typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>, typename M3 = std::enable_if_t<std::is_integral_v<T3>>>
void amrex::HostDeviceFor ( BoxND< dim > const &  box1,
T1  ncomp1,
L1 &&  f1,
BoxND< dim > const &  box2,
T2  ncomp2,
L2 &&  f2,
BoxND< dim > const &  box3,
T3  ncomp3,
L3 &&  f3 
)
noexcept

◆ HostDeviceFor() [12/28]

template<int MT, typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>, typename M3 = std::enable_if_t<std::is_integral_v<T3>>>
void amrex::HostDeviceFor ( BoxND< dim > const &  box1,
T1  ncomp1,
L1 &&  f1,
BoxND< dim > const &  box2,
T2  ncomp2,
L2 &&  f2,
BoxND< dim > const &  box3,
T3  ncomp3,
L3 &&  f3 
)
noexcept

◆ HostDeviceFor() [13/28]

template<typename L , int dim>
void amrex::HostDeviceFor ( Gpu::KernelInfo const &  info,
BoxND< dim > const &  box,
L &&  f 
)
noexcept

◆ HostDeviceFor() [14/28]

template<int MT, typename L , int dim>
void amrex::HostDeviceFor ( Gpu::KernelInfo const &  info,
BoxND< dim > const &  box,
L &&  f 
)
noexcept

◆ HostDeviceFor() [15/28]

template<typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>>
void amrex::HostDeviceFor ( Gpu::KernelInfo const &  info,
BoxND< dim > const &  box,
T  ncomp,
L &&  f 
)
noexcept

◆ HostDeviceFor() [16/28]

template<int MT, typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>>
void amrex::HostDeviceFor ( Gpu::KernelInfo const &  info,
BoxND< dim > const &  box,
T  ncomp,
L &&  f 
)
noexcept

◆ HostDeviceFor() [17/28]

template<typename L1 , typename L2 , typename L3 , int dim>
void amrex::HostDeviceFor ( Gpu::KernelInfo const &  info,
BoxND< dim > const &  box1,
BoxND< dim > const &  box2,
BoxND< dim > const &  box3,
L1 &&  f1,
L2 &&  f2,
L3 &&  f3 
)
noexcept

◆ HostDeviceFor() [18/28]

template<int MT, typename L1 , typename L2 , typename L3 , int dim>
void amrex::HostDeviceFor ( Gpu::KernelInfo const &  info,
BoxND< dim > const &  box1,
BoxND< dim > const &  box2,
BoxND< dim > const &  box3,
L1 &&  f1,
L2 &&  f2,
L3 &&  f3 
)
noexcept

◆ HostDeviceFor() [19/28]

template<typename L1 , typename L2 , int dim>
void amrex::HostDeviceFor ( Gpu::KernelInfo const &  info,
BoxND< dim > const &  box1,
BoxND< dim > const &  box2,
L1 &&  f1,
L2 &&  f2 
)
noexcept

◆ HostDeviceFor() [20/28]

template<int MT, typename L1 , typename L2 , int dim>
void amrex::HostDeviceFor ( Gpu::KernelInfo const &  info,
BoxND< dim > const &  box1,
BoxND< dim > const &  box2,
L1 &&  f1,
L2 &&  f2 
)
noexcept

◆ HostDeviceFor() [21/28]

template<typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>>
void amrex::HostDeviceFor ( Gpu::KernelInfo const &  info,
BoxND< dim > const &  box1,
T1  ncomp1,
L1 &&  f1,
BoxND< dim > const &  box2,
T2  ncomp2,
L2 &&  f2 
)
noexcept

◆ HostDeviceFor() [22/28]

template<int MT, typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>>
void amrex::HostDeviceFor ( Gpu::KernelInfo const &  info,
BoxND< dim > const &  box1,
T1  ncomp1,
L1 &&  f1,
BoxND< dim > const &  box2,
T2  ncomp2,
L2 &&  f2 
)
noexcept

◆ HostDeviceFor() [23/28]

template<typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>, typename M3 = std::enable_if_t<std::is_integral_v<T3>>>
void amrex::HostDeviceFor ( Gpu::KernelInfo const &  info,
BoxND< dim > const &  box1,
T1  ncomp1,
L1 &&  f1,
BoxND< dim > const &  box2,
T2  ncomp2,
L2 &&  f2,
BoxND< dim > const &  box3,
T3  ncomp3,
L3 &&  f3 
)
noexcept

◆ HostDeviceFor() [24/28]

template<int MT, typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>, typename M3 = std::enable_if_t<std::is_integral_v<T3>>>
void amrex::HostDeviceFor ( Gpu::KernelInfo const &  info,
BoxND< dim > const &  box1,
T1  ncomp1,
L1 &&  f1,
BoxND< dim > const &  box2,
T2  ncomp2,
L2 &&  f2,
BoxND< dim > const &  box3,
T3  ncomp3,
L3 &&  f3 
)
noexcept

◆ HostDeviceFor() [25/28]

template<typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>>
void amrex::HostDeviceFor ( Gpu::KernelInfo const &  info,
T  n,
L &&  f 
)
noexcept

◆ HostDeviceFor() [26/28]

template<int MT, typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>>
void amrex::HostDeviceFor ( Gpu::KernelInfo const &  info,
T  n,
L &&  f 
)
noexcept

◆ HostDeviceFor() [27/28]

template<typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>>
void amrex::HostDeviceFor ( T  n,
L &&  f 
)
noexcept

◆ HostDeviceFor() [28/28]

template<int MT, typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>>
void amrex::HostDeviceFor ( T  n,
L &&  f 
)
noexcept

◆ HostDeviceParallelFor() [1/43]

template<typename L , int dim>
void amrex::HostDeviceParallelFor ( BoxND< dim > const &  box,
L &&  f 
)
noexcept

◆ HostDeviceParallelFor() [2/43]

template<int MT, typename L , int dim>
void amrex::HostDeviceParallelFor ( BoxND< dim > const &  box,
L &&  f 
)
noexcept

◆ HostDeviceParallelFor() [3/43]

template<typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>>
void amrex::HostDeviceParallelFor ( BoxND< dim > const &  box,
T  ncomp,
L &&  f 
)
noexcept

◆ HostDeviceParallelFor() [4/43]

template<int MT, typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>>
void amrex::HostDeviceParallelFor ( BoxND< dim > const &  box,
T  ncomp,
L &&  f 
)
noexcept

◆ HostDeviceParallelFor() [5/43]

template<typename L1 , typename L2 , typename L3 , int dim>
void amrex::HostDeviceParallelFor ( BoxND< dim > const &  box1,
BoxND< dim > const &  box2,
BoxND< dim > const &  box3,
L1 &&  f1,
L2 &&  f2,
L3 &&  f3 
)
noexcept

◆ HostDeviceParallelFor() [6/43]

template<int MT, typename L1 , typename L2 , typename L3 , int dim>
void amrex::HostDeviceParallelFor ( BoxND< dim > const &  box1,
BoxND< dim > const &  box2,
BoxND< dim > const &  box3,
L1 &&  f1,
L2 &&  f2,
L3 &&  f3 
)
noexcept

◆ HostDeviceParallelFor() [7/43]

template<typename L1 , typename L2 , int dim>
void amrex::HostDeviceParallelFor ( BoxND< dim > const &  box1,
BoxND< dim > const &  box2,
L1 &&  f1,
L2 &&  f2 
)
noexcept

◆ HostDeviceParallelFor() [8/43]

template<int MT, typename L1 , typename L2 , int dim>
void amrex::HostDeviceParallelFor ( BoxND< dim > const &  box1,
BoxND< dim > const &  box2,
L1 &&  f1,
L2 &&  f2 
)
noexcept

◆ HostDeviceParallelFor() [9/43]

template<typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>>
void amrex::HostDeviceParallelFor ( BoxND< dim > const &  box1,
T1  ncomp1,
L1 &&  f1,
BoxND< dim > const &  box2,
T2  ncomp2,
L2 &&  f2 
)
noexcept

◆ HostDeviceParallelFor() [10/43]

template<int MT, typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>>
void amrex::HostDeviceParallelFor ( BoxND< dim > const &  box1,
T1  ncomp1,
L1 &&  f1,
BoxND< dim > const &  box2,
T2  ncomp2,
L2 &&  f2 
)
noexcept

◆ HostDeviceParallelFor() [11/43]

template<typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>, typename M3 = std::enable_if_t<std::is_integral_v<T3>>>
void amrex::HostDeviceParallelFor ( BoxND< dim > const &  box1,
T1  ncomp1,
L1 &&  f1,
BoxND< dim > const &  box2,
T2  ncomp2,
L2 &&  f2,
BoxND< dim > const &  box3,
T3  ncomp3,
L3 &&  f3 
)
noexcept

◆ HostDeviceParallelFor() [12/43]

template<int MT, typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>, typename M3 = std::enable_if_t<std::is_integral_v<T3>>>
void amrex::HostDeviceParallelFor ( BoxND< dim > const &  box1,
T1  ncomp1,
L1 &&  f1,
BoxND< dim > const &  box2,
T2  ncomp2,
L2 &&  f2,
BoxND< dim > const &  box3,
T3  ncomp3,
L3 &&  f3 
)
noexcept

◆ HostDeviceParallelFor() [13/43]

template<typename L , int dim>
void amrex::HostDeviceParallelFor ( Gpu::KernelInfo const &  ,
BoxND< dim > const &  box,
L &&  f 
)
noexcept

◆ HostDeviceParallelFor() [14/43]

template<int MT, typename L , int dim>
void amrex::HostDeviceParallelFor ( Gpu::KernelInfo const &  ,
BoxND< dim > const &  box,
L &&  f 
)
noexcept

◆ HostDeviceParallelFor() [15/43]

template<typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>>
void amrex::HostDeviceParallelFor ( Gpu::KernelInfo const &  ,
BoxND< dim > const &  box,
T  ncomp,
L &&  f 
)
noexcept

◆ HostDeviceParallelFor() [16/43]

template<int MT, typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>>
void amrex::HostDeviceParallelFor ( Gpu::KernelInfo const &  ,
BoxND< dim > const &  box,
T  ncomp,
L &&  f 
)
noexcept

◆ HostDeviceParallelFor() [17/43]

template<typename L1 , typename L2 , typename L3 , int dim>
void amrex::HostDeviceParallelFor ( Gpu::KernelInfo const &  ,
BoxND< dim > const &  box1,
BoxND< dim > const &  box2,
BoxND< dim > const &  box3,
L1 &&  f1,
L2 &&  f2,
L3 &&  f3 
)
noexcept

◆ HostDeviceParallelFor() [18/43]

template<int MT, typename L1 , typename L2 , typename L3 , int dim>
void amrex::HostDeviceParallelFor ( Gpu::KernelInfo const &  ,
BoxND< dim > const &  box1,
BoxND< dim > const &  box2,
BoxND< dim > const &  box3,
L1 &&  f1,
L2 &&  f2,
L3 &&  f3 
)
noexcept

◆ HostDeviceParallelFor() [19/43]

template<typename L1 , typename L2 , int dim>
void amrex::HostDeviceParallelFor ( Gpu::KernelInfo const &  ,
BoxND< dim > const &  box1,
BoxND< dim > const &  box2,
L1 &&  f1,
L2 &&  f2 
)
noexcept

◆ HostDeviceParallelFor() [20/43]

template<int MT, typename L1 , typename L2 , int dim>
void amrex::HostDeviceParallelFor ( Gpu::KernelInfo const &  ,
BoxND< dim > const &  box1,
BoxND< dim > const &  box2,
L1 &&  f1,
L2 &&  f2 
)
noexcept

◆ HostDeviceParallelFor() [21/43]

template<typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>>
void amrex::HostDeviceParallelFor ( Gpu::KernelInfo const &  ,
BoxND< dim > const &  box1,
T1  ncomp1,
L1 &&  f1,
BoxND< dim > const &  box2,
T2  ncomp2,
L2 &&  f2 
)
noexcept

◆ HostDeviceParallelFor() [22/43]

template<int MT, typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>>
void amrex::HostDeviceParallelFor ( Gpu::KernelInfo const &  ,
BoxND< dim > const &  box1,
T1  ncomp1,
L1 &&  f1,
BoxND< dim > const &  box2,
T2  ncomp2,
L2 &&  f2 
)
noexcept

◆ HostDeviceParallelFor() [23/43]

template<typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>, typename M3 = std::enable_if_t<std::is_integral_v<T3>>>
void amrex::HostDeviceParallelFor ( Gpu::KernelInfo const &  ,
BoxND< dim > const &  box1,
T1  ncomp1,
L1 &&  f1,
BoxND< dim > const &  box2,
T2  ncomp2,
L2 &&  f2,
BoxND< dim > const &  box3,
T3  ncomp3,
L3 &&  f3 
)
noexcept

◆ HostDeviceParallelFor() [24/43]

template<int MT, typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>, typename M3 = std::enable_if_t<std::is_integral_v<T3>>>
void amrex::HostDeviceParallelFor ( Gpu::KernelInfo const &  ,
BoxND< dim > const &  box1,
T1  ncomp1,
L1 &&  f1,
BoxND< dim > const &  box2,
T2  ncomp2,
L2 &&  f2,
BoxND< dim > const &  box3,
T3  ncomp3,
L3 &&  f3 
)
noexcept

◆ HostDeviceParallelFor() [25/43]

template<typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>>
void amrex::HostDeviceParallelFor ( Gpu::KernelInfo const &  ,
T  n,
L &&  f 
)
noexcept

◆ HostDeviceParallelFor() [26/43]

template<int MT, typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>>
void amrex::HostDeviceParallelFor ( Gpu::KernelInfo const &  ,
T  n,
L &&  f 
)
noexcept

◆ HostDeviceParallelFor() [27/43]

template<typename L , int dim>
std::enable_if_t< MaybeHostDeviceRunnable< L >::value > amrex::HostDeviceParallelFor ( Gpu::KernelInfo const &  info,
BoxND< dim > const &  box,
L &&  f 
)
noexcept

◆ HostDeviceParallelFor() [28/43]

template<int MT, typename L , int dim>
std::enable_if_t< MaybeHostDeviceRunnable< L >::value > amrex::HostDeviceParallelFor ( Gpu::KernelInfo const &  info,
BoxND< dim > const &  box,
L &&  f 
)
noexcept

◆ HostDeviceParallelFor() [29/43]

template<typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>>
std::enable_if_t< MaybeHostDeviceRunnable< L >::value > amrex::HostDeviceParallelFor ( Gpu::KernelInfo const &  info,
BoxND< dim > const &  box,
T  ncomp,
L &&  f 
)
noexcept

◆ HostDeviceParallelFor() [30/43]

template<int MT, typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>>
std::enable_if_t< MaybeHostDeviceRunnable< L >::value > amrex::HostDeviceParallelFor ( Gpu::KernelInfo const &  info,
BoxND< dim > const &  box,
T  ncomp,
L &&  f 
)
noexcept

◆ HostDeviceParallelFor() [31/43]

template<int MT, typename L1 , typename L2 , typename L3 , int dim>
std::enable_if_t< MaybeHostDeviceRunnable< L1 >::value &&MaybeHostDeviceRunnable< L2 >::value &&MaybeHostDeviceRunnable< L3 >::value > amrex::HostDeviceParallelFor ( Gpu::KernelInfo const &  info,
BoxND< dim > const &  box1,
BoxND< dim > const &  box2,
BoxND< dim > const &  box3,
L1 &&  f1,
L2 &&  f2,
L3 &&  f3 
)
noexcept

◆ HostDeviceParallelFor() [32/43]

template<typename L1 , typename L2 , int dim>
std::enable_if_t< MaybeHostDeviceRunnable< L1 >::value &&MaybeHostDeviceRunnable< L2 >::value > amrex::HostDeviceParallelFor ( Gpu::KernelInfo const &  info,
BoxND< dim > const &  box1,
BoxND< dim > const &  box2,
L1 &&  f1,
L2 &&  f2 
)
noexcept

◆ HostDeviceParallelFor() [33/43]

template<int MT, typename L1 , typename L2 , int dim>
std::enable_if_t< MaybeHostDeviceRunnable< L1 >::value &&MaybeHostDeviceRunnable< L2 >::value > amrex::HostDeviceParallelFor ( Gpu::KernelInfo const &  info,
BoxND< dim > const &  box1,
BoxND< dim > const &  box2,
L1 &&  f1,
L2 &&  f2 
)
noexcept

◆ HostDeviceParallelFor() [34/43]

template<typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>>
std::enable_if_t< MaybeHostDeviceRunnable< L1 >::value &&MaybeHostDeviceRunnable< L2 >::value > amrex::HostDeviceParallelFor ( Gpu::KernelInfo const &  info,
BoxND< dim > const &  box1,
T1  ncomp1,
L1 &&  f1,
BoxND< dim > const &  box2,
T2  ncomp2,
L2 &&  f2 
)
noexcept

◆ HostDeviceParallelFor() [35/43]

template<int MT, typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>>
std::enable_if_t< MaybeHostDeviceRunnable< L1 >::value &&MaybeHostDeviceRunnable< L2 >::value > amrex::HostDeviceParallelFor ( Gpu::KernelInfo const &  info,
BoxND< dim > const &  box1,
T1  ncomp1,
L1 &&  f1,
BoxND< dim > const &  box2,
T2  ncomp2,
L2 &&  f2 
)
noexcept

◆ HostDeviceParallelFor() [36/43]

template<typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>, typename M3 = std::enable_if_t<std::is_integral_v<T3>>>
std::enable_if_t< MaybeHostDeviceRunnable< L1 >::value &&MaybeHostDeviceRunnable< L2 >::value &&MaybeHostDeviceRunnable< L3 >::value > amrex::HostDeviceParallelFor ( Gpu::KernelInfo const &  info,
BoxND< dim > const &  box1,
T1  ncomp1,
L1 &&  f1,
BoxND< dim > const &  box2,
T2  ncomp2,
L2 &&  f2,
BoxND< dim > const &  box3,
T3  ncomp3,
L3 &&  f3 
)
noexcept

◆ HostDeviceParallelFor() [37/43]

template<int MT, typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>, typename M3 = std::enable_if_t<std::is_integral_v<T3>>>
std::enable_if_t< MaybeHostDeviceRunnable< L1 >::value &&MaybeHostDeviceRunnable< L2 >::value &&MaybeHostDeviceRunnable< L3 >::value > amrex::HostDeviceParallelFor ( Gpu::KernelInfo const &  info,
BoxND< dim > const &  box1,
T1  ncomp1,
L1 &&  f1,
BoxND< dim > const &  box2,
T2  ncomp2,
L2 &&  f2,
BoxND< dim > const &  box3,
T3  ncomp3,
L3 &&  f3 
)
noexcept

◆ HostDeviceParallelFor() [38/43]

template<typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>>
std::enable_if_t< MaybeHostDeviceRunnable< L >::value > amrex::HostDeviceParallelFor ( Gpu::KernelInfo const &  info,
T  n,
L &&  f 
)
noexcept

◆ HostDeviceParallelFor() [39/43]

template<int MT, typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>>
std::enable_if_t< MaybeHostDeviceRunnable< L >::value > amrex::HostDeviceParallelFor ( Gpu::KernelInfo const &  info,
T  n,
L &&  f 
)
noexcept

◆ HostDeviceParallelFor() [40/43]

template<typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>>
void amrex::HostDeviceParallelFor ( T  n,
L &&  f 
)
noexcept

◆ HostDeviceParallelFor() [41/43]

template<int MT, typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>>
void amrex::HostDeviceParallelFor ( T  n,
L &&  f 
)
noexcept

◆ HostDeviceParallelFor() [42/43]

template<typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>>
std::enable_if_t< MaybeHostDeviceRunnable< L >::value > amrex::HostDeviceParallelFor ( T  n,
L &&  f 
)
noexcept

◆ HostDeviceParallelFor() [43/43]

template<int MT, typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>>
std::enable_if_t< MaybeHostDeviceRunnable< L >::value > amrex::HostDeviceParallelFor ( T  n,
L &&  f 
)
noexcept

◆ htod_memcpy() [1/2]

template<class FAB , class foo = std::enable_if_t<IsBaseFab<FAB>::value>>
void amrex::htod_memcpy ( FabArray< FAB > &  dst,
FabArray< FAB > const &  src 
)

◆ htod_memcpy() [2/2]

template<class FAB , class foo = std::enable_if_t<IsBaseFab<FAB>::value>>
void amrex::htod_memcpy ( FabArray< FAB > &  dst,
FabArray< FAB > const &  src,
int  scomp,
int  dcomp,
int  ncomp 
)

◆ IdentityTuple() [1/2]

template<typename... Ts, typename... Ps>
__host__ __device__ constexpr GpuTuple< Ts... > amrex::IdentityTuple ( GpuTuple< Ts... >  ,
ReduceOps< Ps... >   
)
constexprnoexcept

Return a GpuTuple containing the identity element for each operation in ReduceOps. For example, 0, +inf and -inf for ReduceOpSum, ReduceOpMin and ReduceOpMax, respectively.

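For illustration, a minimal sketch (the GpuTuple element types are chosen here to match the three reduce operations):

    amrex::ReduceOps<amrex::ReduceOpSum, amrex::ReduceOpMin, amrex::ReduceOpMax> ops;
    auto id = amrex::IdentityTuple(amrex::GpuTuple<amrex::Real, amrex::Real, amrex::Real>{}, ops);
    // id holds {0, +inf, -inf}, the identity elements for sum, min and max
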
◆ IdentityTuple() [2/2]

template<typename... Ts, typename... Ps>
__host__ __device__ constexpr GpuTuple< Ts... > amrex::IdentityTuple ( GpuTuple< Ts... >  ,
TypeList< Ps... >   
)
constexprnoexcept

Return a GpuTuple containing the identity element for each ReduceOp in TypeList. For example, 0, +inf and -inf for ReduceOpSum, ReduceOpMin and ReduceOpMax, respectively.

◆ ignore_unused()

template<class... Ts>
__host__ __device__ void amrex::ignore_unused ( const Ts &  ...)
inline

This shuts up the compiler about unused variables.

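For example, to silence warnings for a variable that is only used in some build configurations:

    int nranks = 1;  // hypothetical variable, only used when MPI is enabled
    amrex::ignore_unused(nranks);
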
◆ indexFromValue()

template<class FAB , class foo = std::enable_if_t<IsBaseFab<FAB>::value>>
IntVect amrex::indexFromValue ( FabArray< FAB > const &  mf,
int  comp,
IntVect const &  nghost,
typename FAB::value_type  value 
)

◆ IndexTypeCat()

template<int d, int... dims>
__host__ __device__ constexpr IndexTypeND< detail::get_sum< d, dims... >()> amrex::IndexTypeCat ( const IndexTypeND< d > &  v,
const IndexTypeND< dims > &...  vects 
)
inlineconstexprnoexcept

Returns an IndexTypeND obtained by concatenating the input IndexTypeNDs. The dimension of the return value equals the sum of the dimensions of the input IndexTypeNDs.

◆ IndexTypeExpand()

template<int new_dim, int old_dim>
__host__ __device__ constexpr IndexTypeND< new_dim > amrex::IndexTypeExpand ( const IndexTypeND< old_dim > &  v,
IndexType::CellIndex  fill_extra = IndexType::CellIndex::CELL 
)
inlineconstexprnoexcept

Returns a new IndexTypeND of size new_dim, assigning all values of v to it and fill_extra to the remaining elements.

◆ IndexTypeND() [1/2]

template<int dim>
__host__ __device__ amrex::IndexTypeND ( const IntVectND< dim > &  ) -> IndexTypeND< dim >

◆ IndexTypeND() [2/2]

template<class... Args, std::enable_if_t< IsConvertible_v< IndexType::CellIndex, Args... >, int > = 0>
__host__ __device__ amrex::IndexTypeND ( IndexType::CellIndex  ,
Args...   
) -> IndexTypeND< sizeof...(Args)+1 >

◆ IndexTypeResize()

template<int new_dim, int old_dim>
__host__ __device__ constexpr IndexTypeND< new_dim > amrex::IndexTypeResize ( const IndexTypeND< old_dim > &  v,
IndexType::CellIndex  fill_extra = IndexType::CellIndex::CELL 
)
inlineconstexprnoexcept

Returns a new IndexTypeND of size new_dim by either shrinking or expanding v.

◆ IndexTypeShrink()

template<int new_dim, int old_dim>
__host__ __device__ constexpr IndexTypeND< new_dim > amrex::IndexTypeShrink ( const IndexTypeND< old_dim > &  v)
inlineconstexprnoexcept

Returns a new IndexTypeND of size new_dim and assigns the first new_dim values of v to it.

◆ IndexTypeSplit()

template<int d, int... dims>
__host__ __device__ constexpr GpuTuple< IndexTypeND< d >, IndexTypeND< dims >... > amrex::IndexTypeSplit ( const IndexTypeND< detail::get_sum< d, dims... >()> &  v)
inlineconstexprnoexcept

Returns a tuple of IndexTypeND obtained by splitting the input IndexTypeND according to the dimensions specified by the template arguments.

◆ Init_FFT()

void amrex::Init_FFT ( MPI_Comm  comm)
inline

Initialize FFT.

This is needed only when the user wants to use amrex::FFT, but does not want to call amrex::Initialize to initialize the full version of AMReX. Note that one usually only needs to call Init_FFT and Finalize_FFT once in the entire program.

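A minimal usage sketch, assuming the application initializes and finalizes MPI itself:

    amrex::Init_FFT(MPI_COMM_WORLD);
    // ... use amrex::FFT here ...
    amrex::Finalize_FFT();  // the matching finalizer mentioned above
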
◆ Init_minimal()

void amrex::Init_minimal ( MPI_Comm  mpi_comm)

◆ Initialize() [1/3]

amrex::AMReX * amrex::Initialize ( int &  argc,
char **&  argv,
bool  build_parm_parse = true,
MPI_Comm  mpi_comm = MPI_COMM_WORLD,
const std::function< void()> &  func_parm_parse = {},
std::ostream &  a_osout = std::cout,
std::ostream &  a_oserr = std::cerr,
ErrorHandler  a_errhandler = nullptr,
int  a_device_id = -1 
)

◆ Initialize() [2/3]

amrex::AMReX * amrex::Initialize ( int &  argc,
char **&  argv,
const std::function< void()> &  func_parm_parse,
std::ostream &  a_osout = std::cout,
std::ostream &  a_oserr = std::cerr,
ErrorHandler  a_errhandler = nullptr,
int  a_device_id = -1 
)

◆ Initialize() [3/3]

amrex::AMReX * amrex::Initialize ( MPI_Comm  mpi_comm,
std::ostream &  a_osout = std::cout,
std::ostream &  a_oserr = std::cerr,
ErrorHandler  a_errhandler = nullptr,
int  a_device_id = -1 
)

◆ Initialized()

bool amrex::Initialized ( )

Returns true if there are any currently-active and initialized AMReX instances (i.e. one for which amrex::Initialize has been called, and amrex::Finalize has not). Otherwise false.

◆ InitSNaN()

bool amrex::InitSNaN ( )
noexcept

◆ InterpAddBox()

void amrex::InterpAddBox ( MultiFabCopyDescriptor fabCopyDesc,
BoxList returnUnfilledBoxes,
Vector< FillBoxId > &  returnedFillBoxIds,
const Box subbox,
MultiFabId  faid1,
MultiFabId  faid2,
Real  t1,
Real  t2,
Real  t,
int  src_comp,
int  dest_comp,
int  num_comp,
bool  extrap 
)

◆ InterpCrseFineBndryEMfield() [1/2]

void amrex::InterpCrseFineBndryEMfield ( InterpEM_t  interp_type,
const Array< MultiFab const *, 3 > &  crse,
const Array< MultiFab *, 3 > &  fine,
const Geometry cgeom,
const Geometry fgeom,
int  ref_ratio 
)

◆ InterpCrseFineBndryEMfield() [2/2]

void amrex::InterpCrseFineBndryEMfield ( InterpEM_t  interp_type,
const Array< MultiFab, 3 > &  crse,
Array< MultiFab, 3 > &  fine,
const Geometry cgeom,
const Geometry fgeom,
int  ref_ratio 
)

◆ InterpFace() [1/2]

template<typename MF , typename iMF , typename Interp >
std::enable_if_t< IsFabArray< MF >::value &&!std::is_same_v< Interp, MFInterpolater > > amrex::InterpFace ( Interp *  interp,
MF const &  mf_crse_patch,
int  crse_comp,
MF &  mf_refined_patch,
int  fine_comp,
int  ncomp,
const IntVect ratio,
const iMF &  solve_mask,
const Geometry crse_geom,
const Geometry fine_geom,
int  bcscomp,
RunOn  gpu_or_cpu,
const Vector< BCRec > &  bcs 
)

◆ InterpFace() [2/2]

template<typename MF , typename iMF >
std::enable_if_t< IsFabArray< MF >::value > amrex::InterpFace ( InterpBase interp,
MF const &  mf_crse_patch,
int  crse_comp,
MF &  mf_refined_patch,
int  fine_comp,
int  ncomp,
const IntVect ratio,
const iMF &  solve_mask,
const Geometry crse_geom,
const Geometry fine_geom,
int  bccomp,
RunOn  gpu_or_cpu,
const Vector< BCRec > &  bcs 
)

◆ InterpFillFab()

void amrex::InterpFillFab ( MultiFabCopyDescriptor fabCopyDesc,
const Vector< FillBoxId > &  fillBoxIds,
MultiFabId  faid1,
MultiFabId  faid2,
FArrayBox dest,
Real  t1,
Real  t2,
Real  t,
int  src_comp,
int  dest_comp,
int  num_comp,
bool  extrap 
)

◆ InterpFromCoarseLevel() [1/6]

template<typename MF , typename BC , typename Interp , typename PreInterpHook = NullInterpHook<typename MF::FABType::value_type>, typename PostInterpHook = NullInterpHook<typename MF::FABType::value_type>>
std::enable_if_t< IsFabArray< MF >::value > amrex::InterpFromCoarseLevel ( Array< MF *, 3 > const &  mf,
IntVect const &  nghost,
Real  time,
const Array< MF *, 3 > &  cmf,
int  scomp,
int  dcomp,
int  ncomp,
const Geometry cgeom,
const Geometry fgeom,
Array< BC, 3 > &  cbc,
int  cbccomp,
Array< BC, 3 > &  fbc,
int  fbccomp,
const IntVect ratio,
Interp *  mapper,
const Array< Vector< BCRec >, 3 > &  bcs,
int  bcscomp,
const PreInterpHook &  pre_interp = {},
const PostInterpHook &  post_interp = {} 
)

Fill face variables with data from the coarse level. Sometimes, we need to fillpatch all AMREX_SPACEDIM face MultiFabs together to satisfy certain constraints such as divergence preservation.

Template Parameters
MF: the MultiFab/FabArray type
BC: functor for filling physical boundaries
Interp: spatial interpolater
PreInterpHook: pre-interpolation hook
PostInterpHook: post-interpolation hook
Parameters
mf: destination MFs on the fine level
nghost: number of ghost cells of mf needed to be filled
time: time associated with mf
cmf: source MFs on the coarse level
scomp: starting component of the source MFs
dcomp: starting component of the destination MFs
ncomp: number of components
cgeom: Geometry for the coarse level
fgeom: Geometry for the fine level
cbc: functor for physical boundaries on the coarse level
cbccomp: starting component for cbc
fbc: functor for physical boundaries on the fine level
fbccomp: starting component for fbc
ratio: refinement ratio
mapper: spatial interpolater
bcs: boundary types for each component. We need this because some interpolaters need it.
bcscomp: starting component for bcs
pre_interp: pre-interpolation hook
post_interp: post-interpolation hook

◆ InterpFromCoarseLevel() [2/6]

template<typename MF , typename BC , typename Interp , typename PreInterpHook = NullInterpHook<typename MF::FABType::value_type>, typename PostInterpHook = NullInterpHook<typename MF::FABType::value_type>>
std::enable_if_t< IsFabArray< MF >::value > amrex::InterpFromCoarseLevel ( Array< MF *, 3 > const &  mf,
Real  time,
const Array< MF *, 3 > &  cmf,
int  scomp,
int  dcomp,
int  ncomp,
const Geometry cgeom,
const Geometry fgeom,
Array< BC, 3 > &  cbc,
int  cbccomp,
Array< BC, 3 > &  fbc,
int  fbccomp,
const IntVect ratio,
Interp *  mapper,
const Array< Vector< BCRec >, 3 > &  bcs,
int  bcscomp,
const PreInterpHook &  pre_interp = {},
const PostInterpHook &  post_interp = {} 
)

Fill face variables with data from the coarse level. Sometimes, we need to fillpatch all AMREX_SPACEDIM face MultiFabs together to satisfy certain constraints such as divergence preservation.

Template Parameters
MF: the MultiFab/FabArray type
BC: functor for filling physical boundaries
Interp: spatial interpolater
PreInterpHook: pre-interpolation hook
PostInterpHook: post-interpolation hook
Parameters
mf: destination MFs on the fine level
time: time associated with mf
cmf: source MFs on the coarse level
scomp: starting component of the source MFs
dcomp: starting component of the destination MFs
ncomp: number of components
cgeom: Geometry for the coarse level
fgeom: Geometry for the fine level
cbc: functor for physical boundaries on the coarse level
cbccomp: starting component for cbc
fbc: functor for physical boundaries on the fine level
fbccomp: starting component for fbc
ratio: refinement ratio
mapper: spatial interpolater
bcs: boundary types for each component. We need this because some interpolaters need it.
bcscomp: starting component for bcs
pre_interp: pre-interpolation hook
post_interp: post-interpolation hook

◆ InterpFromCoarseLevel() [3/6]

template<typename MF , typename Interp >
std::enable_if_t< IsFabArray< MF >::value > amrex::InterpFromCoarseLevel ( MF &  mf,
IntVect const &  nghost,
IntVect const &  nghost_outside_domain,
const MF &  cmf,
int  scomp,
int  dcomp,
int  ncomp,
const Geometry cgeom,
const Geometry fgeom,
const IntVect ratio,
Interp *  mapper,
const Vector< BCRec > &  bcs,
int  bcscomp 
)

Fill with interpolation of coarse level data.

It's the CALLER's responsibility to make sure all ghost cells of the coarse MF needed for interpolation are filled already before calling this function. It's assumed that the fine level MultiFab mf's BoxArray is coarsenable by the refinement ratio. There is no support for EB.

Template Parameters
MF: the MultiFab/FabArray type
Interp: spatial interpolater
Parameters
mf: destination MF on the fine level
nghost: number of ghost cells of mf inside domain needed to be filled
nghost_outside_domain: number of ghost cells of mf outside domain needed to be filled
cmf: source MF on the coarse level
scomp: starting component of the source MF
dcomp: starting component of the destination MF
ncomp: number of components
cgeom: Geometry for the coarse level
fgeom: Geometry for the fine level
ratio: refinement ratio
mapper: spatial interpolater
bcs: boundary types for each component
bcscomp: starting component for bcs

◆ InterpFromCoarseLevel() [4/6]

template<typename MF , typename BC , typename Interp , typename PreInterpHook = NullInterpHook<typename MF::FABType::value_type>, typename PostInterpHook = NullInterpHook<typename MF::FABType::value_type>>
std::enable_if_t< IsFabArray< MF >::value > amrex::InterpFromCoarseLevel ( MF &  mf,
IntVect const &  nghost,
Real  time,
const EB2::IndexSpace index_space,
const MF &  cmf,
int  scomp,
int  dcomp,
int  ncomp,
const Geometry cgeom,
const Geometry fgeom,
BC &  cbc,
int  cbccomp,
BC &  fbc,
int  fbccomp,
const IntVect ratio,
Interp *  mapper,
const Vector< BCRec > &  bcs,
int  bcscomp,
const PreInterpHook &  pre_interp = {},
const PostInterpHook &  post_interp = {} 
)

Fill with interpolation of coarse level data.

Template Parameters
MF: the MultiFab/FabArray type
BC: functor for filling physical boundaries
Interp: spatial interpolater
PreInterpHook: pre-interpolation hook
PostInterpHook: post-interpolation hook
Parameters
mf: destination MF on the fine level
nghost: number of ghost cells of mf needed to be filled
time: time associated with mf
index_space: EB IndexSpace
cmf: source MF on the coarse level
scomp: starting component of the source MF
dcomp: starting component of the destination MF
ncomp: number of components
cgeom: Geometry for the coarse level
fgeom: Geometry for the fine level
cbc: functor for physical boundaries on the coarse level
cbccomp: starting component for cbc
fbc: functor for physical boundaries on the fine level
fbccomp: starting component for fbc
ratio: refinement ratio
mapper: spatial interpolater
bcs: boundary types for each component. We need this because some interpolaters need it.
bcscomp: starting component for bcs
pre_interp: pre-interpolation hook
post_interp: post-interpolation hook

◆ InterpFromCoarseLevel() [5/6]

template<typename MF , typename BC , typename Interp , typename PreInterpHook = NullInterpHook<typename MF::FABType::value_type>, typename PostInterpHook = NullInterpHook<typename MF::FABType::value_type>>
std::enable_if_t< IsFabArray< MF >::value > amrex::InterpFromCoarseLevel ( MF &  mf,
IntVect const &  nghost,
Real  time,
const MF &  cmf,
int  scomp,
int  dcomp,
int  ncomp,
const Geometry cgeom,
const Geometry fgeom,
BC &  cbc,
int  cbccomp,
BC &  fbc,
int  fbccomp,
const IntVect ratio,
Interp *  mapper,
const Vector< BCRec > &  bcs,
int  bcscomp,
const PreInterpHook &  pre_interp = {},
const PostInterpHook &  post_interp = {} 
)

Fill with interpolation of coarse level data.

Template Parameters
MF: the MultiFab/FabArray type
BC: functor for filling physical boundaries
Interp: spatial interpolater
PreInterpHook: pre-interpolation hook
PostInterpHook: post-interpolation hook
Parameters
mf: destination MF on the fine level
nghost: number of ghost cells of mf needed to be filled
time: time associated with mf
cmf: source MF on the coarse level
scomp: starting component of the source MF
dcomp: starting component of the destination MF
ncomp: number of components
cgeom: Geometry for the coarse level
fgeom: Geometry for the fine level
cbc: functor for physical boundaries on the coarse level
cbccomp: starting component for cbc
fbc: functor for physical boundaries on the fine level
fbccomp: starting component for fbc
ratio: refinement ratio
mapper: spatial interpolater
bcs: boundary types for each component. We need this because some interpolaters need it.
bcscomp: starting component for bcs
pre_interp: pre-interpolation hook
post_interp: post-interpolation hook

◆ InterpFromCoarseLevel() [6/6]

template<typename MF , typename BC , typename Interp , typename PreInterpHook = NullInterpHook<typename MF::FABType::value_type>, typename PostInterpHook = NullInterpHook<typename MF::FABType::value_type>>
std::enable_if_t< IsFabArray< MF >::value > amrex::InterpFromCoarseLevel ( MF &  mf,
Real  time,
const MF &  cmf,
int  scomp,
int  dcomp,
int  ncomp,
const Geometry cgeom,
const Geometry fgeom,
BC &  cbc,
int  cbccomp,
BC &  fbc,
int  fbccomp,
const IntVect ratio,
Interp *  mapper,
const Vector< BCRec > &  bcs,
int  bcscomp,
const PreInterpHook &  pre_interp = {},
const PostInterpHook &  post_interp = {} 
)

Fill with interpolation of coarse level data.

All ghost cells of the destination MF are filled.

Template Parameters
MF: the MultiFab/FabArray type
BC: functor for filling physical boundaries
Interp: spatial interpolater
PreInterpHook: pre-interpolation hook
PostInterpHook: post-interpolation hook
Parameters
mf: destination MF on the fine level
time: time associated with mf
cmf: source MF on the coarse level
scomp: starting component of the source MF
dcomp: starting component of the destination MF
ncomp: number of components
cgeom: Geometry for the coarse level
fgeom: Geometry for the fine level
cbc: functor for physical boundaries on the coarse level
cbccomp: starting component for cbc
fbc: functor for physical boundaries on the fine level
fbccomp: starting component for fbc
ratio: refinement ratio
mapper: spatial interpolater
bcs: boundary types for each component. We need this because some interpolaters need it.
bcscomp: starting component for bcs
pre_interp: pre-interpolation hook
post_interp: post-interpolation hook

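A minimal calling sketch; fine_mf, crse_mf, cgeom, fgeom, ncomp and time are assumed to be set up by the caller, and PhysBCFunctNoOp is used because this sketch assumes no physical-boundary work is needed:

    amrex::PhysBCFunctNoOp cbc, fbc;             // no-op physical BCs (assumption)
    amrex::Vector<amrex::BCRec> bcs(ncomp);      // boundary types per component
    amrex::InterpFromCoarseLevel(fine_mf, time, crse_mf,
                                 0, 0, ncomp,    // scomp, dcomp, ncomp
                                 cgeom, fgeom,
                                 cbc, 0, fbc, 0, // BC functors and their components
                                 amrex::IntVect(2),         // refinement ratio of 2
                                 &amrex::cell_cons_interp,  // conservative interpolater
                                 bcs, 0);
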
◆ intersect() [1/6]

void amrex::intersect ( BoxDomain dest,
const BoxDomain fin,
const Box b 
)

Compute the intersection of BoxDomain fin with Box b and place the result into BoxDomain dest.

◆ intersect() [2/6]

BoxArray amrex::intersect ( const BoxArray ba,
const Box b,
const IntVect ng 
)

◆ intersect() [3/6]

BoxArray amrex::intersect ( const BoxArray ba,
const Box b,
int  ng 
)

Make a BoxArray from the intersection of Box b and BoxArray ba grown by ng ghost cells.

◆ intersect() [4/6]

BoxList amrex::intersect ( const BoxArray ba,
const BoxList bl 
)

Make a BoxList from the intersection of BoxArray and BoxList.

◆ intersect() [5/6]

BoxArray amrex::intersect ( const BoxArray lhs,
const BoxArray rhs 
)

Make a BoxArray from the intersection of two BoxArrays.

◆ intersect() [6/6]

BoxList amrex::intersect ( const BoxList bl,
const Box b 
)

Returns a BoxList defining the intersection of bl with b.

◆ IntVectCat()

template<int d, int... dims>
__host__ __device__ constexpr IntVectND< detail::get_sum< d, dims... >()> amrex::IntVectCat ( const IntVectND< d > &  v,
const IntVectND< dims > &...  vects 
)
inlineconstexprnoexcept

Returns an IntVectND obtained by concatenating the input IntVectNDs. The dimension of the return value equals the sum of the dimensions of the input IntVectNDs.

◆ IntVectExpand()

template<int new_dim, int old_dim>
__host__ __device__ constexpr IntVectND< new_dim > amrex::IntVectExpand ( const IntVectND< old_dim > &  iv,
int  fill_extra = 0 
)
inlineconstexprnoexcept

Returns a new IntVectND of size new_dim and assigns all values of iv to it and fill_extra to the remaining elements.

◆ IntVectND() [1/2]

template<std::size_t dim>
__host__ __device__ amrex::IntVectND ( const Array< int, dim > &  ) -> IntVectND< dim >

◆ IntVectND() [2/2]

template<class... Args, std::enable_if_t< IsConvertible_v< int, Args... >, int > = 0>
__host__ __device__ amrex::IntVectND ( int  ,
int  ,
Args...   
) -> IntVectND< sizeof...(Args)+2 >

◆ IntVectResize()

template<int new_dim, int old_dim>
__host__ __device__ constexpr IntVectND< new_dim > amrex::IntVectResize ( const IntVectND< old_dim > &  iv,
int  fill_extra = 0 
)
inlineconstexprnoexcept

Returns a new IntVectND of size new_dim by either shrinking or expanding iv.

◆ IntVectShrink()

template<int new_dim, int old_dim>
__host__ __device__ constexpr IntVectND< new_dim > amrex::IntVectShrink ( const IntVectND< old_dim > &  iv)
inlineconstexprnoexcept

Returns a new IntVectND of size new_dim and assigns the first new_dim values of iv to it.

◆ IntVectSplit()

template<int d, int... dims>
__host__ __device__ constexpr GpuTuple< IntVectND< d >, IntVectND< dims >... > amrex::IntVectSplit ( const IntVectND< detail::get_sum< d, dims... >()> &  v)
inlineconstexprnoexcept

Returns a tuple of IntVectND obtained by splitting the input IntVectND according to the dimensions specified by the template arguments.

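For example, a 5D IntVectND can be split into a 2D and a 3D piece:

    amrex::IntVectND<5> v{1, 2, 3, 4, 5};
    auto t = amrex::IntVectSplit<2, 3>(v);
    auto iv2 = amrex::get<0>(t);  // IntVectND<2> containing (1,2)
    auto iv3 = amrex::get<1>(t);  // IntVectND<3> containing (3,4,5)
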
◆ InvNormDist()

double amrex::InvNormDist ( double  p)

This function returns an approximation of the inverse cumulative standard normal distribution function. I.e., given P, it returns an approximation to the X satisfying P = Pr{Z <= X} where Z is a random variable from the standard normal distribution.

The algorithm uses a minimax approximation by rational functions and the result has a relative error whose absolute value is less than 1.15e-9.

Author
Peter J. Acklam Time-stamp: 2002-06-09 18:45:44 +0200 E-mail: jacklam@math.uio.no WWW URL: http://www.math.uio.no/~jacklam

"p" MUST be in the open interval (0,1).

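For example, the 97.5th percentile of the standard normal distribution:

    double x = amrex::InvNormDist(0.975);  // x is approximately 1.95996
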
◆ InvNormDistBest()

double amrex::InvNormDistBest ( double  p)

This function returns an approximation of the inverse cumulative standard normal distribution function. I.e., given P, it returns an approximation to the X satisfying P = Pr{Z <= X} where Z is a random variable from the standard normal distribution.

Original FORTRAN77 version by Michael Wichura.

Michael Wichura, The Percentage Points of the Normal Distribution, Algorithm AS 241, Applied Statistics, Volume 37, Number 3, pages 477-484, 1988.

Our version is based on the C++ version by John Burkardt.

The algorithm uses a minimax approximation by rational functions and the result is good to roughly machine precision. This routine is roughly 30% more costly than InvNormDist() above.

"p" MUST be in the open interval (0,1).

◆ is_aligned()

bool amrex::is_aligned ( const void *  p,
std::size_t  alignment 
)
inlinenoexcept

Return whether the address p is aligned to alignment bytes.

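For example, to check the alignment of an arena allocation (a sketch; whether the test passes depends on the arena's alignment guarantees):

    void* p = amrex::The_Arena()->alloc(1024);
    bool ok = amrex::is_aligned(p, 64);  // true if p is 64-byte aligned
    amrex::The_Arena()->free(p);
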
◆ is_integer()

bool amrex::is_integer ( const char *  str)

Useful C++ Utility Functions.

Return true if argument is a non-zero length string of digits.

◆ is_it()

template<typename T >
bool amrex::is_it ( std::string const &  s,
T &  v 
)

Return true and store value in v if string s is type T.

◆ isEmpty() [1/2]

template<int dim>
bool amrex::isEmpty ( BoxND< dim > const &  b)
inlinenoexcept

◆ isEmpty() [2/2]

template<typename T , std::enable_if_t< std::is_integral_v< T >, int > = 0>
bool amrex::isEmpty ( T  n)
noexcept

◆ isMFIterSafe()

bool amrex::isMFIterSafe ( const FabArrayBase x,
const FabArrayBase y 
)
inline

Is it safe to have these two MultiFabs in the same MFIter? True means safe; false means maybe.

◆ IsParticleTileData() [1/2]

template<class T >
constexpr decltype(T::is_particle_tile_data) amrex::IsParticleTileData ( )
constexpr

◆ IsParticleTileData() [2/2]

template<class T , class... Args>
constexpr bool amrex::IsParticleTileData ( Args...  )
constexpr

◆ isSame()

template<typename A , typename B , std::enable_if_t< std::is_same_v< std::remove_cv_t< A >, std::remove_cv_t< B > >, int > = 0>
bool amrex::isSame ( A const *  pa,
B const *  pb 
)

◆ join() [1/2]

std::string amrex::join ( std::vector< std::string > const &  sv)

Join a vector of strings without delimiter.

◆ join() [2/2]

std::string amrex::join ( std::vector< std::string > const &  sv,
char  sep 
)

Join a vector of strings with given char sep as delimiter.

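For example:

    std::vector<std::string> sv{"plt00005", "Level_0", "Cell"};
    std::string a = amrex::join(sv);       // "plt00005Level_0Cell"
    std::string b = amrex::join(sv, '/');  // "plt00005/Level_0/Cell"
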
◆ launch() [1/8]

template<int MT, int dim, typename L >
void amrex::launch ( BoxND< dim > const &  box,
L const &  f 
)
noexcept

◆ launch() [2/8]

template<int MT, typename L >
void amrex::launch ( int  nblocks,
gpuStream_t  stream,
L const &  f 
)
noexcept

◆ launch() [3/8]

template<typename L >
void amrex::launch ( int  nblocks,
int  nthreads_per_block,
gpuStream_t  stream,
L &&  f 
)
noexcept

◆ launch() [4/8]

template<typename L >
void amrex::launch ( int  nblocks,
int  nthreads_per_block,
std::size_t  shared_mem_bytes,
gpuStream_t  stream,
L const &  f 
)
noexcept

◆ launch() [5/8]

template<int MT, typename L >
void amrex::launch ( int  nblocks,
std::size_t  shared_mem_bytes,
gpuStream_t  stream,
L const &  f 
)
noexcept

◆ launch() [6/8]

template<typename T , typename L >
void amrex::launch ( T const &  n,
L &&  f 
)
noexcept

◆ launch() [7/8]

template<int MT, typename T , typename L >
void amrex::launch ( T const &  n,
L &&  f 
)
noexcept

◆ launch() [8/8]

template<int MT, typename T , typename L , std::enable_if_t< std::is_integral_v< T >, int > FOO = 0>
void amrex::launch ( T const &  n,
L const &  f 
)
noexcept

◆ launch_global() [1/2]

template<class L >
__global__ void amrex::launch_global ( L  f0)

◆ launch_global() [2/2]

template<class L , class... Lambdas>
__global__ void amrex::launch_global ( L  f0,
Lambdas...  fs 
)

◆ launch_host() [1/2]

template<class L >
void amrex::launch_host ( L &&  f0)
noexcept

◆ launch_host() [2/2]

template<class L , class... Lambdas>
void amrex::launch_host ( L &&  f0,
Lambdas &&...  fs 
)
noexcept

◆ lbound() [1/2]

template<class T >
__host__ __device__ Dim3 amrex::lbound ( Array4< T > const &  a)
inlinenoexcept

◆ lbound() [2/2]

template<int dim, std::enable_if_t<(1<=dim &&dim<=3), int > = 0>
__host__ __device__ Dim3 amrex::lbound ( BoxND< dim > const &  box)
inlinenoexcept

◆ lbound_iv()

template<int dim>
__host__ __device__ IntVectND< dim > amrex::lbound_iv ( BoxND< dim > const &  box)
inlinenoexcept

◆ length() [1/2]

template<class T >
__host__ __device__ Dim3 amrex::length ( Array4< T > const &  a)
inlinenoexcept

◆ length() [2/2]

template<int dim, std::enable_if_t<(1<=dim &&dim<=3), int > = 0>
__host__ __device__ Dim3 amrex::length ( BoxND< dim > const &  box)
inlinenoexcept

◆ length_iv()

template<int dim>
__host__ __device__ IntVectND< dim > amrex::length_iv ( BoxND< dim > const &  box)
inlinenoexcept

◆ LevelFullPath()

std::string amrex::LevelFullPath ( int  level,
const std::string &  plotfilename,
const std::string &  levelPrefix 
)

return the full path of the level directory, e.g., plt00005/Level_5

◆ LevelPath()

std::string amrex::LevelPath ( int  level,
const std::string &  levelPrefix 
)

return the name of the level directory, e.g., Level_5

◆ LinComb() [1/3]

template<typename T , typename Allocator >
void amrex::LinComb ( AlgVector< T, Allocator > &  y,
T  a,
AlgVector< T, Allocator > const &  xa,
T  b,
AlgVector< T, Allocator > const &  xb 
)

y = a*xa + b*xb. For GPU builds, this function is asynchronous with respect to the host.

◆ LinComb() [2/3]

template<class MF , std::size_t N, std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0>
void amrex::LinComb ( Array< MF, N > &  dst,
typename MF::value_type  a,
Array< MF, N > const &  src_a,
int  acomp,
typename MF::value_type  b,
Array< MF, N > const &  src_b,
int  bcomp,
int  dcomp,
int  ncomp,
IntVect const &  nghost 
)

dst = a*src_a + b*src_b

◆ LinComb() [3/3]

template<class MF , std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0>
void amrex::LinComb ( MF &  dst,
typename MF::value_type  a,
MF const &  src_a,
int  acomp,
typename MF::value_type  b,
MF const &  src_b,
int  bcomp,
int  dcomp,
int  ncomp,
IntVect const &  nghost 
)

dst = a*src_a + b*src_b

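A minimal sketch, assuming MultiFabs dst, u and v are built on the same BoxArray and DistributionMapping with at least ncomp components:

    // dst = 0.5*u + 0.5*v over ncomp components, no ghost cells
    amrex::LinComb(dst, amrex::Real(0.5), u, 0, amrex::Real(0.5), v, 0,
                   0, ncomp, amrex::IntVect(0));
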
◆ linspace()

template<typename ItType , typename ValType , std::enable_if_t< std::is_floating_point_v< typename std::iterator_traits< ItType >::value_type > &&std::is_floating_point_v< ValType >, int > = 0>
__host__ __device__ void amrex::linspace ( ItType  first,
const ItType &  last,
const ValType &  start,
const ValType &  stop 
)

Fill a range with linearly spaced values over a closed interval.

This function assigns linearly spaced floating-point values to the range [first, last), starting at start and ending at stop. The value range is inclusive at both ends such that the first element is set to start and the last element to stop exactly. Note that this function does nothing when the range contains fewer than two elements (i.e., last-first < 2).

Template Parameters
ItType: iterator type.
ValType: floating-point value type.
Parameters
first: pointing to the first element of the output range.
last: pointing one past the last element of the output range.
start: start value.
stop: stop value.

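For example, filling a host vector with five evenly spaced values on [0,1]:

    std::vector<double> xs(5);
    amrex::linspace(xs.begin(), xs.end(), 0.0, 1.0);
    // xs = {0.0, 0.25, 0.5, 0.75, 1.0}
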
◆ LocalAdd() [1/2]

template<class MF , std::size_t N, std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0>
void amrex::LocalAdd ( Array< MF, N > &  dst,
Array< MF, N > const &  src,
int  scomp,
int  dcomp,
int  ncomp,
IntVect const &  nghost 
)

dst += src

◆ LocalAdd() [2/2]

template<class MF , std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0>
void amrex::LocalAdd ( MF &  dst,
MF const &  src,
int  scomp,
int  dcomp,
int  ncomp,
IntVect const &  nghost 
)

dst += src

◆ LocalCopy() [1/2]

template<class DMF , class SMF , std::size_t N, std::enable_if_t< IsMultiFabLike_v< DMF > &&IsMultiFabLike_v< SMF >, int > = 0>
void amrex::LocalCopy ( Array< DMF, N > &  dst,
Array< SMF, N > const &  src,
int  scomp,
int  dcomp,
int  ncomp,
IntVect const &  nghost 
)

dst = src

◆ LocalCopy() [2/2]

template<class DMF , class SMF , std::enable_if_t< IsMultiFabLike_v< DMF > &&IsMultiFabLike_v< SMF >, int > = 0>
void amrex::LocalCopy ( DMF &  dst,
SMF const &  src,
int  scomp,
int  dcomp,
int  ncomp,
IntVect const &  nghost 
)

dst = src

◆ log()

template<typename T >
__host__ __device__ GpuComplex< T > amrex::log ( const GpuComplex< T > &  a_z)
inlinenoexcept

Complex natural logarithm function.

◆ logspace()

template<typename ItType , typename ValType , std::enable_if_t< std::is_floating_point_v< typename std::iterator_traits< ItType >::value_type > &&std::is_floating_point_v< ValType >, int > = 0>
__host__ __device__ void amrex::logspace ( ItType  first,
const ItType &  last,
const ValType &  start,
const ValType &  stop,
const ValType &  base 
)

Fill a range with logarithmically spaced values over a closed interval.

This function assigns logarithmically spaced floating-point values to the range [first, last), starting at base^start and ending at base^stop. The value range is inclusive at both ends such that the first element is set to base^start and the last element to base^stop exactly. Note that this function does nothing when the range contains fewer than two elements (i.e., last-first < 2).

Template Parameters
ItType: iterator type.
ValType: floating-point value type.
Parameters
first: pointing to the first element of the output range.
last: pointing one past the last element of the output range.
start: start value.
stop: stop value.
base: base of the exponential.

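For example, four logarithmically spaced points from 10^0 to 10^3:

    std::vector<double> ys(4);
    amrex::logspace(ys.begin(), ys.end(), 0.0, 3.0, 10.0);
    // ys = {1.0, 10.0, 100.0, 1000.0}
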
◆ Loop() [1/4]

template<class F , int dim>
__host__ __device__ void amrex::Loop ( BoxND< dim > const &  bx,
F const &  f 
)
noexcept

◆ Loop() [2/4]

template<class F , int dim>
__host__ __device__ void amrex::Loop ( BoxND< dim > const &  bx,
int  ncomp,
F const &  f 
)
noexcept

◆ Loop() [3/4]

template<class F >
__host__ __device__ void amrex::Loop ( Dim3  lo,
Dim3  hi,
F const &  f 
)
noexcept

◆ Loop() [4/4]

template<class F >
__host__ __device__ void amrex::Loop ( Dim3  lo,
Dim3  hi,
int  ncomp,
F const &  f 
)
noexcept

◆ LoopConcurrent() [1/4]

template<class F , int dim>
__host__ __device__ void amrex::LoopConcurrent ( BoxND< dim > const &  bx,
F const &  f 
)
noexcept

◆ LoopConcurrent() [2/4]

template<class F , int dim>
__host__ __device__ void amrex::LoopConcurrent ( BoxND< dim > const &  bx,
int  ncomp,
F const &  f 
)
noexcept

◆ LoopConcurrent() [3/4]

template<class F >
__host__ __device__ void amrex::LoopConcurrent ( Dim3  lo,
Dim3  hi,
F const &  f 
)
noexcept

◆ LoopConcurrent() [4/4]

template<class F >
__host__ __device__ void amrex::LoopConcurrent ( Dim3  lo,
Dim3  hi,
int  ncomp,
F const &  f 
)
noexcept

◆ LoopConcurrentOnCpu() [1/4]

template<class F , int dim>
void amrex::LoopConcurrentOnCpu ( BoxND< dim > const &  bx,
F const &  f 
)
noexcept

◆ LoopConcurrentOnCpu() [2/4]

template<class F , int dim>
void amrex::LoopConcurrentOnCpu ( BoxND< dim > const &  bx,
int  ncomp,
F const &  f 
)
noexcept

◆ LoopConcurrentOnCpu() [3/4]

template<class F >
void amrex::LoopConcurrentOnCpu ( Dim3  lo,
Dim3  hi,
F const &  f 
)
noexcept

◆ LoopConcurrentOnCpu() [4/4]

template<class F >
void amrex::LoopConcurrentOnCpu ( Dim3  lo,
Dim3  hi,
int  ncomp,
F const &  f 
)
noexcept

◆ LoopOnCpu() [1/4]

template<class F , int dim>
void amrex::LoopOnCpu ( BoxND< dim > const &  bx,
F const &  f 
)
noexcept

◆ LoopOnCpu() [2/4]

template<class F , int dim>
void amrex::LoopOnCpu ( BoxND< dim > const &  bx,
int  ncomp,
F const &  f 
)
noexcept

◆ LoopOnCpu() [3/4]

template<class F >
void amrex::LoopOnCpu ( Dim3  lo,
Dim3  hi,
F const &  f 
)
noexcept

◆ LoopOnCpu() [4/4]

template<class F >
void amrex::LoopOnCpu ( Dim3  lo,
Dim3  hi,
int  ncomp,
F const &  f 
)
noexcept

◆ lower_bound()

template<typename ItType , typename ValType >
__host__ __device__ ItType amrex::lower_bound ( ItType  first,
ItType  last,
const ValType &  val 
)

Return an iterator to the first element not less than a given value.

This function is an implementation of std::lower_bound that works on both host and device.

Template Parameters
ItType: iterator type.
ValType: value type.
Parameters
first: inclusive lower bound of the search range.
last: exclusive upper bound of the search range.
val: value to compare the elements to.
Returns
an iterator pointing to the first element not less than val.

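For example, searching a sorted table (the same call works in host and device code):

    const double table[] = {0.0, 1.0, 2.0, 4.0, 8.0};
    const double* it = amrex::lower_bound(table, table + 5, 3.0);
    // it points to 4.0, the first element not less than 3.0
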
◆ makeArray4()

template<typename T >
__host__ __device__ Array4< T > amrex::makeArray4 ( T *  p,
Box const &  bx,
int  ncomp 
)
inlinenoexcept

◆ makeFineMask() [1/7]

iMultiFab amrex::makeFineMask ( const BoxArray cba,
const DistributionMapping cdm,
const BoxArray fba,
const IntVect ratio,
int  crse_value,
int  fine_value 
)

◆ makeFineMask() [2/7]

MultiFab amrex::makeFineMask ( const BoxArray cba,
const DistributionMapping cdm,
const BoxArray fba,
const IntVect ratio,
Real  crse_value,
Real  fine_value 
)

◆ makeFineMask() [3/7]

iMultiFab amrex::makeFineMask ( const BoxArray cba,
const DistributionMapping cdm,
const IntVect cnghost,
const BoxArray fba,
const IntVect ratio,
Periodicity const &  period,
int  crse_value,
int  fine_value 
)

◆ makeFineMask() [4/7]

template<typename FAB >
iMultiFab amrex::makeFineMask ( const FabArray< FAB > &  cmf,
const BoxArray fba,
const IntVect ratio,
int  crse_value = 0,
int  fine_value = 1 
)

Return an iMultiFab that has the same BoxArray and DistributionMapping as the coarse MultiFab cmf. Cells covered by the coarsened fine grids are set to fine_value, whereas other cells are set to crse_value.

◆ makeFineMask() [5/7]

template<typename FAB >
iMultiFab amrex::makeFineMask ( const FabArray< FAB > &  cmf,
const BoxArray fba,
const IntVect ratio,
Periodicity const &  period,
int  crse_value,
int  fine_value 
)

◆ makeFineMask() [6/7]

template<typename FAB >
iMultiFab amrex::makeFineMask ( const FabArray< FAB > &  cmf,
const FabArray< FAB > &  fmf,
const IntVect cnghost,
const IntVect ratio,
Periodicity const &  period,
int  crse_value,
int  fine_value 
)

◆ makeFineMask() [7/7]

template<typename FAB >
iMultiFab amrex::makeFineMask ( const FabArray< FAB > &  cmf,
const FabArray< FAB > &  fmf,
const IntVect cnghost,
const IntVect ratio,
Periodicity const &  period,
int  crse_value,
int  fine_value,
LayoutData< int > &  has_cf 
)

◆ makeFineMask_doit()

template<typename FAB >
void amrex::makeFineMask_doit ( FabArray< FAB > &  mask,
const BoxArray fba,
const IntVect ratio,
Periodicity const &  period,
typename FAB::value_type  crse_value,
typename FAB::value_type  fine_value 
)

◆ makeHypre()

std::unique_ptr< Hypre > amrex::makeHypre ( const BoxArray grids,
const DistributionMapping dmap,
const Geometry geom,
MPI_Comm  comm_,
Hypre::Interface  interface,
const iMultiFab overset_mask 
)

◆ MakeITracker() [1/2]

void amrex::MakeITracker ( amrex::Box const &  bx,
amrex::Array4< amrex::Real const > const &  apx,
amrex::Array4< amrex::Real const > const &  apy,
amrex::Array4< amrex::Real const > const &  apz,
amrex::Array4< amrex::Real const > const &  vfrac,
amrex::Array4< int > const &  itracker,
amrex::Geometry const &  geom,
amrex::Real  target_volfrac 
)

◆ MakeITracker() [2/2]

void amrex::MakeITracker ( Box const &  bx,
Array4< Real const > const &  apx,
Array4< Real const > const &  apy,
Array4< Real const > const &  apz,
Array4< Real const > const &  vfrac,
Array4< int > const &  itracker,
Geometry const &  lev_geom,
Real  target_volfrac 
)

◆ makePetsc()

std::unique_ptr< PETScABecLap > amrex::makePetsc ( const BoxArray grids,
const DistributionMapping dmap,
const Geometry geom,
MPI_Comm  comm_ 
)

◆ makePolymorphic()

template<typename T >
PolymorphicArray4< T > amrex::makePolymorphic ( Array4< T > const &  a)

◆ MakeSimilarDM() [1/2]

DistributionMapping amrex::MakeSimilarDM ( const BoxArray ba,
const BoxArray src_ba,
const DistributionMapping src_dm,
const IntVect ng 
)

Function that creates a DistributionMapping "similar" to that of a MultiFab.

"Similar" means that, if a box in "ba" intersects with any of the boxes in the BoxArray associated with "mf", taking "ngrow" ghost cells into account, then that box will be assigned to the proc owning the one it has the maximum amount of overlap with.

Parameters
[in] ba: The BoxArray we want to generate a DistributionMapping for.
[in] src_ba: The BoxArray associated with the src DistributionMapping.
[in] src_dm: The input DistributionMapping we want the output to be similar to.
[in] ng: The number of grow cells to use when computing intersection / overlap.
Returns
The computed DistributionMapping.

◆ MakeSimilarDM() [2/2]

DistributionMapping amrex::MakeSimilarDM ( const BoxArray ba,
const MultiFab mf,
const IntVect ng 
)

Function that creates a DistributionMapping "similar" to that of a MultiFab.

"Similar" means that, if a box in "ba" intersects with any of the boxes in the BoxArray associated with "mf", taking "ngrow" ghost cells into account, then that box will be assigned to the proc owning the one it has the maximum amount of overlap with.

Parameters
[in] ba: The BoxArray we want to generate a DistributionMapping for.
[in] mf: The MultiFab we want said DistributionMapping to be similar to.
[in] ng: The number of grow cells to use when computing intersection / overlap.
Returns
The computed DistributionMapping.

◆ makeSingleCellBox() [1/2]

template<int dim = 3, std::enable_if_t<(1<=dim &&dim<=3), int > = 0>
__host__ __device__ BoxND< dim > amrex::makeSingleCellBox ( int  i,
int  j,
int  k,
IndexTypeND< dim >  typ = IndexTypeND<dim>::TheCellType() 
)
inline

◆ makeSingleCellBox() [2/2]

template<int dim>
__host__ __device__ BoxND< dim > amrex::makeSingleCellBox ( IntVectND< dim > const &  vect,
IndexTypeND< dim >  typ = IndexTypeND<dim>::TheCellType() 
)
inline

◆ makeSlab()

template<int dim>
__host__ __device__ BoxND< dim > amrex::makeSlab ( BoxND< dim > const &  b,
int  direction,
int  slab_index 
)
inlinenoexcept

◆ MakeStateRedistUtils() [1/2]

void amrex::MakeStateRedistUtils ( amrex::Box const &  bx,
amrex::Array4< amrex::EBCellFlag const > const &  flag,
amrex::Array4< amrex::Real const > const &  vfrac,
amrex::Array4< amrex::Real const > const &  ccent,
amrex::Array4< int const > const &  itracker,
amrex::Array4< amrex::Real > const &  nrs,
amrex::Array4< amrex::Real > const &  alpha,
amrex::Array4< amrex::Real > const &  nbhd_vol,
amrex::Array4< amrex::Real > const &  cent_hat,
amrex::Geometry const &  geom,
amrex::Real  target_volfrac 
)

◆ MakeStateRedistUtils() [2/2]

void amrex::MakeStateRedistUtils ( Box const &  bx,
Array4< EBCellFlag const > const &  flag,
Array4< Real const > const &  vfrac,
Array4< Real const > const &  ccent,
Array4< int const > const &  itracker,
Array4< Real > const &  nrs,
Array4< Real > const &  alpha,
Array4< Real > const &  nbhd_vol,
Array4< Real > const &  cent_hat,
Geometry const &  lev_geom,
Real  target_vol 
)

◆ makeTuple()

template<typename... Ts>
__host__ __device__ constexpr GpuTuple< detail::tuple_decay_t< Ts >... > amrex::makeTuple ( Ts &&...  args)
constexpr

◆ makeXDim3()

XDim3 amrex::makeXDim3 ( const Array< Real, 3 > &  a)
inlinenoexcept

◆ MakeZeroTuple()

template<typename... Ts>
__host__ __device__ constexpr GpuTuple< Ts... > amrex::MakeZeroTuple ( GpuTuple< Ts... >  )
constexprnoexcept

Return a GpuTuple containing all zeros. Note that a default-constructed GpuTuple can have uninitialized values.

◆ match()

bool amrex::match ( const BoxArray x,
const BoxArray y 
)

Note that two BoxArrays that match are not necessarily equal.

◆ max() [1/4]

template<int dim>
__host__ __device__ constexpr IntVectND< dim > amrex::max ( const IntVectND< dim > &  p1,
const IntVectND< dim > &  p2 
)
inlineconstexprnoexcept

Returns the IntVectND that is the component-wise maximum of two argument IntVectNDs.

◆ max() [2/4]

template<int dim>
__host__ __device__ RealVectND< dim > amrex::max ( const RealVectND< dim > &  p1,
const RealVectND< dim > &  p2 
)
inlinenoexcept

Returns the RealVectND that is the component-wise maximum of two argument RealVectNDs.

◆ max() [3/4]

template<class T >
__host__ __device__ constexpr const T & amrex::max ( const T &  a,
const T &  b 
)
inlineconstexprnoexcept

Return the greater value. This function was added to AMReX to support GPU before std::max was constexpr in C++14. std::max can now be used directly instead.

◆ max() [4/4]

template<class T , class ... Ts>
__host__ __device__ constexpr const T & amrex::max ( const T &  a,
const T &  b,
const Ts &...  c 
)
inlineconstexprnoexcept

Return the greatest value. This function was added to AMReX to support GPU before std::max was constexpr in C++14. std::max can now be used directly instead.

◆ max_lbound() [1/2]

template<int dim, std::enable_if_t<(1<=dim &&dim<=3), int > = 0>
__host__ __device__ Dim3 amrex::max_lbound ( BoxND< dim > const &  b1,
BoxND< dim > const &  b2 
)
inlinenoexcept

◆ max_lbound() [2/2]

template<int dim, std::enable_if_t<(1<=dim &&dim<=3), int > = 0>
__host__ __device__ Dim3 amrex::max_lbound ( BoxND< dim > const &  b1,
Dim3 const &  lo 
)
inlinenoexcept

◆ max_lbound_iv() [1/2]

template<int dim>
__host__ __device__ IntVectND< dim > amrex::max_lbound_iv ( BoxND< dim > const &  b1,
BoxND< dim > const &  b2 
)
inlinenoexcept

◆ max_lbound_iv() [2/2]

template<int dim>
__host__ __device__ IntVectND< dim > amrex::max_lbound_iv ( BoxND< dim > const &  b1,
IntVectND< dim > const &  lo 
)
inlinenoexcept

◆ MeshToParticle()

template<class PC , class MF , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
void amrex::MeshToParticle ( PC &  pc,
MF const &  mf,
int  lev,
F const &  f 
)

◆ min() [1/4]

template<int dim>
__host__ __device__ constexpr IntVectND< dim > amrex::min ( const IntVectND< dim > &  p1,
const IntVectND< dim > &  p2 
)
inlineconstexprnoexcept

Returns the IntVectND that is the component-wise minimum of two argument IntVectNDs.

◆ min() [2/4]

template<int dim>
__host__ __device__ RealVectND< dim > amrex::min ( const RealVectND< dim > &  p1,
const RealVectND< dim > &  p2 
)
inlinenoexcept

Returns the RealVectND that is the component-wise minimum of two argument RealVectNDs.

◆ min() [3/4]

template<class T >
__host__ __device__ constexpr const T & amrex::min ( const T &  a,
const T &  b 
)
inlineconstexprnoexcept

Return the smaller value. This function was added to AMReX to support GPU before std::min was constexpr in C++14. std::min can now be used directly instead.

◆ min() [4/4]

template<class T , class ... Ts>
__host__ __device__ constexpr const T & amrex::min ( const T &  a,
const T &  b,
const Ts &...  c 
)
inlineconstexprnoexcept

Return the smallest value. This function was added to AMReX to support GPU before std::min was constexpr in C++14. std::min can now be used directly instead.

◆ min_ubound() [1/2]

template<int dim, std::enable_if_t<(1<=dim &&dim<=3), int > = 0>
__host__ __device__ Dim3 amrex::min_ubound ( BoxND< dim > const &  b1,
BoxND< dim > const &  b2 
)
inlinenoexcept

◆ min_ubound() [2/2]

template<int dim, std::enable_if_t<(1<=dim &&dim<=3), int > = 0>
__host__ __device__ Dim3 amrex::min_ubound ( BoxND< dim > const &  b1,
Dim3 const &  hi 
)
inlinenoexcept

◆ min_ubound_iv() [1/2]

template<int dim>
__host__ __device__ IntVectND< dim > amrex::min_ubound_iv ( BoxND< dim > const &  b1,
BoxND< dim > const &  b2 
)
inlinenoexcept

◆ min_ubound_iv() [2/2]

template<int dim>
__host__ __device__ IntVectND< dim > amrex::min_ubound_iv ( BoxND< dim > const &  b1,
IntVectND< dim > const &  hi 
)
inlinenoexcept

◆ minBox()

template<int dim>
__host__ __device__ BoxND< dim > amrex::minBox ( const BoxND< dim > &  b1,
const BoxND< dim > &  b2 
)
inlinenoexcept

Returns the minimum BoxND containing both b1 and b2. Both BoxNDs must have identical type.

◆ MLStateRedistribute() [1/2]

void amrex::MLStateRedistribute ( amrex::Box const &  bx,
int  ncomp,
amrex::Array4< amrex::Real > const &  U_out,
amrex::Array4< amrex::Real > const &  U_in,
amrex::Array4< amrex::EBCellFlag const > const &  flag,
amrex::Array4< amrex::Real const > const &  vfrac,
amrex::Array4< amrex::Real const > const &  fcx,
amrex::Array4< amrex::Real const > const &  fcy,
amrex::Array4< amrex::Real const > const &  fcz,
amrex::Array4< amrex::Real const > const &  ccent,
amrex::BCRec const *  d_bcrec_ptr,
amrex::Array4< int const > const &  itracker,
amrex::Array4< amrex::Real const > const &  nrs,
amrex::Array4< amrex::Real const > const &  alpha,
amrex::Array4< amrex::Real const > const &  nbhd_vol,
amrex::Array4< amrex::Real const > const &  cent_hat,
amrex::Geometry const &  geom,
int  as_crse,
Array4< Real > const &  drho_as_crse,
Array4< int const > const &  flag_as_crse,
int  as_fine,
Array4< Real > const &  dm_as_fine,
Array4< int const > const &  levmsk,
int  is_ghost_cell,
amrex::Real  fac_for_deltaR,
int  max_order = 2 
)

◆ MLStateRedistribute() [2/2]

void amrex::MLStateRedistribute ( Box const &  bx,
int  ncomp,
Array4< Real > const &  U_out,
Array4< Real > const &  U_in,
Array4< EBCellFlag const > const &  flag,
Array4< Real const > const &  vfrac,
Array4< Real const > const &  fcx,
Array4< Real const > const &  fcy,
Array4< Real const > const &  fcz,
Array4< Real const > const &  ccent,
amrex::BCRec const *  d_bcrec_ptr,
Array4< int const > const &  itracker,
Array4< Real const > const &  nrs,
Array4< Real const > const &  alpha,
Array4< Real const > const &  nbhd_vol,
Array4< Real const > const &  cent_hat,
Geometry const &  lev_geom,
int  as_crse,
Array4< Real > const &  drho_as_crse,
Array4< int const > const &  flag_as_crse,
int  as_fine,
Array4< Real > const &  dm_as_fine,
Array4< int const > const &  levmsk,
int  is_ghost_cell,
Real  fac_for_deltaR,
int  max_order 
)

◆ MultiFabFileFullPrefix()

std::string amrex::MultiFabFileFullPrefix ( int  level,
const std::string &  plotfilename,
const std::string &  levelPrefix,
const std::string &  mfPrefix 
)

return the full path multifab prefix, e.g., plt00005/Level_5/Cell

◆ MultiFabHeaderPath()

std::string amrex::MultiFabHeaderPath ( int  level,
const std::string &  levelPrefix,
const std::string &  mfPrefix 
)

return the path of the multifab to write to the header, e.g., Level_5/Cell

◆ Multiply() [1/2]

template<class FAB , class bar = std::enable_if_t<IsBaseFab<FAB>::value>>
void amrex::Multiply ( FabArray< FAB > &  dst,
FabArray< FAB > const &  src,
int  srccomp,
int  dstcomp,
int  numcomp,
const IntVect nghost 
)

◆ Multiply() [2/2]

template<class FAB , class bar = std::enable_if_t<IsBaseFab<FAB>::value>>
void amrex::Multiply ( FabArray< FAB > &  dst,
FabArray< FAB > const &  src,
int  srccomp,
int  dstcomp,
int  numcomp,
int  nghost 
)

◆ nBytesOwned() [1/2]

template<typename T >
Long amrex::nBytesOwned ( BaseFab< T > const &  fab)
noexcept

◆ nBytesOwned() [2/2]

template<typename T , std::enable_if_t<!IsBaseFab< T >::value, int > = 0>
Long amrex::nBytesOwned ( T const &  )
noexcept

◆ nComp() [1/2]

template<class MF , std::size_t N, std::enable_if_t< IsMultiFabLike_v< MF > &&(N > 0), int > = 0>
int amrex::nComp ( Array< MF, N > const &  mf)

◆ nComp() [2/2]

int amrex::nComp ( FabArrayBase const &  fa)

◆ nGrowVect() [1/2]

template<class MF , std::size_t N, std::enable_if_t< IsMultiFabLike_v< MF > &&(N > 0), int > = 0>
IntVect amrex::nGrowVect ( Array< MF, N > const &  mf)

◆ nGrowVect() [2/2]

IntVect amrex::nGrowVect ( FabArrayBase const &  fa)

◆ norm()

template<typename T >
__host__ __device__ T amrex::norm ( const GpuComplex< T > &  a_z)
inlinenoexcept

Return the norm (magnitude squared) of a complex number.

◆ NormHelper() [1/2]

template<typename MMF , typename Pred , typename F >
Real amrex::NormHelper ( const MMF &  mask,
const MultiFab x,
int  xcomp,
const MultiFab y,
int  ycomp,
Pred const &  pf,
F const &  f,
int  numcomp,
IntVect  nghost,
bool  local 
)

Returns part of a norm based on three MultiFabs.

The MultiFabs MUST have the same underlying BoxArray. The predicate pf is used to test the mask. The function f is applied elementwise as f(x(i,j,k,n),y(i,j,k,n)) inside the summation, subject to a valid mask entry pf(mask(i,j,k,n)).

◆ NormHelper() [2/2]

template<typename F >
Real amrex::NormHelper ( const MultiFab x,
int  xcomp,
const MultiFab y,
int  ycomp,
F const &  f,
int  numcomp,
IntVect  nghost,
bool  local 
)

Returns part of a norm based on two MultiFabs.

The MultiFabs MUST have the same underlying BoxArray. The function f is applied elementwise as f(x(i,j,k,n),y(i,j,k,n)) inside the summation.

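A minimal sketch of the two-MultiFab form, assuming x and y share a BoxArray; here f multiplies the entries, so the local sum is a dot product over component 0, which the caller then reduces across ranks:

    amrex::Real s = amrex::NormHelper(x, 0, y, 0,
        [=] AMREX_GPU_DEVICE (amrex::Real a, amrex::Real b) { return a*b; },
        1, amrex::IntVect(0), true);  // 1 component, no ghost cells, local sum
    amrex::ParallelDescriptor::ReduceRealSum(s);
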
◆ norminf() [1/2]

template<class MF , std::size_t N, std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0>
MF::value_type amrex::norminf ( Array< MF, N > const &  mf,
int  scomp,
int  ncomp,
IntVect const &  nghost,
bool  local = false 
)

◆ norminf() [2/2]

template<class MF , std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0>
MF::value_type amrex::norminf ( MF const &  mf,
int  scomp,
int  ncomp,
IntVect const &  nghost,
bool  local = false 
)

◆ numParticlesOutOfRange() [1/6]

template<class Iterator , std::enable_if_t< IsParticleIterator< Iterator >::value, int > foo = 0>
int amrex::numParticlesOutOfRange ( Iterator const &  pti,
int  nGrow 
)

Returns the number of particles that are more than nGrow cells from the box corresponding to the input iterator.

Template Parameters
Iterator: an AMReX ParticleIterator
Parameters
pti: the iterator pointing to the current grid/tile to test
nGrow: the number of grow cells allowed.

◆ numParticlesOutOfRange() [2/6]

template<class Iterator , std::enable_if_t< IsParticleIterator< Iterator >::value, int > foo = 0>
int amrex::numParticlesOutOfRange ( Iterator const &  pti,
IntVect  nGrow 
)

Returns the number of particles that are more than nGrow cells from the box corresponding to the input iterator.

Template Parameters
Iterator: an AMReX ParticleIterator
Parameters
pti: the iterator pointing to the current grid/tile to test
nGrow: the number of grow cells allowed.

◆ numParticlesOutOfRange() [3/6]

template<class PC , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
int amrex::numParticlesOutOfRange ( PC const &  pc,
int  lev_min,
int  lev_max,
int  nGrow 
)

Returns the number of particles that are more than nGrow cells from their assigned box.

This version goes over only the specified levels.

Template Parameters
PC: a type of AMReX particle container.
Parameters
pc: the particle container to test
lev_min: the minimum level to test
lev_max: the maximum level to test
nGrow: the number of grow cells allowed.

◆ numParticlesOutOfRange() [4/6]

template<class PC , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
int amrex::numParticlesOutOfRange ( PC const &  pc,
int  lev_min,
int  lev_max,
IntVect  nGrow 
)

Returns the number of particles that are more than nGrow cells from their assigned box.

This version goes over only the specified levels.

Template Parameters
PC: a type of AMReX particle container.
Parameters
pc: the particle container to test
lev_min: the minimum level to test
lev_max: the maximum level to test
nGrow: the number of grow cells allowed.

◆ numParticlesOutOfRange() [5/6]

template<class PC , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
int amrex::numParticlesOutOfRange ( PC const &  pc,
int  nGrow 
)

Returns the number of particles that are more than nGrow cells from their assigned box.

This version tests over all levels.

Template Parameters
PC: a type of AMReX particle container.
Parameters
pc: the particle container to test
nGrow: the number of grow cells allowed.

◆ numParticlesOutOfRange() [6/6]

template<class PC , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
int amrex::numParticlesOutOfRange ( PC const &  pc,
IntVect  nGrow 
)

Returns the number of particles that are more than nGrow cells from their assigned box.

This version tests over all levels.

Template Parameters
PC: a type of AMReX particle container.
Parameters
pc: the particle container to test
nGrow: the number of grow cells allowed.

◆ numTilesInBox()

__host__ __device__ int amrex::numTilesInBox ( const Box box,
const bool  a_do_tiling,
const IntVect a_tile_size 
)
inline

◆ numUniquePhysicalCores()

int amrex::numUniquePhysicalCores ( )

Return the number of unique physical cores on the node.

◆ operator!=()

template<typename A1 , typename A2 , std::enable_if_t< IsArenaAllocator< A1 >::value &&IsArenaAllocator< A2 >::value, int > = 0>
bool amrex::operator!= ( A1 const &  a1,
A2 const &  a2 
)

◆ operator&()

FPExcept amrex::operator& ( FPExcept  a,
FPExcept  b 
)
inline

◆ operator*() [1/8]

template<typename T , typename U >
__host__ __device__ GpuComplex< T > amrex::operator* ( const GpuComplex< T > &  a_x,
const GpuComplex< U > &  a_y 
)
inlinenoexcept

Multiply two complex numbers.

◆ operator*() [2/8]

template<typename T , typename U >
__host__ __device__ GpuComplex< T > amrex::operator* ( const GpuComplex< T > &  a_x,
const U &  a_y 
)
inlinenoexcept

Multiply a complex number by a real one.

◆ operator*() [3/8]

template<int dim>
__host__ __device__ RealVectND< dim > amrex::operator* ( const RealVectND< dim > &  s,
const RealVectND< dim > &  p 
)
inlinenoexcept

Returns component-wise product of s and p.

◆ operator*() [4/8]

template<typename T >
__host__ __device__ GpuComplex< T > amrex::operator* ( const T &  a_x,
const GpuComplex< T > &  a_y 
)
inlinenoexcept

Multiply a real number by a complex one.

◆ operator*() [5/8]

template<int dim>
__host__ __device__ constexpr IntVectND< dim > amrex::operator* ( int  s,
const IntVectND< dim > &  p 
)
inlineconstexprnoexcept

Returns p * s.

◆ operator*() [6/8]

template<typename LLs , typename... As>
constexpr auto amrex::operator* ( LLs  ,
TypeList< As... >   
)
constexpr

◆ operator*() [7/8]

template<int dim>
__host__ __device__ RealVectND< dim > amrex::operator* ( Real  s,
const RealVectND< dim > &  p 
)
inlinenoexcept

Returns a RealVectND that is a RealVectND p with each component multiplied by a scalar s.

◆ operator*() [8/8]

template<class U , class V , int N1, int N2, int N3, Order Ord, int SI>
__host__ __device__ decltype(auto) amrex::operator* ( SmallMatrix< U, N1, N2, Ord, SI > const &  lhs,
SmallMatrix< V, N2, N3, Ord, SI > const &  rhs 
)
inline

◆ operator+() [1/9]

template<typename T >
__host__ __device__ GpuComplex< T > amrex::operator+ ( const GpuComplex< T > &  a_x)
inline

Identity operation on a complex number.

◆ operator+() [2/9]

template<typename T >
__host__ __device__ GpuComplex< T > amrex::operator+ ( const GpuComplex< T > &  a_x,
const GpuComplex< T > &  a_y 
)
inlinenoexcept

Add two complex numbers.

◆ operator+() [3/9]

template<typename T >
__host__ __device__ GpuComplex< T > amrex::operator+ ( const GpuComplex< T > &  a_x,
const T &  a_y 
)
inlinenoexcept

Add a real number to a complex one.

◆ operator+() [4/9]

template<int dim>
__host__ __device__ RealVectND< dim > amrex::operator+ ( const RealVectND< dim > &  s,
const RealVectND< dim > &  p 
)
inlinenoexcept

Returns component-wise sum of RealVectNDs s and p.

◆ operator+() [5/9]

template<typename T >
__host__ __device__ GpuComplex< T > amrex::operator+ ( const T &  a_x,
const GpuComplex< T > &  a_y 
)
inlinenoexcept

Add a complex number to a real one.

◆ operator+() [6/9]

template<int dim>
__host__ __device__ constexpr IntVectND< dim > amrex::operator+ ( int  s,
const IntVectND< dim > &  p 
)
inlineconstexprnoexcept

Returns p + s.

◆ operator+() [7/9]

template<int dim>
__host__ __device__ RealVectND< dim > amrex::operator+ ( Real  s,
const RealVectND< dim > &  p 
)
inlinenoexcept

Returns a RealVectND that is a RealVectND p with a scalar s added to each component.

◆ operator+() [8/9]

template<typename... As, typename... Bs>
constexpr auto amrex::operator+ ( TypeList< As... >  ,
TypeList< Bs... >   
)
constexpr

Concatenate two TypeLists.

◆ operator+() [9/9]

__host__ __device__ XDim3 amrex::operator+ ( XDim3 const &  a,
XDim3 const &  b 
)
inline

◆ operator-() [1/8]

template<typename T >
__host__ __device__ GpuComplex< T > amrex::operator- ( const GpuComplex< T > &  a_x)
inline

Negate a complex number.

◆ operator-() [2/8]

template<typename T >
__host__ __device__ GpuComplex< T > amrex::operator- ( const GpuComplex< T > &  a_x,
const GpuComplex< T > &  a_y 
)
inline noexcept

Subtract two complex numbers.

◆ operator-() [3/8]

template<typename T >
__host__ __device__ GpuComplex< T > amrex::operator- ( const GpuComplex< T > &  a_x,
const T &  a_y 
)
inline noexcept

Subtract a real number from a complex one.

◆ operator-() [4/8]

template<int dim>
__host__ __device__ RealVectND< dim > amrex::operator- ( const RealVectND< dim > &  s,
const RealVectND< dim > &  p 
)
inline noexcept

Returns s - p.

◆ operator-() [5/8]

template<typename T >
__host__ __device__ GpuComplex< T > amrex::operator- ( const T &  a_x,
const GpuComplex< T > &  a_y 
)
inline noexcept

Subtract a complex number from a real one.

◆ operator-() [6/8]

template<int dim>
__host__ __device__ constexpr IntVectND< dim > amrex::operator- ( int  s,
const IntVectND< dim > &  p 
)
inline constexpr noexcept

Returns -p + s.

◆ operator-() [7/8]

template<int dim>
__host__ __device__ RealVectND< dim > amrex::operator- ( Real  s,
const RealVectND< dim > &  p 
)
inline noexcept

Returns s - p.

◆ operator-() [8/8]

__host__ __device__ XDim3 amrex::operator- ( XDim3 const &  a,
XDim3 const &  b 
)
inline

◆ operator/() [1/5]

template<typename T >
__host__ __device__ GpuComplex< T > amrex::operator/ ( const GpuComplex< T > &  a_x,
const GpuComplex< T > &  a_y 
)
inline noexcept

Divide a complex number by another one.

◆ operator/() [2/5]

template<typename T >
__host__ __device__ GpuComplex< T > amrex::operator/ ( const GpuComplex< T > &  a_x,
const T &  a_y 
)
inline noexcept

Divide a complex number by a real.

◆ operator/() [3/5]

template<int dim>
__host__ __device__ RealVectND< dim > amrex::operator/ ( const RealVectND< dim > &  s,
const RealVectND< dim > &  p 
)
inline noexcept

Returns component-wise quotient p / s.

◆ operator/() [4/5]

template<typename T >
__host__ __device__ GpuComplex< T > amrex::operator/ ( const T &  a_x,
const GpuComplex< T > &  a_y 
)
inline noexcept

Divide a real number by a complex one.

◆ operator/() [5/5]

template<int dim>
__host__ __device__ RealVectND< dim > amrex::operator/ ( Real  s,
const RealVectND< dim > &  p 
)
inline noexcept

Returns a RealVectND that is a RealVectND p with each component divided by a scalar s.

◆ operator<<() [1/41]

std::ostream & amrex::operator<< ( std::ostream &  os,
const Geometry g 
)

Nice ASCII output.

◆ operator<<() [2/41]

std::ostream & amrex::operator<< ( std::ostream &  os,
const RealBox b 
)

Nice ASCII output.

◆ operator<<() [3/41]

std::ostream & amrex::operator<< ( std::ostream &  os,
AmrMesh const &  amr_mesh 
)

◆ operator<<() [4/41]

template<typename T >
std::ostream & amrex::operator<< ( std::ostream &  os,
Array< T, 3 > const &  a 
)

◆ operator<<() [5/41]

template<typename T , int N, bool C>
std::ostream & amrex::operator<< ( std::ostream &  os,
const ArrayND< T, N, C > &  a 
)

◆ operator<<() [6/41]

std::ostream & amrex::operator<< ( std::ostream &  os,
const BCRec b 
)

◆ operator<<() [7/41]

std::ostream & amrex::operator<< ( std::ostream &  os,
const BoxArray ba 
)

Write a BoxArray to an ostream in ASCII format.

◆ operator<<() [8/41]

std::ostream & amrex::operator<< ( std::ostream &  os,
const BoxArray::RefID id 
)

◆ operator<<() [9/41]

std::ostream & amrex::operator<< ( std::ostream &  os,
const BoxDomain bd 
)

Output a BoxDomain to an ostream in ASCII format.

◆ operator<<() [10/41]

std::ostream & amrex::operator<< ( std::ostream &  os,
const BoxList blist 
)

Output a BoxList to an ostream in ASCII format.

◆ operator<<() [11/41]

template<int dim>
std::ostream & amrex::operator<< ( std::ostream &  os,
const BoxND< dim > &  bx 
)

Write an ASCII representation to the ostream.

◆ operator<<() [12/41]

std::ostream & amrex::operator<< ( std::ostream &  os,
const CArena arena 
)

◆ operator<<() [13/41]

std::ostream & amrex::operator<< ( std::ostream &  os,
const CoordSys c 
)

◆ operator<<() [14/41]

std::ostream & amrex::operator<< ( std::ostream &  os,
const dim3 &  d 
)

◆ operator<<() [15/41]

std::ostream & amrex::operator<< ( std::ostream &  os,
const DistributionMapping pmap 
)

Write a DistributionMapping to an ostream.

◆ operator<<() [16/41]

std::ostream & amrex::operator<< ( std::ostream &  os,
const DistributionMapping::RefID id 
)

◆ operator<<() [17/41]

std::ostream & amrex::operator<< ( std::ostream &  os,
const EBCellFlag flag 
)

◆ operator<<() [18/41]

std::ostream & amrex::operator<< ( std::ostream &  os,
const ErrorList elst 
)

◆ operator<<() [19/41]

std::ostream & amrex::operator<< ( std::ostream &  os,
const FabArrayBase::BDKey id 
)

◆ operator<<() [20/41]

std::ostream & amrex::operator<< ( std::ostream &  os,
const FArrayBox f 
)

◆ operator<<() [21/41]

template<int dim>
std::ostream & amrex::operator<< ( std::ostream &  os,
const IndexTypeND< dim > &  it 
)

Write an IndexTypeND to an ostream in ASCII.

◆ operator<<() [22/41]

std::ostream & amrex::operator<< ( std::ostream &  os,
const IntDescriptor id 
)

Write out an IntDescriptor to an ostream in ASCII.

◆ operator<<() [23/41]

template<int dim>
std::ostream & amrex::operator<< ( std::ostream &  os,
const IntVectND< dim > &  iv 
)

◆ operator<<() [24/41]

std::ostream & amrex::operator<< ( std::ostream &  os,
const LinOpBCType t 
)

◆ operator<<() [25/41]

std::ostream & amrex::operator<< ( std::ostream &  os,
const Mask m 
)

◆ operator<<() [26/41]

std::ostream & amrex::operator<< ( std::ostream &  os,
const MemProfiler::Builds &  builds 
)

◆ operator<<() [27/41]

std::ostream & amrex::operator<< ( std::ostream &  os,
const MemProfiler::Bytes &  bytes 
)

◆ operator<<() [28/41]

std::ostream & amrex::operator<< ( std::ostream &  os,
const Orientation o 
)

Write to an ostream in ASCII format.

◆ operator<<() [29/41]

template<int NReal = 0, int NInt = 0>
std::ostream & amrex::operator<< ( std::ostream &  os,
const Particle< 0, 0 > &  p 
)

◆ operator<<() [30/41]

template<int NInt>
std::ostream & amrex::operator<< ( std::ostream &  os,
const Particle< 0, NInt > &  p 
)

◆ operator<<() [31/41]

template<int NReal>
std::ostream & amrex::operator<< ( std::ostream &  os,
const Particle< NReal, 0 > &  p 
)

◆ operator<<() [32/41]

template<int NReal, int NInt>
std::ostream & amrex::operator<< ( std::ostream &  os,
const Particle< NReal, NInt > &  p 
)

◆ operator<<() [33/41]

std::ostream & amrex::operator<< ( std::ostream &  os,
const amrex::RealDescriptor rd 
)

Write out a RealDescriptor to an ostream in ASCII.

◆ operator<<() [34/41]

template<int dim>
std::ostream & amrex::operator<< ( std::ostream &  os,
const RealVectND< dim > &  p 
)

◆ operator<<() [35/41]

template<typename T , typename S >
std::ostream & amrex::operator<< ( std::ostream &  os,
const std::pair< T, S > &  v 
)

◆ operator<<() [36/41]

template<typename T , std::enable_if_t< std::is_same_v< T, Dim3 > || std::is_same_v< T, XDim3 > > * = nullptr>
std::ostream & amrex::operator<< ( std::ostream &  os,
const T &  d 
)

◆ operator<<() [37/41]

std::ostream & amrex::operator<< ( std::ostream &  os,
const Vector< VisMF::FabOnDisk > &  fa 
)

Write a Vector<FabOnDisk> to an ostream in ASCII.

◆ operator<<() [38/41]

std::ostream & amrex::operator<< ( std::ostream &  os,
const VisMF::FabOnDisk fod 
)

Write a FabOnDisk to an ostream in ASCII.

◆ operator<<() [39/41]

std::ostream & amrex::operator<< ( std::ostream &  os,
const VisMF::Header hd 
)

Write a VisMF::Header to an ostream in ASCII.

◆ operator<<() [40/41]

template<class T , int NRows, int NCols, Order ORDER, int SI>
std::ostream & amrex::operator<< ( std::ostream &  os,
SmallMatrix< T, NRows, NCols, ORDER, SI > const &  mat 
)

◆ operator<<() [41/41]

template<typename U >
std::ostream & amrex::operator<< ( std::ostream &  out,
const GpuComplex< U > &  c 
)

◆ operator==()

template<typename A1 , typename A2 , std::enable_if_t< IsArenaAllocator< A1 >::value && IsArenaAllocator< A2 >::value, int > = 0>
bool amrex::operator== ( A1 const &  a1,
A2 const &  a2 
)

◆ operator>>() [1/16]

std::istream & amrex::operator>> ( std::istream &  is,
const expect exp 
)

◆ operator>>() [2/16]

std::istream & amrex::operator>> ( std::istream &  is,
Geometry g 
)

Nice ASCII input.

◆ operator>>() [3/16]

std::istream & amrex::operator>> ( std::istream &  is,
RealBox b 
)

Nice ASCII input.

◆ operator>>() [4/16]

template<int dim>
std::istream & amrex::operator>> ( std::istream &  is,
BoxND< dim > &  bx 
)

Read from istream.
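
As a minimal sketch, the stream operators round-trip a Box through its ASCII form:

#include <AMReX_BLassert.H>
#include <AMReX_Box.H>
#include <sstream>

amrex::Box b(amrex::IntVect(0), amrex::IntVect(15));
std::stringstream ss;
ss << b;        // ASCII form, e.g. ((0,0,0) (15,15,15) (0,0,0))
amrex::Box b2;
ss >> b2;
AMREX_ALWAYS_ASSERT(b == b2);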

◆ operator>>() [5/16]

std::istream & amrex::operator>> ( std::istream &  is,
CoordSys c 
)

◆ operator>>() [6/16]

std::istream & amrex::operator>> ( std::istream &  is,
FArrayBox f 
)

◆ operator>>() [7/16]

template<int dim>
std::istream & amrex::operator>> ( std::istream &  is,
IndexTypeND< dim > &  it 
)

Read an IndexTypeND from an istream.

◆ operator>>() [8/16]

std::istream & amrex::operator>> ( std::istream &  is,
IntDescriptor id 
)

Read in an IntDescriptor from an istream.

◆ operator>>() [9/16]

template<int dim>
std::istream & amrex::operator>> ( std::istream &  is,
IntVectND< dim > &  iv 
)

◆ operator>>() [10/16]

std::istream & amrex::operator>> ( std::istream &  is,
Mask m 
)

◆ operator>>() [11/16]

std::istream & amrex::operator>> ( std::istream &  is,
Orientation o 
)

◆ operator>>() [12/16]

std::istream & amrex::operator>> ( std::istream &  is,
amrex::RealDescriptor rd 
)

Read in a RealDescriptor from an istream.

◆ operator>>() [13/16]

template<int dim>
std::istream & amrex::operator>> ( std::istream &  is,
RealVectND< dim > &  p 
)

◆ operator>>() [14/16]

std::istream & amrex::operator>> ( std::istream &  is,
Vector< VisMF::FabOnDisk > &  fa 
)

Read a Vector<FabOnDisk> from an istream.

◆ operator>>() [15/16]

std::istream & amrex::operator>> ( std::istream &  is,
VisMF::FabOnDisk fod 
)

Read a FabOnDisk from an istream.

◆ operator>>() [16/16]

std::istream & amrex::operator>> ( std::istream &  is,
VisMF::Header hd 
)

Read a VisMF::Header from an istream.

◆ operator|()

FPExcept amrex::operator| ( FPExcept  a,
FPExcept  b 
)
inline

◆ OutOfMemory()

void amrex::OutOfMemory ( )

Aborts after printing a message indicating out-of-memory, i.e., operator new has failed. This is the "supported" set_new_handler() function for AMReX applications.
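
A minimal sketch of installing the handler manually (amrex::Initialize() normally takes care of this):

#include <AMReX.H>
#include <new>

std::set_new_handler(amrex::OutOfMemory);  // abort with a message when operator new fails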

◆ OutStream()

std::ostream & amrex::OutStream ( )

◆ OverlapMask()

template<class FAB >
FabArray< BaseFab< int > > amrex::OverlapMask ( FabArray< FAB > const &  fa,
IntVect const &  nghost,
Periodicity const &  period 
)

◆ OverrideSync()

template<class FAB , class IFAB , class bar = std::enable_if_t<IsBaseFab<FAB>::value && IsBaseFab<IFAB>::value>>
void amrex::OverrideSync ( FabArray< FAB > &  fa,
FabArray< IFAB > const &  msk,
const Periodicity period 
)

◆ OverrideSync_finish()

template<class FAB , class bar = std::enable_if_t<IsBaseFab<FAB>::value>>
void amrex::OverrideSync_finish ( FabArray< FAB > &  fa)

◆ OverrideSync_nowait()

template<class FAB , class IFAB , class bar = std::enable_if_t<IsBaseFab<FAB>::value && IsBaseFab<IFAB>::value>>
void amrex::OverrideSync_nowait ( FabArray< FAB > &  fa,
FabArray< IFAB > const &  msk,
const Periodicity period 
)

◆ OwnerMask()

std::unique_ptr< iMultiFab > amrex::OwnerMask ( FabArrayBase const &  mf,
const Periodicity period,
const IntVect ngrow 
)

◆ packBuffer()

template<class PC , class Buffer , std::enable_if_t< IsParticleContainer< PC >::value &&std::is_base_of_v< PolymorphicArenaAllocator< typename Buffer::value_type >, Buffer >, int > foo = 0>
void amrex::packBuffer ( const PC &  pc,
const ParticleCopyOp op,
const ParticleCopyPlan plan,
Buffer &  snd_buffer 
)

◆ ParallelCopy() [1/2]

template<class MF , std::size_t N, std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0>
void amrex::ParallelCopy ( Array< MF, N > &  dst,
Array< MF, N > const &  src,
int  scomp,
int  dcomp,
int  ncomp,
IntVect const &  ng_src = IntVect(0),
IntVect const &  ng_dst = IntVect(0),
Periodicity const &  period = Periodicity::NonPeriodic() 
)

dst = src with MPI communication.

◆ ParallelCopy() [2/2]

template<class MF , std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0>
void amrex::ParallelCopy ( MF &  dst,
MF const &  src,
int  scomp,
int  dcomp,
int  ncomp,
IntVect const &  ng_src = IntVect(0),
IntVect const &  ng_dst = IntVect(0),
Periodicity const &  period = Periodicity::NonPeriodic() 
)

dst = src with MPI communication.
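
A minimal sketch, assuming dst and src are MultiFabs that may live on different BoxArrays and/or DistributionMappings:

// Copy one component from src into dst, starting at component 0 in both,
// with no ghost cells involved; data is moved with MPI where needed.
amrex::ParallelCopy(dst, src, 0, 0, 1);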

◆ ParallelFor() [1/43]

template<typename L , int dim>
AMREX_ATTRIBUTE_FLATTEN_FOR void amrex::ParallelFor ( BoxND< dim > const &  box,
L const &  f 
)
noexcept

◆ ParallelFor() [2/43]

template<typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>>
AMREX_ATTRIBUTE_FLATTEN_FOR void amrex::ParallelFor ( BoxND< dim > const &  box,
ncomp,
L const &  f 
)
noexcept

◆ ParallelFor() [3/43]

template<typename L , int dim>
void amrex::ParallelFor ( Gpu::KernelInfo const &  ,
BoxND< dim > const &  box,
L &&  f 
)
noexcept

◆ ParallelFor() [4/43]

template<int MT, typename L , int dim>
void amrex::ParallelFor ( Gpu::KernelInfo const &  ,
BoxND< dim > const &  box,
L &&  f 
)
noexcept

◆ ParallelFor() [5/43]

template<typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>>
void amrex::ParallelFor ( Gpu::KernelInfo const &  ,
BoxND< dim > const &  box,
ncomp,
L &&  f 
)
noexcept

◆ ParallelFor() [6/43]

template<int MT, typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>>
void amrex::ParallelFor ( Gpu::KernelInfo const &  ,
BoxND< dim > const &  box,
ncomp,
L &&  f 
)
noexcept

◆ ParallelFor() [7/43]

template<typename L1 , typename L2 , typename L3 , int dim>
void amrex::ParallelFor ( Gpu::KernelInfo const &  ,
BoxND< dim > const &  box1,
BoxND< dim > const &  box2,
BoxND< dim > const &  box3,
L1 &&  f1,
L2 &&  f2,
L3 &&  f3 
)
noexcept

◆ ParallelFor() [8/43]

template<int MT, typename L1 , typename L2 , typename L3 , int dim>
void amrex::ParallelFor ( Gpu::KernelInfo const &  ,
BoxND< dim > const &  box1,
BoxND< dim > const &  box2,
BoxND< dim > const &  box3,
L1 &&  f1,
L2 &&  f2,
L3 &&  f3 
)
noexcept

◆ ParallelFor() [9/43]

template<typename L1 , typename L2 , int dim>
void amrex::ParallelFor ( Gpu::KernelInfo const &  ,
BoxND< dim > const &  box1,
BoxND< dim > const &  box2,
L1 &&  f1,
L2 &&  f2 
)
noexcept

◆ ParallelFor() [10/43]

template<int MT, typename L1 , typename L2 , int dim>
void amrex::ParallelFor ( Gpu::KernelInfo const &  ,
BoxND< dim > const &  box1,
BoxND< dim > const &  box2,
L1 &&  f1,
L2 &&  f2 
)
noexcept

◆ ParallelFor() [11/43]

template<typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>>
void amrex::ParallelFor ( Gpu::KernelInfo const &  ,
BoxND< dim > const &  box1,
T1  ncomp1,
L1 &&  f1,
BoxND< dim > const &  box2,
T2  ncomp2,
L2 &&  f2 
)
noexcept

◆ ParallelFor() [12/43]

template<int MT, typename T1 , typename T2 , typename L1 , typename L2 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>>
void amrex::ParallelFor ( Gpu::KernelInfo const &  ,
BoxND< dim > const &  box1,
T1  ncomp1,
L1 &&  f1,
BoxND< dim > const &  box2,
T2  ncomp2,
L2 &&  f2 
)
noexcept

◆ ParallelFor() [13/43]

template<typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>, typename M3 = std::enable_if_t<std::is_integral_v<T3>>>
void amrex::ParallelFor ( Gpu::KernelInfo const &  ,
BoxND< dim > const &  box1,
T1  ncomp1,
L1 &&  f1,
BoxND< dim > const &  box2,
T2  ncomp2,
L2 &&  f2,
BoxND< dim > const &  box3,
T3  ncomp3,
L3 &&  f3 
)
noexcept

◆ ParallelFor() [14/43]

template<int MT, typename T1 , typename T2 , typename T3 , typename L1 , typename L2 , typename L3 , int dim, typename M1 = std::enable_if_t<std::is_integral_v<T1>>, typename M2 = std::enable_if_t<std::is_integral_v<T2>>, typename M3 = std::enable_if_t<std::is_integral_v<T3>>>
void amrex::ParallelFor ( Gpu::KernelInfo const &  ,
BoxND< dim > const &  box1,
T1  ncomp1,
L1 &&  f1,
BoxND< dim > const &  box2,
T2  ncomp2,
L2 &&  f2,
BoxND< dim > const &  box3,
T3  ncomp3,
L3 &&  f3 
)
noexcept

◆ ParallelFor() [15/43]

template<typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>>
void amrex::ParallelFor ( Gpu::KernelInfo const &  ,
n,
L &&  f 
)
noexcept

◆ ParallelFor() [16/43]

template<int MT, typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>>
void amrex::ParallelFor ( Gpu::KernelInfo const &  ,
n,
L &&  f 
)
noexcept

◆ ParallelFor() [17/43]

template<typename MF , typename F >
std::enable_if_t< IsFabArray< MF >::value > amrex::ParallelFor ( MF const &  mf,
F &&  f 
)

ParallelFor for MultiFab/FabArray.

This version launches a kernel to work on the valid region. If built for CPU, tiling will be enabled. For GPU builds, this function is NON-BLOCKING on the host. Conceptually, this is a 4D loop.

Template Parameters
    MF: the MultiFab/FabArray type
    F: a callable type like a lambda
Parameters
    mf: the MultiFab/FabArray object used to specify the iteration space
    f: a callable object void(int,int,int,int), where the first argument is the local box index and the following three are spatial indices for the x, y, and z-directions.
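
For example, the following sketch (mf is assumed to be a MultiFab) increments component 0 of every valid cell:

auto const& arrs = mf.arrays();
amrex::ParallelFor(mf,
    [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k) noexcept
{
    arrs[box_no](i,j,k) += amrex::Real(1.0);
});
amrex::Gpu::streamSynchronize();  // the launch is non-blocking on GPU builds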

◆ ParallelFor() [18/43]

template<int MT, typename MF , typename F >
std::enable_if_t< IsFabArray< MF >::value > amrex::ParallelFor ( MF const &  mf,
F &&  f 
)

ParallelFor for MultiFab/FabArray.

This version launches a kernel to work on the valid region. If built for CPU, tiling will be enabled. For GPU builds, this function is NON-BLOCKING on the host. Conceptually, this is a 4D loop.

Template Parameters
    MT: max threads in GPU blocks (only relevant for GPU builds)
    MF: the MultiFab/FabArray type
    F: a callable type like a lambda
Parameters
    mf: the MultiFab/FabArray object used to specify the iteration space
    f: a callable object void(int,int,int,int), where the first argument is the local box index and the following three are spatial indices for the x, y, and z-directions.

◆ ParallelFor() [19/43]

template<typename MF , typename F >
std::enable_if_t< IsFabArray< MF >::value > amrex::ParallelFor ( MF const &  mf,
IntVect const &  ng,
F &&  f 
)

ParallelFor for MultiFab/FabArray.

This version launches a kernel to work on the valid and ghost regions. If built for CPU, tiling will be enabled. For GPU builds, this function is NON-BLOCKING on the host. Conceptually, this is a 4D loop.

Template Parameters
    MF: the MultiFab/FabArray type
    F: a callable type like a lambda
Parameters
    mf: the MultiFab/FabArray object used to specify the iteration space
    ng: the number of ghost cells around the valid region
    f: a callable object void(int,int,int,int), where the first argument is the local box index and the following three are spatial indices for the x, y, and z-directions.

◆ ParallelFor() [20/43]

template<int MT, typename MF , typename F >
std::enable_if_t< IsFabArray< MF >::value > amrex::ParallelFor ( MF const &  mf,
IntVect const &  ng,
F &&  f 
)

ParallelFor for MultiFab/FabArray.

This version launches a kernel to work on the valid and ghost regions. If built for CPU, tiling will be enabled. For GPU builds, this function is NON-BLOCKING on the host. Conceptually, this is a 4D loop.

Template Parameters
    MT: max threads in GPU blocks (only relevant for GPU builds)
    MF: the MultiFab/FabArray type
    F: a callable type like a lambda
Parameters
    mf: the MultiFab/FabArray object used to specify the iteration space
    ng: the number of ghost cells around the valid region
    f: a callable object void(int,int,int,int), where the first argument is the local box index and the following three are spatial indices for the x, y, and z-directions.

◆ ParallelFor() [21/43]

template<typename MF , typename F >
std::enable_if_t< IsFabArray< MF >::value > amrex::ParallelFor ( MF const &  mf,
IntVect const &  ng,
int  ncomp,
F &&  f 
)

ParallelFor for MultiFab/FabArray.

This version launches a kernel to work on the valid and ghost regions. If built for CPU, tiling will be enabled. For GPU builds, this function is NON-BLOCKING on the host. Conceptually, this is a 5D loop.

Template Parameters
    MF: the MultiFab/FabArray type
    F: a callable type like a lambda
Parameters
    mf: the MultiFab/FabArray object used to specify the iteration space
    ng: the number of ghost cells around the valid region
    ncomp: the number of components
    f: a callable object void(int,int,int,int,int), where the first argument is the local box index, the following three are spatial indices for the x, y, and z-directions, and the last is the component index.
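
For example, the following sketch (mf again assumed to be a MultiFab) zeroes all components of the valid region plus one layer of ghost cells:

auto const& arrs = mf.arrays();
amrex::ParallelFor(mf, amrex::IntVect(1), mf.nComp(),
    [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k, int n) noexcept
{
    arrs[box_no](i,j,k,n) = amrex::Real(0.0);
});
amrex::Gpu::streamSynchronize();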

◆ ParallelFor() [22/43]

template<int MT, typename MF , typename F >
std::enable_if_t< IsFabArray< MF >::value > amrex::ParallelFor ( MF const &  mf,
IntVect const &  ng,
int  ncomp,
F &&  f 
)

ParallelFor for MultiFab/FabArray.

This version launches a kernel to work on the valid and ghost regions. If built for CPU, tiling will be enabled. For GPU builds, this function is NON-BLOCKING on the host. Conceptually, this is a 5D loop.

Template Parameters
    MT: max threads in GPU blocks (only relevant for GPU builds)
    MF: the MultiFab/FabArray type
    F: a callable type like a lambda
Parameters
    mf: the MultiFab/FabArray object used to specify the iteration space
    ng: the number of ghost cells around the valid region
    ncomp: the number of components
    f: a callable object void(int,int,int,int,int), where the first argument is the local box index, the following three are spatial indices for the x, y, and z-directions, and the last is the component index.

◆ ParallelFor() [23/43]

template<typename MF , typename F >
std::enable_if_t< IsFabArray< MF >::value > amrex::ParallelFor ( MF const &  mf,
IntVect const &  ng,
int  ncomp,
TileSize const &  ts,
DynamicTiling  dt,
F &&  f 
)

ParallelFor for MultiFab/FabArray.

This version launches a kernel to work on the valid and ghost regions. If built for CPU, tiling will be enabled. However, one could specify a huge tile size to effectively disable tiling. For GPU builds, this function is NON-BLOCKING on the host. Conceptually, this is a 5D loop.

Template Parameters
    MF: the MultiFab/FabArray type
    F: a callable type like a lambda
Parameters
    mf: the MultiFab/FabArray object used to specify the iteration space
    ng: the number of ghost cells around the valid region
    ncomp: the number of components
    ts: tile size (ignored by GPU builds)
    dt: controls dynamic tiling for CPU builds
    f: a callable object void(int,int,int,int,int), where the first argument is the local box index, the following three are spatial indices for the x, y, and z-directions, and the last is the component index.

◆ ParallelFor() [24/43]

template<int MT, typename MF , typename F >
std::enable_if_t< IsFabArray< MF >::value > amrex::ParallelFor ( MF const &  mf,
IntVect const &  ng,
int  ncomp,
TileSize const &  ts,
DynamicTiling  dt,
F &&  f 
)

ParallelFor for MultiFab/FabArray.

This version launches a kernel to work on the valid and ghost regions. If built for CPU, tiling will be enabled. However, one could specify a huge tile size to effectively disable tiling. For GPU builds, this function is NON-BLOCKING on the host. Conceptually, this is a 5D loop.

Template Parameters
    MT: max threads in GPU blocks (only relevant for GPU builds)
    MF: the MultiFab/FabArray type
    F: a callable type like a lambda
Parameters
    mf: the MultiFab/FabArray object used to specify the iteration space
    ng: the number of ghost cells around the valid region
    ncomp: the number of components
    ts: tile size (ignored by GPU builds)
    dt: controls dynamic tiling for CPU builds
    f: a callable object void(int,int,int,int,int), where the first argument is the local box index, the following three are spatial indices for the x, y, and z-directions, and the last is the component index.

◆ ParallelFor() [25/43]

template<typename MF , typename F >
std::enable_if_t< IsFabArray< MF >::value > amrex::ParallelFor ( MF const &  mf,
IntVect const &  ng,
int  ncomp,
TileSize const &  ts,
F &&  f 
)

ParallelFor for MultiFab/FabArray.

This version launches a kernel to work on the valid and ghost regions. If built for CPU, tiling will be enabled. However, one could specify a huge tile size to effectively disable tiling. For GPU builds, this function is NON-BLOCKING on the host. Conceptually, this is a 5D loop.

Template Parameters
    MF: the MultiFab/FabArray type
    F: a callable type like a lambda
Parameters
    mf: the MultiFab/FabArray object used to specify the iteration space
    ng: the number of ghost cells around the valid region
    ncomp: the number of components
    ts: tile size (ignored by GPU builds)
    f: a callable object void(int,int,int,int,int), where the first argument is the local box index, the following three are spatial indices for the x, y, and z-directions, and the last is the component index.

◆ ParallelFor() [26/43]

template<int MT, typename MF , typename F >
std::enable_if_t< IsFabArray< MF >::value > amrex::ParallelFor ( MF const &  mf,
IntVect const &  ng,
int  ncomp,
TileSize const &  ts,
F &&  f 
)

ParallelFor for MultiFab/FabArray.

This version launches a kernel to work on the valid and ghost regions. If built for CPU, tiling will be enabled. However, one could specify a huge tile size to effectively disable tiling. For GPU builds, this function is NON-BLOCKING on the host. Conceptually, this is a 5D loop.

Template Parameters
    MT: max threads in GPU blocks (only relevant for GPU builds)
    MF: the MultiFab/FabArray type
    F: a callable type like a lambda
Parameters
    mf: the MultiFab/FabArray object used to specify the iteration space
    ng: the number of ghost cells around the valid region
    ncomp: the number of components
    ts: tile size (ignored by GPU builds)
    f: a callable object void(int,int,int,int,int), where the first argument is the local box index, the following three are spatial indices for the x, y, and z-directions, and the last is the component index.

◆ ParallelFor() [27/43]

template<typename MF , typename F >
std::enable_if_t< IsFabArray< MF >::value > amrex::ParallelFor ( MF const &  mf,
IntVect const &  ng,
TileSize const &  ts,
DynamicTiling  dt,
F &&  f 
)

ParallelFor for MultiFab/FabArray.

This version launches a kernel to work on the valid and ghost regions. If built for CPU, tiling will be enabled. However, one could specify a huge tile size to effectively disable tiling. For GPU builds, this function is NON-BLOCKING on the host. Conceptually, this is a 4D loop.

Template Parameters
    MF: the MultiFab/FabArray type
    F: a callable type like a lambda
Parameters
    mf: the MultiFab/FabArray object used to specify the iteration space
    ng: the number of ghost cells around the valid region
    ts: tile size (ignored by GPU builds)
    dt: controls dynamic tiling for CPU builds
    f: a callable object void(int,int,int,int), where the first argument is the local box index and the following three are spatial indices for the x, y, and z-directions.

◆ ParallelFor() [28/43]

template<int MT, typename MF , typename F >
std::enable_if_t< IsFabArray< MF >::value > amrex::ParallelFor ( MF const &  mf,
IntVect const &  ng,
TileSize const &  ts,
DynamicTiling  dt,
F &&  f 
)

ParallelFor for MultiFab/FabArray.

This version launches a kernel to work on the valid and ghost regions. If built for CPU, tiling will be enabled. However, one could specify a huge tile size to effectively disable tiling. For GPU builds, this function is NON-BLOCKING on the host. Conceptually, this is a 4D loop.

Template Parameters
    MT: max threads in GPU blocks (only relevant for GPU builds)
    MF: the MultiFab/FabArray type
    F: a callable type like a lambda
Parameters
    mf: the MultiFab/FabArray object used to specify the iteration space
    ng: the number of ghost cells around the valid region
    ts: tile size (ignored by GPU builds)
    dt: controls dynamic tiling for CPU builds
    f: a callable object void(int,int,int,int), where the first argument is the local box index and the following three are spatial indices for the x, y, and z-directions.

◆ ParallelFor() [29/43]

template<typename MF , typename F >
std::enable_if_t< IsFabArray< MF >::value > amrex::ParallelFor ( MF const &  mf,
IntVect const &  ng,
TileSize const &  ts,
F &&  f 
)

ParallelFor for MultiFab/FabArray.

This version launches a kernel to work on the valid and ghost regions. If built for CPU, tiling will be enabled. However, one could specify a huge tile size to effectively disable tiling. For GPU builds, this function is NON-BLOCKING on the host. Conceptually, this is a 4D loop.

Template Parameters
    MF: the MultiFab/FabArray type
    F: a callable type like a lambda
Parameters
    mf: the MultiFab/FabArray object used to specify the iteration space
    ng: the number of ghost cells around the valid region
    ts: tile size (ignored by GPU builds)
    f: a callable object void(int,int,int,int), where the first argument is the local box index and the following three are spatial indices for the x, y, and z-directions.

◆ ParallelFor() [30/43]

template<int MT, typename MF , typename F >
std::enable_if_t< IsFabArray< MF >::value > amrex::ParallelFor ( MF const &  mf,
IntVect const &  ng,
TileSize const &  ts,
F &&  f 
)

ParallelFor for MultiFab/FabArray.

This version launches a kernel to work on the valid and ghost regions. If built for CPU, tiling will be enabled. However, one could specify a huge tile size to effectively disable tiling. For GPU builds, this function is NON-BLOCKING on the host. Conceptually, this is a 4D loop.

Template Parameters
    MT: max threads in GPU blocks (only relevant for GPU builds)
    MF: the MultiFab/FabArray type
    F: a callable type like a lambda
Parameters
    mf: the MultiFab/FabArray object used to specify the iteration space
    ng: the number of ghost cells around the valid region
    ts: tile size (ignored by GPU builds)
    f: a callable object void(int,int,int,int), where the first argument is the local box index and the following three are spatial indices for the x, y, and z-directions.

◆ ParallelFor() [31/43]

template<typename MF , typename F >
std::enable_if_t< IsFabArray< MF >::value > amrex::ParallelFor ( MF const &  mf,
TileSize const &  ts,
F &&  f 
)

ParallelFor for MultiFab/FabArray.

This version launches a kernel to work on the valid region. If built for CPU, tiling will be enabled. However, one could specify a huge tile size to effectively disable tiling. For GPU builds, this function is NON-BLOCKING on the host. Conceptually, this is a 4D loop.

Template Parameters
    MF: the MultiFab/FabArray type
    F: a callable type like a lambda
Parameters
    mf: the MultiFab/FabArray object used to specify the iteration space
    ts: tile size (ignored by GPU builds)
    f: a callable object void(int,int,int,int), where the first argument is the local box index and the following three are spatial indices for the x, y, and z-directions.

◆ ParallelFor() [32/43]

template<int MT, typename MF , typename F >
std::enable_if_t< IsFabArray< MF >::value > amrex::ParallelFor ( MF const &  mf,
TileSize const &  ts,
F &&  f 
)

ParallelFor for MultiFab/FabArray.

This version launches a kernel to work on the valid region. If built for CPU, tiling will be enabled. However, one could specify a huge tile size to effectively disable tiling. For GPU builds, this function is NON-BLOCKING on the host. Conceptually, this is a 4D loop.

Template Parameters
    MT: max threads in GPU blocks (only relevant for GPU builds)
    MF: the MultiFab/FabArray type
    F: a callable type like a lambda
Parameters
    mf: the MultiFab/FabArray object used to specify the iteration space
    ts: tile size (ignored by GPU builds)
    f: a callable object void(int,int,int,int), where the first argument is the local box index and the following three are spatial indices for the x, y, and z-directions.

◆ ParallelFor() [33/43]

template<typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>>
AMREX_ATTRIBUTE_FLATTEN_FOR void amrex::ParallelFor ( n,
L const &  f 
)
noexcept

◆ ParallelFor() [34/43]

template<class TagType , class F >
void amrex::ParallelFor ( TagVector< TagType > const &  tv,
F const &  f 
)

◆ ParallelFor() [35/43]

template<class TagType , class F >
std::enable_if_t< std::is_same_v< std::decay_t< decltype(std::declval< TagType >().box())>, Box > > amrex::ParallelFor ( TagVector< TagType > const &  tv,
int  ncomp,
F const &  f 
)

◆ ParallelFor() [36/43]

template<class F , int dim, typename... CTOs>
void amrex::ParallelFor ( TypeList< CTOs... >  ctos,
std::array< int, sizeof...(CTOs)> const &  option,
BoxND< dim > const &  box,
F &&  f 
)

ParallelFor with compile time optimization of kernels with run time options.

It uses a fold expression to generate kernel launches for all combinations of the run time options. The kernel function can use constexpr if to discard unused code blocks for better run time performance. In the example below, the code will be expanded into 4*2 = 8 normal ParallelFors covering all combinations of the run time parameters.

int A_runtime_option = ...;
int B_runtime_option = ...;
enum A_options : int { A0, A1, A2, A3};
enum B_options : int { B0, B1 };
ParallelFor(TypeList<CompileTimeOptions<A0,A1,A2,A3>,
CompileTimeOptions<B0,B1>>{},
{A_runtime_option, B_runtime_option},
box, [=] AMREX_GPU_DEVICE (int i, int j, int k,
auto A_control, auto B_control)
{
...
if constexpr (A_control.value == A0) {
...
} else if constexpr (A_control.value == A1) {
...
} else if constexpr (A_control.value == A2) {
...
} else {
...
}
if constexpr (A_control.value != A3 && B_control.value == B1) {
...
}
...
});

Note that due to a limitation of CUDA's extended device lambda, the constexpr if block cannot be the one that captures a variable first. If nvcc complains about it, you will have to manually capture it outside constexpr if. The data type for the parameters is int.

Parameters
    ctos: list of all possible values of the parameters.
    option: the run time parameters.
    box: a Box specifying the 3D for loop's range.
    f: a callable object taking three integers and working on the given cell.

◆ ParallelFor() [37/43]

template<typename T , class F , int dim, typename... CTOs>
std::enable_if_t< std::is_integral_v< T > > amrex::ParallelFor ( TypeList< CTOs... >  ctos,
std::array< int, sizeof...(CTOs)> const &  option,
BoxND< dim > const &  box,
ncomp,
F &&  f 
)

ParallelFor with compile time optimization of kernels with run time options.

It uses a fold expression to generate kernel launches for all combinations of the run time options. The kernel function can use constexpr if to discard unused code blocks for better run time performance. In the example below, the code will be expanded into 4*2 = 8 normal ParallelFors covering all combinations of the run time parameters.

int A_runtime_option = ...;
int B_runtime_option = ...;
enum A_options : int { A0, A1, A2, A3};
enum B_options : int { B0, B1 };
ParallelFor(TypeList<CompileTimeOptions<A0,A1,A2,A3>,
CompileTimeOptions<B0,B1>>{},
{A_runtime_option, B_runtime_option},
box, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n,
auto A_control, auto B_control)
{
...
if constexpr (A_control.value == A0) {
...
} else if constexpr (A_control.value == A1) {
...
} else if constexpr (A_control.value == A2) {
...
} else {
...
}
if constexpr (A_control.value != A3 && B_control.value == B1) {
...
}
...
});

Note that due to a limitation of CUDA's extended device lambda, the constexpr if block cannot be the one that captures a variable first. If nvcc complains about it, you will have to manually capture it outside constexpr if. The data type for the parameters is int.

Parameters
    ctos: list of all possible values of the parameters.
    option: the run time parameters.
    box: a Box specifying the iteration in 3D space.
    ncomp: an integer specifying the range for iteration over components.
    f: a callable object taking four integers (three cell indices and a component index) and working on the given cell.

◆ ParallelFor() [38/43]

template<typename T , class F , typename... CTOs>
std::enable_if_t< std::is_integral_v< T > > amrex::ParallelFor ( TypeList< CTOs... >  ctos,
std::array< int, sizeof...(CTOs)> const &  option,
N,
F &&  f 
)

ParallelFor with compile time optimization of kernels with run time options.

It uses a fold expression to generate kernel launches for all combinations of the run time options. The kernel function can use constexpr if to discard unused code blocks for better run time performance. In the example below, the code will be expanded into 4*2 = 8 normal ParallelFors covering all combinations of the run time parameters.

int A_runtime_option = ...;
int B_runtime_option = ...;
enum A_options : int { A0, A1, A2, A3};
enum B_options : int { B0, B1 };
ParallelFor(TypeList<CompileTimeOptions<A0,A1,A2,A3>,
CompileTimeOptions<B0,B1>>{},
{A_runtime_option, B_runtime_option},
N, [=] AMREX_GPU_DEVICE (int i, auto A_control, auto B_control)
{
...
if constexpr (A_control.value == A0) {
...
} else if constexpr (A_control.value == A1) {
...
} else if constexpr (A_control.value == A2) {
...
} else {
...
}
if constexpr (A_control.value != A3 && B_control.value == B1) {
...
}
...
});

Note that due to a limitation of CUDA's extended device lambda, the constexpr if block cannot be the one that captures a variable first. If nvcc complains about it, you will have to manually capture it outside constexpr if. The data type for the parameters is int.

Parameters
    ctos: list of all possible values of the parameters.
    option: the run time parameters.
    N: an integer specifying the 1D for loop's range.
    f: a callable object taking an integer and working on that iteration.

◆ ParallelFor() [39/43]

template<int MT, class F , int dim, typename... CTOs>
void amrex::ParallelFor ( TypeList< CTOs... >  ctos,
std::array< int, sizeof...(CTOs)> const &  runtime_options,
BoxND< dim > const &  box,
F &&  f 
)

◆ ParallelFor() [40/43]

template<int MT, typename T , class F , int dim, typename... CTOs>
std::enable_if_t< std::is_integral_v< T > > amrex::ParallelFor ( TypeList< CTOs... >  ctos,
std::array< int, sizeof...(CTOs)> const &  runtime_options,
BoxND< dim > const &  box,
ncomp,
F &&  f 
)

◆ ParallelFor() [41/43]

template<int MT, typename T , class F , typename... CTOs>
std::enable_if_t< std::is_integral_v< T > > amrex::ParallelFor ( TypeList< CTOs... >  ctos,
std::array< int, sizeof...(CTOs)> const &  runtime_options,
N,
F &&  f 
)

◆ ParallelFor() [42/43]

template<class TagType , class F >
void amrex::ParallelFor ( Vector< TagType > const &  tags,
F &&  f 
)

◆ ParallelFor() [43/43]

template<class TagType , class F >
std::enable_if_t< std::is_same_v< std::decay_t< decltype(std::declval< TagType >().box())>, Box > > amrex::ParallelFor ( Vector< TagType > const &  tags,
int  ncomp,
F &&  f 
)

◆ ParallelForRNG() [1/3]

template<typename L , int dim>
AMREX_ATTRIBUTE_FLATTEN_FOR void amrex::ParallelForRNG ( BoxND< dim > const &  box,
L const &  f 
)
noexcept

◆ ParallelForRNG() [2/3]

template<typename T , typename L , int dim, typename M = std::enable_if_t<std::is_integral_v<T>>>
AMREX_ATTRIBUTE_FLATTEN_FOR void amrex::ParallelForRNG ( BoxND< dim > const &  box,
ncomp,
L const &  f 
)
noexcept

◆ ParallelForRNG() [3/3]

template<typename T , typename L , typename M = std::enable_if_t<std::is_integral_v<T>>>
AMREX_ATTRIBUTE_FLATTEN_FOR void amrex::ParallelForRNG ( n,
L const &  f 
)
noexcept

◆ ParallelForSIMD()

template<int WIDTH, typename N , typename L , typename M = std::enable_if_t<std::is_integral_v<N>>>
AMREX_ATTRIBUTE_FLATTEN_FOR void amrex::ParallelForSIMD ( n,
L const &  f 
)
noexcept

ParallelFor with a SIMD width (in elements).

SIMD load/write-back operations need to be performed before/after calling this.

Template Parameters
    WIDTH: SIMD width in elements
    N: index type (integer)
    L: function/functor to call per SIMD set of elements

◆ ParReduce() [1/6]

template<typename Op , typename T , typename FAB , typename F , typename foo = std::enable_if_t<IsBaseFab<FAB>::value>>
T amrex::ParReduce ( TypeList< Op >  operation_list,
TypeList< T >  type_list,
FabArray< FAB > const &  fa,
F &&  f 
)

Parallel reduce for MultiFab/FabArray. The reduction result is local to each process; it is the user's responsibility to perform MPI communication if a globally reduced result is needed.

This performs reduction over a MultiFab's valid region. For example, the code below computes the sum of the processed data in a MultiFab.

auto const& ma = mf.const_arrays();
Real ektot = ParReduce(TypeList<ReduceOpSum>{}, TypeList<Real>{}, mf,
[=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k) noexcept
-> GpuTuple<Real>
{
auto rho = ma[box_no](i,j,k,0);
auto mx = ma[box_no](i,j,k,1);
auto my = ma[box_no](i,j,k,2);
auto mz = ma[box_no](i,j,k,3);
auto ek = (mx*mx+my*my+mz*mz)/(2.*rho);
return { ek };
});
Template Parameters
    Op: Reduce operator (e.g., ReduceOpSum, ReduceOpMin, ReduceOpMax, ReduceOpLogicalAnd, and ReduceOpLogicalOr)
    T: data type (e.g., Real, int, etc.)
    FAB: MultiFab/FabArray type
    F: callable type like a lambda function
Parameters
    operation_list: a reduce operator stored in TypeList
    type_list: a data type stored in TypeList
    fa: a MultiFab/FabArray object used to specify the iteration space
    f: a callable object returning GpuTuple<T>. It takes four ints, where the first int is the local box index and the others are spatial indices for x, y, and z-directions.
Returns
    reduction result (T)

◆ ParReduce() [2/6]

template<typename Op , typename T , typename FAB , typename F , typename foo = std::enable_if_t<IsBaseFab<FAB>::value>>
T amrex::ParReduce ( TypeList< Op >  operation_list,
TypeList< T >  type_list,
FabArray< FAB > const &  fa,
IntVect const &  nghost,
F &&  f 
)

Parallel reduce for MultiFab/FabArray. The reduction result is local to each process; it is the user's responsibility to perform MPI communication if a globally reduced result is needed.

This performs reduction over a MultiFab's valid and specified ghost regions. For example, the code below computes the sum of the processed data in a MultiFab.

auto const& ma = mf.const_arrays();
Real ektot = ParReduce(TypeList<ReduceOpSum>{}, TypeList<Real>{},
mf, IntVect(0),
[=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k) noexcept
-> GpuTuple<Real>
{
auto rho = ma[box_no](i,j,k,0);
auto mx = ma[box_no](i,j,k,1);
auto my = ma[box_no](i,j,k,2);
auto mz = ma[box_no](i,j,k,3);
auto ek = (mx*mx+my*my+mz*mz)/(2.*rho);
return { ek };
});
Template Parameters
    Op: Reduce operator (e.g., ReduceOpSum, ReduceOpMin, ReduceOpMax, ReduceOpLogicalAnd, and ReduceOpLogicalOr)
    T: data type (e.g., Real, int, etc.)
    FAB: MultiFab/FabArray type
    F: callable type like a lambda function
Parameters
    operation_list: a reduce operator stored in TypeList
    type_list: a data type stored in TypeList
    fa: a MultiFab/FabArray object used to specify the iteration space
    nghost: the number of ghost cells included in the iteration space
    f: a callable object returning GpuTuple<T>. It takes four ints, where the first int is the local box index and the others are spatial indices for x, y, and z-directions.
Returns
    reduction result (T)

◆ ParReduce() [3/6]

template<typename Op , typename T , typename FAB , typename F , typename foo = std::enable_if_t<IsBaseFab<FAB>::value>>
T amrex::ParReduce ( TypeList< Op >  operation_list,
TypeList< T >  type_list,
FabArray< FAB > const &  fa,
IntVect const &  nghost,
int  ncomp,
F &&  f 
)

Parallel reduce for MultiFab/FabArray. The reduction result is local to each process; it is the user's responsibility to perform MPI communication if a globally reduced result is needed.

This performs reduction over a MultiFab's valid and specified ghost regions and components. For example, the code below computes the sum of the data in a MultiFab.

auto const& ma = mf.const_arrays();
Real ektot = ParReduce(TypeList<ReduceOpSum>{}, TypeList<Real>{},
mf, mf.nGrowVect(), mf.nComp(),
[=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k, int n) noexcept
-> GpuTuple<Real>
{
return { ma[box_no](i,j,k,n) };
});
Template Parameters
    Op: Reduce operator (e.g., ReduceOpSum, ReduceOpMin, ReduceOpMax, ReduceOpLogicalAnd, and ReduceOpLogicalOr)
    T: data type (e.g., Real, int, etc.)
    FAB: MultiFab/FabArray type
    F: callable type like a lambda function
Parameters
    operation_list: a reduce operator stored in TypeList
    type_list: a data type stored in TypeList
    fa: a MultiFab/FabArray object used to specify the iteration space
    nghost: the number of ghost cells included in the iteration space
    ncomp: the number of components in the iteration space
    f: a callable object returning GpuTuple<T>. It takes five ints, where the first int is the local box index, the next three are spatial indices for x, y, and z-directions, and the last is for component.
Returns
    reduction result (T)

◆ ParReduce() [4/6]

template<typename... Ops, typename... Ts, typename FAB , typename F , typename foo = std::enable_if_t<IsBaseFab<FAB>::value>>
ReduceData< Ts... >::Type amrex::ParReduce ( TypeList< Ops... >  operation_list,
TypeList< Ts... >  type_list,
FabArray< FAB > const &  fa,
F &&  f 
)

Parallel reduce for MultiFab/FabArray. The reduction result is local to each process; it is the user's responsibility to perform MPI communication if a globally reduced result is needed.

This performs reduction over a MultiFab's valid region. For example, the code below computes the minimum of the first MultiFab and the maximum of the second MultiFab.

auto const& ma1 = mf1.const_arrays();
auto const& ma2 = mf2.const_arrays();
GpuTuple<Real,Real> mm = ParReduce(TypeList<ReduceOpMin,ReduceOpMax>{},
                                   TypeList<Real,Real>{}, mf1,
    [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k) noexcept
        -> GpuTuple<Real,Real>
    {
        return { ma1[box_no](i,j,k), ma2[box_no](i,j,k) };
    });
Template Parameters
    Ops...: reduce operators (e.g., ReduceOpSum, ReduceOpMin, ReduceOpMax, ReduceOpLogicalAnd, and ReduceOpLogicalOr)
    Ts...: data types (e.g., Real, int, etc.)
    FAB: MultiFab/FabArray type
    F: callable type like a lambda function
Parameters
    operation_list: list of reduce operators
    type_list: list of data types
    fa: a MultiFab/FabArray object used to specify the iteration space
    f: a callable object returning GpuTuple<Ts...>. It takes four ints, where the first int is the local box index and the others are spatial indices for x, y, and z-directions.
Returns
    reduction result (GpuTuple<Ts...>)

◆ ParReduce() [5/6]

template<typename... Ops, typename... Ts, typename FAB , typename F , typename foo = std::enable_if_t<IsBaseFab<FAB>::value>>
ReduceData< Ts... >::Type amrex::ParReduce ( TypeList< Ops... >  operation_list,
TypeList< Ts... >  type_list,
FabArray< FAB > const &  fa,
IntVect const &  nghost,
F &&  f 
)

Parallel reduce for MultiFab/FabArray. The reduction result is local to each process; it is the user's responsibility to perform MPI communication if a globally reduced result is needed.

This performs reduction over a MultiFab's valid and specified ghost regions. For example, the code below computes the minimum of the first MultiFab and the maximum of the second MultiFab.

auto const& ma1 = mf1.const_arrays();
auto const& ma2 = mf2.const_arrays();
GpuTuple<Real,Real> mm = ParReduce(TypeList<ReduceOpMin,ReduceOpMax>{},
                                   TypeList<Real,Real>{},
                                   mf1, mf1.nGrowVect(),
    [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k) noexcept
        -> GpuTuple<Real,Real>
    {
        return { ma1[box_no](i,j,k), ma2[box_no](i,j,k) };
    });
Template Parameters
    Ops...: reduce operators (e.g., ReduceOpSum, ReduceOpMin, ReduceOpMax, ReduceOpLogicalAnd, and ReduceOpLogicalOr)
    Ts...: data types (e.g., Real, int, etc.)
    FAB: MultiFab/FabArray type
    F: callable type like a lambda function
Parameters
    operation_list: list of reduce operators
    type_list: list of data types
    fa: a MultiFab/FabArray object used to specify the iteration space
    nghost: the number of ghost cells included in the iteration space
    f: a callable object returning GpuTuple<Ts...>. It takes four ints, where the first int is the local box index and the others are spatial indices for x, y, and z-directions.
Returns
    reduction result (GpuTuple<Ts...>)

◆ ParReduce() [6/6]

template<typename... Ops, typename... Ts, typename FAB , typename F , typename foo = std::enable_if_t<IsBaseFab<FAB>::value>>
ReduceData< Ts... >::Type amrex::ParReduce ( TypeList< Ops... >  operation_list,
TypeList< Ts... >  type_list,
FabArray< FAB > const &  fa,
IntVect const &  nghost,
int  ncomp,
F &&  f 
)

Parallel reduce for MultiFab/FabArray. The reduction result is local to each process; it is the user's responsibility to perform MPI communication if a globally reduced result is needed.

This performs reduction over a MultiFab's valid and specified ghost regions and components. For example, the code below computes the minimum of the first MultiFab and the maximum of the second MultiFab.

auto const& ma1 = mf1.const_arrays();
auto const& ma2 = mf2.const_arrays();
GpuTuple<Real,Real> mm = ParReduce(TypeList<ReduceOpMin,ReduceOpMax>{},
                                   TypeList<Real,Real>{},
                                   mf1, mf1.nGrowVect(), mf1.nComp(),
    [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k, int n) noexcept
        -> GpuTuple<Real,Real>
    {
        return { ma1[box_no](i,j,k,n), ma2[box_no](i,j,k,n) };
    });
Template Parameters
    Ops...: reduce operators (e.g., ReduceOpSum, ReduceOpMin, ReduceOpMax, ReduceOpLogicalAnd, and ReduceOpLogicalOr)
    Ts...: data types (e.g., Real, int, etc.)
    FAB: MultiFab/FabArray type
    F: callable type like a lambda function
Parameters
    operation_list: list of reduce operators
    type_list: list of data types
    fa: a MultiFab/FabArray object used to specify the iteration space
    nghost: the number of ghost cells included in the iteration space
    ncomp: the number of components in the iteration space
    f: a callable object returning GpuTuple<Ts...>. It takes five ints, where the first int is the local box index, the next three are spatial indices for x, y, and z-directions, and the last is for component.
Returns
    reduction result (GpuTuple<Ts...>)

◆ ParticleReduce() [1/3]

template<class RD , class PC , class F , class ReduceOps , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
RD::Type amrex::ParticleReduce ( PC const &  pc,
F &&  f,
ReduceOps reduce_ops 
)

A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates over all particles on all levels.

This version can operate on a GpuTuple worth of data at once. It also takes an arbitrary tuple of reduction operators.

Note that there is no MPI reduction performed at the end of this operation. Users should manually call the MPI reduction operations described in ParallelDescriptor if they want that behavior.

Unlike the other reduction functions in this file, this version does not respect the Gpu::launchRegion flag. If AMReX is built with GPU support, this reduction will always be done on the device.

Template Parameters
    RD: an amrex::ReduceData type
    PC: the ParticleContainer type
    F: a function object
    ReduceOps: a ReduceOps type
Parameters
    pc: the ParticleContainer to operate on
    f: a callable that operates on a single particle; see below for example forms.
    reduce_ops: specifies the reduction operations for each tuple element

Example usage:

using PType = typename PC::ParticleType;
auto r = amrex::ParticleReduce<ReduceData<amrex::Real, amrex::Real,int>> (
pc, [=] AMREX_GPU_DEVICE (const PType& p) noexcept
{
const amrex::Real a = p.rdata(1);
const amrex::Real b = p.rdata(2);
const int c = p.idata(1);
return {a, b, c};
}, reduce_ops);
using SPType = typename PC::SuperParticleType;
auto r = amrex::ParticleReduce<ReduceData<amrex::Real, amrex::Real,int>> (
pc, [=] AMREX_GPU_DEVICE (const SPType& p) noexcept
{
const amrex::Real a = p.rdata(1);
const amrex::Real b = p.rdata(2);
const int c = p.idata(1);
return {a, b, c};
}, reduce_ops);
using ConstPTDType = typename PC::ConstPTDType;
auto r = amrex::ParticleReduce<ReduceData<amrex::Real, amrex::Real,int>> (
pc, [=] AMREX_GPU_DEVICE (const ConstPTDType& ptd, const int i) noexcept
{
const amrex::Real a = ptd.rdata(1)[i];
const amrex::Real b = ptd.rdata(2)[i];
const int c = ptd.idata(1)[i];
return {a, b, c};
}, reduce_ops);

◆ ParticleReduce() [2/3]

template<class RD , class PC , class F , class ReduceOps , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
RD::Type amrex::ParticleReduce ( PC const &  pc,
int  lev,
F &&  f,
ReduceOps reduce_ops 
)

A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates only on the specified level.

This version can operate on a GpuTuple worth of data at once. It also takes an arbitrary tuple of reduction operators.

Note that there is no MPI reduction performed at the end of this operation. Users should manually call the MPI reduction operations described in ParallelDescriptor if they want that behavior.

Unlike the other reduction functions in this file, this version does not respect the Gpu::launchRegion flag. If AMReX is built with GPU support, this reduction will always be done on the device.

Template Parameters
    RD: an amrex::ReduceData type
    PC: the ParticleContainer type
    F: a function object
    ReduceOps: a ReduceOps type
Parameters
    pc: the ParticleContainer to operate on
    lev: the level to operate on
    f: a callable that operates on a single particle; see below for example forms.
    reduce_ops: specifies the reduction operations for each tuple element

Example usage:

using PType = typename PC::ParticleType;
auto r = amrex::ParticleReduce<ReduceData<amrex::Real, amrex::Real,int>> (
pc, [=] AMREX_GPU_DEVICE (const PType& p) noexcept
{
const amrex::Real a = p.rdata(1);
const amrex::Real b = p.rdata(2);
const int c = p.idata(1);
return {a, b, c};
}, reduce_ops);
using SPType = typename PC::SuperParticleType;
auto r = amrex::ParticleReduce<ReduceData<amrex::Real, amrex::Real,int>> (
pc, [=] AMREX_GPU_DEVICE (const SPType& p) noexcept
{
const amrex::Real a = p.rdata(1);
const amrex::Real b = p.rdata(2);
const int c = p.idata(1);
return {a, b, c};
}, reduce_ops);
using ConstPTDType = typename PC::ConstPTDType;
auto r = amrex::ParticleReduce<ReduceData<amrex::Real, amrex::Real,int>> (
pc, [=] AMREX_GPU_DEVICE (const ConstPTDType& ptd, const int i) noexcept
{
const amrex::Real a = ptd.rdata(1)[i];
const amrex::Real b = ptd.rdata(2)[i];
const int c = ptd.idata(1)[i];
return {a, b, c};
}, reduce_ops);

◆ ParticleReduce() [3/3]

template<class RD , class PC , class F , class ReduceOps , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
RD::Type amrex::ParticleReduce ( PC const &  pc,
int  lev_min,
int  lev_max,
F const &  f,
ReduceOps reduce_ops 
)

A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates from the specified lev_min to lev_max.

This version can operate on a GpuTuple worth of data at once. It also takes an arbitrary tuple of reduction operators.

Note that there is no MPI reduction performed at the end of this operation. Users should manually call the MPI reduction operations described in ParallelDescriptor if they want that behavior.

Unlike the other reduction functions in this file, this version does not respect the Gpu::launchRegion flag. If AMReX is built with GPU support, this reduction will always be done on the device.

Template Parameters
    RD: an amrex::ReduceData type
    PC: the ParticleContainer type
    F: a function object
    ReduceOps: a ReduceOps type
Parameters
    pc: the ParticleContainer to operate on
    lev_min: the minimum level to include
    lev_max: the maximum level to include
    f: a callable that operates on a single particle; see below for example forms.
    reduce_ops: specifies the reduction operations for each tuple element

Example usage:

using PType = typename PC::ParticleType;
auto r = amrex::ParticleReduce<ReduceData<amrex::Real, amrex::Real,int>> (
pc, [=] AMREX_GPU_DEVICE (const PType& p) noexcept
{
const amrex::Real a = p.rdata(1);
const amrex::Real b = p.rdata(2);
const int c = p.idata(1);
return {a, b, c};
}, reduce_ops);
using SPType = typename PC::SuperParticleType;
auto r = amrex::ParticleReduce<ReduceData<amrex::Real, amrex::Real,int>> (
pc, [=] AMREX_GPU_DEVICE (const SPType& p) noexcept
{
const amrex::Real a = p.rdata(1);
const amrex::Real b = p.rdata(2);
const int c = p.idata(1);
return {a, b, c};
}, reduce_ops);
using ConstPTDType = typename PC::ConstPTDType;
auto r = amrex::ParticleReduce<ReduceData<amrex::Real, amrex::Real,int>> (
pc, [=] AMREX_GPU_DEVICE (const ConstPTDType& ptd, const int i) noexcept
{
const amrex::Real a = ptd.rdata(1)[i];
const amrex::Real b = ptd.rdata(2)[i];
const int c = ptd.idata(1)[i];
return {a, b, c};
}, reduce_ops);

◆ ParticleToMesh() [1/2]

template<class PC , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
void amrex::ParticleToMesh ( PC const &  pc,
const Vector< MultiFab * > &  mf,
int  lev_min,
int  lev_max,
F &&  f,
bool  zero_out_input = true,
bool  vol_weight = true 
)

◆ ParticleToMesh() [2/2]

template<class PC , class MF , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
void amrex::ParticleToMesh ( PC const &  pc,
MF &  mf,
int  lev,
F const &  f,
bool  zero_out_input = true 
)

◆ Partition() [1/3]

template<typename T , typename F >
int amrex::Partition ( Gpu::DeviceVector< T > &  v,
F &&  f 
)

A GPU-capable partition function for contiguous data.

After calling this, all the items for which the predicate is true will be before the items for which the predicate is false in the input array.

This version is not stable; if you want that behavior, use amrex::StablePartition instead.

Template Parameters
Ttype of the data to be partitioned.
Ftype of the predicate function.
Parameters
va Gpu::DeviceVector with the data to be partitioned.
fpredicate function that returns 1 or 0 for each input

Returns the index of the first element for which f is 0.
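
A minimal sketch (not from the original documentation), assuming a Gpu::DeviceVector<int> named v that has already been filled; the predicate keeps nonnegative entries in front:

Gpu::DeviceVector<int> v; // assumed to have been filled elsewhere
int n_true = amrex::Partition(v,
    [=] AMREX_GPU_DEVICE (int x) noexcept { return x >= 0; });
// v[0 .. n_true-1] now hold the elements for which the predicate returned true.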

◆ Partition() [2/3]

template<typename T , typename F >
int amrex::Partition ( T *  data,
int  beg,
int  end,
F &&  f 
)

A GPU-capable partition function for contiguous data.

After calling this, all the items for which the predicate is true will be before the items for which the predicate is false in the input array.

This version is not stable; if you want that behavior, use amrex::StablePartition instead.

Template Parameters
Ttype of the data to be partitioned.
Ftype of the predicate function.
Parameters
datapointer to the data to be partitioned
begindex at which to start
endindex at which to stop (exclusive)
fpredicate function that returns 1 or 0 for each input

Returns the index of the first element for which f is 0.

◆ Partition() [3/3]

template<typename T , typename F >
int amrex::Partition ( T *  data,
int  n,
F &&  f 
)

A GPU-capable partition function for contiguous data.

After calling this, all the items for which the predicate is true will be before the items for which the predicate is false in the input array.

This version is not stable; if you want that behavior, use amrex::StablePartition instead.

Template Parameters
Ttype of the data to be partitioned.
Ftype of the predicate function.
Parameters
datapointer to the data to be partitioned
nthe number of elements in the array
fpredicate function that returns 1 or 0 for each input

Returns the index of the first element for which f is 0.

◆ partitionParticles() [1/2]

template<typename PTile , typename ParFunc >
void amrex::partitionParticles ( PTile &  ptile,
int  num_left,
ParFunc const &  is_left 
)

Reorders the ParticleTile into two partitions: left [0, num_left-1] and right [num_left, ptile.numParticles()-1]. This version of the function requires the correct value of num_left to be passed as an input, which allows it to skip a reduction.

The functor is_left [(ParticleTileData ptd, int index) -> bool] maps each particle to either the left [return true] or the right [return false] partition. It must return the same result if evaluated multiple times for the same particle.

Parameters
ptilethe ParticleTile to partition
num_leftnumber of particles in the left partition
is_leftfunctor to map particles to a partition

◆ partitionParticles() [2/2]

template<typename PTile , typename ParFunc >
int amrex::partitionParticles ( PTile &  ptile,
ParFunc const &  is_left 
)

Reorders the ParticleTile into two partitions left [0, num_left-1] and right [num_left, ptile.numParticles()-1] and returns the number of particles in the left partition.

The functor is_left [(ParticleTileData ptd, int index) -> bool] maps each particle to either the left [return true] or the right [return false] partition. It must return the same result if evaluated multiple times for the same particle.

Parameters
ptilethe ParticleTile to partition
is_leftfunctor to map particles to a partition
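
A hedged sketch of typical usage, following the (ParticleTileData, int) functor form described above (ptile is assumed to be a valid ParticleTile):

int num_left = amrex::partitionParticles(ptile,
    [=] AMREX_GPU_DEVICE (auto const& ptd, int i) { return ptd.id(i).is_valid(); });
// Particles with valid ids are now in [0, num_left-1].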

◆ partitionParticlesByDest()

template<typename PTile , typename PLocator , typename CellAssignor >
int amrex::partitionParticlesByDest ( PTile &  ptile,
const PLocator &  ploc,
CellAssignor const &  assignor,
const ParticleBufferMap pmap,
const GpuArray< Real, 3 > &  plo,
const GpuArray< Real, 3 > &  phi,
const GpuArray< ParticleReal, 3 > &  rlo,
const GpuArray< ParticleReal, 3 > &  rhi,
const GpuArray< int, 3 > &  is_per,
int  lev,
int  gid,
int  ,
int  lev_min,
int  lev_max,
int  nGrow,
bool  remove_negative 
)

◆ pcg_solve()

template<int N, typename T , typename M , typename P >
__host__ __device__ int amrex::pcg_solve ( T *__restrict__  x,
T *__restrict__  r,
M const &  mat,
P const &  precond,
int  maxiter,
T  rel_tol 
)
inline

Preconditioned conjugate gradient solver.

Parameters
xinitial guess
rinitial residual
matmatrix
precondpreconditioner
maxitermax number of iterations
rel_tolrelative tolerance

◆ periodicShift()

MultiFab amrex::periodicShift ( MultiFab const &  mf,
IntVect const &  offset,
Periodicity const &  period 
)

Periodic shift MultiFab.
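
A short sketch (mf is assumed to be a MultiFab and geom its Geometry): make a copy shifted by one cell in the x-direction with periodic wrapping.

MultiFab smf = amrex::periodicShift(mf, IntVect(AMREX_D_DECL(1,0,0)),
                                    geom.periodicity());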

◆ PermutationForDeposition() [1/2]

template<class index_type , class PTile >
void amrex::PermutationForDeposition ( Gpu::DeviceVector< index_type > &  perm,
index_type  nitems,
const PTile &  ptile,
Box  bx,
Geometry  geom,
const IntVect  idx_type 
)

◆ PermutationForDeposition() [2/2]

template<class index_type , typename F >
void amrex::PermutationForDeposition ( Gpu::DeviceVector< index_type > &  perm,
index_type  nitems,
index_type  nbins,
F const &  f 
)

◆ placementDelete() [1/2]

template<typename T >
std::enable_if_t<!std::is_trivially_destructible_v< T > > amrex::placementDelete ( T *const  ptr,
Long  n 
)

◆ placementDelete() [2/2]

template<typename T >
std::enable_if_t< std::is_trivially_destructible_v< T > > amrex::placementDelete ( T * const  ,
Long   
)

◆ placementNew() [1/3]

template<typename T >
std::enable_if_t< std::is_trivially_default_constructible_v< T > &&!std::is_arithmetic_v< T > > amrex::placementNew ( T *const  ptr,
Long  n 
)

◆ placementNew() [2/3]

template<typename T >
std::enable_if_t<!std::is_trivially_default_constructible_v< T > > amrex::placementNew ( T *const  ptr,
Long  n 
)

◆ placementNew() [3/3]

template<typename T >
std::enable_if_t< std::is_arithmetic_v< T > > amrex::placementNew ( T * const  ,
Long   
)

◆ polar()

template<typename T >
__host__ __device__ GpuComplex< T > amrex::polar ( const T &  a_r,
const T &  a_theta 
)
inlinenoexcept

Return a complex number given its polar representation.
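
For example (a sketch; amrex::Math::pi is assumed to be available):

auto z = amrex::polar(2.0, amrex::Math::pi<double>()/2.0);
// z has magnitude 2 and phase pi/2, i.e. approximately 2i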

◆ pout()

std::ostream & amrex::pout ( )

the stream that all output except error msgs should use

Use this in place of std::cout for program output.

In serial this is the standard output, in parallel it is a different file on each proc (see setPoutBaseName()).

Can be used to replace std::cout. In serial this just returns std::cout. In parallel, this creates a separate file for each proc called <basename>.n where n is the procID and <basename> defaults to "pout" but can be set by calling setPoutBaseName(). Output is then directed to these files, which keeps the output from different processors from getting jumbled together. If you want fewer files, you can set the ParmParse parameter amrex.pout_int=nproc and pout.n files will only be written for every nproc-th processor (those where n % nproc == 0).
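
Typical usage (a sketch; step is a hypothetical loop variable):

amrex::pout() << "advancing step " << step << "\n"; // goes to pout.<procID> in parallel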

◆ poutFileName()

const std::string & amrex::poutFileName ( )

return the current filename as used by pout()

Accesses the filename for the local pout() file.

In serial, just returns the string "cout"; in parallel, aborts if MPI has not been initialized.

Returns the name used for the local pout() file. In parallel this is "\<pout_basename\>.\<procID\>", where <pout_basename> defaults to "pout" and can be modified by calling setPoutBaseName(), and <procID> is the local proc number. In serial, this always returns the string "cout". It is an error (exit code 111) to call this in parallel before MPI_Init().

◆ pow() [1/2]

template<typename T >
__host__ __device__ GpuComplex< T > amrex::pow ( const GpuComplex< T > &  a_z,
const T &  a_y 
)
inlinenoexcept

Raise a complex number to a (real) power.

◆ pow() [2/2]

template<typename T >
__host__ __device__ GpuComplex< T > amrex::pow ( const GpuComplex< T > &  a_z,
int  a_n 
)
inlinenoexcept

Raise a complex number to an integer power.
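
For example:

GpuComplex<amrex::Real> z(1.0, 1.0);
auto z2 = amrex::pow(z, 2); // (1+i)^2 == 2i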

◆ PreBuildDirectorHierarchy()

void amrex::PreBuildDirectorHierarchy ( const std::string &  dirName,
const std::string &  subDirPrefix,
int  nSubDirs,
bool  callBarrier 
)

Prebuild a hierarchy of directories. dirName is built first; if dirName already exists, it is renamed. Then dirName/subDirPrefix_0 .. dirName/subDirPrefix_(nSubDirs-1) are built. If callBarrier is true, ParallelDescriptor::Barrier() is called after all directories are built. ParallelDescriptor::IOProcessor() creates the directories.

Parameters
dirName
subDirPrefix
nSubDirs
callBarrier

◆ prefetchToDevice()

template<class FAB , class bar = std::enable_if_t<IsBaseFab<FAB>::value>>
void amrex::prefetchToDevice ( FabArray< FAB > const &  fa,
const bool  synchronous = true 
)

◆ prefetchToHost()

template<class FAB , class bar = std::enable_if_t<IsBaseFab<FAB>::value>>
void amrex::prefetchToHost ( FabArray< FAB > const &  fa,
const bool  synchronous = true 
)

◆ print_state()

void amrex::print_state ( const MultiFab mf,
const IntVect cell,
const int  n,
const IntVect ng 
)

Output state data for a single zone.

◆ printCell()

template<class FAB , class bar = std::enable_if_t<IsBaseFab<FAB>::value>>
void amrex::printCell ( FabArray< FAB > const &  mf,
const IntVect cell,
int  comp = -1,
const IntVect ng = IntVect::TheZeroVector() 
)

◆ ProperlyNested()

template<typename Interp >
bool amrex::ProperlyNested ( const IntVect ratio,
const IntVect blocking_factor,
int  ngrow,
const IndexType boxType,
Interp *  mapper 
)

Test if AMR grids are properly nested.

If grids are not properly nested, FillPatch functions may fail.

Template Parameters
InterpInterpolater type
Parameters
ratiorefinement ratio
blocking_factorblocking factor on the fine level
ngrownumber of ghost cells of fine MultiFab
boxTypeindex type
mapperan interpolater object
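
A hedged sketch: check whether cell-centered data with one ghost cell is properly nested for a refinement ratio of 2 and a blocking factor of 8 (cell_cons_interp is assumed to be the interpolater in use):

bool ok = amrex::ProperlyNested(IntVect(2), IntVect(8), 1,
                                IndexType::TheCellType(), &cell_cons_interp);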

◆ Read()

template<typename FAB >
std::enable_if_t< std::is_same_v< FAB, IArrayBox > > amrex::Read ( FabArray< FAB > &  fa,
const std::string &  name 
)

Read iMultiFab/FabArray<IArrayBox>

This reads an iMultiFab/FabArray<IArrayBox> from disk. If it has been fully defined, the BoxArray on the disk must match the BoxArray in the given iMultiFab/FabArray<IArrayBox> object. If it is only constructed with the default constructor, the BoxArray on the disk will be used and a new DistributionMapping will be made. When this function is used to restart a calculation from checkpoint files, one should use a fully defined iMultiFab/FabArray<IArrayBox> except for the first one in a series of iMultiFab/MultiFab objects that share the same BoxArray/DistributionMapping. This will ensure that they share the same BoxArray/DistributionMapping after restart.

Parameters
fais the iMultiFab.
nameis the base name for the files.
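
A sketch of restarting from a checkpoint (the file name is hypothetical). The iMultiFab is only default-constructed, so the BoxArray on disk is used and a new DistributionMapping is made:

iMultiFab mask; // default-constructed
amrex::Read(mask, "chk00100/Mask");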

◆ readBoxArray()

void amrex::readBoxArray ( BoxArray ba,
std::istream &  is,
bool  bReadSpecial 
)

Read a BoxArray from a stream. If bReadSpecial is true, read in a special way.

◆ readData() [1/4]

void amrex::readData ( double *  data,
std::size_t  size,
std::istream &  is 
)
inline

◆ readData() [2/4]

void amrex::readData ( float *  data,
std::size_t  size,
std::istream &  is 
)
inline

◆ readData() [3/4]

void amrex::readData ( int data,
std::size_t  size,
std::istream &  is 
)
inline

◆ readData() [4/4]

void amrex::readData ( Long data,
std::size_t  size,
std::istream &  is 
)
inline

◆ readDoubleData()

void amrex::readDoubleData ( double *  data,
std::size_t  size,
std::istream &  is,
const RealDescriptor rd 
)

Read double data from the istream. The arguments are a pointer to the data buffer to read into, the size of that buffer, the istream, and a RealDescriptor that describes the format of the data on disk. The buffer is assumed to be large enough to store 'size' doubles, and it is the user's responsibility to allocate this data.

◆ readFloatData()

void amrex::readFloatData ( float *  data,
std::size_t  size,
std::istream &  is,
const RealDescriptor rd 
)

Read float data from the istream. The arguments are a pointer to the data buffer to read into, the size of that buffer, the istream, and a RealDescriptor that describes the format of the data on disk. The buffer is assumed to be large enough to store 'size' floats, and it is the user's responsibility to allocate this data.

◆ ReadHDF5Attr()

static int amrex::ReadHDF5Attr ( hid_t  loc,
const char *  name,
void *  data,
hid_t  dtype 
)
static

◆ readIntData() [1/2]

void amrex::readIntData ( int data,
std::size_t  size,
std::istream &  is,
const IntDescriptor id 
)

Read int data from the istream. The arguments are a pointer to the data buffer to read into, the size of that buffer, the istream, and an IntDescriptor that describes the format of the data on disk. The buffer is assumed to be large enough to store 'size' integers, and it is the user's responsibility to allocate this data.

◆ readIntData() [2/2]

template<typename To , typename From >
void amrex::readIntData ( To *  data,
std::size_t  size,
std::istream &  is,
const amrex::IntDescriptor id 
)

◆ readLongData()

void amrex::readLongData ( Long data,
std::size_t  size,
std::istream &  is,
const IntDescriptor id 
)

Read Long data from the istream. The arguments are a pointer to the data buffer to read into, the size of that buffer, the istream, and an IntDescriptor that describes the format of the data on disk. The buffer is assumed to be large enough to store 'size' longs, and it is the user's responsibility to allocate this data.

◆ readRealData()

void amrex::readRealData ( Real data,
std::size_t  size,
std::istream &  is,
const RealDescriptor rd 
)

Read Real data from the istream. The arguments are a pointer to the data buffer to read into, the size of that buffer, the istream, and a RealDescriptor that describes the format of the data on disk. The buffer is assumed to be large enough to store 'size' Reals, and it is the user's responsibility to allocate this data.

◆ RealVectCat()

template<int d, int... dims>
__host__ __device__ constexpr RealVectND< detail::get_sum< d, dims... >()> amrex::RealVectCat ( const RealVectND< d > &  v,
const RealVectND< dims > &...  vects 
)
inlineconstexprnoexcept

Returns a RealVectND obtained by concatenating the input RealVectNDs. The dimension of the return value equals the sum of the dimensions of the input RealVectNDs.

◆ RealVectExpand()

template<int new_dim, int old_dim>
__host__ __device__ constexpr RealVectND< new_dim > amrex::RealVectExpand ( const RealVectND< old_dim > &  iv,
Real  fill_extra = 0 
)
inlineconstexprnoexcept

Returns a new RealVectND of size new_dim and assigns all values of iv to it and fill_extra to the remaining elements.

◆ RealVectND() [1/3]

template<int dim>
__host__ __device__ amrex::RealVectND ( const GpuArray< Real, dim > &  ) -> RealVectND< dim >

◆ RealVectND() [2/3]

template<int dim>
__host__ __device__ amrex::RealVectND ( const IntVectND< dim > &  ) -> RealVectND< dim >

◆ RealVectND() [3/3]

template<class... Args, std::enable_if_t< IsConvertible_v< Real, Args... >, int > = 0>
__host__ __device__ amrex::RealVectND ( Real  ,
Real  ,
Args...   
) -> RealVectND< sizeof...(Args)+2 >

◆ RealVectResize()

template<int new_dim, int old_dim>
__host__ __device__ constexpr RealVectND< new_dim > amrex::RealVectResize ( const RealVectND< old_dim > &  iv,
Real  fill_extra = 0 
)
inlineconstexprnoexcept

Returns a new RealVectND of size new_dim by either shrinking or expanding iv.

◆ RealVectShrink()

template<int new_dim, int old_dim>
__host__ __device__ constexpr RealVectND< new_dim > amrex::RealVectShrink ( const RealVectND< old_dim > &  iv)
inlineconstexprnoexcept

Returns a new RealVectND of size new_dim and assigns the first new_dim values of iv to it.

◆ RealVectSplit()

template<int d, int... dims>
__host__ __device__ constexpr GpuTuple< RealVectND< d >, RealVectND< dims >... > amrex::RealVectSplit ( const RealVectND< detail::get_sum< d, dims... >()> &  v)
inlineconstexprnoexcept

Returns a tuple of RealVectND obtained by splitting the input RealVectND according to the dimensions specified by the template arguments.

◆ ReduceLogicalAnd() [1/7]

template<class FAB , class F , class bar = std::enable_if_t<IsBaseFab<FAB>::value>>
bool amrex::ReduceLogicalAnd ( FabArray< FAB > const &  fa,
int  nghost,
F &&  f 
)

◆ ReduceLogicalAnd() [2/7]

template<class FAB , class F , class bar = std::enable_if_t<IsBaseFab<FAB>::value>>
bool amrex::ReduceLogicalAnd ( FabArray< FAB > const &  fa,
IntVect const &  nghost,
F &&  f 
)

◆ ReduceLogicalAnd() [3/7]

template<class FAB1 , class FAB2 , class F , class bar = std::enable_if_t<IsBaseFab<FAB1>::value>>
bool amrex::ReduceLogicalAnd ( FabArray< FAB1 > const &  fa1,
FabArray< FAB2 > const &  fa2,
int  nghost,
F &&  f 
)

◆ ReduceLogicalAnd() [4/7]

template<class FAB1 , class FAB2 , class F , class bar = std::enable_if_t<IsBaseFab<FAB1>::value>>
bool amrex::ReduceLogicalAnd ( FabArray< FAB1 > const &  fa1,
FabArray< FAB2 > const &  fa2,
IntVect const &  nghost,
F &&  f 
)

◆ ReduceLogicalAnd() [5/7]

template<class PC , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
bool amrex::ReduceLogicalAnd ( PC const &  pc,
F &&  f 
)

A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates over all particles on all levels.

This version uses "LogicalAnd" as the reduction operation. The quantity reduced over is an arbitrary function of a "superparticle", which contains all the data in the particle type, whether it is stored in AoS or SoA form.

Note that there is no MPI reduction performed at the end of this operation. Users should manually call the MPI reduction operations described in ParallelDescriptor if they want that behavior.

Template Parameters
PCthe ParticleContainer type
Fa function object
Parameters
pcthe ParticleContainer to operate on
fa callable that operates on a single particle. Example forms:
using PType = typename PC::ParticleType;
[=] AMREX_GPU_HOST_DEVICE (const PType& p) -> bool
{
return p.id().is_valid();
});
using SPType = typename PC::SuperParticleType;
[=] AMREX_GPU_HOST_DEVICE (const SPType& p) -> bool
{
return p.id().is_valid();
});
using ConstPTDType = typename PC::ConstPTDType;
[=] AMREX_GPU_HOST_DEVICE (const ConstPTDType& ptd, const int i) -> bool
{
return ptd.id(i).is_valid();
});

◆ ReduceLogicalAnd() [6/7]

template<class PC , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
bool amrex::ReduceLogicalAnd ( PC const &  pc,
int  lev,
F &&  f 
)

A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates only on the specified level.

This version uses "LogicalAnd" as the reduction operation. The quantity reduced over is an arbitrary function of a "superparticle", which contains all the data in the particle type, whether it is stored in AoS or SoA form.

Note that there is no MPI reduction performed at the end of this operation. Users should manually call the MPI reduction operations described in ParallelDescriptor if they want that behavior.

Template Parameters
PCthe ParticleContainer type
Fa function object
Parameters
pcthe ParticleContainer to operate on
levthe level to operate on
fa callable that operates on a single particle. Example forms:
using PType = typename PC::ParticleType;
[=] AMREX_GPU_HOST_DEVICE (const PType& p) -> bool
{
return p.id().is_valid();
});
using SPType = typename PC::SuperParticleType;
[=] AMREX_GPU_HOST_DEVICE (const SPType& p) -> bool
{
return p.id().is_valid();
});
using ConstPTDType = typename PC::ConstPTDType;
[=] AMREX_GPU_HOST_DEVICE (const ConstPTDType& ptd, const int i) -> bool
{
return ptd.id(i).is_valid();
});

◆ ReduceLogicalAnd() [7/7]

template<class PC , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
bool amrex::ReduceLogicalAnd ( PC const &  pc,
int  lev_min,
int  lev_max,
F const &  f 
)

A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates from the specified lev_min to lev_max.

This version uses "LogicalAnd" as the reduction operation. The quantity reduced over is an arbitrary function of a "superparticle", which contains all the data in the particle type, whether it is stored in AoS or SoA form.

Note that there is no MPI reduction performed at the end of this operation. Users should manually call the MPI reduction operations described in ParallelDescriptor if they want that behavior.

Template Parameters
PCthe ParticleContainer type
Fa function object
Parameters
pcthe ParticleContainer to operate on
lev_minthe minimum level to include
lev_maxthe maximum level to include
fa callable that operates on a single particle. Example forms:
using PType = typename PC::ParticleType;
[=] AMREX_GPU_HOST_DEVICE (const PType& p) -> bool
{
return p.id().is_valid();
});
using SPType = typename PC::SuperParticleType;
[=] AMREX_GPU_HOST_DEVICE (const SPType& p) -> bool
{
return p.id().is_valid();
});
using ConstPTDType = typename PC::ConstPTDType;
[=] AMREX_GPU_HOST_DEVICE (const ConstPTDType& ptd, const int i) -> bool
{
return ptd.id(i).is_valid();
});

◆ ReduceLogicalOr() [1/7]

template<class FAB , class F , class bar = std::enable_if_t<IsBaseFab<FAB>::value>>
bool amrex::ReduceLogicalOr ( FabArray< FAB > const &  fa,
int  nghost,
F &&  f 
)

◆ ReduceLogicalOr() [2/7]

template<class FAB , class F , class bar = std::enable_if_t<IsBaseFab<FAB>::value>>
bool amrex::ReduceLogicalOr ( FabArray< FAB > const &  fa,
IntVect const &  nghost,
F &&  f 
)

◆ ReduceLogicalOr() [3/7]

template<class FAB1 , class FAB2 , class F , class bar = std::enable_if_t<IsBaseFab<FAB1>::value>>
bool amrex::ReduceLogicalOr ( FabArray< FAB1 > const &  fa1,
FabArray< FAB2 > const &  fa2,
int  nghost,
F &&  f 
)

◆ ReduceLogicalOr() [4/7]

template<class FAB1 , class FAB2 , class F , class bar = std::enable_if_t<IsBaseFab<FAB1>::value>>
bool amrex::ReduceLogicalOr ( FabArray< FAB1 > const &  fa1,
FabArray< FAB2 > const &  fa2,
IntVect const &  nghost,
F &&  f 
)

◆ ReduceLogicalOr() [5/7]

template<class PC , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
bool amrex::ReduceLogicalOr ( PC const &  pc,
F &&  f 
)

A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates over all particles on all levels.

This version uses "LogicalOr" as the reduction operation. The quantity reduced over is an arbitrary function of a "superparticle", which contains all the data in the particle type, whether it is stored in AoS or SoA form.

Note that there is no MPI reduction performed at the end of this operation. Users should manually call the MPI reduction operations described in ParallelDescriptor if they want that behavior.

Template Parameters
PCthe ParticleContainer type
Fa function object
Parameters
pcthe ParticleContainer to operate on
fa callable that operates on a single particle. Example forms:
using PType = typename PC::ParticleType;
[=] AMREX_GPU_HOST_DEVICE (const PType& p) -> bool
{
return !p.id().is_valid();
});
using SPType = typename PC::SuperParticleType;
[=] AMREX_GPU_HOST_DEVICE (const SPType& p) -> bool
{
return !p.id().is_valid();
});
using ConstPTDType = typename PC::ConstPTDType;
[=] AMREX_GPU_HOST_DEVICE (const ConstPTDType& ptd, const int i) -> bool
{
return !ptd.id(i).is_valid();
});

◆ ReduceLogicalOr() [6/7]

template<class PC , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
bool amrex::ReduceLogicalOr ( PC const &  pc,
int  lev,
F &&  f 
)

A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates only on the specified level.

This version uses "LogicalOr" as the reduction operation. The quantity reduced over is an arbitrary function of a "superparticle", which contains all the data in the particle type, whether it is stored in AoS or SoA form.

Note that there is no MPI reduction performed at the end of this operation. Users should manually call the MPI reduction operations described in ParallelDescriptor if they want that behavior.

Template Parameters
PCthe ParticleContainer type
Fa function object
Parameters
pcthe ParticleContainer to operate on
levthe level to operate on
fa callable that operates on a single particle. Example forms:
using PType = typename PC::ParticleType;
[=] AMREX_GPU_HOST_DEVICE (const PType& p) -> bool
{
return !p.id().is_valid();
});
using SPType = typename PC::SuperParticleType;
[=] AMREX_GPU_HOST_DEVICE (const SPType& p) -> bool
{
return !p.id().is_valid();
});
using ConstPTDType = typename PC::ConstPTDType;
[=] AMREX_GPU_HOST_DEVICE (const ConstPTDType& ptd, const int i) -> bool
{
return !ptd.id(i).is_valid();
});

◆ ReduceLogicalOr() [7/7]

template<class PC , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
bool amrex::ReduceLogicalOr ( PC const &  pc,
int  lev_min,
int  lev_max,
F const &  f 
)

A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates from the specified lev_min to lev_max.

This version uses "LogicalOr" as the reduction operation. The quantity reduced over is an arbitrary function of a "superparticle", which contains all the data in the particle type, whether it is stored in AoS or SoA form.

Note that there is no MPI reduction performed at the end of this operation. Users should manually call the MPI reduction operations described in ParallelDescriptor if they want that behavior.

Template Parameters
PCthe ParticleContainer type
Fa function object
Parameters
pcthe ParticleContainer to operate on
lev_minthe minimum level to include
lev_maxthe maximum level to include
fa callable that operates on a single particle. Example forms:
using PType = typename PC::ParticleType;
[=] AMREX_GPU_HOST_DEVICE (const PType& p) -> bool
{
return !p.id().is_valid();
});
using SPType = typename PC::SuperParticleType;
[=] AMREX_GPU_HOST_DEVICE (const SPType& p) -> bool
{
return !p.id().is_valid();
});
using ConstPTDType = typename PC::ConstPTDType;
[=] AMREX_GPU_HOST_DEVICE (const ConstPTDType& ptd, const int i) -> bool
{
return !ptd.id(i).is_valid();
});

◆ ReduceMax() [1/9]

template<class FAB , class F , class bar = std::enable_if_t<IsBaseFab<FAB>::value>>
FAB::value_type amrex::ReduceMax ( FabArray< FAB > const &  fa,
int  nghost,
F &&  f 
)

◆ ReduceMax() [2/9]

template<class FAB , class F , class bar = std::enable_if_t<IsBaseFab<FAB>::value>>
FAB::value_type amrex::ReduceMax ( FabArray< FAB > const &  fa,
IntVect const &  nghost,
F &&  f 
)

◆ ReduceMax() [3/9]

template<class FAB1 , class FAB2 , class FAB3 , class F , class bar = std::enable_if_t<IsBaseFab<FAB1>::value>>
FAB1::value_type amrex::ReduceMax ( FabArray< FAB1 > const &  fa1,
FabArray< FAB2 > const &  fa2,
FabArray< FAB3 > const &  fa3,
int  nghost,
F &&  f 
)

◆ ReduceMax() [4/9]

template<class FAB1 , class FAB2 , class FAB3 , class F , class bar = std::enable_if_t<IsBaseFab<FAB1>::value>>
FAB1::value_type amrex::ReduceMax ( FabArray< FAB1 > const &  fa1,
FabArray< FAB2 > const &  fa2,
FabArray< FAB3 > const &  fa3,
IntVect const &  nghost,
F &&  f 
)

◆ ReduceMax() [5/9]

template<class FAB1 , class FAB2 , class F , class bar = std::enable_if_t<IsBaseFab<FAB1>::value>>
FAB1::value_type amrex::ReduceMax ( FabArray< FAB1 > const &  fa1,
FabArray< FAB2 > const &  fa2,
int  nghost,
F &&  f 
)

◆ ReduceMax() [6/9]

template<class FAB1 , class FAB2 , class F , class bar = std::enable_if_t<IsBaseFab<FAB1>::value>>
FAB1::value_type amrex::ReduceMax ( FabArray< FAB1 > const &  fa1,
FabArray< FAB2 > const &  fa2,
IntVect const &  nghost,
F &&  f 
)

◆ ReduceMax() [7/9]

template<class PC , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
auto amrex::ReduceMax ( PC const &  pc,
F &&  f 
) -> decltype(particle_detail::call_f(f, typename PC::ConstPTDType(), int()))

A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates over all particles on all levels.

This version uses "Max" as the reduction operation. The quantity reduced over is an arbitrary function of a "superparticle", which contains all the data in the particle type, whether it is stored in AoS or SoA form.

Note that there is no MPI reduction performed at the end of this operation. Users should manually call the MPI reduction operations described in ParallelDescriptor if they want that behavior.

Template Parameters
PCthe ParticleContainer type
Fa function object
Parameters
pcthe ParticleContainer to operate on
fa callable that operates on a single particle. Example forms:
using PType = typename PC::ParticleType;
auto mx = amrex::ReduceMax(pc,
[=] AMREX_GPU_HOST_DEVICE (const PType& p) -> ParticleReal
{
return p.rdata(0);
});
using SPType = typename PC::SuperParticleType;
auto mx = amrex::ReduceMax(pc,
[=] AMREX_GPU_HOST_DEVICE (const SPType& p) -> int
{
return p.idata(0);
});
using ConstPTDType = typename PC::ConstPTDType;
auto mx = amrex::ReduceMax(pc,
[=] AMREX_GPU_HOST_DEVICE (const ConstPTDType& ptd, const int i) -> ParticleReal
{
return ptd.rdata(0)[i];
});

◆ ReduceMax() [8/9]

template<class PC , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
auto amrex::ReduceMax ( PC const &  pc,
int  lev,
F &&  f 
) -> decltype(particle_detail::call_f(f, typename PC::ConstPTDType(), int()))

A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates only on the specified level.

This version uses "Mas" as the reduction operation. The quantity reduced over is an arbitrary function of a "superparticle", which contains all the data in the particle type, whether it is stored in AoS or SoA form.

Note that there is no MPI reduction performed at the end of this operation. Users should manually call the MPI reduction operations described in ParallelDescriptor if they want that behavior.

Template Parameters
PCthe ParticleContainer type
Fa function object
Parameters
pcthe ParticleContainer to operate on
levthe level to operate on
fa callable that operates on a single particle. Example forms:
using PType = typename PC::ParticleType;
auto mx = amrex::ReduceMax(pc,
[=] AMREX_GPU_HOST_DEVICE (const PType& p) -> ParticleReal
{
return p.rdata(0);
});
using SPType = typename PC::SuperParticleType;
auto mx = amrex::ReduceMax(pc,
[=] AMREX_GPU_HOST_DEVICE (const SPType& p) -> int
{
return p.idata(0);
});
using ConstPTDType = typename PC::ConstPTDType;
auto mx = amrex::ReduceMax(pc,
[=] AMREX_GPU_HOST_DEVICE (const ConstPTDType& ptd, const int i) -> ParticleReal
{
return ptd.rdata(0)[i];
});

◆ ReduceMax() [9/9]

template<class PC , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
auto amrex::ReduceMax ( PC const &  pc,
int  lev_min,
int  lev_max,
F const &  f 
) -> decltype(particle_detail::call_f(f, typename PC::ConstPTDType(), int()))

A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates from the specified lev_min to lev_max.

This version uses "Max" as the reduction operation. The quantity reduced over is an arbitrary function of a "superparticle", which contains all the data in the particle type, whether it is stored in AoS or SoA form.

Note that there is no MPI reduction performed at the end of this operation. Users should manually call the MPI reduction operations described in ParallelDescriptor if they want that behavior.

Template Parameters
PCthe ParticleContainer type
Fa function object
Parameters
pcthe ParticleContainer to operate on
lev_minthe minimum level to include
lev_maxthe maximum level to include
fa callable that operates on a single particle. Example forms:
using PType = typename PC::ParticleType;
auto mx = amrex::ReduceMax(pc,
[=] AMREX_GPU_HOST_DEVICE (const PType& p) -> ParticleReal
{
return p.rdata(0);
});
using SPType = typename PC::SuperParticleType;
auto mx = amrex::ReduceMax(pc,
[=] AMREX_GPU_HOST_DEVICE (const SPType& p) -> int
{
return p.idata(0);
});
using ConstPTDType = typename PC::ConstPTDType;
auto mx = amrex::ReduceMax(pc,
[=] AMREX_GPU_HOST_DEVICE (const ConstPTDType& ptd, const int i) -> ParticleReal
{
return ptd.rdata(0)[i];
});

◆ ReduceMin() [1/9]

template<class FAB , class F , class bar = std::enable_if_t<IsBaseFab<FAB>::value>>
FAB::value_type amrex::ReduceMin ( FabArray< FAB > const &  fa,
int  nghost,
F &&  f 
)

◆ ReduceMin() [2/9]

template<class FAB , class F , class bar = std::enable_if_t<IsBaseFab<FAB>::value>>
FAB::value_type amrex::ReduceMin ( FabArray< FAB > const &  fa,
IntVect const &  nghost,
F &&  f 
)

◆ ReduceMin() [3/9]

template<class FAB1 , class FAB2 , class FAB3 , class F , class bar = std::enable_if_t<IsBaseFab<FAB1>::value>>
FAB1::value_type amrex::ReduceMin ( FabArray< FAB1 > const &  fa1,
FabArray< FAB2 > const &  fa2,
FabArray< FAB3 > const &  fa3,
int  nghost,
F &&  f 
)

◆ ReduceMin() [4/9]

template<class FAB1 , class FAB2 , class FAB3 , class F , class bar = std::enable_if_t<IsBaseFab<FAB1>::value>>
FAB1::value_type amrex::ReduceMin ( FabArray< FAB1 > const &  fa1,
FabArray< FAB2 > const &  fa2,
FabArray< FAB3 > const &  fa3,
IntVect const &  nghost,
F &&  f 
)

◆ ReduceMin() [5/9]

template<class FAB1 , class FAB2 , class F , class bar = std::enable_if_t<IsBaseFab<FAB1>::value>>
FAB1::value_type amrex::ReduceMin ( FabArray< FAB1 > const &  fa1,
FabArray< FAB2 > const &  fa2,
int  nghost,
F &&  f 
)

◆ ReduceMin() [6/9]

template<class FAB1 , class FAB2 , class F , class bar = std::enable_if_t<IsBaseFab<FAB1>::value>>
FAB1::value_type amrex::ReduceMin ( FabArray< FAB1 > const &  fa1,
FabArray< FAB2 > const &  fa2,
IntVect const &  nghost,
F &&  f 
)

◆ ReduceMin() [7/9]

template<class PC , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
auto amrex::ReduceMin ( PC const &  pc,
F &&  f 
) -> decltype(particle_detail::call_f(f, typename PC::ConstPTDType(), int()))

A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates over all particles on all levels.

This version uses "Min" as the reduction operation. The quantity reduced over is an arbitrary function of a "superparticle", which contains all the data in the particle type, whether it is stored in AoS or SoA form.

Note that there is no MPI reduction performed at the end of this operation. Users should manually call the MPI reduction operations described in ParallelDescriptor if they want that behavior.

Template Parameters
PCthe ParticleContainer type
Fa function object
Parameters
pcthe ParticleContainer to operate on
fa callable that operates on a single particle. Example forms:
using PType = typename PC::ParticleType;
auto mn = amrex::ReduceMin(pc,
[=] AMREX_GPU_HOST_DEVICE (const PType& p) -> ParticleReal
{
return p.rdata(0);
});
using SPType = typename PC::SuperParticleType;
auto mn = amrex::ReduceMin(pc,
[=] AMREX_GPU_HOST_DEVICE (const SPType& p) -> int
{
return p.idata(0);
});
using ConstPTDType = typename PC::ConstPTDType;
auto mn = amrex::ReduceMin(pc,
[=] AMREX_GPU_HOST_DEVICE (const ConstPTDType& ptd, const int i) -> ParticleReal
{
return ptd.rdata(0)[i];
});

◆ ReduceMin() [8/9]

template<class PC , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
auto amrex::ReduceMin ( PC const &  pc,
int  lev,
F &&  f 
) -> decltype(particle_detail::call_f(f, typename PC::ConstPTDType(), int()))

A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates only on the specified level.

This version uses "Min" as the reduction operation. The quantity reduced over is an arbitrary function of a "superparticle", which contains all the data in the particle type, whether it is stored in AoS or SoA form.

Note that there is no MPI reduction performed at the end of this operation. Users should manually call the MPI reduction operations described in ParallelDescriptor if they want that behavior.

Template Parameters
PCthe ParticleContainer type
Fa function object
Parameters
pcthe ParticleContainer to operate on
levthe level to operate on
fa callable that operates on a single particle. Example forms:
using PType = typename PC::ParticleType;
auto mn = amrex::ReduceMin(pc,
[=] AMREX_GPU_HOST_DEVICE (const PType& p) -> ParticleReal
{
return p.rdata(0);
});
using SPType = typename PC::SuperParticleType;
auto mn = amrex::ReduceMin(pc,
[=] AMREX_GPU_HOST_DEVICE (const SPType& p) -> int
{
return p.idata(0);
});
using ConstPTDType = typename PC::ConstPTDType;
auto mn = amrex::ReduceMin(pc,
[=] AMREX_GPU_HOST_DEVICE (const ConstPTDType& ptd, const int i) -> ParticleReal
{
return ptd.rdata(0)[i];
});

◆ ReduceMin() [9/9]

template<class PC , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
auto amrex::ReduceMin ( PC const &  pc,
int  lev_min,
int  lev_max,
F const &  f 
) -> decltype(particle_detail::call_f(f, typename PC::ConstPTDType(), int()))

A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates from the specified lev_min to lev_max.

This version uses "Min" as the reduction operation. The quantity reduced over is an arbitrary function of a "superparticle", which contains all the data in the particle type, whether it is stored in AoS or SoA form.

Note that there is no MPI reduction performed at the end of this operation. Users should manually call the MPI reduction operations described in ParallelDescriptor if they want that behavior.

Template Parameters
PCthe ParticleContainer type
Fa function object
Parameters
pcthe ParticleContainer to operate on
lev_minthe minimum level to include
lev_maxthe maximum level to include
fa callable that operates on a single particle. Example forms:
using PType = typename PC::ParticleType;
auto mn = amrex::ReduceMin(pc,
[=] AMREX_GPU_HOST_DEVICE (const PType& p) -> ParticleReal
{
return p.rdata(0);
});
using SPType = typename PC::SuperParticleType;
auto mn = amrex::ReduceMin(pc,
[=] AMREX_GPU_HOST_DEVICE (const SPType& p) -> int
{
return p.idata(0);
});
using ConstPTDType = typename PC::ConstPTDType;
auto mn = amrex::ReduceMin(pc,
[=] AMREX_GPU_HOST_DEVICE (const ConstPTDType& ptd, const int i) -> ParticleReal
{
return ptd.rdata(0)[i];
});

◆ ReduceSum() [1/9]

template<class FAB , class F , class bar = std::enable_if_t<IsBaseFab<FAB>::value>>
FAB::value_type amrex::ReduceSum ( FabArray< FAB > const &  fa,
int  nghost,
F &&  f 
)

◆ ReduceSum() [2/9]

template<class FAB , class F , class bar = std::enable_if_t<IsBaseFab<FAB>::value>>
FAB::value_type amrex::ReduceSum ( FabArray< FAB > const &  fa,
IntVect const &  nghost,
F &&  f 
)

◆ ReduceSum() [3/9]

template<class FAB1 , class FAB2 , class FAB3 , class F , class bar = std::enable_if_t<IsBaseFab<FAB1>::value>>
FAB1::value_type amrex::ReduceSum ( FabArray< FAB1 > const &  fa1,
FabArray< FAB2 > const &  fa2,
FabArray< FAB3 > const &  fa3,
int  nghost,
F &&  f 
)

◆ ReduceSum() [4/9]

template<class FAB1 , class FAB2 , class FAB3 , class F , class bar = std::enable_if_t<IsBaseFab<FAB1>::value>>
FAB1::value_type amrex::ReduceSum ( FabArray< FAB1 > const &  fa1,
FabArray< FAB2 > const &  fa2,
FabArray< FAB3 > const &  fa3,
IntVect const &  nghost,
F &&  f 
)

◆ ReduceSum() [5/9]

template<class FAB1 , class FAB2 , class F , class bar = std::enable_if_t<IsBaseFab<FAB1>::value>>
FAB1::value_type amrex::ReduceSum ( FabArray< FAB1 > const &  fa1,
FabArray< FAB2 > const &  fa2,
int  nghost,
F &&  f 
)

◆ ReduceSum() [6/9]

template<class FAB1 , class FAB2 , class F , class bar = std::enable_if_t<IsBaseFab<FAB1>::value>>
FAB1::value_type amrex::ReduceSum ( FabArray< FAB1 > const &  fa1,
FabArray< FAB2 > const &  fa2,
IntVect const &  nghost,
F &&  f 
)

◆ ReduceSum() [7/9]

template<class PC , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
auto amrex::ReduceSum ( PC const &  pc,
F &&  f 
) -> decltype(particle_detail::call_f(f, typename PC::ConstPTDType(), int()))

A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates over all particles on all levels.

This version uses "Sum" as the reduction operation. The quantity reduced over is an arbitrary function of a "superparticle", which contains all the data in the particle type, whether it is stored in AoS or SoA form.

Note that there is no MPI reduction performed at the end of this operation. Users should manually call the MPI reduction operations described in ParallelDescriptor if they want that behavior.

Template Parameters
PCthe ParticleContainer type
Fa function object
Parameters
pcthe ParticleContainer to operate on
fa callable that operates on a single particle. Example forms:
using PType = typename PC::ParticleType;
auto sm = amrex::ReduceSum(pc,
[=] AMREX_GPU_HOST_DEVICE (const PType& p) -> ParticleReal
{
return p.rdata(0);
});
using SPType = typename PC::SuperParticleType;
auto sm = amrex::ReduceSum(pc,
[=] AMREX_GPU_HOST_DEVICE (const SPType& p) -> int
{
return p.idata(0);
});
using ConstPTDType = typename PC::ConstPTDType;
auto sm = amrex::ReduceSum(pc,
[=] AMREX_GPU_HOST_DEVICE (const ConstPTDType& ptd, const int i) -> ParticleReal
{
return ptd.rdata(0)[i];
});

◆ ReduceSum() [8/9]

template<class PC , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
auto amrex::ReduceSum ( PC const &  pc,
int  lev,
F &&  f 
) -> decltype(particle_detail::call_f(f, typename PC::ConstPTDType(), int()))

A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates only on the specified level.

This version uses "Sum" as the reduction operation. The quantity reduced over is an arbitrary function of a "superparticle", which contains all the data in the particle type, whether it is stored in AoS or SoA form.

Note that there is no MPI reduction performed at the end of this operation. Users should manually call the MPI reduction operations described in ParallelDescriptor if they want that behavior.

Template Parameters
PCthe ParticleContainer type
Fa function object
Parameters
pcthe ParticleContainer to operate on
levthe level to operate on
fa callable that operates on a single particle. Example forms:
using PType = typename PC::ParticleType;
auto sm = amrex::ReduceSum(pc,
[=] AMREX_GPU_HOST_DEVICE (const PType& p) -> ParticleReal
{
return p.rdata(0);
});
using SPType = typename PC::SuperParticleType;
auto sm = amrex::ReduceSum(pc,
[=] AMREX_GPU_HOST_DEVICE (const SPType& p) -> int
{
return p.idata(0);
});
using ConstPTDType = typename PC::ConstPTDType;
auto sm = amrex::ReduceSum(pc,
[=] AMREX_GPU_HOST_DEVICE (const ConstPTDType& ptd, const int i) -> ParticleReal
{
return ptd.rdata(0)[i];
});

◆ ReduceSum() [9/9]

template<class PC , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
auto amrex::ReduceSum ( PC const &  pc,
int  lev_min,
int  lev_max,
F const &  f 
) -> decltype(particle_detail::call_f(f, typename PC::ConstPTDType(), int()))

A general reduction method for the particles in a ParticleContainer that can run on either CPUs or GPUs. This version operates from the specified lev_min to lev_max.

This version uses "Sum" as the reduction operation. The quantity reduced over is an arbitrary function of a "superparticle", which contains all the data in the particle type, whether it is stored in AoS or SoA form.

Note that there is no MPI reduction performed at the end of this operation. Users should manually call the MPI reduction operations described in ParallelDescriptor if they want that behavior.

Template Parameters
PCthe ParticleContainer type
Fa function object
Parameters
pcthe ParticleContainer to operate on
lev_minthe minimum level to include
lev_maxthe maximum level to include
fa callable that operates on a single particle. Example forms:
using PType = typename PC::ParticleType;
auto sm = amrex::ReduceSum(pc,
[=] AMREX_GPU_HOST_DEVICE (const PType& p) -> ParticleReal
{
return p.rdata(0);
});
using SPType = typename PC::SuperParticleType;
auto sm = amrex::ReduceSum(pc,
[=] AMREX_GPU_HOST_DEVICE (const SPType& p) -> int
{
return p.idata(0);
});
using ConstPTDType = typename PC::ConstPTDType;
auto sm = amrex::ReduceSum(pc,
[=] AMREX_GPU_HOST_DEVICE (const ConstPTDType& ptd, const int i) -> ParticleReal
{
return ptd.rdata(0)[i];
});

◆ ReduceToPlane()

template<typename Op , typename T , typename FAB , typename F , std::enable_if_t< IsBaseFab< FAB >::value, int > FOO = 0>
BaseFab< T > amrex::ReduceToPlane ( int  direction,
Box const &  domain,
FabArray< FAB > const &  mf,
F const &  f 
)

Reduce FabArray/MultiFab data to a plane Fab.

This function takes a FabArray/MultiFab and reduces its data to a plane. The return data are stored in a BaseFab with only one cell in the normal direction of the plane. The index range of the BaseFab in the other directions is the same as the provided domain Box. If no data exist along a given line, the value is set to the lowest possible value for a max reduction, the highest possible value for a min reduction, and zero for a sum reduction. The reduction is local; the user may need to perform MPI communication afterwards.

In the example code below, the sum along each line at (i,j) in the z-direction is computed and stored at (i,j,0) of the returned BaseFab.

int dir = 2; // z-direction
auto const& domain_box = geom.Domain();
auto const& ma = mf.const_arrays();
auto rr = ReduceToPlane<ReduceOpSum,Real>(dir, domain_box, mf,
[=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k) -> Real
{
return ma[box_no](i,j,k); // data at (i,j,k) of Box box_no
});

Below is another example. This finds the maximum value in the x-direction and stores the maximum value and the i-index. An MPI reduce is then called to further reduce the data to the root process 0.

int dir = 0; // x-direction
auto const& domain_box = geom.Domain().surroundingNodes(); // nodal data
auto const& ma = mf.const_arrays();
auto rr = ReduceToPlane<ReduceOpMax,KeyValuePair<Real,int>>
(dir, domain_box, mf,
[=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k)
{
return {ma[box_no](i,j,k), i};
});
ParallelReduce::Max(rr.dataPtr(), rr.size(), root,
                    ParallelDescriptor::Communicator());
// Process root now has the final results.
Template Parameters
Opreduce operator (e.g., ReduceOpSum, ReduceOpMin and ReduceOpMax)
Tdata type of reduction result
FABFabArray/MultiFab type
Fcallable type like a lambda function
Parameters
directionnormal direction of the plane (e.g., 0, 1 and 2)
domaindomain Box
mfa FabArray/MultiFab object specifying the iteration space
fa callable object returning T. It takes four ints, where the first int is the local box index and the others are spatial indices for x, y, and z-directions.
Returns
reduction result (BaseFab<T>)

◆ ReduceToPlaneMF()

template<typename Op , typename FA , typename F , std::enable_if_t< IsMultiFabLike_v< FA >, int > FOO = 0>
FA amrex::ReduceToPlaneMF ( int  direction,
Box const &  domain,
FA const &  mf,
F const &  f 
)

Reduce FabArray/MultiFab data to plane FabArray.

This function takes a FabArray/MultiFab and reduces its data to a plane. The first template parameter specifies the reduction operation. Currently only ReduceOpSum is supported. The return data is a FabArray whose BoxArray is based on the input FabArray, with each box shrunk to just one cell in the specified direction and its index set to zero. Its DistributionMapping is the same as the input FabArray's. Note that the returned FA may contain duplicated Boxes. It contains global reduction results that have been MPI reduced. This behavior is different from that of ReduceToPlane, which returns local reduction results.

auto const& ma = mf.const_arrays();
auto mf2 = ReduceToPlaneMF<ReduceOpSum>
(2, domain, mf, [=] AMREX_GPU_DEVICE (int b, int i, int j, int k)
{
return ma[b](i,j,k);
});
auto nz = domain.length(2); // number of cells in z-direction
auto const& ma2 = mf2.const_arrays();
ParallelFor(mf, [=] AMREX_GPU_DEVICE (int b, int i, int j, int k)
{
ma[b](i,j,k) -= ma2[b](i,j,0) / nz; // (i,j,0) is used to access ma2
});
// If sync is needed, call Gpu::streamSynchronize()
// mf now contains data with z-direction line average subtracted
Template Parameters
Opreduce operator (Must be ReduceOpSum for now.)
FAType of FabArray
Fcallable type like a lambda function
Parameters
directionnormal direction of the plane (e.g., 0, 1 and 2)
domaindomain Box
mfa FabArray/MultiFab object specifying the iteration space
fa callable object returning data for reduction. It takes four ints, where the first int is the local box index and the others are spatial indices for the x, y, and z-directions.
Returns
reduction result (FA)

◆ ReduceToPlaneMF2()

template<typename Op , typename FA , typename F , std::enable_if_t< IsMultiFabLike_v< FA >, int > FOO = 0>
std::pair< FA, FA > amrex::ReduceToPlaneMF2 ( int  direction,
Box const &  domain,
FA const &  mf,
F const &  f 
)

Reduce FabArray/MultiFab data to plane FabArray.

This function takes a FabArray/MultiFab and reduces its data to a plane. The first template parameter specifies the reduction operation. Currently only ReduceOpSum is supported. The return data are a pair of FabArrays. The first FA's BoxArray is based on the input FabArray, with each box shrunk to just one cell in the specified direction and its index set to zero. Its DistributionMapping is the same as the input FabArray's. The second FA's BoxArray is a new 2D BoxArray with only one cell in the specified direction. The first FA may contain duplicated Boxes, whereas the second one is unique. The local reduction results are stored in the first FA, whereas the global results (including MPI communication) are in the second FA. Below is an example.

auto const& ma = mf.const_arrays();
auto [mf2, mf2_unique] = ReduceToPlaneMF2<ReduceOpSum>
(2, domain, mf, [=] AMREX_GPU_DEVICE (int b, int i, int j, int k)
{
return ma[b](i,j,k);
});
// mf2: box local reduction result
// mf2_unique: global reduction result
auto phi = poisson_solver(mf2_unique); // Perform 2D Poisson solve
mf2.ParallelCopy(phi); // Each Fab in mf2 has the 2D Poisson solver result
Template Parameters
Opreduce operator (Must be ReduceOpSum for now.)
FAType of FabArray
Fcallable type like a lambda function
Parameters
directionnormal direction of the plane (e.g., 0, 1 and 2)
domaindomain Box
mfa FabArray/MultiFab object specifying the iteration space
fa callable object returning data for reduction. It takes four ints, where the first int is the local box index and the others are spatial indices for the x, y, and z-directions.
Returns
reduction result (std::pair<FA,FA>)

◆ refine() [1/7]

void amrex::refine ( BoxDomain dest,
const BoxDomain fin,
int  ratio 
)

Refine all Boxes in the domain by the refinement ratio and return the result in dest.

◆ refine() [2/7]

BoxArray amrex::refine ( const BoxArray ba,
const IntVect ratio 
)

◆ refine() [3/7]

BoxArray amrex::refine ( const BoxArray ba,
int  ratio 
)

◆ refine() [4/7]

BoxList amrex::refine ( const BoxList bl,
int  ratio 
)

Returns a new BoxList in which each Box is refined by the given ratio.

◆ refine() [5/7]

template<int dim, std::enable_if_t<(1<=dim &&dim<=3), int > = 0>
__host__ __device__ constexpr Dim3 amrex::refine ( Dim3 const &  coarse,
IntVectND< dim > const &  ratio 
)
inlineconstexprnoexcept

◆ refine() [6/7]

Geometry amrex::refine ( Geometry const &  crse,
int  rr 
)
inline

◆ refine() [7/7]

Geometry amrex::refine ( Geometry const &  crse,
IntVect const &  rr 
)
inline

◆ reflect()

template<int dim>
__host__ __device__ constexpr IntVectND< dim > amrex::reflect ( const IntVectND< dim > &  a,
int  ref_ix,
int  idir 
)
inlineconstexprnoexcept

Returns an IntVectND that is the reflection of input in the plane which passes through ref_ix and normal to the coordinate direction idir.
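
For example, in a 3D build:

auto r = amrex::reflect(IntVectND<3>(3,4,5), 0, 0);
// r == IntVectND<3>(-3,4,5): reflected through the plane i == 0, normal to the x-direction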

◆ RemoveDuplicates() [1/2]

template<class T >
void amrex::RemoveDuplicates ( Vector< T > &  vec)

◆ RemoveDuplicates() [2/2]

template<class T , class H >
void amrex::RemoveDuplicates ( Vector< T > &  vec)

◆ removeInvalidParticles()

template<typename PTile >
void amrex::removeInvalidParticles ( PTile &  ptile)

◆ removeOverlap()

BoxList amrex::removeOverlap ( const BoxList bl)

Returns a BoxList that covers the same area but has no overlapping boxes.

◆ ResetTotalBytesAllocatedInFabsHWM()

void amrex::ResetTotalBytesAllocatedInFabsHWM ( )
noexcept

◆ SameIteratorsOK()

template<class PC1 , class PC2 >
bool amrex::SameIteratorsOK ( const PC1 &  pc1,
const PC2 &  pc2 
)

◆ Saxpy() [1/2]

template<class MF , std::size_t N, std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0>
void amrex::Saxpy ( Array< MF, N > &  dst,
typename MF::value_type  a,
Array< MF, N > const &  src,
int  scomp,
int  dcomp,
int  ncomp,
IntVect const &  nghost 
)

dst += a * src

◆ Saxpy() [2/2]

template<class MF , std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0>
void amrex::Saxpy ( MF &  dst,
typename MF::value_type  a,
MF const &  src,
int  scomp,
int  dcomp,
int  ncomp,
IntVect const &  nghost 
)

dst += a * src
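
A sketch (dst, src, and ncomp are assumptions; ghost cells are excluded by passing a zero IntVect):

amrex::Saxpy(dst, Real(0.5), src, 0, 0, ncomp, IntVect(0));
// dst += 0.5 * src over components 0..ncomp-1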

◆ Saxpy_Saxpy()

template<class MF , std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0>
void amrex::Saxpy_Saxpy ( MF &  dst1,
typename MF::value_type  a1,
MF const &  src1,
MF &  dst2,
typename MF::value_type  a2,
MF const &  src2,
int  scomp,
int  dcomp,
int  ncomp,
IntVect const &  nghost 
)

dst1 += a1 * src1 followed by dst2 += a2 * src2

◆ Saxpy_Xpay()

template<class MF , std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0>
void amrex::Saxpy_Xpay ( MF &  dst,
typename MF::value_type  a_saxpy,
MF const &  src_saxpy,
typename MF::value_type  a_xpay,
MF const &  src_xpay,
int  scomp,
int  dcomp,
int  ncomp,
IntVect const &  nghost 
)

dst += a_saxpy * src_saxpy followed by dst = src_xpay + a_xpay * dst

◆ Saypy_Saxpy()

template<class MF , std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0>
void amrex::Saypy_Saxpy ( MF &  dst1,
typename MF::value_type  a1,
MF &  dst2,
typename MF::value_type  a2,
MF const &  src,
int  scomp,
int  dcomp,
int  ncomp,
IntVect const &  nghost 
)

dst1 += a1 * dst2 followed by dst2 += a2 * src

◆ Scale() [1/2]

template<class MF , std::size_t N, std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0>
void amrex::Scale ( Array< MF, N > &  dst,
typename MF::value_type  val,
int  scomp,
int  ncomp,
int  nghost 
)

dst *= val

◆ scale() [1/2]

template<int dim>
__host__ __device__ constexpr IntVectND< dim > amrex::scale ( const IntVectND< dim > &  p,
int  s 
)
inlineconstexprnoexcept

Returns an IntVectND obtained by multiplying each of the components of this IntVectND by s.

◆ scale() [2/2]

template<int dim>
__host__ __device__ RealVectND< dim > amrex::scale ( const RealVectND< dim > &  p,
Real  s 
)
inlinenoexcept

Returns a RealVectND obtained by multiplying each of the components of the given RealVectND by a scalar.

◆ Scale() [2/2]

template<class MF , std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0>
void amrex::Scale ( MF &  dst,
typename MF::value_type  val,
int  scomp,
int  ncomp,
int  nghost 
)

dst *= val

◆ scatterParticles()

template<typename PTile , typename N , typename Index , std::enable_if_t< std::is_integral_v< Index >, int > foo = 0>
void amrex::scatterParticles ( PTile &  dst,
const PTile &  src,
np,
const Index *  inds 
)

scatterParticles copies particles from contiguous order into an arbitrary order. Specifically, the particle at index i in src will be copied to index inds[i] in dst.

Template Parameters
PTilethe particle tile type
Nthe size type, e.g. Long
Indexthe index type, e.g. unsigned int
Parameters
dstthe destination tile
srcthe source tile
npthe number of particles
indspointer to the permutation array
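
A hedged sketch, assuming dst has already been sized to hold the particles and inds is a filled Gpu::DeviceVector<unsigned int> holding the permutation:

amrex::scatterParticles(dst, src, np, inds.dataPtr());
// the particle at index i in src has been copied to index inds[i] in dst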

◆ second()

double amrex::second ( )
noexcept

◆ SerializeStringArray()

amrex::Vector< char > amrex::SerializeStringArray ( const Vector< std::string > &  stringArray)

◆ setBC() [1/2]

__host__ __device__ void amrex::setBC ( const Box bx,
const Box domain,
const BCRec bc_dom,
BCRec bcr 
)
inlinenoexcept

Function for setting a BC.
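
A sketch (bx, geom, and bc_dom are assumptions): derive the BCRec for a single box from the domain boundary conditions.

BCRec bcr;
amrex::setBC(bx, geom.Domain(), bc_dom, bcr);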

◆ setBC() [2/2]

void amrex::setBC ( const Box bx,
const Box domain,
int  src_comp,
int  dest_comp,
int  ncomp,
const Vector< BCRec > &  bc_dom,
Vector< BCRec > &  bcr 
)
noexcept

Function for setting array of BCs.

◆ setBndry() [1/2]

template<class MF , std::size_t N, std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0>
void amrex::setBndry ( Array< MF, N > &  dst,
typename MF::value_type  val,
int  scomp,
int  ncomp 
)

dst = val in ghost cells.

◆ setBndry() [2/2]

template<class MF , std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0>
void amrex::setBndry ( MF &  dst,
typename MF::value_type  val,
int  scomp,
int  ncomp 
)

dst = val in ghost cells.

◆ SetErrorHandler()

void amrex::SetErrorHandler ( amrex::ErrorHandler  f)

◆ setFPExcept()

FPExcept amrex::setFPExcept ( FPExcept  excepts)

Set floating-point exception traps (Linux only). This enables the flags that are set and disables the flags that are unset, so the returned previous flags can be used to restore earlier settings.

◆ SetHDF5fapl()

static void amrex::SetHDF5fapl ( hid_t  fapl,
MPI_Comm  comm 
)
static

◆ SetInitSNaN()

void amrex::SetInitSNaN ( bool  v)
noexcept

◆ SetParticleIDandCPU()

__host__ __device__ std::uint64_t amrex::SetParticleIDandCPU ( Long  id,
int  cpu 
)
inline noexcept

Set the idcpu value at once, based on a particle id and cpuid

This can be used in initialization and assignments, to avoid writing twice into the same memory bank.
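
A minimal sketch (the id and cpu values are illustrative):

    // Pack particle id 42 and cpu 0 into one 64-bit word in a single write
    std::uint64_t idcpu = amrex::SetParticleIDandCPU(42, 0);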

◆ setPoutBaseName()

void amrex::setPoutBaseName ( const std::string &  a_Name)

Set the base name for the parallel output files used by pout().

Changes the base part of the filename for pout() files.

When running in parallel, if pout() has already been called and the new name differs, the current output file is closed and a new one is opened; if the name is the same, nothing happens. In serial, the argument is ignored and this function does nothing.

◆ setVal() [1/2]

template<class MF , std::size_t N, std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0>
void amrex::setVal ( Array< MF, N > &  dst,
typename MF::value_type  val 
)

dst = val

◆ setVal() [2/2]

template<class MF , std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0>
void amrex::setVal ( MF &  dst,
typename MF::value_type  val 
)

dst = val

◆ SetVerbose()

void amrex::SetVerbose ( int  v)
noexcept

◆ shift() [1/2]

template<int dim>
__host__ __device__ BoxND< dim > amrex::shift ( const BoxND< dim > &  b,
const IntVectND< dim > &  nzones 
)
inline noexcept

◆ shift() [2/2]

template<int dim>
__host__ __device__ BoxND< dim > amrex::shift ( const BoxND< dim > &  b,
int  dir,
int  nzones 
)
inline noexcept

Return a BoxND with indices shifted by nzones in dir direction.
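
For example, assuming a 3D build:

    amrex::Box b({0,0,0}, {15,15,15});
    amrex::Box bs = amrex::shift(b, 0, 2);   // now spans (2,0,0) to (17,15,15)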

◆ single_level_redistribute()

void amrex::single_level_redistribute ( MultiFab div_tmp_in,
MultiFab div_out,
int  div_comp,
int  ncomp,
const Geometry geom 
)

◆ single_level_weighted_redistribute()

void amrex::single_level_weighted_redistribute ( MultiFab div_tmp_in,
MultiFab div_out,
const MultiFab weights,
int  div_comp,
int  ncomp,
const Geometry geom,
bool  use_wts_in_divnc 
)

◆ single_product()

template<typename... Ls, typename A >
constexpr auto amrex::single_product ( TypeList< Ls... >  ,
A  
)
constexpr

◆ single_task() [1/2]

template<typename L >
void amrex::single_task ( gpuStream_t  stream,
L const &  f 
)
noexcept

◆ single_task() [2/2]

template<typename L >
void amrex::single_task ( L &&  f)
noexcept

◆ Sleep()

void amrex::Sleep ( double  sleepsec)

◆ split()

std::vector< std::string > amrex::split ( std::string const &  s,
std::string const &  sep 
)

Split a string using the separator characters given in sep.
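
For example:

    auto parts = amrex::split("alpha beta,gamma", " ,");
    // parts == {"alpha", "beta", "gamma"}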

◆ SpMV() [1/2]

template<typename T , template< typename > class AllocM, typename AllocV >
void amrex::SpMV ( AlgVector< T, AllocV > &  y,
SpMatrix< T, AllocM > const &  A,
AlgVector< T, AllocV > const &  x 
)

◆ SpMV() [2/2]

template<typename T >
void amrex::SpMV ( Long  nrows,
Long  ncols,
T *__restrict__  py,
CsrView< T const > const &  A,
T const *__restrict__  px 
)

◆ sqrt()

template<typename T >
__host__ __device__ GpuComplex< T > amrex::sqrt ( const GpuComplex< T > &  a_z)
inline noexcept

Return the square root of a complex number.

◆ StablePartition() [1/3]

template<typename T , typename F >
int amrex::StablePartition ( Gpu::DeviceVector< T > &  v,
F &&  f 
)

A GPU-capable partition function for contiguous data.

After calling this, all the items for which the predicate is true will be before the items for which the predicate is false in the input array.

This version is stable, meaning that, within each side of the resulting array, order is maintained - if element i was before element j in the input, then it will also be before j in the output. If you don't care about this property, use amrex::Partition instead.

Template Parameters
T: type of the data to be partitioned.
F: type of the predicate function.
Parameters
v: a Gpu::DeviceVector with the data to be partitioned.
f: predicate function that returns 1 or 0 for each input

Returns the index of the first element for which f is 0.
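
A minimal sketch (the data and predicate are illustrative; AMREX_GPU_DEVICE expands to nothing in CPU builds):

    amrex::Gpu::DeviceVector<int> v(100);   // assume this holds meaningful data
    int nfirst = amrex::StablePartition(v,
        [=] AMREX_GPU_DEVICE (int x) { return x % 2 == 0; });
    // Even entries now precede odd entries, each side in its original order;
    // nfirst is the index of the first odd entry.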

◆ StablePartition() [2/3]

template<typename T , typename F >
int amrex::StablePartition ( T *  data,
int  beg,
int  end,
F &&  f 
)

A GPU-capable partition function for contiguous data.

After calling this, all the items for which the predicate is true will be before the items for which the predicate is false in the input array.

This version is stable, meaning that, within each side of the resulting array, order is maintained - if element i was before element j in the input, then it will also be before j in the output. If you don't care about this property, use amrex::Partition instead.

Template Parameters
T: type of the data to be partitioned.
F: type of the predicate function.
Parameters
data: pointer to the data to be partitioned
beg: index at which to start
end: index at which to stop (exclusive)
f: predicate function that returns 1 or 0 for each input

Returns the index of the first element for which f is 0.

◆ StablePartition() [3/3]

template<typename T , typename F >
int amrex::StablePartition ( T *  data,
int  n,
F &&  f 
)

A GPU-capable partition function for contiguous data.

After calling this, all the items for which the predicate is true will be before the items for which the predicate is false in the input array.

This version is stable, meaning that, within each side of the resulting array, order is maintained - if element i was before element j in the input, then it will also be before j in the output. If you don't care about this property, use amrex::Partition instead.

Template Parameters
T: type of the data to be partitioned.
F: type of the predicate function.
Parameters
data: pointer to the data to be partitioned
n: the number of elements in the array
f: predicate function that returns 1 or 0 for each input

Returns the index of the first element for which f is 0.

◆ StateRedistribute() [1/2]

void amrex::StateRedistribute ( amrex::Box const &  bx,
int  ncomp,
amrex::Array4< amrex::Real > const &  U_out,
amrex::Array4< amrex::Real > const &  U_in,
amrex::Array4< amrex::EBCellFlag const > const &  flag,
amrex::Array4< amrex::Real const > const &  vfrac,
amrex::Array4< amrex::Real const > const &  fcx,
amrex::Array4< amrex::Real const > const &  fcy,
amrex::Array4< amrex::Real const > const &  fcz,
amrex::Array4< amrex::Real const > const &  ccent,
amrex::BCRec const *  d_bcrec_ptr,
amrex::Array4< int const > const &  itracker,
amrex::Array4< amrex::Real const > const &  nrs,
amrex::Array4< amrex::Real const > const &  alpha,
amrex::Array4< amrex::Real const > const &  nbhd_vol,
amrex::Array4< amrex::Real const > const &  cent_hat,
amrex::Geometry const &  geom,
int  max_order = 2 
)

◆ StateRedistribute() [2/2]

void amrex::StateRedistribute ( Box const &  bx,
int  ncomp,
Array4< Real > const &  U_out,
Array4< Real > const &  U_in,
Array4< EBCellFlag const > const &  flag,
Array4< Real const > const &  vfrac,
Array4< Real const > const &  fcx,
Array4< Real const > const &  fcy,
Array4< Real const > const &  fcz,
Array4< Real const > const &  ccent,
amrex::BCRec const *  d_bcrec_ptr,
Array4< int const > const &  itracker,
Array4< Real const > const &  nrs,
Array4< Real const > const &  alpha,
Array4< Real const > const &  nbhd_vol,
Array4< Real const > const &  cent_hat,
Geometry const &  lev_geom,
int  max_order 
)

◆ Subtract() [1/2]

template<class FAB , class bar = std::enable_if_t<IsBaseFab<FAB>::value>>
void amrex::Subtract ( FabArray< FAB > &  dst,
FabArray< FAB > const &  src,
int  srccomp,
int  dstcomp,
int  numcomp,
const IntVect nghost 
)

◆ Subtract() [2/2]

template<class FAB , class bar = std::enable_if_t<IsBaseFab<FAB>::value>>
void amrex::Subtract ( FabArray< FAB > &  dst,
FabArray< FAB > const &  src,
int  srccomp,
int  dstcomp,
int  numcomp,
int  nghost 
)

◆ sum_fine_to_coarse()

void amrex::sum_fine_to_coarse ( const MultiFab S_Fine,
MultiFab S_crse,
int  scomp,
int  ncomp,
const IntVect ratio,
const Geometry cgeom,
const Geometry fgeom 
)

Add a coarsened version of the data contained in the S_fine MultiFab to S_crse, including ghost cells.

◆ sumToLine()

Gpu::HostVector< Real > amrex::sumToLine ( MultiFab const &  mf,
int  icomp,
int  ncomp,
Box const &  domain,
int  direction,
bool  local = false 
)

Sum MultiFab data to line.

Return a HostVector that contains the sum of the given MultiFab data in the plane with the given normal direction. The size of the vector is domain.length(direction) x ncomp. The vector is actually a 2D array, where the element for component icomp at spatial index k is at [icomp+ncomp*k].

Parameters
mf: MultiFab data for summing
icomp: starting component
ncomp: number of components
domain: the domain
direction: the direction of the line
local: if false, reduce across MPI processes
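
A hedged indexing sketch (mf and geom are assumed to exist):

    // Sum component 0 over planes normal to z (direction = 2)
    amrex::Gpu::HostVector<amrex::Real> s =
        amrex::sumToLine(mf, 0, 1, geom.Domain(), 2);
    // s[0 + 1*k] holds the sum over the plane at z-index k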

◆ surroundingNodes() [1/3]

template<int dim>
__host__ __device__ BoxND< dim > amrex::surroundingNodes ( const BoxND< dim > &  b)
inline noexcept

Return a BoxND with NODE based coordinates in all directions that encloses BoxND b.

◆ surroundingNodes() [2/3]

template<int dim>
__host__ __device__ BoxND< dim > amrex::surroundingNodes ( const BoxND< dim > &  b,
Direction  d 
)
inline noexcept

◆ surroundingNodes() [3/3]

template<int dim>
__host__ __device__ BoxND< dim > amrex::surroundingNodes ( const BoxND< dim > &  b,
int  dir 
)
inline noexcept

Return a BoxND with NODE based coordinates in direction dir that encloses BoxND b. Note: this is equivalent to b.convert(dir,NODE). Note: it is an error if b.type(dir) == NODE.
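
For example, assuming a 3D build:

    amrex::Box cc({0,0,0}, {15,15,15});               // cell-centered box
    amrex::Box nd  = amrex::surroundingNodes(cc);     // nodal in all directions
    amrex::Box ndx = amrex::surroundingNodes(cc, 0);  // nodal in x only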

◆ Swap() [1/3]

template<class FAB , class bar = std::enable_if_t<IsBaseFab<FAB>::value>>
void amrex::Swap ( FabArray< FAB > &  dst,
FabArray< FAB > &  src,
int  srccomp,
int  dstcomp,
int  numcomp,
const IntVect nghost 
)

◆ Swap() [2/3]

template<class FAB , class bar = std::enable_if_t<IsBaseFab<FAB>::value>>
void amrex::Swap ( FabArray< FAB > &  dst,
FabArray< FAB > &  src,
int  srccomp,
int  dstcomp,
int  numcomp,
int  nghost 
)

◆ Swap() [3/3]

template<typename T >
__host__ __device__ void amrex::Swap ( T &  t1,
T &  t2 
)
inline noexcept

Swap the given values. Since C++20, std::swap can be used directly in GPU code instead.

◆ swapBytes() [1/6]

std::int16_t amrex::swapBytes ( std::int16_t  val)

◆ swapBytes() [2/6]

std::int32_t amrex::swapBytes ( std::int32_t  val)

◆ swapBytes() [3/6]

std::int64_t amrex::swapBytes ( std::int64_t  val)

◆ swapBytes() [4/6]

std::uint16_t amrex::swapBytes ( std::uint16_t  val)

◆ swapBytes() [5/6]

std::uint32_t amrex::swapBytes ( std::uint32_t  val)

◆ swapBytes() [6/6]

std::uint64_t amrex::swapBytes ( std::uint64_t  val)

◆ swapParticle()

template<typename T_ParticleType , int NAR, int NAI>
__host__ __device__ void amrex::swapParticle ( const ParticleTileData< T_ParticleType, NAR, NAI > &  dst,
const ParticleTileData< T_ParticleType, NAR, NAI > &  src,
int  src_i,
int  dst_i 
)
inline noexcept

A general single particle swapping routine that can run on the GPU.

Template Parameters
NSR: number of extra reals in the particle struct
NSI: number of extra ints in the particle struct
NAR: number of reals in the struct-of-arrays
NAI: number of ints in the struct-of-arrays
Parameters
dst: the destination tile
src: the source tile
src_i: the index in the source to read from
dst_i: the index in the destination to write to

◆ SyncStrings()

void amrex::SyncStrings ( const Vector< std::string > &  localStrings,
Vector< std::string > &  syncedStrings,
bool &  alreadySynced 
)

◆ TagCutCells()

void amrex::TagCutCells ( TagBoxArray tags,
const MultiFab state 
)

◆ TagVolfrac()

void amrex::TagVolfrac ( TagBoxArray tags,
const MultiFab volfrac,
Real  tol 
)

◆ Tie()

template<typename... Args>
__host__ __device__ constexpr GpuTuple< Args &... > amrex::Tie ( Args &...  args)
constexpr noexcept

◆ TilingIfNotGPU()

bool amrex::TilingIfNotGPU ( )
inline noexcept

◆ Tokenize()

const std::vector< std::string > & amrex::Tokenize ( const std::string &  instr,
const std::string &  separators 
)

Splits "instr" into separate pieces based on "separators".

◆ ToLongMultiFab()

FabArray< BaseFab< Long > > amrex::ToLongMultiFab ( const iMultiFab imf)

Convert iMultiFab to Long.

◆ toLower()

std::string amrex::toLower ( std::string  s)

Converts all characters of the string into lower case based on std::locale.

◆ ToMultiFab()

MultiFab amrex::ToMultiFab ( const iMultiFab imf)

Convert iMultiFab to MultiFab.

◆ ToString() [1/2]

template<class T >
std::string amrex::ToString ( const T &  t,
const char *  symbol_begin = "[",
const char *  symbol_delim = ", ",
const char *  symbol_end = "]",
const char *  symbol_str = "\"",
int  limit = 100,
std::ostringstream  ss = std::ostringstream{} 
)

◆ ToString() [2/2]

template<class T >
std::ostream & amrex::ToString ( std::ostream &  os,
const T &  t,
const char *  symbol_begin = "[",
const char *  symbol_delim = ", ",
const char *  symbol_end = "]",
const char *  symbol_str = "\"",
int  limit = 100 
)

◆ TotalBytesAllocatedInFabs()

Long amrex::TotalBytesAllocatedInFabs ( )
noexcept

◆ TotalBytesAllocatedInFabsHWM()

Long amrex::TotalBytesAllocatedInFabsHWM ( )
noexcept

◆ TotalCellsAllocatedInFabs()

Long amrex::TotalCellsAllocatedInFabs ( )
noexcept

◆ TotalCellsAllocatedInFabsHWM()

Long amrex::TotalCellsAllocatedInFabsHWM ( )
noexcept

◆ toUnderlying()

template<typename T , typename ET = amrex_enum_traits<T>, std::enable_if_t< ET::value, int > = 0>
constexpr auto amrex::toUnderlying ( T  v)
constexpr noexcept

Return the underlying (u)int of an enum value

Useful when building bitmasks.
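
A hedged sketch, assuming the enum is declared with the AMREX_ENUM macro so that amrex_enum_traits is specialized for it:

    AMREX_ENUM(MyColor, red, green, blue);          // hypothetical enum
    auto u = amrex::toUnderlying(MyColor::green);   // underlying integer, here 1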

◆ toUpper()

std::string amrex::toUpper ( std::string  s)

Converts all characters of the string into uppercase based on std::locale.

◆ transformParticles() [1/4]

template<typename DstTile , typename SrcTile , typename F >
void amrex::transformParticles ( DstTile &  dst,
const SrcTile &  src,
F &&  f 
)
noexcept

Apply the function f to all the particles in src, writing the result to dst. This version does all the particles in src.

Template Parameters
DstTile: the dst particle tile type
SrcTile: the src particle tile type
F: a function object
Parameters
dst: the destination tile
src: the source tile
f: the function that will be applied to each particle

◆ transformParticles() [2/4]

template<typename DstTile , typename SrcTile , typename Index , typename N , typename F , std::enable_if_t< std::is_integral_v< Index >, int > foo = 0>
void amrex::transformParticles ( DstTile &  dst,
const SrcTile &  src,
Index  src_start,
Index  dst_start,
N  n,
F const &  f 
)
noexcept

Apply the function f to particles in src, writing the result to dst. This version applies the function to n particles starting at index src_start, writing the result starting at dst_start.

Template Parameters
DstTile: the dst particle tile type
SrcTile: the src particle tile type
Index: the index type, e.g. unsigned int
N: the size type, e.g. Long
F: a function object
Parameters
dst: the destination tile
src: the source tile
src_start: the offset at which to start reading particles from src
dst_start: the offset at which to start writing particles to dst
n: the number of particles
f: the function that will be applied to each particle

◆ transformParticles() [3/4]

template<typename DstTile1 , typename DstTile2 , typename SrcTile , typename F >
void amrex::transformParticles ( DstTile1 &  dst1,
DstTile2 &  dst2,
const SrcTile &  src,
F &&  f 
)
noexcept

Apply the function f to all the particles in src, writing the results to dst1 and dst2. This version does all the particles in src.

Template Parameters
DstTile1: the dst1 particle tile type
DstTile2: the dst2 particle tile type
SrcTile: the src particle tile type
F: a function object
Parameters
dst1: the first destination tile
dst2: the second destination tile
src: the source tile
f: the function that will be applied to each particle

◆ transformParticles() [4/4]

template<typename DstTile1 , typename DstTile2 , typename SrcTile , typename Index , typename N , typename F , std::enable_if_t< std::is_integral_v< Index >, int > foo = 0>
void amrex::transformParticles ( DstTile1 &  dst1,
DstTile2 &  dst2,
const SrcTile &  src,
Index  src_start,
Index  dst1_start,
Index  dst2_start,
N  n,
F const &  f 
)
noexcept

Apply the function f to particles in src, writing the results to dst1 and dst2. This version applies the function to n particles starting at index src_start, writing the result starting at dst1_start and dst2_start.

Template Parameters
DstTile1: the dst1 particle tile type
DstTile2: the dst2 particle tile type
SrcTile: the src particle tile type
Index: the index type, e.g. unsigned int
N: the size type, e.g. Long
F: a function object
Parameters
dst1: the first destination tile
dst2: the second destination tile
src: the source tile
src_start: the offset at which to start reading particles from src
dst1_start: the offset at which to start writing particles to dst1
dst2_start: the offset at which to start writing particles to dst2
n: the number of particles
f: the function that will be applied to each particle

◆ transpose() [1/2]

template<typename T , template< typename > class V>
CSR< T, V > amrex::transpose ( CSR< T, V > const &  csr,
Long  ncols 
)

◆ transpose() [2/2]

template<typename T , template< typename > class Allocator>
SpMatrix< T, Allocator > amrex::transpose ( SpMatrix< T, Allocator > const &  A,
AlgPartition  col_partition 
)

◆ transposeCtoF() [1/2]

template<typename T >
void amrex::transposeCtoF ( T const *  pi,
T *  po,
int  nx,
int  ny 
)

Transpose 2D array (nx,ny) from row-major (i.e. C order) to column-major (Fortran order). The input's unit stride direction is y, whereas the output's unit stride direction is x. Note that for GPU builds, the kernel runs on the current GPU stream asynchronously with respect to the host. If synchronization is needed, it's up to the user to call amrex::Gpu::streamSynchronize().
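
A small sketch of the index convention (illustrative host data for a CPU build; in GPU builds the pointers must be device-accessible and a call to amrex::Gpu::streamSynchronize() is needed before reading the output):

    double in[6] = {1, 2, 3,
                    4, 5, 6};   // in[i*ny + j] with nx = 2, ny = 3
    double out[6];
    amrex::transposeCtoF(in, out, 2, 3);
    // out[i + nx*j] == in[i*ny + j]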

◆ transposeCtoF() [2/2]

template<typename T >
void amrex::transposeCtoF ( T const *  pi,
T *  po,
int  nx,
int  ny,
int  nz 
)

Transpose 3D array (nx,ny,nz) from row-major (i.e. C order) to column-major (Fortran order). The input's unit stride direction is z, whereas the output's unit stride direction is x. Note that for GPU builds, the kernel runs on the current GPU stream asynchronously with respect to the host. If synchronization is needed, it's up to the user to call amrex::Gpu::streamSynchronize().

◆ trim()

std::string amrex::trim ( std::string  s,
std::string const &  space = " \t" 
)

Trim leading and trailing characters that appear in the optional space argument.
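
For example:

    std::string s = amrex::trim("  \thello \t ");   // s == "hello"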

◆ TupleCat() [1/3]

template<typename TP >
__host__ __device__ constexpr auto amrex::TupleCat ( TP &&  a) -> typename detail::tuple_cat_result<detail::tuple_decay_t<TP> >::type
constexpr

◆ TupleCat() [2/3]

template<typename TP1 , typename TP2 >
__host__ __device__ constexpr auto amrex::TupleCat ( TP1 &&  a,
TP2 &&  b 
) -> typename detail::tuple_cat_result<detail::tuple_decay_t<TP1>, detail::tuple_decay_t<TP2> >::type
constexpr

◆ TupleCat() [3/3]

template<typename TP1 , typename TP2 , typename... TPs>
__host__ __device__ constexpr auto amrex::TupleCat ( TP1 &&  a,
TP2 &&  b,
TPs &&...  args 
) -> typename detail::tuple_cat_result<detail::tuple_decay_t<TP1>, detail::tuple_decay_t<TP2>, detail::tuple_decay_t<TPs>...>::type
constexpr

◆ TupleSplit()

template<std::size_t... Is, typename... Args>
__host__ __device__ constexpr auto amrex::TupleSplit ( const GpuTuple< Args... > &  tup)
constexpr noexcept

Returns a GpuTuple of GpuTuples obtained by splitting the input GpuTuple according to the sizes specified by the template arguments.

◆ tupleToArray() [1/2]

template<typename T >
__host__ __device__ constexpr auto amrex::tupleToArray ( GpuTuple< T > const &  tup)
constexpr

◆ tupleToArray() [2/2]

template<typename T , typename T2 , typename... Ts, std::enable_if_t< Same< T, T2, Ts... >::value, int > = 0>
__host__ __device__ constexpr auto amrex::tupleToArray ( GpuTuple< T, T2, Ts... > const &  tup)
constexpr

Convert GpuTuple<T,T2,Ts...> to GpuArray.
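
For example:

    auto tup = amrex::makeTuple(1, 2, 3);
    auto arr = amrex::tupleToArray(tup);   // GpuArray<int,3> holding {1, 2, 3}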

◆ ubound() [1/2]

template<class T >
__host__ __device__ Dim3 amrex::ubound ( Array4< T > const &  a)
inline noexcept

◆ ubound() [2/2]

template<int dim, std::enable_if_t<(1<=dim &&dim<=3), int > = 0>
__host__ __device__ Dim3 amrex::ubound ( BoxND< dim > const &  box)
inline noexcept

◆ ubound_iv()

template<int dim>
__host__ __device__ IntVectND< dim > amrex::ubound_iv ( BoxND< dim > const &  box)
inline noexcept

◆ UniqueString()

std::string amrex::UniqueString ( )

Create a (probably) unique string.

◆ unpackBuffer()

template<class PC , class Buffer , class UnpackPolicy , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
void amrex::unpackBuffer ( PC &  pc,
const ParticleCopyPlan plan,
const Buffer &  snd_buffer,
UnpackPolicy const &  policy 
)

◆ unpackRemotes()

template<class PC , class Buffer , class UnpackPolicy , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
void amrex::unpackRemotes ( PC &  pc,
const ParticleCopyPlan plan,
Buffer &  rcv_buffer,
UnpackPolicy const &  policy 
)

◆ UnSerializeStringArray()

amrex::Vector< std::string > amrex::UnSerializeStringArray ( const Vector< char > &  charArray)

◆ update_fab_stats() [1/2]

void amrex::update_fab_stats ( Long  n,
Long  s,
size_t  szt 
)
noexcept

◆ update_fab_stats() [2/2]

void amrex::update_fab_stats ( Long  n,
Long  s,
std::size_t  szt 
)
noexcept

◆ upper_bound()

template<typename ItType , typename ValType >
__host__ __device__ ItType amrex::upper_bound ( ItType  first,
ItType  last,
const ValType &  val 
)

Return an iterator to the first element greater than a given value.

This function is an implementation of std::upper_bound that works on both host and device.

Template Parameters
ItType: iterator type.
ValType: value type.
Parameters
first: inclusive lower bound of the search range.
last: exclusive upper bound of the search range.
val: value to compare the elements to.
Returns
an iterator pointing to the first element greater than val.
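
For example:

    int a[] = {1, 3, 3, 7};
    int* it = amrex::upper_bound(a, a + 4, 3);   // *it == 7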

◆ UtilCreateCleanDirectory()

void amrex::UtilCreateCleanDirectory ( const std::string &  path,
bool  callbarrier = true 
)

Create a new directory, renaming the old one if it exists.

◆ UtilCreateDirectory()

bool amrex::UtilCreateDirectory ( const std::string &  path,
mode_t  mode,
bool  verbose = false 
)

Creates the specified directories. path may be either a full pathname or a relative pathname. It will create all the directories in the pathname, if they don't already exist, so that on successful return the pathname refers to an existing directory. Returns true or false depending upon whether or not it was successful. Also returns true if path is NULL or "/". mode is the mode passed to mkdir() for any directories that must be created (for example: 0755). verbose will print out the directory creation steps.

For example, if it is passed the string "/a/b/c/d/e/f/g", it will return successfully when all the directories in the pathname exist; i.e. when the full pathname is a valid directory.

In a Windows environment, the path separator is '\', so for the example above you must pass the string "\\a\\b\\c\\d\\e\\f\\g" (note that the backslash must be escaped in a character string).

Only the last mkdir return value is checked for success as errno may not be set to EEXIST if a directory exists but mkdir has other reasons to fail such as part of the path being a read-only filesystem (EROFS). If this function fails, it will print out an error stack.
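
For example (the path is illustrative):

    bool ok = amrex::UtilCreateDirectory("plt00000/Level_0", 0755);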

◆ UtilCreateDirectoryDestructive()

void amrex::UtilCreateDirectoryDestructive ( const std::string &  path,
bool  callbarrier = true 
)

Create a new directory, removing old one if it exists.

◆ UtilRenameDirectoryToOld()

void amrex::UtilRenameDirectoryToOld ( const std::string &  path,
bool  callbarrier = true 
)

Rename a current directory if it exists.

◆ Verbose()

int amrex::Verbose ( )
noexcept

◆ Version()

std::string amrex::Version ( )

Returns the AMReX "git describe" version string.

◆ volumeWeightedSum()

Real amrex::volumeWeightedSum ( Vector< MultiFab const * > const &  mf,
int  icomp,
Vector< Geometry > const &  geom,
Vector< IntVect > const &  ratio,
bool  local = false 
)

Volume weighted sum for a vector of MultiFabs.

Return a volume-weighted sum of MultiFabs of AMR data. The sum is performed on a single component of the data. If the MultiFabs are built with EB factories, the cut cell volume fraction is included in the weight.
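
A hedged sketch for a hypothetical AMR hierarchy, where mfs, geoms, and ratios span all levels:

    amrex::Real s = amrex::volumeWeightedSum(amrex::GetVecOfConstPtrs(mfs),
                                             0, geoms, ratios);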

◆ Warning() [1/2]

__host__ __device__ void amrex::Warning ( const char *  msg)
inline

◆ Warning() [2/2]

void amrex::Warning ( const std::string &  msg)

Print a warning message to cerr.

◆ Warning_host()

void amrex::Warning_host ( const char *  msg)

◆ Write()

template<typename FAB >
std::enable_if_t< std::is_same_v< FAB, IArrayBox > > amrex::Write ( const FabArray< FAB > &  fa,
const std::string &  name 
)

Write iMultiFab/FabArray<IArrayBox>

This writes an iMultiFab/FabArray<IArrayBox> to files on disk: a plain-text header file NAME_H and binary files NAME_D_00000, etc.

Parameters
fa: the iMultiFab to be written.
name: the base name for the files.

◆ write_to_stderr_without_buffering()

void amrex::write_to_stderr_without_buffering ( const char *  str)

This is used by amrex::Error(), amrex::Abort(), and amrex::Assert() to ensure that no additional heap-based memory is allocated when writing the message to stderr.

◆ WriteBinaryParticleDataAsync()

template<class PC , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
void amrex::WriteBinaryParticleDataAsync ( PC const &  pc,
const std::string &  dir,
const std::string &  name,
const Vector< int > &  write_real_comp,
const Vector< int > &  write_int_comp,
const Vector< std::string > &  real_comp_names,
const Vector< std::string > &  int_comp_names,
bool  is_checkpoint 
)

◆ WriteBinaryParticleDataSync()

template<class PC , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
void amrex::WriteBinaryParticleDataSync ( PC const &  pc,
const std::string &  dir,
const std::string &  name,
const Vector< int > &  write_real_comp,
const Vector< int > &  write_int_comp,
const Vector< std::string > &  real_comp_names,
const Vector< std::string > &  int_comp_names,
F const &  f,
bool  is_checkpoint 
)

◆ writeData() [1/4]

void amrex::writeData ( double const *  data,
std::size_t  size,
std::ostream &  os 
)
inline

◆ writeData() [2/4]

void amrex::writeData ( float const *  data,
std::size_t  size,
std::ostream &  os 
)
inline

◆ writeData() [3/4]

void amrex::writeData ( int const *  data,
std::size_t  size,
std::ostream &  os 
)
inline

◆ writeData() [4/4]

void amrex::writeData ( Long const *  data,
std::size_t  size,
std::ostream &  os 
)
inline

◆ writeDoubleData()

void amrex::writeDoubleData ( const double *  data,
std::size_t  size,
std::ostream &  os,
const RealDescriptor rd = FPC::Native64RealDescriptor() 
)

Write double data to the ostream. The arguments are a pointer to data to write, the size of the data buffer, the ostream, and an optional RealDescriptor that describes the data format to use for writing. If no RealDescriptor is provided, the data will be written using the native format for your machine.

◆ WriteEBSurface()

void amrex::WriteEBSurface ( const BoxArray ba,
const DistributionMapping dmap,
const Geometry geom,
const EBFArrayBoxFactory ebf 
)

◆ writeFabs() [1/2]

void amrex::writeFabs ( const MultiFab mf,
const std::string &  name 
)

Write each fab individually.

◆ writeFabs() [2/2]

void amrex::writeFabs ( const MultiFab mf,
int  comp,
int  ncomp,
const std::string &  name 
)

◆ writeFloatData()

void amrex::writeFloatData ( const float *  data,
std::size_t  size,
std::ostream &  os,
const RealDescriptor rd = FPC::Native32RealDescriptor() 
)

Write float data to the ostream. The arguments are a pointer to data to write, the size of the data buffer, the ostream, and an optional RealDescriptor that describes the data format to use for writing. If no RealDescriptor is provided, the data will be written using the native format for your machine.

◆ WriteGenericPlotfileHeader()

void amrex::WriteGenericPlotfileHeader ( std::ostream &  HeaderFile,
int  nlevels,
const Vector< BoxArray > &  bArray,
const Vector< std::string > &  varnames,
const Vector< Geometry > &  geom,
Real  time,
const Vector< int > &  level_steps,
const Vector< IntVect > &  ref_ratio,
const std::string &  versionName = "HyperCLaw-V1.1",
const std::string &  levelPrefix = "Level_",
const std::string &  mfPrefix = "Cell" 
)

Write a generic plotfile header to the file plotfilename/Header. The plotfilename directory must already exist.

◆ WriteGenericPlotfileHeaderHDF5()

static void amrex::WriteGenericPlotfileHeaderHDF5 ( hid_t  fid,
int  nlevels,
const Vector< const MultiFab * > &  mf,
const Vector< BoxArray > &  bArray,
const Vector< std::string > &  varnames,
const Vector< Geometry > &  geom,
Real  time,
const Vector< int > &  level_steps,
const Vector< IntVect > &  ref_ratio,
const std::string &  versionName,
const std::string &  levelPrefix,
const std::string &  mfPrefix,
const Vector< std::string > &  extra_dirs 
)
static

◆ WriteHDF5ParticleDataSync()

template<class PC , class F , std::enable_if_t< IsParticleContainer< PC >::value, int > foo = 0>
void amrex::WriteHDF5ParticleDataSync ( PC const &  pc,
const std::string &  dir,
const std::string &  name,
const Vector< int > &  write_real_comp,
const Vector< int > &  write_int_comp,
const Vector< std::string > &  real_comp_names,
const Vector< std::string > &  int_comp_names,
const std::string &  compression,
F &&  f,
bool  is_checkpoint 
)

◆ writeIntData() [1/2]

template<typename To , typename From >
void amrex::writeIntData ( const From *  data,
std::size_t  size,
std::ostream &  os,
const amrex::IntDescriptor id 
)

◆ writeIntData() [2/2]

void amrex::writeIntData ( const int data,
std::size_t  size,
std::ostream &  os,
const IntDescriptor id = FPC::NativeIntDescriptor() 
)

Functions for writing integer data to disk in a portable, self-describing manner.

Write int data to the ostream. The arguments are a pointer to data to write, the size of the data buffer, the ostream, and an optional IntDescriptor that describes the data format to use for writing. If no IntDescriptor is provided, the data will be written using the native format for your machine.

◆ writeLongData()

void amrex::writeLongData ( const Long data,
std::size_t  size,
std::ostream &  os,
const IntDescriptor id = FPC::NativeLongDescriptor() 
)

Write long data to the ostream. The arguments are a pointer to data to write, the size of the data buffer, the ostream, and an optional IntDescriptor that describes the data format to use for writing. If no IntDescriptor is provided, the data will be written using the native format for your machine.

◆ WriteMLMF()

void amrex::WriteMLMF ( const std::string &  plotfilename,
const Vector< const MultiFab * > &  mf,
const Vector< Geometry > &  geom 
)

Write a plotfile to disk given a plotfile name, a vector of MultiFabs, and a vector of Geometries. Variable names are written as "Var0", "Var1", etc. The refinement ratio is computed from the Geometry vector; "time" and "level_steps" are set to zero.

Parameters
plotfilename: the plotfile name
mf: the vector of MultiFabs
geom: the vector of Geometries

◆ WriteMultiLevelPlotfileHDF5()

void amrex::WriteMultiLevelPlotfileHDF5 ( const std::string &  plotfilename,
int  nlevels,
const Vector< const MultiFab * > &  mf,
const Vector< std::string > &  varnames,
const Vector< Geometry > &  geom,
Real  time,
const Vector< int > &  level_steps,
const Vector< IntVect > &  ref_ratio,
const std::string &  compression,
const std::string &  versionName,
const std::string &  levelPrefix,
const std::string &  mfPrefix,
const Vector< std::string > &  extra_dirs 
)

◆ WriteMultiLevelPlotfileHDF5MultiDset()

void amrex::WriteMultiLevelPlotfileHDF5MultiDset ( const std::string &  plotfilename,
int  nlevels,
const Vector< const MultiFab * > &  mf,
const Vector< std::string > &  varnames,
const Vector< Geometry > &  geom,
Real  time,
const Vector< int > &  level_steps,
const Vector< IntVect > &  ref_ratio,
const std::string &  compression,
const std::string &  versionName,
const std::string &  levelPrefix,
const std::string &  mfPrefix,
const Vector< std::string > &  extra_dirs 
)

◆ WriteMultiLevelPlotfileHDF5SingleDset()

void amrex::WriteMultiLevelPlotfileHDF5SingleDset ( const std::string &  plotfilename,
int  nlevels,
const Vector< const MultiFab * > &  mf,
const Vector< std::string > &  varnames,
const Vector< Geometry > &  geom,
Real  time,
const Vector< int > &  level_steps,
const Vector< IntVect > &  ref_ratio,
const std::string &  compression,
const std::string &  versionName,
const std::string &  levelPrefix,
const std::string &  mfPrefix,
const Vector< std::string > &  extra_dirs 
)

◆ WriteMultiLevelPlotfileHeaders()

void amrex::WriteMultiLevelPlotfileHeaders ( const std::string &  plotfilename,
int  nlevels,
const Vector< const MultiFab * > &  mf,
const Vector< std::string > &  varnames,
const Vector< Geometry > &  geom,
Real  time,
const Vector< int > &  level_steps,
const Vector< IntVect > &  ref_ratio,
const std::string &  versionName,
const std::string &  levelPrefix,
const std::string &  mfPrefix,
const Vector< std::string > &  extra_dirs 
)

◆ writeRealData()

void amrex::writeRealData ( const Real data,
std::size_t  size,
std::ostream &  os,
const RealDescriptor rd = FPC::NativeRealDescriptor() 
)

Write Real data to the ostream. The arguments are a pointer to data to write, the size of the data buffer, the ostream, and an optional RealDescriptor that describes the data format to use for writing. If no RealDescriptor is provided, the data will be written using the native format for your machine.

◆ WriteSingleLevelPlotfile()

void amrex::WriteSingleLevelPlotfile ( const std::string &  plotfilename,
const MultiFab mf,
const Vector< std::string > &  varnames,
const Geometry geom,
Real  time,
int  level_step,
const std::string &  versionName,
const std::string &  levelPrefix,
const std::string &  mfPrefix,
const Vector< std::string > &  extra_dirs 
)

◆ WriteSingleLevelPlotfileHDF5()

void amrex::WriteSingleLevelPlotfileHDF5 ( const std::string &  plotfilename,
const MultiFab mf,
const Vector< std::string > &  varnames,
const Geometry geom,
Real  time,
int  level_step,
const std::string &  compression,
const std::string &  versionName,
const std::string &  levelPrefix,
const std::string &  mfPrefix,
const Vector< std::string > &  extra_dirs 
)

◆ WriteSingleLevelPlotfileHDF5MultiDset()

void amrex::WriteSingleLevelPlotfileHDF5MultiDset ( const std::string &  plotfilename,
const MultiFab mf,
const Vector< std::string > &  varnames,
const Geometry geom,
Real  time,
int  level_step,
const std::string &  compression,
const std::string &  versionName,
const std::string &  levelPrefix,
const std::string &  mfPrefix,
const Vector< std::string > &  extra_dirs 
)

◆ WriteSingleLevelPlotfileHDF5SingleDset()

void amrex::WriteSingleLevelPlotfileHDF5SingleDset ( const std::string &  plotfilename,
const MultiFab mf,
const Vector< std::string > &  varnames,
const Geometry geom,
Real  time,
int  level_step,
const std::string &  compression,
const std::string &  versionName,
const std::string &  levelPrefix,
const std::string &  mfPrefix,
const Vector< std::string > &  extra_dirs 
)

◆ Xpay() [1/3]

template<typename T , typename Allocator >
void amrex::Xpay ( AlgVector< T, Allocator > &  y,
T  a,
AlgVector< T, Allocator > const &  x 
)

y = x + a*y. For GPU builds, this function is asynchronous with respect to the host.

◆ Xpay() [2/3]

template<class MF , std::size_t N, std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0>
void amrex::Xpay ( Array< MF, N > &  dst,
typename MF::value_type  a,
Array< MF, N > const &  src,
int  scomp,
int  dcomp,
int  ncomp,
IntVect const &  nghost 
)

dst = src + a * dst

◆ Xpay() [3/3]

template<class MF , std::enable_if_t< IsMultiFabLike_v< MF >, int > = 0>
void amrex::Xpay ( MF &  dst,
typename MF::value_type  a,
MF const &  src,
int  scomp,
int  dcomp,
int  ncomp,
IntVect const &  nghost 
)

dst = src + a * dst

Variable Documentation

◆ cell_bilinear_interp

CellBilinear amrex::cell_bilinear_interp

◆ cell_cons_interp

CellConservativeLinear amrex::cell_cons_interp ( false  )

◆ cell_quartic_interp

CellQuartic amrex::cell_quartic_interp

◆ E_ixtype

constexpr std::array<IntVect,3> amrex::E_ixtype {IntVect(0,1,1),IntVect(1,0,1),IntVect(1,1,0)}
static constexpr

◆ eb_cell_cons_interp

EBCellConservativeLinear amrex::eb_cell_cons_interp ( false  )

◆ eb_covered_val

constexpr amrex::Real amrex::eb_covered_val = amrex::Real(1.e40)
static constexpr

◆ eb_lincc_interp

EBCellConservativeLinear amrex::eb_lincc_interp

◆ eb_mf_cell_cons_interp

EBMFCellConsLinInterp amrex::eb_mf_cell_cons_interp ( false  )

◆ eb_mf_lincc_interp

EBMFCellConsLinInterp amrex::eb_mf_lincc_interp ( true  )

◆ face_cons_linear_interp

FaceConservativeLinear amrex::face_cons_linear_interp

◆ face_divfree_interp

FaceDivFree amrex::face_divfree_interp

◆ face_linear_interp

FaceLinear amrex::face_linear_interp

◆ gpu_rand_state

randState_t * amrex::gpu_rand_state = nullptr

◆ gpuSuccess

constexpr gpuError_t amrex::gpuSuccess = cudaSuccess
constexpr

◆ INVALID_TIME

constexpr Real amrex::INVALID_TIME = -1.0e200_rt
static constexpr

◆ IsBaseFab_v

template<class A >
constexpr bool amrex::IsBaseFab_v = IsBaseFab<A>::value
inline constexpr

◆ IsConvertible_v

template<typename T , typename... Args>
constexpr bool amrex::IsConvertible_v = IsConvertible<T, Args...>::value
inline constexpr

◆ IsFabArray_v

template<class A >
constexpr bool amrex::IsFabArray_v = IsFabArray<A>::value
inline constexpr

◆ IsMultiFabLike_v

template<class M >
constexpr bool amrex::IsMultiFabLike_v = IsMultiFabLike<M>::value
inline constexpr

◆ IsNarrowingConversion_v

template<typename From , typename To >
constexpr bool amrex::IsNarrowingConversion_v = IsNarrowingConversion<From, To>::value
inline constexpr

◆ IsNonNarrowingConversion_v

template<typename From , typename To >
constexpr bool amrex::IsNonNarrowingConversion_v = !IsNarrowingConversion<From, To>::value
inline constexpr

◆ lincc_interp

CellConservativeLinear amrex::lincc_interp

◆ mf_cell_bilinear_interp

MFCellBilinear amrex::mf_cell_bilinear_interp

◆ mf_cell_cons_interp

MFCellConsLinInterp amrex::mf_cell_cons_interp ( false  )

◆ mf_lincc_interp

MFCellConsLinInterp amrex::mf_lincc_interp ( true  )

◆ mf_linear_slope_minmax_interp

MFCellConsLinMinmaxLimitInterp amrex::mf_linear_slope_minmax_interp

◆ mf_node_bilinear_interp

MFNodeBilinear amrex::mf_node_bilinear_interp

◆ mf_pc_interp

MFPCInterp amrex::mf_pc_interp

◆ MFNEWDATA

constexpr int amrex::MFNEWDATA = 0
static constexpr

◆ MFOLDDATA

constexpr int amrex::MFOLDDATA = 1
static constexpr

◆ node_bilinear_interp

NodeBilinear amrex::node_bilinear_interp

◆ pc_interp

PCInterp amrex::pc_interp

Construct a global object of each version.

◆ protected_interp

CellConservativeProtected amrex::protected_interp

◆ quadratic_interp

CellQuadratic amrex::quadratic_interp

◆ quartic_interp

CellConservativeQuartic amrex::quartic_interp

◆ ResetDisplay

constexpr char amrex::ResetDisplay[] = "\033[0m"
constexpr

◆ SpaceDim

constexpr int amrex::SpaceDim = 3
constexpr

◆ sys_name

const char amrex::sys_name[] = "IEEE"
static