4 #include <AMReX_Config.H>
57 template <typename T, std::enable_if_t<!IsBaseFab<T>::value, int> = 0>
91 template <typename T, typename... Ts>
93 tags.emplace_back(std::forward<T>(t));
94 return SetTag(std::forward<Ts>(ts)...);
150 template <typename T>
160 explicit operator bool() const noexcept {
171 template <class FAB> class FabArray;
173 template <class DFAB, class SFAB,
174 std::enable_if_t<std::conjunction_v<
177 typename DFAB::value_type>>, int> BAR = 0>
181 Copy(dst,src,srccomp,dstcomp,numcomp, IntVect(nghost));
184 template <class DFAB, class SFAB,
185 std::enable_if_t<std::conjunction_v<
186 IsBaseFab<DFAB>, IsBaseFab<SFAB>,
188 typename DFAB::value_type>>, int> BAR = 0>
194 using DT = typename DFAB::value_type;
199 if constexpr (std::is_same_v<typename SFAB::value_type, typename DFAB::value_type>) {
208 auto const& dstarr = dst.arrays();
212 dstarr[box_no](i,j,k,dstcomp+n) = DT(srcarr[box_no](i,j,k,srccomp+n));
219 #pragma omp parallel if (Gpu::notInLaunchRegion())
223 const Box& bx = mfi.growntilebox(nghost);
227 auto const& dstFab = dst.array(mfi);
230 dstFab(i,j,k,dstcomp+n) = DT(srcFab(i,j,k,srccomp+n));
238 class bar = std::enable_if_t<IsBaseFab<FAB>::value> >
242 Add(dst,src,srccomp,dstcomp,numcomp, IntVect(nghost));
246 class bar = std::enable_if_t<IsBaseFab<FAB>::value> >
254 auto const& dstfa = dst.arrays();
259 dstfa[box_no](i,j,k,n+dstcomp) += srcfa[box_no](i,j,k,n+srccomp);
268 #pragma omp parallel if (Gpu::notInLaunchRegion())
272 const Box& bx = mfi.growntilebox(nghost);
275 auto const srcFab = src.array(mfi);
276 auto dstFab = dst.array(mfi);
279 dstFab(i,j,k,n+dstcomp) += srcFab(i,j,k,n+srccomp);
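The two free functions above, amrex::Copy and amrex::Add, do purely local, box-by-box work, so dst and src must share the same BoxArray and DistributionMapping. A minimal usage sketch; the MultiFab names and sizes here are illustrative, not part of this header:

// ba and dm are an existing BoxArray and DistributionMapping
amrex::MultiFab mf_a(ba, dm, 2, 1);   // 2 components, 1 ghost cell
amrex::MultiFab mf_b(ba, dm, 2, 1);
mf_a.setVal(1.0);
mf_b.setVal(2.0);
amrex::Copy(mf_b, mf_a, 0, 1, 1, 0);  // copy comp 0 of mf_a into comp 1 of mf_b, valid cells only
amrex::Add (mf_b, mf_a, 0, 0, 2, 1);  // add both components, including the ghost cell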
381 #ifdef AMREX_STRICT_MODE
393 #ifdef AMREX_STRICT_MODE
422 #ifdef AMREX_STRICT_MODE
434 #ifdef AMREX_STRICT_MODE
452 return (f != nullptr);
478 return f->isAllRegular();
539 template <class F=FAB, std::enable_if_t<IsBaseFab<F>::value, int> = 0>
542 template <class F=FAB, std::enable_if_t<IsBaseFab<F>::value, int> = 0>
545 template <class F=FAB, std::enable_if_t<IsBaseFab<F>::value, int> = 0>
548 template <class F=FAB, std::enable_if_t<IsBaseFab<F>::value, int> = 0>
551 template <class F=FAB, std::enable_if_t<IsBaseFab<F>::value, int> = 0>
554 template <class F=FAB, std::enable_if_t<IsBaseFab<F>::value, int> = 0>
557 template <class F=FAB, std::enable_if_t<IsBaseFab<F>::value, int> = 0>
560 template <class F=FAB, std::enable_if_t<IsBaseFab<F>::value, int> = 0>
563 template <class F=FAB, std::enable_if_t<IsBaseFab<F>::value, int> = 0>
566 template <class F=FAB, std::enable_if_t<IsBaseFab<F>::value, int> = 0>
569 template <class F=FAB, std::enable_if_t<IsBaseFab<F>::value, int> = 0>
572 template <class F=FAB, std::enable_if_t<IsBaseFab<F>::value, int> = 0>
575 template <class F=FAB, std::enable_if_t<IsBaseFab<F>::value, int> = 0>
578 template <class F=FAB, std::enable_if_t<IsBaseFab<F>::value, int> = 0>
581 template <class F=FAB, std::enable_if_t<IsBaseFab<F>::value, int> = 0>
584 template <class F=FAB, std::enable_if_t<IsBaseFab<F>::value, int> = 0>
587 template <class F=FAB, std::enable_if_t<IsBaseFab<F>::value, int> = 0>
594 template <class F=FAB, std::enable_if_t<std::is_move_constructible_v<F>, int> = 0>
601 template <class F=FAB, std::enable_if_t<std::is_move_constructible_v<F>, int> = 0>
629 template <typename SFAB, typename DFAB = FAB,
630 std::enable_if_t<std::conjunction_v<
649 template <class F=FAB, std::enable_if_t<IsBaseFab<F>::value, int> = 0>
654 template <class F=FAB, std::enable_if_t<IsBaseFab<F>::value, int> = 0>
658 template <class F=FAB, std::enable_if_t<IsBaseFab<F>::value, int> = 0>
666 template <class F=FAB, std::enable_if_t<IsBaseFab<F>::value, int> = 0>
672 template <class F=FAB, std::enable_if_t<IsBaseFab<F>::value, int> = 0>
684 template <class F=FAB, std::enable_if_t<IsBaseFab<F>::value, int> = 0>
691 template <class F=FAB, std::enable_if_t<IsBaseFab<F>::value, int> = 0>
701 template <class F=FAB, std::enable_if_t<IsBaseFab<F>::value, int> = 0>
704 template <class F=FAB, std::enable_if_t<IsBaseFab<F>::value, int> = 0>
712 template <class F=FAB, std::enable_if_t<IsBaseFab<F>::value, int> = 0>
715 template <class F=FAB, std::enable_if_t<IsBaseFab<F>::value, int> = 0>
718 template <class F=FAB, std::enable_if_t<IsBaseFab<F>::value, int> = 0>
719 void abs (int comp, int ncomp, int nghost = 0);
721 template <class F=FAB, std::enable_if_t<IsBaseFab<F>::value, int> = 0>
724 template <class F=FAB, std::enable_if_t<IsBaseFab<F>::value, int> = 0>
727 template <class F=FAB, std::enable_if_t<IsBaseFab<F>::value, int> = 0>
730 template <class F=FAB, std::enable_if_t<IsBaseFab<F>::value, int> = 0>
733 template <class F=FAB, std::enable_if_t<IsBaseFab<F>::value, int> = 0>
736 template <class F=FAB, std::enable_if_t<IsBaseFab<F>::value, int> = 0>
739 template <class F=FAB, std::enable_if_t<IsBaseFab<F>::value, int> = 0>
743 template <class F=FAB, std::enable_if_t<IsBaseFab<F>::value, int> = 0>
747 template <class F=FAB, std::enable_if_t<IsBaseFab<F>::value, int> = 0>
751 template <class F=FAB, std::enable_if_t<IsBaseFab<F>::value, int> = 0>
755 template <class F=FAB, std::enable_if_t<IsBaseFab<F>::value, int> = 0>
765 template <typename F=FAB, std::enable_if_t<IsBaseFab<F>::value, int> = 0>
767 sum (int comp, IntVect const& nghost, bool local = false) const;
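sum() above reduces one component over the valid region grown by nghost; with local = true the MPI reduction is skipped and each rank keeps only its own partial sum. An illustrative call (mf is an assumed MultiFab):

auto total      = mf.sum(0, amrex::IntVect(0));        // global sum of component 0, valid cells only
auto local_part = mf.sum(0, amrex::IntVect(0), true);  // per-rank partial sum, no reduction
amrex::Print() << "sum = " << total << "\n";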
783 [[deprecated("Use FabArray::ParallelCopy() instead.")]]
817 { ParallelCopy(src,src_comp,dest_comp,num_comp,0,0,period,op); }
819 [[deprecated("Use FabArray::ParallelCopy() instead.")]]
826 { ParallelCopy(src,src_comp,dest_comp,num_comp, period, op); }
908 IntVect(dst_nghost),period,op); }
919 bool to_ghost_cells_only = false);
941 [[deprecated("Use FabArray::ParallelCopy() instead.")]]
952 [[deprecated("Use FabArray::ParallelCopy() instead.")]]
961 { ParallelCopy(src,src_comp,dest_comp,num_comp,src_nghost,dst_nghost,period,op); }
984 void copyTo (FAB& dest, int scomp, int dcomp, int ncomp, int nghost = 0) const;
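The copy() overloads above are kept only for backward compatibility and forward to ParallelCopy(), which exchanges intersecting data between FabArrays that may be built on different BoxArrays and DistributionMappings. A hedged sketch; dst, src and geom are illustrative:

dst.ParallelCopy(src, 0, 0, src.nComp(), geom.periodicity());                           // overwrite on intersection
dst.ParallelCopy(src, 0, 0, src.nComp(), geom.periodicity(), amrex::FabArrayBase::ADD); // accumulate instead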
1003 template <typename BUF=value_type>
1006 template <typename BUF=value_type>
1009 template <typename BUF=value_type>
1013 template <typename BUF=value_type>
1016 template <typename BUF=value_type>
1019 template <typename BUF=value_type>
1022 template <typename BUF=value_type>
1025 template <typename BUF=value_type>
1028 template <typename BUF=value_type>
1031 template <typename BUF=value_type>
1034 template <typename BUF=value_type>
1037 template <typename BUF=value_type>
1041 class F=FAB, std::enable_if_t<IsBaseFab<F>::value, int> = 0>
1150 template <class F=FAB, std::enable_if_t<IsBaseFab<F>::value, int> = 0>
1158 class F=FAB, std::enable_if_t<IsBaseFab<F>::value, int> = 0>
1161 bool enforce_periodicity_only = false,
1162 bool override_sync = false);
1166 int scomp, int dcomp, int ncomp, CpOp op);
1168 template <class F=FAB, std::enable_if_t<IsBaseFab<F>::value, int> = 0>
1171 template <class F=FAB, std::enable_if_t<IsBaseFab<F>::value, int> = 0>
1174 #ifdef AMREX_USE_GPU
1178 int scomp, int dcomp, int ncomp, CpOp op);
1183 #if defined(__CUDACC__)
1185 void FB_local_copy_cuda_graph_1 (const FB& TheFB, int scomp, int ncomp);
1186 void FB_local_copy_cuda_graph_n (const FB& TheFB, int scomp, int ncomp);
1191 #ifdef AMREX_USE_MPI
1193 #ifdef AMREX_USE_GPU
1194 #if defined(__CUDACC__)
1196 void FB_pack_send_buffer_cuda_graph (const FB& TheFB, int scomp, int ncomp,
1201 void FB_unpack_recv_buffer_cuda_graph (const FB& TheFB, int dcomp, int ncomp,
1205 bool is_thread_safe);
1209 template <typename BUF = value_type>
1215 template <typename BUF = value_type>
1220 CpOp op, bool is_thread_safe);
1224 template <typename BUF = value_type>
1230 template <typename BUF = value_type>
1235 CpOp op, bool is_thread_safe);
1248 template <typename F=FAB, std::enable_if_t<IsBaseFab<F>::value, int> = 0>
1249 typename F::value_type
1251 [[maybe_unused]] bool ignore_covered = false) const;
1262 template <typename IFAB, typename F=FAB, std::enable_if_t<IsBaseFab<F>::value, int> = 0>
1263 typename F::value_type
1265 bool local = false) const;
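norminf(), declared above, returns the max-norm of ncomp components starting at comp over the valid region grown by nghost; the IFAB overload restricts the reduction to cells where the mask is nonzero. Illustrative call (mf assumed):

auto nrm = mf.norminf(0, 1, amrex::IntVect(0));   // infinity norm of component 0, globally reduced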
1281 #ifdef AMREX_USE_GPU
1296 #if defined(BL_USE_MPI3)
1297 if (win != MPI_WIN_NULL) { MPI_Win_free(&win); }
1307 #if defined(BL_USE_MPI3)
1312 #if defined(BL_USE_MPI3)
1313 rhs.win = MPI_WIN_NULL;
1322 #if defined(BL_USE_MPI3)
1324 rhs.win = MPI_WIN_NULL;
1334 #if defined(BL_USE_MPI3)
1335 MPI_Win win = MPI_WIN_NULL;
1347 bool alloc_single_chunk);
1351 template <class F=FAB, std::enable_if_t<IsBaseFab<F>::value, int> = 0>
1361 template <typename BUF=value_type>
1363 char*& the_recv_data,
1371 template <typename BUF=value_type>
1381 template <typename BUF=value_type>
1383 char*& the_send_data,
1391 template <typename BUF=value_type>
1408 std::unique_ptr<FBData<FAB>> fbd;
1409 std::unique_ptr<PCData<FAB>> pcd;
1427 template <class F=FAB, std::enable_if_t<IsBaseFab<F>::value, int> = 0>
1429 int xcomp, int ycomp, int ncomp, IntVect const& nghost);
1442 template <class F=FAB, std::enable_if_t<IsBaseFab<F>::value, int> = 0>
1444 int xcomp, int ycomp, int ncomp, IntVect const& nghost);
1460 template <class F=FAB, std::enable_if_t<IsBaseFab<F>::value, int> = 0>
1464 int dstcomp, int numcomp, const IntVect& nghost);
1470 template <class FAB>
1474 int li = localindex(K);
1475 if (li >= 0 && li < static_cast<int>(m_fabs_v.size()) && m_fabs_v[li] != 0) {
1483 template <class FAB>
1487 int li = mfi.LocalIndex();
1488 if (li < static_cast<int>(m_fabs_v.size()) && m_fabs_v[li] != nullptr) {
1496 template <class FAB>
1502 int li = mfi.LocalIndex();
1503 return m_fabs_v[li];
1506 template <class FAB>
1512 int li = mfi.LocalIndex();
1513 return m_fabs_v[li];
1516 template <class FAB>
1520 int li = localindex(K);
1522 return m_fabs_v[li];
1525 template <class FAB>
1529 int li = localindex(K);
1531 return m_fabs_v[li];
1534 template <class FAB>
1535 template <class F, std::enable_if_t<IsBaseFab<F>::value, int>>
1539 #ifdef AMREX_USE_CUDA
1540 this->fabPtr(mfi)->prefetchToHost();
1546 template <class FAB>
1547 template <class F, std::enable_if_t<IsBaseFab<F>::value, int>>
1551 #ifdef AMREX_USE_CUDA
1552 this->fabPtr(mfi)->prefetchToDevice();
1558 template <class FAB>
1559 template <class F, std::enable_if_t<IsBaseFab<F>::value, int>>
1563 return fabPtr(mfi)->const_array();
1566 template <class FAB>
1567 template <class F, std::enable_if_t<IsBaseFab<F>::value, int>>
1571 return fabPtr(mfi)->array();
1574 template <class FAB>
1575 template <class F, std::enable_if_t<IsBaseFab<F>::value, int>>
1579 return fabPtr(K)->const_array();
1582 template <class FAB>
1583 template <class F, std::enable_if_t<IsBaseFab<F>::value, int>>
1587 return fabPtr(K)->array();
1590 template <class FAB>
1591 template <class F, std::enable_if_t<IsBaseFab<F>::value, int>>
1595 return fabPtr(mfi)->const_array();
1598 template <class FAB>
1599 template <class F, std::enable_if_t<IsBaseFab<F>::value, int>>
1603 return fabPtr(K)->const_array();
1606 template <class FAB>
1607 template <class F, std::enable_if_t<IsBaseFab<F>::value, int>>
1611 return fabPtr(mfi)->const_array(start_comp);
1614 template <class FAB>
1615 template <class F, std::enable_if_t<IsBaseFab<F>::value, int>>
1619 return fabPtr(mfi)->array(start_comp);
1622 template <class FAB>
1623 template <class F, std::enable_if_t<IsBaseFab<F>::value, int>>
1627 return fabPtr(K)->const_array(start_comp);
1630 template <class FAB>
1631 template <class F, std::enable_if_t<IsBaseFab<F>::value, int>>
1635 return fabPtr(K)->array(start_comp);
1638 template <class FAB>
1639 template <class F, std::enable_if_t<IsBaseFab<F>::value, int>>
1643 return fabPtr(mfi)->const_array(start_comp);
1646 template <class FAB>
1647 template <class F, std::enable_if_t<IsBaseFab<F>::value, int>>
1651 return fabPtr(K)->const_array(start_comp);
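The array()/const_array() accessors defined above return an Array4 view of the FAB selected by an MFIter or a global box index. A typical tiled loop looks like this (mf and the loop body are illustrative):

for (amrex::MFIter mfi(mf, amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi) {
    const amrex::Box& bx = mfi.tilebox();
    auto const& a = mf.array(mfi);                 // Array4<Real>
    amrex::ParallelFor(bx, [=] AMREX_GPU_DEVICE (int i, int j, int k)
    {
        a(i,j,k,0) += 1.0;                         // update component 0
    });
}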
1654 template <class FAB>
1655 template <class F, std::enable_if_t<IsBaseFab<F>::value, int>>
1663 template <class FAB>
1664 template <class F, std::enable_if_t<IsBaseFab<F>::value, int>>
1669 return m_const_arrays;
1672 template <class FAB>
1673 template <class F, std::enable_if_t<IsBaseFab<F>::value, int>>
1678 return m_const_arrays;
1681 template <class FAB>
1682 template <class F, std::enable_if_t<IsBaseFab<F>::value, int>>
1688 static_assert(sizeof(A) == sizeof(AC), "sizeof(Array4<T>) != sizeof(Array4<T const>)");
1689 if (!m_hp_arrays && local_size() > 0) {
1690 const int n = local_size();
1691 #ifdef AMREX_USE_GPU
1697 for (int li = 0; li < n; ++li) {
1699 new ((A*)m_hp_arrays+li) A(m_fabs_v[li]->array());
1700 new ((AC*)m_hp_arrays+li+n) AC(m_fabs_v[li]->const_array());
1702 new ((A*)m_hp_arrays+li) A{};
1703 new ((AC*)m_hp_arrays+li+n) AC{};
1706 m_arrays.hp = (A*)m_hp_arrays;
1707 m_const_arrays.hp = (AC*)m_hp_arrays + n;
1708 #ifdef AMREX_USE_GPU
1709 m_arrays.dp = (A*)m_dp_arrays;
1710 m_const_arrays.dp = (AC*)m_dp_arrays + n;
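build_arrays() above fills host (and, with AMREX_USE_GPU, device) tables of Array4 views so that arrays()/const_arrays() can feed a single multi-box kernel. A sketch of the corresponding MultiFab-level pattern (mf is illustrative):

auto const& ma = mf.arrays();                      // MultiArray4<Real>
amrex::ParallelFor(mf, amrex::IntVect(0),
    [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k)
{
    ma[box_no](i,j,k,0) *= 2.0;
});
amrex::Gpu::streamSynchronize();                   // the multi-box ParallelFor is asynchronous on GPU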
1716 template <class FAB>
1720 #ifdef AMREX_USE_GPU
1723 m_dp_arrays = nullptr;
1727 m_hp_arrays = nullptr;
1728 m_arrays.hp = nullptr;
1729 m_const_arrays.hp = nullptr;
1732 template <class FAB>
1737 const int li = localindex(K);
1738 if (li >= 0 && li < static_cast<int>(m_fabs_v.size()) && m_fabs_v[li] != nullptr) {
1742 for (auto const& t : m_tags) {
1743 updateMemUsage(t, -nbytes, nullptr);
1746 return std::exchange(m_fabs_v[li], nullptr);
1752 template <class FAB>
1758 if (li >= 0 && li < static_cast<int>(m_fabs_v.size()) && m_fabs_v[li] != nullptr) {
1762 for (auto const& t : m_tags) {
1763 updateMemUsage(t, -nbytes, nullptr);
1766 return std::exchange(m_fabs_v[li], nullptr);
1772 template <class FAB>
1776 if (define_function_called)
1778 define_function_called = false;
1783 for (auto *x : m_fabs_v) {
1786 m_factory->destroy(x);
1792 m_dallocator.m_arena = nullptr;
1796 for (auto const& t : m_tags) {
1797 updateMemUsage(t, -nbytes, nullptr);
1801 if (m_single_chunk_arena) {
1802 m_single_chunk_arena.reset();
1804 m_single_chunk_size = 0;
1811 template <class FAB>
1812 template <typename SFAB, typename DFAB,
1813 std::enable_if_t<std::conjunction_v<
1816 typename DFAB::value_type>>, int>>
1821 amrex::Copy(*this, src, scomp, dcomp, ncomp, nghost);
1824 template <class FAB>
1825 template <class F, std::enable_if_t<IsBaseFab<F>::value, int>>
1830 amrex::Add(*this, src, scomp, dcomp, ncomp, nghost);
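LocalCopy() and LocalAdd() above are thin wrappers around the amrex::Copy/amrex::Add free functions, so source and destination must share layout. Illustrative calls (dst and src assumed):

dst.LocalCopy(src, 0, 0, 1, amrex::IntVect(0));    // copy component 0, no ghost cells
dst.LocalAdd (src, 0, 0, 1, amrex::IntVect(0));    // then accumulate it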
1833 template <class FAB>
1834 template <class F, std::enable_if_t<IsBaseFab<F>::value, int>>
1841 template <class FAB>
1842 template <class F, std::enable_if_t<IsBaseFab<F>::value, int>>
1846 setVal(val,0,n_comp,nghost);
1849 template <class FAB>
1850 template <class F, std::enable_if_t<IsBaseFab<F>::value, int>>
1857 template <class FAB>
1858 template <class F, std::enable_if_t<IsBaseFab<F>::value, int>>
1862 setVal(val,region,0,n_comp,nghost);
1865 template <class FAB>
1869 m_FA_stats.recordBuild();
1872 template <class FAB>
1877 m_FA_stats.recordBuild();
1880 template <class FAB>
1890 template <class FAB>
1897 : m_factory(factory.clone()),
1904 template <class FAB>
1906 : m_factory(rhs.Factory().clone()),
1916 auto const& rhsfab = *(rhs.m_fabs_v[i]);
1926 template <class FAB>
1929 , m_factory (std::move(rhs.m_factory))
1930 , m_dallocator (std::move(rhs.m_dallocator))
1931 , m_single_chunk_arena(std::move(rhs.m_single_chunk_arena))
1932 , m_single_chunk_size(std::exchange(rhs.m_single_chunk_size,0))
1933 , define_function_called(rhs.define_function_called)
1934 , m_fabs_v (std::move(rhs.m_fabs_v))
1935 #ifdef AMREX_USE_GPU
1936 , m_dp_arrays (std::exchange(rhs.m_dp_arrays, nullptr))
1938 , m_hp_arrays (std::exchange(rhs.m_hp_arrays, nullptr))
1939 , m_arrays (rhs.m_arrays)
1940 , m_const_arrays(rhs.m_const_arrays)
1941 , m_tags (std::move(rhs.m_tags))
1942 , shmem (std::move(rhs.shmem))
1945 m_FA_stats.recordBuild();
1946 rhs.define_function_called = false;
1947 rhs.m_fabs_v.clear();
1951 template <class FAB>
1960 m_factory = std::move(rhs.m_factory);
1961 m_dallocator = std::move(rhs.m_dallocator);
1962 m_single_chunk_arena = std::move(rhs.m_single_chunk_arena);
1963 std::swap(m_single_chunk_size, rhs.m_single_chunk_size);
1964 define_function_called = rhs.define_function_called;
1966 #ifdef AMREX_USE_GPU
1967 std::swap(m_dp_arrays, rhs.m_dp_arrays);
1969 std::swap(m_hp_arrays, rhs.m_hp_arrays);
1970 m_arrays = rhs.m_arrays;
1971 m_const_arrays = rhs.m_const_arrays;
1973 shmem = std::move(rhs.shmem);
1975 rhs.define_function_called = false;
1976 rhs.m_fabs_v.clear();
1983 template <class FAB>
1986 m_FA_stats.recordDelete();
1990 template <class FAB>
1994 if (!define_function_called) { return false; }
2002 if (get(fai).box() != fabbox(fai.index()))
2018 template <class FAB>
2022 return define_function_called;
2025 template <class FAB>
2034 define(bxs,dm,nvar, IntVect(ngrow),info,a_factory);
2037 template <class FAB>
2046 std::unique_ptr<FabFactory<FAB> > factory(a_factory.clone());
2048 auto *default_arena = m_dallocator.m_arena;
2051 m_factory = std::move(factory);
2054 define_function_called = true;
2070 template <class FAB>
2075 if (shmem.alloc) { alloc_single_chunk = false; }
2076 if constexpr (!IsBaseFab_v<FAB>) { alloc_single_chunk = false; }
2078 const int n = indexArray.size();
2080 shmem.alloc = (nworkers > 1);
2082 bool alloc = !shmem.alloc;
2087 if (alloc_single_chunk) {
2088 m_single_chunk_size = 0L;
2089 for (int i = 0; i < n; ++i) {
2090 int K = indexArray[i];
2091 const Box& tmpbox = fabbox(K);
2092 m_single_chunk_size += factory.nBytes(tmpbox, n_comp, K);
2095 m_single_chunk_arena = std::make_unique<detail::SingleChunkArena>(ar, m_single_chunk_size);
2096 fab_info.SetArena(m_single_chunk_arena.get());
2099 m_fabs_v.reserve(n);
2102 for (int i = 0; i < n; ++i)
2104 int K = indexArray[i];
2105 const Box& tmpbox = fabbox(K);
2106 m_fabs_v.push_back(factory.create(tmpbox, n_comp, fab_info, K));
2111 m_tags.emplace_back("All");
2112 for (auto const& t : m_region_tag) {
2113 m_tags.push_back(t);
2115 for (auto const& t : tags) {
2116 m_tags.push_back(t);
2118 for (auto const& t: m_tags) {
2119 updateMemUsage(t, nbytes, ar);
2131 for (int i = 0; i < n; ++i) {
2132 int K = indexArray[i];
2133 int owner = distributionMap[K] - teamlead;
2134 Long s = m_fabs_v[i]->size();
2136 shmem.n_values += s;
2137 shmem.n_points += m_fabs_v[i]->numPts();
2139 if (nextoffset[owner] < 0) {
2141 nextoffset[owner] = s;
2143 offset[i] = nextoffset[owner];
2144 nextoffset[owner] += s;
2148 size_t bytes = shmem.n_values*sizeof(value_type);
2153 #if defined (BL_USE_MPI3)
2155 static MPI_Info info = MPI_INFO_NULL;
2156 if (info == MPI_INFO_NULL) {
2157 MPI_Info_create(&info);
2158 MPI_Info_set(info, "alloc_shared_noncontig", "true");
2163 BL_MPI_REQUIRE( MPI_Win_allocate_shared(bytes, sizeof(value_type),
2164 info, team_comm, &mfp, &shmem.win) );
2166 for (int w = 0; w < nworkers; ++w) {
2170 BL_MPI_REQUIRE( MPI_Win_shared_query(shmem.win, w, &sz, &disp, &dptr) );
2172 dps.push_back(dptr);
2177 amrex::Abort("BaseFab::define: to allocate shared memory, USE_MPI3 must be true");
2181 for (int i = 0; i < n; ++i) {
2182 int K = indexArray[i];
2183 int owner = distributionMap[K] - teamlead;
2185 m_fabs_v[i]->setPtr(p, m_fabs_v[i]->size());
2188 for (Long i = 0; i < shmem.n_values; i++, mfp++) {
2197 template <class FAB>
2209 template <class FAB>
2214 n_comp = elem->nComp();
2217 setFab_assert(boxno, *elem);
2219 if (m_fabs_v.empty()) {
2220 m_fabs_v.resize(indexArray.size(), nullptr);
2223 const int li = localindex(boxno);
2225 m_factory->destroy(m_fabs_v[li]);
2227 m_fabs_v[li] = elem.release();
2230 template <class FAB>
2231 template <class F, std::enable_if_t<std::is_move_constructible_v<F>, int> >
2236 n_comp = elem.nComp();
2239 setFab_assert(boxno, elem);
2241 if (m_fabs_v.empty()) {
2242 m_fabs_v.resize(indexArray.size(), nullptr);
2245 const int li = localindex(boxno);
2247 m_factory->destroy(m_fabs_v[li]);
2249 m_fabs_v[li] = new FAB(std::move(elem));
2252 template <class FAB>
2257 n_comp = elem->nComp();
2260 setFab_assert(mfi.index(), *elem);
2262 if (m_fabs_v.empty()) {
2263 m_fabs_v.resize(indexArray.size(), nullptr);
2268 m_factory->destroy(m_fabs_v[li]);
2270 m_fabs_v[li] = elem.release();
2273 template <class FAB>
2274 template <class F, std::enable_if_t<std::is_move_constructible_v<F>, int> >
2279 n_comp = elem.nComp();
2282 setFab_assert(mfi.index(), elem);
2284 if (m_fabs_v.empty()) {
2285 m_fabs_v.resize(indexArray.size(), nullptr);
2290 m_factory->destroy(m_fabs_v[li]);
2292 m_fabs_v[li] = new FAB(std::move(elem));
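The setFab() overloads above hand ownership of an externally built FAB to the FabArray; the box and component count must match fabbox()/nComp() for that grid. A hedged sketch using the unique_ptr overload on a FabArray created without pre-allocated data (names illustrative):

amrex::MultiFab mf(ba, dm, 1, 0, amrex::MFInfo().SetAlloc(false));
for (amrex::MFIter mfi(mf); mfi.isValid(); ++mfi) {
    auto fab = std::make_unique<amrex::FArrayBox>(mf.fabbox(mfi.index()), mf.nComp());
    fab->setVal<amrex::RunOn::Host>(0.0);
    mf.setFab(mfi, std::move(fab));                // FabArray now owns the FAB
}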
2295 template <class FAB>
2296 template <class F, std::enable_if_t<IsBaseFab<F>::value, int>>
2303 template <class FAB>
2304 template <class F, std::enable_if_t<IsBaseFab<F>::value, int>Z>
2310 if (n_grow.max() > 0)
2312 #ifdef AMREX_USE_GPU
2314 bool use_mfparfor = true;
2315 const int nboxes = local_size();
2317 if (boxarray[indexArray[0]].numPts() > Long(65*65*65)) {
2318 use_mfparfor = false;
2321 for (int i = 0; i < nboxes; ++i) {
2322 const Long npts = boxarray[indexArray[i]].numPts();
2323 if (npts >= Long(64*64*64)) {
2324 use_mfparfor = false;
2326 } else if (npts <= Long(17*17*17)) {
2331 const IntVect nghost = n_grow;
2333 auto const& ma = this->arrays();
2337 auto const& a = ma[box_no];
2341 for (int n = 0; n < ncomp; ++n) {
2342 a(i,j,k,strt_comp+n) = val;
2351 Box const& vbx = mfi.validbox();
2352 auto const& a = this->array(mfi);
2355 #if (AMREX_SPACEDIM == 3)
2356 if (nghost[2] > 0) {
2358 b.setRange(2, vbx.smallEnd(2)-nghost[2], nghost[2]);
2359 b.grow(IntVect(nghost[0],nghost[1],0));
2360 tags.emplace_back(Tag{a, b});
2361 b.shift(2, vbx.length(2)+nghost[2]);
2362 tags.emplace_back(Tag{a, b});
2365 #if (AMREX_SPACEDIM >= 2)
2366 if (nghost[1] > 0) {
2368 b.setRange(1, vbx.smallEnd(1)-nghost[1], nghost[1]);
2369 b.grow(0, nghost[0]);
2370 tags.emplace_back(Tag{a, b});
2371 b.shift(1, vbx.length(1)+nghost[1]);
2372 tags.emplace_back(Tag{a, b});
2375 if (nghost[0] > 0) {
2377 b.setRange(0, vbx.smallEnd(0)-nghost[0], nghost[0]);
2378 tags.emplace_back(Tag{a, b});
2379 b.shift(0, vbx.length(0)+nghost[0]);
2380 tags.emplace_back(Tag{a, b});
2387 tag.dfab(i,j,k,strt_comp+n) = val;
2393 #ifdef AMREX_USE_OMP
2394 #pragma omp parallel
2398 get(fai).template setComplement<RunOn::Host>(val, fai.validbox(), strt_comp, ncomp);
2404 template <class FAB>
2405 template <class F, std::enable_if_t<IsBaseFab<F>::value, int>>
2409 setDomainBndry(val, 0, n_comp, geom);
2412 template <class FAB>
2413 template <class F, std::enable_if_t<IsBaseFab<F>::value, int>>
2423 for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
2425 int n = domain_box.length(idim);
2426 domain_box.grow(idim, n);
2430 #ifdef AMREX_USE_OMP
2431 #pragma omp parallel if (Gpu::notInLaunchRegion())
2435 const Box& gbx = fai.fabbox();
2438 get(fai).template setComplement<RunOn::Device>(val, domain_box, strt_comp, ncomp);
2443 template <class FAB>
2444 template <class F, std::enable_if_t<IsBaseFab<F>::value, int> FOO>
2445 typename F::value_type
2450 using T = typename FAB::value_type;
2452 #ifdef AMREX_USE_GPU
2454 auto const& ma = this->const_arrays();
2459 return ma[box_no](i,j,k,comp);
2464 #ifdef AMREX_USE_OMP
2465 #pragma omp parallel if (!system::regtest_reduction) reduction(+:sm)
2469 Box const& bx = mfi.growntilebox(nghost);
2470 auto const& a = this->const_array(mfi);
2474 tmp += a(i,j,k,comp);
2487 template <class FAB>
2491 copyTo(dest, 0, 0, dest.nComp(), nghost);
2494 template <class FAB>
2495 template <class F, std::enable_if_t<IsBaseFab<F>::value, int>>
2499 setVal(val,0,n_comp,n_grow);
2502 template <class FAB>
2503 template <class F, std::enable_if_t<IsBaseFab<F>::value, int>>
2511 template <class FAB>
2512 template <class F, std::enable_if_t<IsBaseFab<F>::value, int>>
2522 template <class FAB>
2523 template <class F, std::enable_if_t<IsBaseFab<F>::value, int>Z>
2535 #ifdef AMREX_USE_GPU
2537 auto const& fa = this->arrays();
2541 fa[box_no](i,j,k,n+comp) = val;
2549 #ifdef AMREX_USE_OMP
2550 #pragma omp parallel if (Gpu::notInLaunchRegion())
2554 const Box& bx = fai.growntilebox(nghost);
2555 auto fab = this->array(fai);
2558 fab(i,j,k,n+comp) = val;
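The setVal() definitions above fill components either through the multi-box GPU path or an OpenMP MFIter loop on the host. Typical calls (mf assumed):

mf.setVal(0.0);                                    // every component, whole FABs including ghost cells
mf.setVal(1.0, 0, 1);                              // component 0 only, valid cells
mf.setVal(2.0, 0, mf.nComp(), mf.nGrowVect());     // all components, all ghost cells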
2564 template <class FAB>
2565 template <class F, std::enable_if_t<IsBaseFab<F>::value, int>>
2576 template <class FAB>
2577 template <class F, std::enable_if_t<IsBaseFab<F>::value, int>Z>
2588 BL_PROFILE("FabArray::setVal(val,region,comp,ncomp,nghost)");
2590 #ifdef AMREX_USE_GPU
2592 auto const& fa = this->arrays();
2597 fa[box_no](i,j,k,n+comp) = val;
2606 #ifdef AMREX_USE_OMP
2608 #pragma omp parallel if (Gpu::notInLaunchRegion())
2612 Box b = fai.growntilebox(nghost) & region;
2615 auto fab = this->array(fai);
2618 fab(i,j,k,n+comp) = val;
2625 template <class FAB>
2626 template <class F, std::enable_if_t<IsBaseFab<F>::value, int>>
2633 template <class FAB>
2634 template <class F, std::enable_if_t<IsBaseFab<F>::value, int>Z>
2642 #ifdef AMREX_USE_GPU
2644 auto const& fa = this->arrays();
2648 fa[box_no](i,j,k,n+comp) = std::abs(fa[box_no](i,j,k,n+comp));
2656 #ifdef AMREX_USE_OMP
2657 #pragma omp parallel if (Gpu::notInLaunchRegion())
2661 const Box& bx = mfi.growntilebox(nghost);
2662 auto fab = this->array(mfi);
2665 fab(i,j,k,n+comp) = std::abs(fab(i,j,k,n+comp));
2671 template <class FAB>
2672 template <class F, std::enable_if_t<IsBaseFab<F>::value, int>Z>
2678 #ifdef AMREX_USE_GPU
2680 auto const& fa = this->arrays();
2684 fa[box_no](i,j,k,n+comp) += val;
2692 #ifdef AMREX_USE_OMP
2693 #pragma omp parallel if (Gpu::notInLaunchRegion())
2697 const Box& bx = mfi.growntilebox(nghost);
2698 auto fab = this->array(mfi);
2701 fab(i,j,k,n+comp) += val;
2707 template <class FAB>
2708 template <class F, std::enable_if_t<IsBaseFab<F>::value, int>Z>
2712 BL_PROFILE("FabArray::plus(val, region, comp, num_comp, nghost)");
2714 #ifdef AMREX_USE_GPU
2716 auto const& fa = this->arrays();
2721 fa[box_no](i,j,k,n+comp) += val;
2730 #ifdef AMREX_USE_OMP
2731 #pragma omp parallel if (Gpu::notInLaunchRegion())
2735 const Box& bx = mfi.growntilebox(nghost) & region;
2737 auto fab = this->array(mfi);
2740 fab(i,j,k,n+comp) += val;
2747 template <class FAB>
2748 template <class F, std::enable_if_t<IsBaseFab<F>::value, int>Z>
2754 #ifdef AMREX_USE_GPU
2756 auto const& fa = this->arrays();
2760 fa[box_no](i,j,k,n+comp) *= val;
2768 #ifdef AMREX_USE_OMP
2769 #pragma omp parallel if (Gpu::notInLaunchRegion())
2773 const Box& bx = mfi.growntilebox(nghost);
2774 auto fab = this->array(mfi);
2777 fab(i,j,k,n+comp) *= val;
2783 template <class FAB>
2784 template <class F, std::enable_if_t<IsBaseFab<F>::value, int>Z>
2788 BL_PROFILE("FabArray::mult(val, region, comp, num_comp, nghost)");
2790 #ifdef AMREX_USE_GPU
2792 auto const& fa = this->arrays();
2797 fa[box_no](i,j,k,n+comp) *= val;
2806 #ifdef AMREX_USE_OMP
2807 #pragma omp parallel if (Gpu::notInLaunchRegion())
2811 const Box& bx = mfi.growntilebox(nghost) & region;
2813 auto fab = this->array(mfi);
2816 fab(i,j,k,n+comp) *= val;
2823 template <class FAB>
2824 template <class F, std::enable_if_t<IsBaseFab<F>::value, int>Z>
2830 #ifdef AMREX_USE_GPU
2832 auto const& fa = this->arrays();
2836 fa[box_no](i,j,k,n+comp) = numerator / fa[box_no](i,j,k,n+comp);
2844 #ifdef AMREX_USE_OMP
2845 #pragma omp parallel if (Gpu::notInLaunchRegion())
2849 const Box& bx = mfi.growntilebox(nghost);
2850 auto fab = this->array(mfi);
2853 fab(i,j,k,n+comp) = numerator / fab(i,j,k,n+comp);
2859 template <class FAB>
2860 template <class F, std::enable_if_t<IsBaseFab<F>::value, int>Z>
2864 BL_PROFILE("FabArray::invert(numerator, region, comp, num_comp, nghost)");
2866 #ifdef AMREX_USE_GPU
2868 auto const& fa = this->arrays();
2873 fa[box_no](i,j,k,n+comp) = numerator / fa[box_no](i,j,k,n+comp);
2882 #ifdef AMREX_USE_OMP
2883 #pragma omp parallel if (Gpu::notInLaunchRegion())
2887 const Box& bx = mfi.growntilebox(nghost) & region;
2889 auto fab = this->array(mfi);
2892 fab(i,j,k,n+comp) = numerator / fab(i,j,k,n+comp);
2899 template <class FAB>
2906 #ifdef AMREX_USE_OMP
2907 #pragma omp parallel
2916 template <class FAB>
2917 template <class F, std::enable_if_t<IsBaseFab<F>::value, int> FOO>
2919 int xcomp, int ycomp, int ncomp, IntVect const& nghost)
2927 #ifdef AMREX_USE_GPU
2929 auto const& yma = y.arrays();
2930 auto const& xma = x.const_arrays();
2934 yma[box_no](i,j,k,ycomp+n) += a * xma[box_no](i,j,k,xcomp+n);
2942 #ifdef AMREX_USE_OMP
2943 #pragma omp parallel if (Gpu::notInLaunchRegion())
2947 const Box& bx = mfi.growntilebox(nghost);
2950 auto const& xfab = x.const_array(mfi);
2951 auto const& yfab = y.array(mfi);
2954 yfab(i,j,k,ycomp+n) += a * xfab(i,j,k,xcomp+n);
2961 template <class FAB>
2962 template <class F, std::enable_if_t<IsBaseFab<F>::value, int> FOO>
2965 int xcomp, int ycomp, int ncomp, IntVect const& nghost)
2973 #ifdef AMREX_USE_GPU
2975 auto const& yfa = y.arrays();
2976 auto const& xfa = x.const_arrays();
2980 yfa[box_no](i,j,k,n+ycomp) = xfa[box_no](i,j,k,n+xcomp)
2981 + a * yfa[box_no](i,j,k,n+ycomp);
2989 #ifdef AMREX_USE_OMP
2990 #pragma omp parallel if (Gpu::notInLaunchRegion())
2994 const Box& bx = mfi.growntilebox(nghost);
2995 auto const& xFab = x.const_array(mfi);
2996 auto const& yFab = y.array(mfi);
2999 yFab(i,j,k,n+ycomp) = xFab(i,j,k,n+xcomp)
3000 + a * yFab(i,j,k,n+ycomp);
3006 template <class FAB>
3007 template <class F, std::enable_if_t<IsBaseFab<F>::value, int> FOO>
3012 int dstcomp, int numcomp, const IntVect& nghost)
3022 #ifdef AMREX_USE_GPU
3024 auto const& dstma = dst.arrays();
3025 auto const& xma = x.const_arrays();
3030 dstma[box_no](i,j,k,dstcomp+n) = a*xma[box_no](i,j,k,xcomp+n)
3031 + b*yma[box_no](i,j,k,ycomp+n);
3039 #ifdef AMREX_USE_OMP
3040 #pragma omp parallel if (Gpu::notInLaunchRegion())
3044 const Box& bx = mfi.growntilebox(nghost);
3045 auto const& xfab = x.const_array(mfi);
3047 auto const& dfab = dst.array(mfi);
3050 dfab(i,j,k,dstcomp+n) = a*xfab(i,j,k,xcomp+n) + b*yfab(i,j,k,ycomp+n);
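Saxpy, Xpay and LinComb above are static helpers computing y += a*x, y = x + a*y and dst = a*x + b*y component-wise; all operands must share layout. Illustrative calls through the FabArray<FArrayBox> specialization (x, y, d assumed):

using FA = amrex::FabArray<amrex::FArrayBox>;
FA::Saxpy  (y, 0.5, x, 0, 0, 1, amrex::IntVect(0));              // y += 0.5*x
FA::Xpay   (y, 2.0, x, 0, 0, 1, amrex::IntVect(0));              // y  = x + 2*y
FA::LinComb(d, 1.5, x, 0, -1.0, y, 0, 0, 1, amrex::IntVect(0));  // d  = 1.5*x - 1.0*y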
3056 template <class FAB>
3057 template <typename BUF>
3062 if ( n_grow.max() > 0 ) {
3064 FillBoundary_finish<BUF>();
3068 template <class FAB>
3069 template <typename BUF>
3074 if ( n_grow.max() > 0 ) {
3075 FillBoundary_nowait<BUF>(0, nComp(), n_grow, period, cross);
3076 FillBoundary_finish<BUF>();
3080 template <class FAB>
3081 template <typename BUF>
3087 "FillBoundary: asked to fill more ghost cells than we have");
3088 if ( nghost.max() > 0 ) {
3089 FillBoundary_nowait<BUF>(0, nComp(), nghost, period, cross);
3090 FillBoundary_finish<BUF>();
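The FillBoundary() family above fills ghost cells from the valid regions of neighboring (and periodically shifted) boxes; the _nowait/_finish pair lets the communication overlap other work. Typical calls (mf and geom assumed):

mf.FillBoundary(geom.periodicity());                           // all components, all ghost cells
mf.FillBoundary(0, 1, mf.nGrowVect(), geom.periodicity());     // component 0 only
mf.FillBoundary_nowait(geom.periodicity());                    // start communication ...
/* ... do unrelated work ... */
mf.FillBoundary_finish();                                      // ... then complete it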
3094 template <class FAB>
3095 template <typename BUF>
3100 if ( n_grow.max() > 0 ) {
3102 FillBoundary_finish<BUF>();
3106 template <class FAB>
3107 template <typename BUF>
3112 if ( n_grow.max() > 0 ) {
3113 FillBoundary_nowait<BUF>(scomp, ncomp, n_grow, period, cross);
3114 FillBoundary_finish<BUF>();
3118 template <class FAB>
3119 template <typename BUF>
3126 "FillBoundary: asked to fill more ghost cells than we have");
3127 if ( nghost.max() > 0 ) {
3128 FillBoundary_nowait<BUF>(scomp, ncomp, nghost, period, cross);
3129 FillBoundary_finish<BUF>();
3133 template <class FAB>
3134 template <typename BUF>
3141 template <class FAB>
3142 template <typename BUF>
3146 FillBoundary_nowait<BUF>(0, nComp(), nGrowVect(), period, cross);
3149 template <class FAB>
3150 template <typename BUF>
3154 FillBoundary_nowait<BUF>(0, nComp(), nghost, period, cross);
3157 template <class FAB>
3158 template <typename BUF>
3165 template <class FAB>
3169 BL_PROFILE("FabArray::FillBoundaryAndSync()");
3170 if (n_grow.max() > 0 || !is_cell_centered()) {
3171 FillBoundaryAndSync_nowait(0, nComp(), n_grow, period);
3172 FillBoundaryAndSync_finish();
3176 template <class FAB>
3181 BL_PROFILE("FabArray::FillBoundaryAndSync()");
3182 if (nghost.max() > 0 || !is_cell_centered()) {
3183 FillBoundaryAndSync_nowait(scomp, ncomp, nghost, period);
3184 FillBoundaryAndSync_finish();
3188 template <class FAB>
3195 template <class FAB>
3201 FBEP_nowait(scomp, ncomp, nghost, period, false, false, true);
3204 template <class FAB>
3212 template <class FAB>
3217 if (!is_cell_centered()) {
3223 template <class FAB>
3228 if (!is_cell_centered()) {
3234 template <class FAB>
3241 template <class FAB>
3246 FBEP_nowait(scomp, ncomp, IntVect(0), period, false, false, true);
3249 template <class FAB>
3257 template <class FAB>
3261 SumBoundary(0, n_comp, IntVect(0), period);
3264 template <class FAB>
3268 SumBoundary(scomp, ncomp, IntVect(0), period);
3271 template <class FAB>
3275 SumBoundary(scomp, ncomp, this->nGrowVect(), nghost, period);
3278 template <class FAB>
3284 SumBoundary_nowait(scomp, ncomp, src_nghost, dst_nghost, period);
3285 SumBoundary_finish();
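SumBoundary() adds the values held by overlapping ghost cells on other boxes back into the owning valid cells, the usual step after depositing contributions into ghost regions. Illustrative calls (mf and geom assumed):

mf.SumBoundary(geom.periodicity());                // all components
mf.SumBoundary_nowait(0, 1, geom.periodicity());   // component 0 only, split into start ...
mf.SumBoundary_finish();                           // ... and finish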
3288 template <class FAB>
3292 SumBoundary_nowait(0, n_comp, IntVect(0), period);
3295 template <class FAB>
3299 SumBoundary_nowait(scomp, ncomp, IntVect(0), period);
3302 template <class FAB>
3306 SumBoundary_nowait(scomp, ncomp, this->nGrowVect(), nghost, period);
3309 template <class FAB>
3313 BL_PROFILE("FabArray<FAB>::SumBoundary_nowait()");
3320 amrex::Copy(*tmp, *this, scomp, 0, ncomp, src_nghost);
3321 this->setVal(typename FAB::value_type(0), scomp, ncomp, dst_nghost);
3325 if (!this->pcd) { delete tmp; }
3328 template <class FAB>
3332 BL_PROFILE("FabArray<FAB>::SumBoundary_finish()");
3342 template <class FAB>
3353 template <class FAB>
3359 FBEP_nowait(scomp, ncomp, nGrowVect(), period, false, true);
3364 template <class FAB>
3371 FBEP_nowait(scomp, ncomp, nghost, period, false, true);
3376 template <class FAB>
3377 template <typename BUF>
3381 FBEP_nowait<BUF>(scomp, ncomp, nGrowVect(), period, cross);
3384 template <class FAB>
3385 template <typename BUF>
3390 FBEP_nowait<BUF>(scomp, ncomp, nghost, period, cross);
3393 template <class FAB>
3394 template <class F, std::enable_if_t<IsBaseFab<F>::value, int>Z>
3402 int ncomp = this->nComp();
3406 for (int i = 0; i < AMREX_SPACEDIM; ++i) {
3408 domain.grow(i, ngrow[i]);
3412 #ifdef AMREX_USE_GPU
3414 auto const& fa = this->arrays();
3418 auto const& fab = fa[box_no];
3423 } else if (domain.contains(i,j,k)) {
3424 fab(i,j,k,n) = notcovered;
3435 #ifdef AMREX_USE_OMP
3436 #pragma omp parallel if (Gpu::notInLaunchRegion())
3440 auto const& fab = this->array(mfi);
3441 Box const& fbx = mfi.growntilebox();
3442 Box const& gbx = fbx & domain;
3443 Box const& vbx = mfi.validbox();
3447 fab(i,j,k,n) = interior;
3449 fab(i,j,k,n) = notcovered;
3451 fab(i,j,k,n) = physbnd;
3458 setVal(covered, TheFB, 0, ncomp);
3461 template <class FAB>
3462 template <class F, std::enable_if_t<IsBaseFab<F>::value, int>>
3466 BL_PROFILE("FabArray::setVal(val, thecmd, scomp, ncomp)");
3468 #ifdef AMREX_USE_GPU
3471 CMD_local_setVal_gpu(val, thecmd, scomp, ncomp);
3472 CMD_remote_setVal_gpu(val, thecmd, scomp, ncomp);
3480 auto N_locs = static_cast<int>(LocTags.size());
3481 #ifdef AMREX_USE_OMP
3482 #pragma omp parallel for if (thecmd.m_threadsafe_loc)
3484 for (int i = 0; i < N_locs; ++i) {
3486 (*this)[tag.dstIndex].template setVal<RunOn::Host>(val, tag.dbox, scomp, ncomp);
3489 for (const auto & RcvTag : RcvTags) {
3490 auto N = static_cast<int>(RcvTag.second.size());
3491 #ifdef AMREX_USE_OMP
3492 #pragma omp parallel for if (thecmd.m_threadsafe_rcv)
3494 for (int i = 0; i < N; ++i) {
3496 (*this)[tag.dstIndex].template setVal<RunOn::Host>(val, tag.dbox, scomp, ncomp);
3502 template <class FAB>
3503 template <class F, std::enable_if_t<IsBaseFab<F>::value, int>>
3510 #ifdef AMREX_USE_OMP
3511 #pragma omp parallel if (thecmd.m_threadsafe_rcv)
3520 auto N_locs = static_cast<int>(LocTags.size());
3521 for (int i = 0; i < N_locs; ++i) {
3526 for (const auto & RcvTag : RcvTags) {
3527 auto N = static_cast<int>(RcvTag.second.size());
3528 for (int i = 0; i < N; ++i) {
3536 template <class FAB>
3537 template <typename F, std::enable_if_t<IsBaseFab<F>::value, int> FOO>
3538 typename F::value_type
3540 [[maybe_unused]] bool ignore_covered) const
3544 using RT = typename F::value_type;
3549 if ( this->is_cell_centered() && this->hasEBFabFactory() && ignore_covered )
3553 #ifdef AMREX_USE_GPU
3555 auto const& flagsma = flags.const_arrays();
3556 auto const& ma = this->const_arrays();
3560 if (flagsma[box_no](i,j,k).isCovered()) {
3564 auto const& a = ma[box_no];
3565 for (int n = 0; n < ncomp; ++n) {
3574 #ifdef AMREX_USE_OMP
3575 #pragma omp parallel reduction(max:nm0)
3578 Box const& bx = mfi.growntilebox(nghost);
3580 auto const& flag = flags.const_array(mfi);
3581 auto const& a = this->const_array(mfi);
3584 if (!flag(i,j,k).isCovered()) {
3595 #ifdef AMREX_USE_GPU
3597 auto const& ma = this->const_arrays();
3601 return std::abs(ma[box_no](i,j,k,comp+n));
3606 #ifdef AMREX_USE_OMP
3607 #pragma omp parallel reduction(max:nm0)
3610 Box const& bx = mfi.growntilebox(nghost);
3611 auto const& a = this->const_array(mfi);
3627 template <class FAB>
3628 template <typename IFAB, typename F, std::enable_if_t<IsBaseFab<F>::value, int> FOO>
3629 typename F::value_type
3631 IntVect const& nghost, bool local) const
3635 using RT = typename F::value_type;
3639 #ifdef AMREX_USE_GPU
3641 auto const& ma = this->const_arrays();
3642 auto const& maskma = mask.const_arrays();
3646 if (maskma[box_no](i,j,k)) {
3648 auto const& a = ma[box_no];
3649 for (int n = 0; n < ncomp; ++n) {
3660 #ifdef AMREX_USE_OMP
3661 #pragma omp parallel reduction(max:nm0)
3664 Box const& bx = mfi.growntilebox(nghost);
3665 auto const& a = this->const_array(mfi);
3666 auto const& mskfab = mask.const_array(mfi);
3669 if (mskfab(i,j,k)) {
#define BL_PROFILE(a)
Definition: AMReX_BLProfiler.H:551
#define AMREX_ALWAYS_ASSERT_WITH_MESSAGE(EX, MSG)
Definition: AMReX_BLassert.H:49
#define AMREX_ASSERT(EX)
Definition: AMReX_BLassert.H:38
#define AMREX_ALWAYS_ASSERT(EX)
Definition: AMReX_BLassert.H:50
#define AMREX_NODISCARD
Definition: AMReX_Extension.H:251
#define AMREX_FORCE_INLINE
Definition: AMReX_Extension.H:119
#define AMREX_RESTRICT
Definition: AMReX_Extension.H:37
#define AMREX_HOST_DEVICE_FOR_4D(...)
Definition: AMReX_GpuLaunch.nolint.H:51
#define AMREX_HOST_DEVICE_PARALLEL_FOR_4D(...)
Definition: AMReX_GpuLaunch.nolint.H:55
#define AMREX_IF_ON_DEVICE(CODE)
Definition: AMReX_GpuQualifiers.H:56
#define AMREX_IF_ON_HOST(CODE)
Definition: AMReX_GpuQualifiers.H:58
#define AMREX_GPU_DEVICE
Definition: AMReX_GpuQualifiers.H:18
#define AMREX_GPU_HOST_DEVICE
Definition: AMReX_GpuQualifiers.H:20
#define AMREX_LOOP_3D(bx, i, j, k, block)
Definition: AMReX_Loop.nolint.H:4
#define AMREX_LOOP_4D(bx, ncomp, i, j, k, n, block)
Definition: AMReX_Loop.nolint.H:16
int MPI_Comm
Definition: AMReX_ccse-mpi.H:47
A virtual base class for objects that manage their own dynamic memory allocation.
Definition: AMReX_Arena.H:100
virtual void free(void *pt)=0
A pure virtual function for deleting the arena pointed to by pt.
virtual void * alloc(std::size_t sz)=0
A FortranArrayBox(FAB)-like object.
Definition: AMReX_BaseFab.H:183
A collection of Boxes stored in an Array.
Definition: AMReX_BoxArray.H:550
AMREX_GPU_HOST_DEVICE const IntVectND< dim > & smallEnd() const &noexcept
Get the smallend of the BoxND.
Definition: AMReX_Box.H:105
AMREX_GPU_HOST_DEVICE BoxND & grow(int i) noexcept
Definition: AMReX_Box.H:627
AMREX_GPU_HOST_DEVICE IntVectND< dim > length() const noexcept
Return the length of the BoxND.
Definition: AMReX_Box.H:146
AMREX_GPU_HOST_DEVICE bool ok() const noexcept
Checks if it is a proper BoxND (including a valid type).
Definition: AMReX_Box.H:200
AMREX_GPU_HOST_DEVICE bool contains(const IntVectND< dim > &p) const noexcept
Returns true if argument is contained within BoxND.
Definition: AMReX_Box.H:204
Definition: AMReX_FabFactory.H:76
Calculates the distribution of FABs to MPI processes.
Definition: AMReX_DistributionMapping.H:41
Definition: AMReX_EBFabFactory.H:24
const FabArray< EBCellFlagFab > & getMultiEBCellFlagFab() const noexcept
Definition: AMReX_EBFabFactory.H:50
Base class for FabArray.
Definition: AMReX_FabArrayBase.H:41
IntVect nGrowVect() const noexcept
Definition: AMReX_FabArrayBase.H:79
Vector< int > indexArray
Definition: AMReX_FabArrayBase.H:445
FabArrayBase & operator=(const FabArrayBase &rhs)=default
static bool getAllocSingleChunk()
Definition: AMReX_FabArrayBase.H:727
const BoxArray & boxArray() const noexcept
Return a constant reference to the BoxArray that defines the valid region associated with this FabArr...
Definition: AMReX_FabArrayBase.H:94
bool isFusingCandidate() const noexcept
Is this a good candidate for kernel fusing?
void define(const BoxArray &bxs, const DistributionMapping &dm, int nvar, int ngrow)
CopyComTag::CopyComTagsContainer CopyComTagsContainer
Definition: AMReX_FabArrayBase.H:219
CopyComTag::MapOfCopyComTagContainers MapOfCopyComTagContainers
Definition: AMReX_FabArrayBase.H:220
int local_size() const noexcept
Return the number of local FABs in the FabArray.
Definition: AMReX_FabArrayBase.H:112
const DistributionMapping & DistributionMap() const noexcept
Return constant reference to associated DistributionMapping.
Definition: AMReX_FabArrayBase.H:130
CpOp
parallel copy or add
Definition: AMReX_FabArrayBase.H:393
@ ADD
Definition: AMReX_FabArrayBase.H:393
@ COPY
Definition: AMReX_FabArrayBase.H:393
DistributionMapping distributionMap
Definition: AMReX_FabArrayBase.H:444
int nComp() const noexcept
Return number of variables (aka components) associated with each point.
Definition: AMReX_FabArrayBase.H:82
static AMREX_EXPORT FabArrayStats m_FA_stats
Definition: AMReX_FabArrayBase.H:723
An Array of FortranArrayBox(FAB)-like Objects.
Definition: AMReX_FabArray.H:344
void ParallelCopyToGhost_finish()
void setFab(int boxno, std::unique_ptr< FAB > elem)
Explicitly set the Kth FAB in the FabArray to point to elem.
Definition: AMReX_FabArray.H:2211
void copy(const FabArray< FAB > &src, int src_comp, int dest_comp, int num_comp, int src_nghost, int dst_nghost, const Periodicity &period=Periodicity::NonPeriodic(), CpOp op=FabArrayBase::COPY)
Definition: AMReX_FabArray.H:942
F::value_type sum(int comp, IntVect const &nghost, bool local=false) const
Returns the sum of component "comp".
Definition: AMReX_FabArray.H:2446
void EnforcePeriodicity(int scomp, int ncomp, const IntVect &nghost, const Periodicity &period)
Definition: AMReX_FabArray.H:3366
const FAB & get(const MFIter &mfi) const noexcept
Return a constant reference to the FAB associated with mfi.
Definition: AMReX_FabArray.H:509
void abs(int comp, int ncomp, int nghost=0)
Definition: AMReX_FabArray.H:2628
void copy(const FabArray< FAB > &src, int src_comp, int dest_comp, int num_comp, const IntVect &src_nghost, const IntVect &dst_nghost, const Periodicity &period=Periodicity::NonPeriodic(), CpOp op=FabArrayBase::COPY)
Definition: AMReX_FabArray.H:953
Array4< typename FabArray< FAB >::value_type const > const_array(const MFIter &mfi) const noexcept
Definition: AMReX_FabArray.H:1593
void * m_dp_arrays
Definition: AMReX_FabArray.H:1282
void FBEP_nowait(int scomp, int ncomp, const IntVect &nghost, const Periodicity &period, bool cross, bool enforce_periodicity_only=false, bool override_sync=false)
static void pack_send_buffer_gpu(FabArray< FAB > const &src, int scomp, int ncomp, Vector< char * > const &send_data, Vector< std::size_t > const &send_size, Vector< const CopyComTagsContainer * > const &send_cctc)
void ParallelCopy(const FabArray< FAB > &src, const Periodicity &period=Periodicity::NonPeriodic(), CpOp op=FabArrayBase::COPY)
Definition: AMReX_FabArray.H:778
typename std::conditional_t< IsBaseFab< FAB >::value, FAB, FABType >::value_type value_type
Definition: AMReX_FabArray.H:355
std::unique_ptr< FabArray< FAB > > os_temp
Definition: AMReX_FabArray.H:1412
void FillBoundary(const IntVect &nghost, const Periodicity &period, bool cross=false)
Definition: AMReX_FabArray.H:3083
void prefetchToDevice(const MFIter &mfi) const noexcept
Definition: AMReX_FabArray.H:1549
void FillBoundary_nowait(const Periodicity &period, bool cross=false)
Definition: AMReX_FabArray.H:3144
void shift(const IntVect &v)
Shift the boxarray by vector v.
Definition: AMReX_FabArray.H:2901
bool ok() const
Return true if the FabArray is well-defined. That is, the FabArray has a BoxArray and DistributionMap...
Definition: AMReX_FabArray.H:1992
Array4< typename FabArray< FAB >::value_type const > array(const MFIter &mfi, int start_comp) const noexcept
Definition: AMReX_FabArray.H:1609
void CMD_local_setVal_gpu(value_type x, const CommMetaData &thecmd, int scomp, int ncomp)
Definition: AMReX_FBI.H:332
void CMD_remote_setVal_gpu(value_type x, const CommMetaData &thecmd, int scomp, int ncomp)
Definition: AMReX_FBI.H:362
void ParallelAdd_nowait(const FabArray< FAB > &src, const Periodicity &period=Periodicity::NonPeriodic())
Definition: AMReX_FabArray.H:789
value_type * singleChunkPtr() noexcept
Definition: AMReX_FabArray.H:460
FabArray(FabArray< FAB > &&rhs) noexcept
Definition: AMReX_FabArray.H:1927
Array4< typename FabArray< FAB >::value_type const > array(int K, int start_comp) const noexcept
Definition: AMReX_FabArray.H:1625
FabArray(const BoxArray &bxs, const DistributionMapping &dm, int nvar, const IntVect &ngrow, const MFInfo &info=MFInfo(), const FabFactory< FAB > &factory=DefaultFabFactory< FAB >())
Definition: AMReX_FabArray.H:1891
bool defined(const MFIter &mfi) const noexcept
Definition: AMReX_FabArray.H:1485
void setVal(value_type val, const CommMetaData &thecmd, int scomp, int ncomp)
Definition: AMReX_FabArray.H:3464
void OverrideSync_nowait(int scomp, int ncomp, const Periodicity &period)
Definition: AMReX_FabArray.H:3243
FabArray(const FabArray< FAB > &rhs)=delete
void ParallelCopyToGhost(const FabArray< FAB > &src, int scomp, int dcomp, int ncomp, const IntVect &snghost, const IntVect &dnghost, const Periodicity &period=Periodicity::NonPeriodic())
void FillBoundary_test()
Definition: AMReX_FabArrayCommI.H:886
void ParallelCopy_finish()
const FabFactory< FAB > & Factory() const noexcept
Definition: AMReX_FabArray.H:442
void OverrideSync(const Periodicity &period=Periodicity::NonPeriodic())
Synchronize nodal data. The synchronization will override valid regions by the intersecting valid reg...
Definition: AMReX_FabArray.H:3214
void FillBoundary(bool cross=false)
Copy on intersection within a FabArray. Data is copied from valid regions to intersecting regions of ...
Definition: AMReX_FabArray.H:3059
FAB const * fabPtr(int K) const noexcept
Definition: AMReX_FabArray.H:1527
void clear_arrays()
Definition: AMReX_FabArray.H:1718
void ParallelCopy(const FabArray< FAB > &src, int src_comp, int dest_comp, int num_comp, int src_nghost, int dst_nghost, const Periodicity &period=Periodicity::NonPeriodic(), CpOp op=FabArrayBase::COPY)
Definition: AMReX_FabArray.H:860
void SumBoundary(const Periodicity &period=Periodicity::NonPeriodic())
Sum values in overlapped cells. The destination is limited to valid cells.
Definition: AMReX_FabArray.H:3259
Long m_single_chunk_size
Definition: AMReX_FabArray.H:1272
FAB & get(int K) noexcept
Return a reference to the FAB associated with the Kth element.
Definition: AMReX_FabArray.H:527
static void LinComb(FabArray< FAB > &dst, value_type a, const FabArray< FAB > &x, int xcomp, value_type b, const FabArray< FAB > &y, int ycomp, int dstcomp, int numcomp, const IntVect &nghost)
dst = a*x + b*y
Definition: AMReX_FabArray.H:3009
const FAB & get(int K) const noexcept
Return a constant reference to the FAB associated with the Kth element.
Definition: AMReX_FabArray.H:521
void OverrideSync_nowait(const Periodicity &period=Periodicity::NonPeriodic())
Definition: AMReX_FabArray.H:3236
AMREX_NODISCARD FAB * release(int K)
Release ownership of the FAB. This function is not thread safe.
Definition: AMReX_FabArray.H:1735
void setDomainBndry(value_type val, const Geometry &geom)
Set all values outside the Geometry domain to val.
Definition: AMReX_FabArray.H:2407
std::unique_ptr< PCData< FAB > > pcd
Definition: AMReX_FabArray.H:1409
void define(const BoxArray &bxs, const DistributionMapping &dm, int nvar, int ngrow, const MFInfo &info=MFInfo(), const FabFactory< FAB > &factory=DefaultFabFactory< FAB >())
Define this FabArray identically to that performed by the constructor having an analogous function si...
Definition: AMReX_FabArray.H:2027
void ParallelAdd_nowait(const FabArray< FAB > &src, int src_comp, int dest_comp, int num_comp, const IntVect &src_nghost, const IntVect &dst_nghost, const Periodicity &period=Periodicity::NonPeriodic())
Definition: AMReX_FabArray.H:889
std::unique_ptr< FBData< FAB > > fbd
Definition: AMReX_FabArray.H:1408
std::unique_ptr< detail::SingleChunkArena > m_single_chunk_arena
Definition: AMReX_FabArray.H:1271
FabArray(const FabArray< FAB > &rhs, MakeType maketype, int scomp, int ncomp)
Definition: AMReX_FabArray.H:1905
void ParallelAdd(const FabArray< FAB > &src, const Periodicity &period=Periodicity::NonPeriodic())
This function copies data from src to this FabArray. Each FAB in fa is intersected with all FABs in t...
Definition: AMReX_FabArray.H:775
FAB fab_type
Definition: AMReX_FabArray.H:357
void BuildMask(const Box &phys_domain, const Periodicity &period, value_type covered, value_type notcovered, value_type physbnd, value_type interior)
Definition: AMReX_FabArray.H:3396
void OverrideSync(int scomp, int ncomp, const Periodicity &period)
Synchronize nodal data. The synchronization will override valid regions by the intersecting valid reg...
Definition: AMReX_FabArray.H:3225
void LocalCopy(FabArray< SFAB > const &src, int scomp, int dcomp, int ncomp, IntVect const &nghost)
Perform local copy of FabArray data.
Definition: AMReX_FabArray.H:1818
bool SharedMemory() const noexcept
Definition: AMReX_FabArray.H:1340
value_type const * singleChunkPtr() const noexcept
Definition: AMReX_FabArray.H:466
LayoutData< int > RecvLayoutMask(const CommMetaData &thecmd)
Definition: AMReX_FabArray.H:3505
void FillBoundary(int scomp, int ncomp, const IntVect &nghost, const Periodicity &period, bool cross=false)
Definition: AMReX_FabArray.H:3121
void FillBoundary_nowait(const IntVect &nghost, const Periodicity &period, bool cross=false)
Definition: AMReX_FabArray.H:3152
Vector< std::string > m_tags
Definition: AMReX_FabArray.H:1288
void ParallelCopyToGhost_nowait(const FabArray< FAB > &src, int scomp, int dcomp, int ncomp, const IntVect &snghost, const IntVect &dnghost, const Periodicity &period=Periodicity::NonPeriodic())
void FB_local_copy_cpu(const FB &TheFB, int scomp, int ncomp)
Definition: AMReX_FBI.H:211
void invert(value_type numerator, int comp, int num_comp, int nghost=0)
Definition: AMReX_FabArray.H:2826
void FB_local_copy_gpu(const FB &TheFB, int scomp, int ncomp)
Definition: AMReX_FBI.H:272
static void unpack_recv_buffer_cpu(FabArray< FAB > &dst, int dcomp, int ncomp, Vector< char * > const &recv_data, Vector< std::size_t > const &recv_size, Vector< const CopyComTagsContainer * > const &recv_cctc, CpOp op, bool is_thread_safe)
F::value_type norminf(int comp, int ncomp, IntVect const &nghost, bool local=false, [[maybe_unused]] bool ignore_covered=false) const
Return infinity norm.
Definition: AMReX_FabArray.H:3539
void ParallelAdd_nowait(const FabArray< FAB > &src, int src_comp, int dest_comp, int num_comp, const Periodicity &period=Periodicity::NonPeriodic())
Definition: AMReX_FabArray.H:828
void PC_local_gpu(const CPC &thecpc, FabArray< FAB > const &src, int scomp, int dcomp, int ncomp, CpOp op)
Definition: AMReX_PCI.H:88
Array4< typename FabArray< FAB >::value_type const > array(const MFIter &mfi) const noexcept
Definition: AMReX_FabArray.H:1561
void setFab_assert(int K, FAB const &fab) const
Definition: AMReX_FabArray.H:2199
Array4< typename FabArray< FAB >::value_type const > const_array(int K) const noexcept
Definition: AMReX_FabArray.H:1601
void plus(value_type val, int comp, int num_comp, int nghost=0)
Definition: AMReX_FabArray.H:2674
DataAllocator m_dallocator
Definition: AMReX_FabArray.H:1270
void FillBoundaryAndSync(int scomp, int ncomp, const IntVect &nghost, const Periodicity &period)
Fill ghost cells and synchronize nodal data. Ghost regions are filled with data from the intersecting...
Definition: AMReX_FabArray.H:3178
void copy(const FabArray< FAB > &src, const Periodicity &period=Periodicity::NonPeriodic(), CpOp op=FabArrayBase::COPY)
Definition: AMReX_FabArray.H:784
void FillBoundaryAndSync_finish()
Definition: AMReX_FabArray.H:3206
void ParallelCopy_nowait(const FabArray< FAB > &src, int src_comp, int dest_comp, int num_comp, int src_nghost, int dst_nghost, const Periodicity &period=Periodicity::NonPeriodic(), CpOp op=FabArrayBase::COPY)
Definition: AMReX_FabArray.H:899
void SumBoundary_nowait(int scomp, int ncomp, IntVect const &src_nghost, IntVect const &dst_nghost, const Periodicity &period=Periodicity::NonPeriodic())
Definition: AMReX_FabArray.H:3311
void SumBoundary_nowait(int scomp, int ncomp, IntVect const &nghost, const Periodicity &period=Periodicity::NonPeriodic())
Definition: AMReX_FabArray.H:3304
void FillBoundary_nowait(bool cross=false)
Definition: AMReX_FabArray.H:3136
void FillBoundary(const Periodicity &period, bool cross=false)
Definition: AMReX_FabArray.H:3071
static void Xpay(FabArray< FAB > &y, value_type a, FabArray< FAB > const &x, int xcomp, int ycomp, int ncomp, IntVect const &nghost)
y = x + a*y
Definition: AMReX_FabArray.H:2964
void clear()
Releases FAB memory in the FabArray.
Definition: AMReX_FabArray.H:1774
void FillBoundary_nowait(int scomp, int ncomp, const IntVect &nghost, const Periodicity &period, bool cross=false)
Definition: AMReX_FabArray.H:3387
void FillBoundary(int scomp, int ncomp, bool cross=false)
Same as FillBoundary(), but only copies ncomp components starting at scomp.
Definition: AMReX_FabArray.H:3097
void FillBoundary_nowait(int scomp, int ncomp, bool cross=false)
Definition: AMReX_FabArray.H:3160
Array4< typename FabArray< FAB >::value_type > array(const MFIter &mfi) noexcept
Definition: AMReX_FabArray.H:1569
std::vector< FAB * > m_fabs_v
The data.
Definition: AMReX_FabArray.H:1279
void setBndry(value_type val)
Set all values in the boundary region to val.
Definition: AMReX_FabArray.H:2298
void SumBoundary(int scomp, int ncomp, IntVect const &nghost, const Periodicity &period=Periodicity::NonPeriodic())
Sum values in overlapped cells. The destination is limited to valid + ngrow cells.
Definition: AMReX_FabArray.H:3273
void FillBoundaryAndSync(const Periodicity &period=Periodicity::NonPeriodic())
Fill ghost cells and synchronize nodal data. Ghost regions are filled with data from the intersecting...
Definition: AMReX_FabArray.H:3167
void ParallelAdd(const FabArray< FAB > &src, int src_comp, int dest_comp, int num_comp, const IntVect &src_nghost, const IntVect &dst_nghost, const Periodicity &period=Periodicity::NonPeriodic())
Definition: AMReX_FabArray.H:852
bool define_function_called
has define() been called?
Definition: AMReX_FabArray.H:1275
void ParallelAdd(const FabArray< FAB > &src, int src_comp, int dest_comp, int num_comp, const Periodicity &period=Periodicity::NonPeriodic())
This function copies data from src to this FabArray. Each FAB in src is intersected with all FABs in ...
Definition: AMReX_FabArray.H:805
FabArray() noexcept
Constructs an empty FabArray<FAB>.
Definition: AMReX_FabArray.H:1866
static void unpack_recv_buffer_gpu(FabArray< FAB > &dst, int dcomp, int ncomp, Vector< char * > const &recv_data, Vector< std::size_t > const &recv_size, Vector< const CopyComTagsContainer * > const &recv_cctc, CpOp op, bool is_thread_safe)
bool defined(int K) const noexcept
Definition: AMReX_FabArray.H:1472
FAB * fabPtr(int K) noexcept
Definition: AMReX_FabArray.H:1518
void SumBoundary(int scomp, int ncomp, const Periodicity &period=Periodicity::NonPeriodic())
Definition: AMReX_FabArray.H:3266
void ParallelAdd(const FabArray< FAB > &src, int src_comp, int dest_comp, int num_comp, int src_nghost, int dst_nghost, const Periodicity &period=Periodicity::NonPeriodic())
Similar to the above function, except that source and destination are grown by src_nghost and dst_ngh...
Definition: AMReX_FabArray.H:843
std::unique_ptr< FabFactory< FAB > > m_factory
Definition: AMReX_FabArray.H:1269
void Redistribute(const FabArray< FAB > &src, int scomp, int dcomp, int ncomp, const IntVect &nghost)
Copy from src to this. this and src have the same BoxArray, but different DistributionMapping.
Definition: AMReX_FabArrayCommI.H:859
void setVal(value_type val)
Set all components in the entire region of each FAB to val.
Definition: AMReX_FabArray.H:2497
typename std::vector< FAB * >::iterator Iterator
Definition: AMReX_FabArray.H:1343
void copy(const FabArray< FAB > &src, int src_comp, int dest_comp, int num_comp, const Periodicity &period=Periodicity::NonPeriodic(), CpOp op=FabArrayBase::COPY)
Definition: AMReX_FabArray.H:820
MultiArray4< typename FabArray< FAB >::value_type > arrays() noexcept
Definition: AMReX_FabArray.H:1657
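A sketch of the arrays()/MultiArray4 interface together with the multi-box ParallelFor overload (the kernel body and the function name scale are assumptions): box_no indexes the local FABs, so one launch covers the whole FabArray.

    #include <AMReX_MultiFab.H>
    #include <AMReX_MFParallelFor.H>

    void scale (amrex::MultiFab& mf, amrex::Real factor)
    {
        auto const& ma = mf.arrays();             // MultiArray4 over local FABs
        amrex::ParallelFor(mf, amrex::IntVect(0), // 0 ghost cells
        [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k)
        {
            ma[box_no](i,j,k) *= factor;          // component 0
        });
        amrex::Gpu::streamSynchronize();          // wait before host code reads mf
    }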
void copyTo(FAB &dest, int nghost=0) const
Copy the values contained in the intersection of the valid + nghost region of this FabArray with the ...
Definition: AMReX_FabArray.H:2489
void ParallelAdd_nowait(const FabArray< FAB > &src, int src_comp, int dest_comp, int num_comp, int src_nghost, int dst_nghost, const Periodicity &period=Periodicity::NonPeriodic())
Definition: AMReX_FabArray.H:879
void SumBoundary(int scomp, int ncomp, IntVect const &src_nghost, IntVect const &dst_nghost, const Periodicity &period=Periodicity::NonPeriodic())
Sum values in overlapped cells. For computing the overlap, the dst is grown by dst_ngrow,...
Definition: AMReX_FabArray.H:3280
void FillBoundary_nowait(int scomp, int ncomp, const Periodicity &period, bool cross=false)
Definition: AMReX_FabArray.H:3379
void SumBoundary_finish()
Definition: AMReX_FabArray.H:3330
void SumBoundary_nowait(int scomp, int ncomp, const Periodicity &period=Periodicity::NonPeriodic())
Definition: AMReX_FabArray.H:3297
std::size_t singleChunkSize() const noexcept
Definition: AMReX_FabArray.H:472
void mult(value_type val, int comp, int num_comp, int nghost=0)
Definition: AMReX_FabArray.H:2750
void ParallelCopy_nowait(const FabArray< FAB > &src, int scomp, int dcomp, int ncomp, const IntVect &snghost, const IntVect &dnghost, const Periodicity &period=Periodicity::NonPeriodic(), CpOp op=FabArrayBase::COPY, const FabArrayBase::CPC *a_cpc=nullptr, bool to_ghost_cells_only=false)
static void Saxpy(FabArray< FAB > &y, value_type a, FabArray< FAB > const &x, int xcomp, int ycomp, int ncomp, IntVect const &nghost)
y += a*x
Definition: AMReX_FabArray.H:2918
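A sketch of the Saxpy operation y += a*x, shown here through the equivalent MultiFab form with an integer ghost count (function and variable names are illustrative).

    #include <AMReX_MultiFab.H>

    void axpy (amrex::MultiFab& y, amrex::MultiFab const& x, amrex::Real a)
    {
        // y[comp 0] += a * x[comp 0] over the valid region (0 ghost cells).
        amrex::MultiFab::Saxpy(y, a, x, /*xcomp*/0, /*ycomp*/0, /*ncomp*/1, /*nghost*/0);
    }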
MultiArray4< value_type > m_arrays
Definition: AMReX_FabArray.H:1285
void AllocFabs(const FabFactory< FAB > &factory, Arena *ar, const Vector< std::string > &tags, bool alloc_single_chunk)
Definition: AMReX_FabArray.H:2072
void FillBoundaryAndSync_nowait(const Periodicity &period=Periodicity::NonPeriodic())
Definition: AMReX_FabArray.H:3190
Array4< typename FabArray< FAB >::value_type const > const_array(int K, int start_comp) const noexcept
Definition: AMReX_FabArray.H:1649
void FillBoundaryAndSync_nowait(int scomp, int ncomp, const IntVect &nghost, const Periodicity &period)
Definition: AMReX_FabArray.H:3197
Array4< typename FabArray< FAB >::value_type > array(int K) noexcept
Definition: AMReX_FabArray.H:1585
void * m_hp_arrays
Definition: AMReX_FabArray.H:1284
ShMem shmem
Definition: AMReX_FabArray.H:1338
void LocalAdd(FabArray< FAB > const &src, int scomp, int dcomp, int ncomp, IntVect const &nghost)
Perform local addition of FabArray data.
Definition: AMReX_FabArray.H:1827
FabArray< FAB > & operator=(FabArray< FAB > &&rhs) noexcept
Definition: AMReX_FabArray.H:1953
MultiArray4< value_type const > m_const_arrays
Definition: AMReX_FabArray.H:1286
void ParallelCopy(const FabArray< FAB > &src, int src_comp, int dest_comp, int num_comp, const Periodicity &period=Periodicity::NonPeriodic(), CpOp op=FabArrayBase::COPY)
Definition: AMReX_FabArray.H:811
bool hasEBFabFactory() const noexcept
Definition: AMReX_FabArray.H:449
FAB & atLocalIdx(int L) noexcept
Return a reference to the FAB associated with local index L.
Definition: AMReX_FabArray.H:530
Array4< typename FabArray< FAB >::value_type const > array(int K) const noexcept
Definition: AMReX_FabArray.H:1577
FAB & get(const MFIter &mfi) noexcept
Returns a reference to the FAB associated with mfi.
Definition: AMReX_FabArray.H:515
const Vector< std::string > & tags() const noexcept
Definition: AMReX_FabArray.H:447
bool isAllRegular() const noexcept
Definition: AMReX_FabArray.H:474
Array4< typename FabArray< FAB >::value_type const > const_array(const MFIter &mfi, int start_comp) const noexcept
Definition: AMReX_FabArray.H:1641
Arena * arena() const noexcept
Definition: AMReX_FabArray.H:445
static void pack_send_buffer_cpu(FabArray< FAB > const &src, int scomp, int ncomp, Vector< char * > const &send_data, Vector< std::size_t > const &send_size, Vector< const CopyComTagsContainer * > const &send_cctc)
void build_arrays() const
Definition: AMReX_FabArray.H:1684
void SumBoundary_nowait(const Periodicity &period=Periodicity::NonPeriodic())
Definition: AMReX_FabArray.H:3290
const FAB & atLocalIdx(int L) const noexcept
Definition: AMReX_FabArray.H:531
void ParallelCopy(const FabArray< FAB > &src, int scomp, int dcomp, int ncomp, const IntVect &snghost, const IntVect &dnghost, const Periodicity &period=Periodicity::NonPeriodic(), CpOp op=FabArrayBase::COPY, const FabArrayBase::CPC *a_cpc=nullptr)
FAB * fabPtr(const MFIter &mfi) noexcept
Return pointer to FAB.
Definition: AMReX_FabArray.H:1498
Array4< typename FabArray< FAB >::value_type > array(const MFIter &mfi, int start_comp) noexcept
Definition: AMReX_FabArray.H:1617
void copyTo(FAB &dest, int scomp, int dcomp, int ncomp, int nghost=0) const
Copy the values contained in the intersection of the num_comp component valid + nghost region of this...
void EnforcePeriodicity(const Periodicity &period)
Fill ghost cells with values from their corresponding cells across periodic boundaries,...
Definition: AMReX_FabArray.H:3344
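A sketch of EnforcePeriodicity (names are illustrative): only the periodic part of FillBoundary is performed, without any other ghost-cell exchange.

    #include <AMReX_MultiFab.H>
    #include <AMReX_Geometry.H>

    void make_periodic_consistent (amrex::MultiFab& mf, amrex::Geometry const& geom)
    {
        // Overwrite ghost cells lying across a periodic boundary with the data
        // from the corresponding valid cells on the other side of the domain.
        mf.EnforcePeriodicity(geom.periodicity());
    }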
void OverrideSync_finish()
Definition: AMReX_FabArray.H:3251
void FillBoundary(int scomp, int ncomp, const Periodicity &period, bool cross=false)
Definition: AMReX_FabArray.H:3109
MultiArray4< typename FabArray< FAB >::value_type const > const_arrays() const noexcept
Definition: AMReX_FabArray.H:1675
void prefetchToHost(const MFIter &mfi) const noexcept
Definition: AMReX_FabArray.H:1537
void define(const BoxArray &bxs, const DistributionMapping &dm, int nvar, const IntVect &ngrow, const MFInfo &info=MFInfo(), const FabFactory< FAB > &factory=DefaultFabFactory< FAB >())
Definition: AMReX_FabArray.H:2039
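A sketch of the two-phase construction pattern that the empty constructor and define() support (the domain size, grid chopping, component count, and ghost width are illustrative assumptions).

    #include <AMReX_MultiFab.H>

    void build_later ()
    {
        amrex::MultiFab mf;                                   // empty, no data yet
        amrex::Box domain(amrex::IntVect(0), amrex::IntVect(63));
        amrex::BoxArray ba(domain);
        ba.maxSize(32);                                       // chop into smaller grids
        amrex::DistributionMapping dm(ba);
        mf.define(ba, dm, /*ncomp*/2, /*ngrow*/1);            // allocate the FABs now
        mf.setVal(0.0);
    }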
const FAB & operator[](const MFIter &mfi) const noexcept
Return a constant reference to the FAB associated with mfi.
Definition: AMReX_FabArray.H:506
F::value_type norminf(FabArray< IFAB > const &mask, int comp, int ncomp, IntVect const &nghost, bool local=false) const
Return infinity norm in masked region.
Definition: AMReX_FabArray.H:3630
void ParallelCopy_nowait(const FabArray< FAB > &src, const Periodicity &period=Periodicity::NonPeriodic(), CpOp op=FabArrayBase::COPY)
Definition: AMReX_FabArray.H:792
void EnforcePeriodicity(int scomp, int ncomp, const Periodicity &period)
Definition: AMReX_FabArray.H:3355
void PC_local_cpu(const CPC &thecpc, FabArray< FAB > const &src, int scomp, int dcomp, int ncomp, CpOp op)
Definition: AMReX_PCI.H:6
void FillBoundary_finish()
FAB const * fabPtr(const MFIter &mfi) const noexcept
Definition: AMReX_FabArray.H:1508
void ParallelCopy_nowait(const FabArray< FAB > &src, int src_comp, int dest_comp, int num_comp, const Periodicity &period=Periodicity::NonPeriodic(), CpOp op=FabArrayBase::COPY)
Definition: AMReX_FabArray.H:834
Array4< typename FabArray< FAB >::value_type > array(int K, int start_comp) noexcept
Definition: AMReX_FabArray.H:1633
bool isDefined() const
Definition: AMReX_FabArray.H:2020
~FabArray()
The destructor – deletes all FABs in the array.
Definition: AMReX_FabArray.H:1984
Definition: AMReX_FabFactory.H:50
virtual AMREX_NODISCARD FAB * create(const Box &box, int ncomps, const FabInfo &info, int box_index) const =0
virtual AMREX_NODISCARD FabFactory< FAB > * clone() const =0
virtual AMREX_NODISCARD Long nBytes(const Box &box, int ncomps, int) const
Definition: AMReX_FabFactory.H:64
Rectangular problem domain geometry.
Definition: AMReX_Geometry.H:73
const Box & Domain() const noexcept
Returns our rectangular domain.
Definition: AMReX_Geometry.H:210
bool isPeriodic(int dir) const noexcept
Is the domain periodic in the specified direction?
Definition: AMReX_Geometry.H:331
Definition: AMReX_Tuple.H:93
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE bool allGE(const IntVectND< dim > &rhs) const noexcept
Returns true if this is greater than or equal to argument for all components. NOTE: This is NOT a str...
Definition: AMReX_IntVect.H:443
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE bool allLE(const IntVectND< dim > &rhs) const noexcept
Returns true if this is less than or equal to argument for all components. NOTE: This is NOT a strict...
Definition: AMReX_IntVect.H:393
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE int max() const noexcept
Maximum component value (absolute values are not taken).
Definition: AMReX_IntVect.H:214
AMREX_GPU_HOST_DEVICE static constexpr AMREX_FORCE_INLINE IntVectND< dim > TheZeroVector() noexcept
This static member function returns a reference to a constant IntVectND object, all of whose dim argu...
Definition: AMReX_IntVect.H:672
Definition: AMReX_MFIter.H:57
bool isValid() const noexcept
Is the iterator valid, i.e., is it associated with a FAB?
Definition: AMReX_MFIter.H:141
int index() const noexcept
The index into the underlying BoxArray of the current FAB.
Definition: AMReX_MFIter.H:144
int LocalIndex() const noexcept
Return the local index into the vector of fab pointers, m_fabs_v. When AllBoxes is on, local_index_map is ...
Definition: AMReX_MFIter.H:159
This provides the length of the period for periodic domains; 0 means it is not periodic in that direction....
Definition: AMReX_Periodicity.H:17
static const Periodicity & NonPeriodic() noexcept
Definition: AMReX_Periodicity.cpp:52
bool isAnyPeriodic() const noexcept
Definition: AMReX_Periodicity.H:22
bool isPeriodic(int dir) const noexcept
Definition: AMReX_Periodicity.H:26
Long size() const noexcept
Definition: AMReX_Vector.H:50
@ FAB
Definition: AMReX_AmrvisConstants.H:86
AMREX_GPU_HOST_DEVICE Long size(T const &b) noexcept
integer version
Definition: AMReX_GpuRange.H:26
void streamSynchronize() noexcept
Definition: AMReX_GpuDevice.H:237
bool inLaunchRegion() noexcept
Definition: AMReX_GpuControl.H:86
bool inNoSyncRegion() noexcept
Definition: AMReX_GpuControl.H:146
void htod_memcpy(void *p_d, const void *p_h, const std::size_t sz) noexcept
Definition: AMReX_GpuDevice.H:293
std::enable_if_t< IsBaseFab< FAB >() && IsDataPacking< DataPacking, FAB >() > ParallelCopy_finish(FabArray< FAB > &dest, CommHandler handler, const FabArrayBase::CommMetaData &cmd, const DataPacking &data_packing)
Definition: AMReX_NonLocalBC.H:793
std::enable_if_t< IsBaseFab< FAB >::value > PrepareSendBuffers(const PackComponents &components, FabArray< FAB > &dest, const FabArray< FAB > &src, CommData &comm, const FabArrayBase::MapOfCopyComTagContainers &cctc)
Calls PrepareComBuffers.
Definition: AMReX_NonLocalBC.H:555
AMREX_NODISCARD CommHandler ParallelCopy_nowait(NoLocalCopy, FabArray< FAB > &dest, const FabArray< FAB > &src, const FabArrayBase::CommMetaData &cmd, const DataPacking &data_packing)
Definition: AMReX_NonLocalBC.H:701
std::enable_if_t< IsBaseFab< FAB >() && IsCallableR< Dim3, DTOS, Dim3 >() && IsFabProjection< Proj, FAB >() > FillBoundary_finish(CommHandler handler, FabArray< FAB > &mf, const FabArrayBase::CommMetaData &cmd, int scomp, int ncomp, DTOS const &dtos, Proj const &proj=Proj{})
Finish communication started by FillBoundary_nowait.
void Min(KeyValuePair< K, V > &vi, MPI_Comm comm)
Definition: AMReX_ParallelReduce.H:152
void Sum(T &v, MPI_Comm comm)
Definition: AMReX_ParallelReduce.H:204
void Max(KeyValuePair< K, V > &vi, MPI_Comm comm)
Definition: AMReX_ParallelReduce.H:126
MPI_Comm CommunicatorSub() noexcept
sub-communicator for current frame
Definition: AMReX_ParallelContext.H:70
const ProcessTeam & MyTeam() noexcept
Definition: AMReX_ParallelDescriptor.H:349
int MyProc() noexcept
return the rank number local to the current Parallel Context
Definition: AMReX_ParallelDescriptor.H:125
int MyTeamLead() noexcept
Definition: AMReX_ParallelDescriptor.H:309
int TeamSize() noexcept
Definition: AMReX_ParallelDescriptor.H:294
int SeqNum() noexcept
Returns sequential message sequence numbers, usually used as tags for send/recv.
Definition: AMReX_ParallelDescriptor.H:613
static int f(amrex::Real t, N_Vector y_data, N_Vector y_rhs, void *user_data)
Definition: AMReX_SundialsIntegrator.H:44
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void swap(T &a, T &b) noexcept
Definition: AMReX_algoim_K.H:113
@ max
Definition: AMReX_ParallelReduce.H:17
constexpr bool is_convertible(T)
Definition: AMReX_TypeTraits.H:246
logical function omp_in_parallel()
Definition: AMReX_omp_mod.F90:41
Definition: AMReX_Amr.cpp:49
std::enable_if_t< std::is_integral_v< T > > ParallelFor(TypeList< CTOs... > ctos, std::array< int, sizeof...(CTOs)> const &runtime_options, T N, F &&f)
Definition: AMReX_CTOParallelForImpl.H:200
DistributionMapping const & DistributionMap(FabArrayBase const &fa)
MakeType
Definition: AMReX_MakeType.H:7
@ make_alias
Definition: AMReX_MakeType.H:7
int nComp(FabArrayBase const &fa)
std::unique_ptr< char, TheFaArenaDeleter > TheFaArenaPointer
Definition: AMReX_FabArray.H:104
IntVect nGrowVect(FabArrayBase const &fa)
AMREX_GPU_HOST_DEVICE constexpr AMREX_FORCE_INLINE const T & max(const T &a, const T &b) noexcept
Definition: AMReX_Algorithm.H:35
void Copy(FabArray< DFAB > &dst, FabArray< SFAB > const &src, int srccomp, int dstcomp, int numcomp, int nghost)
Definition: AMReX_FabArray.H:179
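A sketch of the free function amrex::Copy (the wrapper duplicate and its arguments are illustrative): unlike ParallelCopy, this is a purely local, FAB-by-FAB operation.

    #include <AMReX_MultiFab.H>

    void duplicate (amrex::MultiFab& dst, amrex::MultiFab const& src)
    {
        // Local, FAB-by-FAB copy: dst and src must share the same BoxArray and
        // DistributionMapping; no MPI communication is performed.
        amrex::Copy(dst, src, /*srccomp*/0, /*dstcomp*/0,
                    /*numcomp*/dst.nComp(), /*nghost*/0);
    }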
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > convert(const BoxND< dim > &b, const IntVectND< dim > &typ) noexcept
Returns a BoxND with different type.
Definition: AMReX_Box.H:1435
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE T abs(const GpuComplex< T > &a_z) noexcept
Return the absolute value of a complex number.
Definition: AMReX_GpuComplex.H:356
constexpr AMREX_GPU_HOST_DEVICE GpuTupleElement< I, GpuTuple< Ts... > >::type & get(GpuTuple< Ts... > &tup) noexcept
Definition: AMReX_Tuple.H:179
Long nBytesOwned(T const &) noexcept
Definition: AMReX_FabArray.H:58
BoxArray const & boxArray(FabArrayBase const &fa)
ReduceData< Ts... >::Type ParReduce(TypeList< Ops... > operation_list, TypeList< Ts... > type_list, FabArray< FAB > const &fa, IntVect const &nghost, F &&f)
Parallel reduce for MultiFab/FabArray.
Definition: AMReX_ParReduce.H:47
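A sketch of ParReduce over a FabArray, summing component 0 as an illustrative choice (the function name and the follow-up MPI reduction are assumptions about typical usage): the lambda returns a GpuTuple matching the requested type list, and the result still needs a cross-rank reduction.

    #include <AMReX_MultiFab.H>
    #include <AMReX_ParReduce.H>

    amrex::Real sum_comp0 (amrex::MultiFab const& mf)
    {
        auto const& ma = mf.const_arrays();
        auto r = amrex::ParReduce(amrex::TypeList<amrex::ReduceOpSum>{},
                                  amrex::TypeList<amrex::Real>{},
                                  mf, amrex::IntVect(0),
        [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k)
            -> amrex::GpuTuple<amrex::Real>
        {
            return {ma[box_no](i,j,k,0)};
        });
        amrex::Real s = amrex::get<0>(r);
        amrex::ParallelDescriptor::ReduceRealSum(s); // combine across MPI ranks
        return s;
    }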
Arena * The_Comms_Arena()
Definition: AMReX_Arena.cpp:669
IntVectND< AMREX_SPACEDIM > IntVect
Definition: AMReX_BaseFwd.H:30
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void ignore_unused(const Ts &...)
This shuts up the compiler about unused variables.
Definition: AMReX.H:111
void setBndry(MF &dst, typename MF::value_type val, int scomp, int ncomp)
dst = val in ghost cells.
Definition: AMReX_FabArrayUtility.H:1614
bool TilingIfNotGPU() noexcept
Definition: AMReX_MFIter.H:12
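A sketch of the canonical MFIter loop that TilingIfNotGPU() and the array(mfi) accessors are designed for (the kernel body is an illustrative assumption): tiling and OpenMP are used on CPU builds, and each tile box is processed by ParallelFor.

    #include <AMReX_MultiFab.H>

    void set_to_one (amrex::MultiFab& mf)
    {
    #ifdef AMREX_USE_OMP
    #pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
    #endif
        for (amrex::MFIter mfi(mf, amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi)
        {
            const amrex::Box& bx = mfi.tilebox();
            auto const& a = mf.array(mfi);
            amrex::ParallelFor(bx, [=] AMREX_GPU_DEVICE (int i, int j, int k)
            {
                a(i,j,k) = 1.0;
            });
        }
    }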
void Add(FabArray< FAB > &dst, FabArray< FAB > const &src, int srccomp, int dstcomp, int numcomp, int nghost)
Definition: AMReX_FabArray.H:240
Arena * The_Pinned_Arena()
Definition: AMReX_Arena.cpp:649
void Abort(const std::string &msg)
Print out message to cerr and exit via abort().
Definition: AMReX.cpp:225
void OverrideSync_finish(FabArray< FAB > &fa)
Definition: AMReX_FabArrayUtility.H:1382
void update_fab_stats(Long n, Long s, size_t szt) noexcept
Definition: AMReX_BaseFab.cpp:144
void setVal(MF &dst, typename MF::value_type val)
dst = val
Definition: AMReX_FabArrayUtility.H:1607
Arena * The_Arena()
Definition: AMReX_Arena.cpp:609
void OverrideSync_nowait(FabArray< FAB > &fa, FabArray< IFAB > const &msk, const Periodicity &period)
Definition: AMReX_FabArrayUtility.H:1335
Definition: AMReX_TagParallelFor.H:57
Definition: AMReX_Array4.H:61
Definition: AMReX_DataAllocator.H:9
Arena * arena() const noexcept
Definition: AMReX_DataAllocator.H:24
Arena * m_arena
Definition: AMReX_DataAllocator.H:10
Definition: AMReX_FabArray.H:108
const FabArrayBase::FB * fb
Definition: AMReX_FabArray.H:110
char * the_recv_data
Definition: AMReX_FabArray.H:115
Vector< MPI_Request > recv_reqs
Definition: AMReX_FabArray.H:120
Vector< char * > recv_data
Definition: AMReX_FabArray.H:118
Vector< MPI_Status > recv_stat
Definition: AMReX_FabArray.H:121
int scomp
Definition: AMReX_FabArray.H:111
Vector< int > recv_from
Definition: AMReX_FabArray.H:117
char * the_send_data
Definition: AMReX_FabArray.H:116
Vector< MPI_Request > send_reqs
Definition: AMReX_FabArray.H:124
Vector< char * > send_data
Definition: AMReX_FabArray.H:123
Vector< std::size_t > recv_size
Definition: AMReX_FabArray.H:119
int ncomp
Definition: AMReX_FabArray.H:112
int tag
Definition: AMReX_FabArray.H:125
parallel copy or add
Definition: AMReX_FabArrayBase.H:536
Used by a bunch of routines when communicating via MPI.
Definition: AMReX_FabArrayBase.H:194
Box dbox
Definition: AMReX_FabArrayBase.H:195
int dstIndex
Definition: AMReX_FabArrayBase.H:197
FillBoundary.
Definition: AMReX_FabArrayBase.H:487
void recordBuild() noexcept
Definition: AMReX_FabArrayBase.H:701
Definition: AMReX_FabArray.H:347
FAB value_type
Definition: AMReX_FabArray.H:348
for shared memory
Definition: AMReX_FabArray.H:1291
ShMem(ShMem &&rhs) noexcept
Definition: AMReX_FabArray.H:1305
Long n_values
Definition: AMReX_FabArray.H:1332
Long n_points
Definition: AMReX_FabArray.H:1333
bool alloc
Definition: AMReX_FabArray.H:1331
ShMem(const ShMem &)=delete
ShMem & operator=(ShMem &&rhs) noexcept
Definition: AMReX_FabArray.H:1316
Definition: AMReX_FabFactory.H:27
FabInfo & SetArena(Arena *ar) noexcept
Definition: AMReX_FabFactory.H:42
FabInfo & SetShared(bool s) noexcept
Definition: AMReX_FabFactory.H:37
FabInfo & SetAlloc(bool a) noexcept
Definition: AMReX_FabFactory.H:32
Definition: AMReX_TypeTraits.H:18
FabArray memory allocation information.
Definition: AMReX_FabArray.H:66
MFInfo & SetTag(T &&t, Ts &&... ts) noexcept
Definition: AMReX_FabArray.H:92
Arena * arena
Definition: AMReX_FabArray.H:70
bool alloc
Definition: AMReX_FabArray.H:68
MFInfo & SetTag() noexcept
Definition: AMReX_FabArray.H:79
MFInfo & SetAllocSingleChunk(bool a) noexcept
Definition: AMReX_FabArray.H:75
MFInfo & SetArena(Arena *ar) noexcept
Definition: AMReX_FabArray.H:77
MFInfo & SetAlloc(bool a) noexcept
Definition: AMReX_FabArray.H:73
bool alloc_single_chunk
Definition: AMReX_FabArray.H:69
Vector< std::string > tags
Definition: AMReX_FabArray.H:71
MFInfo & SetTag(const char *t) noexcept
Definition: AMReX_FabArray.H:81
MFInfo & SetTag(const std::string &t) noexcept
Definition: AMReX_FabArray.H:86
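A sketch showing how the MFInfo builder-style setters are typically chained at construction time (the tag string, the pinned-arena choice, and the pre-existing ba/dm are illustrative assumptions).

    #include <AMReX_MultiFab.H>

    amrex::MultiFab make_pinned_field (amrex::BoxArray const& ba,
                                       amrex::DistributionMapping const& dm)
    {
        // Tag the allocation and place it in pinned host memory.
        amrex::MultiFab mf(ba, dm, /*ncomp*/1, /*ngrow*/0,
                           amrex::MFInfo().SetTag("my_field")
                                          .SetArena(amrex::The_Pinned_Arena()));

        // Metadata only: defer allocation (e.g. for a later define() or an alias).
        // amrex::MultiFab header_only(ba, dm, 1, 0, amrex::MFInfo().SetAlloc(false));

        return mf;
    }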
Definition: AMReX_FabArray.H:152
Array4< T > const *AMREX_RESTRICT dp
Definition: AMReX_FabArray.H:166
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Array4< T > const & operator[](int li) const noexcept
Definition: AMReX_FabArray.H:154
Array4< T > const *AMREX_RESTRICT hp
Definition: AMReX_FabArray.H:168
Definition: AMReX_FabArray.H:131
int actual_n_rcvs
Definition: AMReX_FabArray.H:137
Vector< std::size_t > recv_size
Definition: AMReX_FabArray.H:144
int DC
Definition: AMReX_FabArray.H:138
Vector< MPI_Request > send_reqs
Definition: AMReX_FabArray.H:146
int tag
Definition: AMReX_FabArray.H:136
const FabArray< FAB > * src
Definition: AMReX_FabArray.H:134
char * the_recv_data
Definition: AMReX_FabArray.H:140
FabArrayBase::CpOp op
Definition: AMReX_FabArray.H:135
Vector< MPI_Request > recv_reqs
Definition: AMReX_FabArray.H:145
char * the_send_data
Definition: AMReX_FabArray.H:141
const FabArrayBase::CPC * cpc
Definition: AMReX_FabArray.H:133
Vector< int > recv_from
Definition: AMReX_FabArray.H:142
int NC
Definition: AMReX_FabArray.H:138
int SC
Definition: AMReX_FabArray.H:138
Vector< char * > recv_data
Definition: AMReX_FabArray.H:143
const team_t & get() const
Definition: AMReX_ParallelDescriptor.H:185
void MemoryBarrier() const
memory fence
Definition: AMReX_ParallelDescriptor.H:157
Definition: AMReX_FabArray.H:98
char * pointer
Definition: AMReX_FabArray.H:99
void operator()(pointer p) const noexcept
Definition: AMReX_FabArray.H:100
Struct for holding types.
Definition: AMReX_TypeList.H:12