template <class T0, class T1>
struct CellStore
{
    AMREX_GPU_DEVICE AMREX_FORCE_INLINE
    void operator() (T0* d, T1 s) const noexcept
    {
        *d = static_cast<T0>(s);
    }
};

template <class T0, class T1>
struct CellAdd
{
    AMREX_GPU_DEVICE AMREX_FORCE_INLINE
    void operator() (T0* d, T1 s) const noexcept
    {
        *d += static_cast<T0>(s);
    }
};

template <class T0, class T1>
struct CellAtomicAdd
{
    template <class U0 = T0,
              std::enable_if_t<amrex::HasAtomicAdd<U0>::value, int> = 0>
    AMREX_GPU_DEVICE AMREX_FORCE_INLINE
    void operator() (U0* d, T1 s) const noexcept
    {
        Gpu::Atomic::AddNoRet(d, static_cast<U0>(s));
    }
};

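// The three functors above are the per-cell operations threaded through the
// fab_to_fab kernels below: CellStore assigns, CellAdd accumulates, and
// CellAtomicAdd accumulates with a hardware atomic when the destination type
// supports one.  A minimal illustration (hypothetical values, not part of
// this header):
//
//     double d = 1.0;
//     CellAdd<double,float>()(&d, 2.0f);       // d == 3.0
//     CellAtomicAdd<double,float>()(&d, 2.0f); // same result, atomically
//
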
template <class T0, class T1, class F>
void
fab_to_fab (Vector<Array4CopyTag<T0, T1> > const& copy_tags, int scomp, int dcomp,
            int ncomp, F && f)
{
    TagVector<Array4CopyTag<T0, T1>> tv{copy_tags};

    detail::ParallelFor_doit(tv,
    [=] AMREX_GPU_DEVICE (int icell, int ncells, int i, int j, int k,
                          Array4CopyTag<T0, T1> const& tag) noexcept
    {
        if (icell < ncells) {
            for (int n = 0; n < ncomp; ++n) {
                f(&(tag.dfab(i,j,k,n+dcomp)),
                  tag.sfab(i+tag.offset.x,j+tag.offset.y,k+tag.offset.z,n+scomp));
            }
        }
    });
}

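// fab_to_fab flattens all copy tags into one TagVector so a single kernel
// launch serves every (dst,src) box pair; each tag's cells are distributed
// over the launch and the functor f is applied per cell.  A usage sketch
// (illustrative only):
//
//     Vector<Array4CopyTag<Real,Real>> tags;  // one entry per box pair
//     // ... fill tags ...
//     fab_to_fab(tags, 0, 0, ncomp, CellStore<Real,Real>());
//
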
template <class TagType, class F>
void
fab_to_fab_store (Vector<TagType> const& tags, int scomp, int dcomp, int ncomp,
                  F && f)
{
    amrex::ParallelFor(tags,
    [=] AMREX_GPU_DEVICE (int i, int j, int k, TagType const& tag) noexcept
    {
        // The first thread to bump the cell's mask does the store.
        int m = Gpu::Atomic::Add(&(tag.mask(i,j,k)), 1);
        if (m == 0) {
            for (int n = 0; n < ncomp; ++n) {
                f(&(tag.dfab(i,j,k,n+dcomp)),
                  tag.sfab(i+tag.offset.x,j+tag.offset.y,k+tag.offset.z,n+scomp));
            }
        }
    });
}

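// fab_to_fab_store resolves races on overlapping destinations by letting the
// first thread that increments a cell's mask perform the store; later
// arrivals see a nonzero count and skip the cell, so exactly one source
// value wins.  This is sufficient for pure stores, where the order of the
// losing writers does not matter.
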
template <class TagType, class F>
void
fab_to_fab_other (Vector<TagType> const& tags, int scomp, int dcomp, int ncomp,
                  F && f)
{
    amrex::ParallelFor(tags,
    [=] AMREX_GPU_DEVICE (int i, int j, int k, TagType const& tag) noexcept
    {
        // Per-cell spin lock stored in the mask array: whoever flips the
        // mask from 0 to 1 owns the cell until it stores 0 back.
        int* m = &(tag.mask(i,j,k));
        bool done = false;
        while (!done) {
            bool my_turn;
#if defined(AMREX_USE_SYCL)
            my_turn = (Gpu::Atomic::Exch(m, 1) == 0);
#else
            my_turn = (Gpu::Atomic::CAS(m, 0, 1) == 0);
#endif
            if (my_turn) {
#if defined(AMREX_USE_SYCL)
                sycl::atomic_fence(sycl::memory_order::acq_rel, sycl::memory_scope::device);
#endif
                for (int n = 0; n < ncomp; ++n) {
                    f(&(tag.dfab(i,j,k,n+dcomp)),
                      tag.sfab(i+tag.offset.x,j+tag.offset.y,k+tag.offset.z,n+scomp));
                }
#if defined(AMREX_USE_SYCL)
                sycl::atomic_fence(sycl::memory_order::acq_rel, sycl::memory_scope::device);
#endif
                Gpu::Atomic::Exch(m, 0); // release the lock
                done = true;
            } else {
                // Lost the race; back off briefly before retrying.  The empty
                // asm is a compiler barrier that keeps the wait loop alive.
#if defined(AMREX_USE_CUDA)
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 700)
                for (int c = 0; c < 2; ++c) {
                    __asm__ volatile("");
                }
#else
                for (int c = 0; c < 2; ++c) {
                    __asm__ volatile("");
                }
#endif
#elif defined(AMREX_USE_HIP)
                __builtin_amdgcn_s_sleep(1);
#elif defined(AMREX_USE_SYCL)
                for (int c = 0; c < 2; ++c) {
                    __asm__ volatile("");
                }
#endif
            }
        }
    });
}

template <class T0, class T1, class F>
void
fab_to_fab (Vector<Array4CopyTag<T0, T1> > const& copy_tags, int scomp, int dcomp,
            int ncomp, F && f, Vector<Array4Tag<int> > const& masks)
{
    using TagType = Array4MaskCopyTag<T0, T1>;
    Vector<TagType> tags;
    const int N = copy_tags.size();
    tags.reserve(N);
    for (int i = 0; i < N; ++i) {
        tags.push_back(TagType{copy_tags[i].dfab, copy_tags[i].sfab, masks[i].dfab,
                               copy_tags[i].dbox, copy_tags[i].offset});
    }

    if constexpr (std::is_same_v<F, CellStore<T0,T1>>) {
        fab_to_fab_store(tags, scomp, dcomp, ncomp, std::forward<F>(f));
    } else {
        fab_to_fab_other(tags, scomp, dcomp, ncomp, std::forward<F>(f));
    }
}

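// Dispatch rationale: a pure store only needs "first writer wins"
// (fab_to_fab_store); any read-modify-write functor needs mutual exclusion
// (fab_to_fab_other).  The same mask array doubles as the arrival counter in
// the first case and as the lock word in the second.
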
template <typename T0, typename T1,
          std::enable_if_t<amrex::IsStoreAtomic<T0>::value, int> = 0>
void
fab_to_fab_atomic_cpy (Vector<Array4CopyTag<T0, T1> > const& copy_tags, int scomp,
                       int dcomp, int ncomp, Vector<Array4Tag<int> > const&)
{
    // Stores of T0 are atomic, so no mask is needed.
    fab_to_fab(copy_tags, scomp, dcomp, ncomp, CellStore<T0, T1>());
}

template <typename T0, typename T1,
          std::enable_if_t<!amrex::IsStoreAtomic<T0>::value, int> = 0>
void
fab_to_fab_atomic_cpy (Vector<Array4CopyTag<T0, T1> > const& copy_tags, int scomp,
                       int dcomp, int ncomp, Vector<Array4Tag<int> > const& masks)
{
    fab_to_fab(copy_tags, scomp, dcomp, ncomp, CellStore<T0, T1>(), masks);
}

template <typename T0, typename T1,
          std::enable_if_t<amrex::HasAtomicAdd<T0>::value, int> = 0>
void
fab_to_fab_atomic_add (Vector<Array4CopyTag<T0, T1> > const& copy_tags, int scomp,
                       int dcomp, int ncomp, Vector<Array4Tag<int> > const&)
{
    // Hardware atomic add is available for T0, so no mask is needed.
    fab_to_fab(copy_tags, scomp, dcomp, ncomp, CellAtomicAdd<T0, T1>());
}

template <typename T0, typename T1,
          std::enable_if_t<!amrex::HasAtomicAdd<T0>::value, int> = 0>
void
fab_to_fab_atomic_add (Vector<Array4CopyTag<T0, T1> > const& copy_tags, int scomp,
                       int dcomp, int ncomp, Vector<Array4Tag<int> > const& masks)
{
    fab_to_fab(copy_tags, scomp, dcomp, ncomp, CellAdd<T0, T1>(), masks);
}

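// Summary of the four overloads above (selected by SFINAE on the value type):
//
//     copy, store is atomic      -> plain fab_to_fab + CellStore
//     copy, store not atomic     -> masked fab_to_fab + CellStore
//     add,  hardware atomic add  -> plain fab_to_fab + CellAtomicAdd
//     add,  no hardware atomic   -> masked fab_to_fab + CellAdd
//
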
template <typename T0, typename T1, class F>
void
deterministic_fab_to_fab (Vector<Array4CopyTag<T0,T1> > const& a_tags, int scomp,
                          int dcomp, int ncomp, F const& f)
{
    if (a_tags.empty()) { return; }

    using TagType = Array4CopyTag<T0,T1>;

    // Split every destination box into fixed-size tiles and sort the tiles so
    // that all tags touching the same destination tile land in the same GPU
    // block and are processed in a fixed order.
    struct TiledTag {
        int tag_index;
        std::pair<int,Box> dindex_tilebox;
        bool operator< (TiledTag const& rhs) const noexcept {
            return this->dindex_tilebox < rhs.dindex_tilebox;
        }
        bool operator!= (TiledTag const& rhs) const noexcept {
            return this->dindex_tilebox != rhs.dindex_tilebox;
        }
    };

    Vector<TiledTag> tiled_tags;

    auto const ixtype = a_tags[0].dbox.ixType();

    constexpr int tile_size = 64;
    for (int itag = 0; itag < a_tags.size(); ++itag) {
        auto const& tag = a_tags[itag];
        auto const& dlo = tag.dbox.smallEnd();
        auto const& dhi = tag.dbox.bigEnd();
        IntVect tlo(AMREX_D_DECL(amrex::coarsen<tile_size>(dlo[0]),
                                 amrex::coarsen<tile_size>(dlo[1]),
                                 amrex::coarsen<tile_size>(dlo[2])));
        IntVect thi(AMREX_D_DECL(amrex::coarsen<tile_size>(dhi[0]),
                                 amrex::coarsen<tile_size>(dhi[1]),
                                 amrex::coarsen<tile_size>(dhi[2])));
#if (AMREX_SPACEDIM == 3)
        for (int kt = tlo[2]; kt <= thi[2]; ++kt)
#endif
#if (AMREX_SPACEDIM >= 2)
        for (int jt = tlo[1]; jt <= thi[1]; ++jt)
#endif
        for (int it = tlo[0]; it <= thi[0]; ++it)
        {
            IntVect lo(AMREX_D_DECL(it,jt,kt));
            lo *= tile_size;
            tiled_tags.push_back(TiledTag{itag, std::make_pair
                                 (tag.dindex, Box(lo, lo+(tile_size-1), ixtype))});
        }
    }

    std::sort(tiled_tags.begin(), tiled_tags.end());

    // h_ntags[b] is the index of the first tag owned by block b; block b
    // processes the half-open range [h_ntags[b], h_ntags[b+1]).
    Gpu::HostVector<unsigned int> h_ntags;
    Gpu::HostVector<TagType> h_tags;
    h_tags.reserve(tiled_tags.size());
    for (unsigned int itag = 0; itag < tiled_tags.size(); ++itag) {
        if (itag == 0) {
            h_ntags.push_back(0);
        } else if (tiled_tags[itag-1] != tiled_tags[itag]) {
            h_ntags.push_back(itag);
        }
        auto const& ttag = tiled_tags[itag];
        auto const& btag = a_tags[ttag.tag_index];
        h_tags.push_back({btag.dfab, btag.dindex, btag.sfab,
                          btag.dbox & ttag.dindex_tilebox.second, btag.offset});
    }
    h_ntags.push_back((unsigned int)tiled_tags.size());

    Gpu::DeviceVector<TagType> d_tags(h_tags.size());
    Gpu::DeviceVector<unsigned int> d_ntags(h_ntags.size());
    Gpu::copyAsync(Gpu::hostToDevice,h_tags.begin(),h_tags.end(),d_tags.begin());
    Gpu::copyAsync(Gpu::hostToDevice,h_ntags.begin(),h_ntags.end(),d_ntags.begin());

    auto const* ptag = d_tags.data();
    auto const* pntags = d_ntags.data();
    auto const nblocks = int(h_ntags.size()-1);
    constexpr auto nthreads = 256;
    amrex::launch<nthreads>(nblocks, Gpu::gpuStream(),
#ifdef AMREX_USE_SYCL
    [=] (sycl::nd_item<1> const& item)
    [[sycl::reqd_work_group_size(nthreads)]]
#else
    [=] AMREX_GPU_DEVICE ()
#endif
    {
#ifdef AMREX_USE_SYCL
        Dim1 blockIdx{item.get_group_linear_id()};
        Dim1 threadIdx{item.get_local_linear_id()};
#endif
        // One block owns every tag that writes into its tile; the tags are
        // visited in sorted order, separated by block-wide barriers.
        for (unsigned int itag = pntags[blockIdx.x]; itag < pntags[blockIdx.x+1]; ++itag) {
            auto const tag = ptag[itag];
            auto ncells = int(tag.dbox.numPts());
            auto const len = amrex::length(tag.dbox);
            auto const lo  = amrex::lbound(tag.dbox);
            for (int icell = int(threadIdx.x); icell < ncells; icell += nthreads) {
                int k = icell / (len.x*len.y);
                int j = (icell - k*(len.x*len.y)) / len.x;
                int i = (icell - k*(len.x*len.y)) - j*len.x;
                i += lo.x; j += lo.y; k += lo.z;
                for (int n = 0; n < ncomp; ++n) {
                    f(tag.dfab.ptr(i,j,k,n+dcomp),
                      tag.sfab(i + tag.offset.x,
                               j + tag.offset.y,
                               k + tag.offset.z, n+scomp));
                }
            }
            if (itag+1 < pntags[blockIdx.x+1]) {
#ifdef AMREX_USE_SYCL
                sycl::group_barrier(item.get_group());
#else
                __syncthreads();
#endif
            }
        }
    });

    Gpu::streamSynchronize();
}

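// Why this is deterministic: atomics make the order of additions depend on
// scheduling, so floating-point sums can differ run to run.  Here every
// destination tile is owned by exactly one block, the tile's tags are visited
// in sorted order, and a barrier separates consecutive tags, so each cell
// always receives its contributions in the same order.  Cell decomposition
// used in the kernel, e.g. for len = {4,3,nz}:
//
//     icell = (k*3 + j)*4 + i   =>   k = icell/12; j = (icell%12)/4; i = icell%4
//
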
template <class FAB>
void
FabArray<FAB>::FB_local_copy_cpu (const FB& TheFB, int scomp, int ncomp)
{
    auto const& LocTags = *(TheFB.m_LocTags);
    auto N_locs = static_cast<int>(LocTags.size());
    if (N_locs == 0) { return; }

    if (TheFB.m_threadsafe_loc)
    {
#ifdef AMREX_USE_OMP
#pragma omp parallel for
#endif
        for (int i = 0; i < N_locs; ++i)
        {
            const CopyComTag& tag = LocTags[i];
            BL_ASSERT(distributionMap[tag.dstIndex] == ParallelDescriptor::MyProc());
            BL_ASSERT(distributionMap[tag.srcIndex] == ParallelDescriptor::MyProc());
            const FAB* sfab = &(this->operator[](tag.srcIndex));
            FAB*       dfab = &(this->operator[](tag.dstIndex));
            dfab->template copy<RunOn::Host>(*sfab, tag.sbox, scomp, tag.dbox, scomp, ncomp);
        }
    }
    else
    {
        LayoutData<Vector<FBCopyTag> > loc_copy_tags(boxArray(),DistributionMap());
        for (int i = 0; i < N_locs; ++i)
        {
            const CopyComTag& tag = LocTags[i];
            loc_copy_tags[tag.dstIndex].push_back
                ({this->fabPtr(tag.srcIndex), tag.dbox, tag.sbox.smallEnd()-tag.dbox.smallEnd()});
        }
#ifdef AMREX_USE_OMP
#pragma omp parallel
#endif
        for (MFIter mfi(*this); mfi.isValid(); ++mfi)
        {
            const auto& tags = loc_copy_tags[mfi];
            auto dfab = this->array(mfi);
            for (auto const & tag : tags)
            {
                auto const sfab = tag.sfab->array();
                const auto offset = tag.offset.dim3();
                amrex::LoopConcurrentOnCpu(tag.dbox, ncomp,
                [=] (int i, int j, int k, int n) noexcept
                {
                    dfab(i,j,k,n+scomp) = sfab(i+offset.x,j+offset.y,k+offset.z,n+scomp);
                });
            }
        }
    }
}

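// The two branches trade parallelism strategies: when the tags are thread
// safe, each tag is copied independently inside an OpenMP loop; otherwise
// the tags are grouped per destination FAB so that each FAB is written by a
// single MFIter iteration, which removes write conflicts without locking.
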
template <class FAB>
void
FabArray<FAB>::FB_local_add_cpu (const FB& TheFB, int scomp, int ncomp)
{
    auto const& LocTags = *(TheFB.m_LocTags);
    auto N_locs = static_cast<int>(LocTags.size());
    if (N_locs == 0) { return; }

    // Source and destination live in the same FabArray and may alias, so
    // every source region is snapshotted into a temporary FAB before any
    // destination is updated.
    LayoutData<Vector<FBCopyTag> > loc_copy_tags(boxArray(),DistributionMap());
    std::vector<FAB> src_fabs(N_locs);
    for (int itag = 0; itag < N_locs; ++itag) {
        const CopyComTag& tag = LocTags[itag];
        src_fabs[itag].resize(tag.sbox,ncomp);
        loc_copy_tags[tag.dstIndex].push_back
            ({&(src_fabs[itag]), tag.dbox, tag.sbox.smallEnd()-tag.dbox.smallEnd()});
    }

#ifdef AMREX_USE_OMP
#pragma omp parallel for
#endif
    for (int itag = 0; itag < N_locs; ++itag) {
        const CopyComTag& tag = LocTags[itag];
        src_fabs[itag].template copy<RunOn::Host>(this->operator[](tag.srcIndex), scomp, 0, ncomp);
    }

#ifdef AMREX_USE_OMP
#pragma omp parallel
#endif
    for (MFIter mfi(*this); mfi.isValid(); ++mfi)
    {
        const auto& tags = loc_copy_tags[mfi];
        const auto& dfab = this->array(mfi);
        for (auto const & tag : tags)
        {
            auto const sfab = tag.sfab->array();
            const auto offset = tag.offset.dim3();
            amrex::LoopConcurrentOnCpu(tag.dbox, ncomp,
            [&] (int i, int j, int k, int n) noexcept
            {
                dfab(i,j,k,n+scomp) += sfab(i+offset.x,j+offset.y,k+offset.z,n);
            });
        }
    }
}

template <class FAB>
auto
FabArray<FAB>::FB_get_local_copy_tag_vector (const FB& TheFB)
{
    using TagType = Array4CopyTag<value_type, value_type>;

    auto const& LocTags = *(TheFB.m_LocTags);
    int N_locs = LocTags.size();

    // The flattened tag vector is cached per FillBoundary pattern and reused
    // across calls.
    TagVector<TagType>* tv = nullptr;
    if (auto it = m_fb_local_copy_handler.find(TheFB.m_id);
        it != m_fb_local_copy_handler.end())
    {
        tv = it->second.get();
    }
    else
    {
        Vector<TagType> loc_copy_tags;
        loc_copy_tags.reserve(N_locs);
        for (int i = 0; i < N_locs; ++i)
        {
            const CopyComTag& tag = LocTags[i];
            int li = this->localindex(tag.dstIndex);
            loc_copy_tags.push_back
                ({this->atLocalIdx(li).array(), tag.dstIndex,
                  this->fabPtr(tag.srcIndex)->const_array(),
                  tag.dbox, (tag.sbox.smallEnd()-tag.dbox.smallEnd()).dim3()});
        }
        auto utv = std::make_unique<TagVector<TagType>>(loc_copy_tags);
        tv = utv.get();
        m_fb_local_copy_handler[TheFB.m_id] = std::move(utv);
    }
    return tv;
}

template <class FAB>
void
FabArray<FAB>::FB_local_copy_gpu (const FB& TheFB, int scomp, int ncomp)
{
    auto const& LocTags = *(TheFB.m_LocTags);
    int N_locs = LocTags.size();
    if (N_locs == 0) { return; }

    using TagType = Array4CopyTag<value_type, value_type>;

    if (TheFB.m_threadsafe_loc)
    {
        auto* tv = FB_get_local_copy_tag_vector(TheFB);

        detail::ParallelFor_doit(*tv,
        [=] AMREX_GPU_DEVICE (int icell, int ncells, int i, int j, int k,
                              TagType const& tag) noexcept
        {
            if (icell < ncells) {
                for (int n = 0; n < ncomp; ++n) {
                    tag.dfab(i,j,k,n+scomp) = tag.sfab(i+tag.offset.x,
                                                       j+tag.offset.y,
                                                       k+tag.offset.z,n+scomp);
                }
            }
        });
    }
    else
    {
        Vector<TagType> loc_copy_tags;
        loc_copy_tags.reserve(N_locs);

        Vector<BaseFab<int> > maskfabs(this->local_size());
        Vector<Array4Tag<int> > masks_unique;
        masks_unique.reserve(this->local_size());
        Vector<Array4Tag<int> > masks;
        masks.reserve(N_locs);

        for (int i = 0; i < N_locs; ++i)
        {
            const CopyComTag& tag = LocTags[i];
            int li = this->localindex(tag.dstIndex);
            loc_copy_tags.push_back
                ({this->atLocalIdx(li).array(), tag.dstIndex,
                  this->fabPtr(tag.srcIndex)->const_array(),
                  tag.dbox, (tag.sbox.smallEnd()-tag.dbox.smallEnd()).dim3()});
            if (!maskfabs[li].isAllocated()) {
                maskfabs[li].resize(this->atLocalIdx(li).box());
                masks_unique.emplace_back(Array4Tag<int>{maskfabs[li].array()});
            }
            masks.emplace_back(Array4Tag<int>{maskfabs[li].array()});
        }

        if (!masks_unique.empty()) {
            // The masks are used as per-cell flags/locks and must start at zero.
            amrex::ParallelFor(masks_unique,
            [=] AMREX_GPU_DEVICE (int i, int j, int k, Array4Tag<int> const& msk) noexcept
            {
                msk.dfab(i,j,k) = 0;
            });
        }

        detail::fab_to_fab_atomic_cpy<value_type, value_type>(
            loc_copy_tags, scomp, scomp, ncomp, masks);
        Gpu::streamSynchronize(); // the mask FABs must outlive the kernels
    }
}

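// The mask FABs above are allocated lazily, one per local destination FAB,
// and shared by every tag that targets that FAB (masks[] holds one alias per
// tag, masks_unique[] one per allocation).  That sharing is what lets the
// atomic-copy kernel coordinate all tags writing into the same FAB.
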
template <class FAB>
void
FabArray<FAB>::FB_local_add_gpu (const FB& TheFB, int scomp, int ncomp)
{
    auto const& LocTags = *(TheFB.m_LocTags);
    int N_locs = LocTags.size();
    if (N_locs == 0) { return; }

    // Two passes: snapshot all source regions into temporaries, then add the
    // temporaries into the destinations, so aliased regions stay correct.
    using TagType = Array4CopyTag<value_type, value_type>;
    Vector<FAB> src_fabs(N_locs);
    Vector<TagType> loc_copy_tags_1;
    Vector<TagType> loc_copy_tags_2;
    loc_copy_tags_1.reserve(N_locs);
    loc_copy_tags_2.reserve(N_locs);
    for (int itag = 0; itag < N_locs; ++itag) {
        const CopyComTag& tag = LocTags[itag];
        src_fabs[itag].resize(tag.sbox,ncomp);
        loc_copy_tags_1.push_back(
            TagType{src_fabs[itag].array(), -1,
                    this->fabPtr(tag.srcIndex)->const_array(), tag.sbox, Dim3{0,0,0}});
        loc_copy_tags_2.push_back(
            TagType{this->array(tag.dstIndex), tag.dstIndex,
                    src_fabs[itag].const_array(), tag.dbox,
                    (tag.sbox.smallEnd()-tag.dbox.smallEnd()).dim3()});
    }

    detail::fab_to_fab(loc_copy_tags_1, scomp, 0, ncomp,
                       detail::CellStore<value_type, value_type>());
    detail::fab_to_fab(loc_copy_tags_2, 0, scomp, ncomp,
                       detail::CellAdd<value_type, value_type>());
    Gpu::streamSynchronize(); // the temporaries must outlive the kernels
}

template <class FAB>
void
FabArray<FAB>::CMD_local_setVal_gpu (typename FabArray<FAB>::value_type x,
                                     const CommMetaData& thecmd, int scomp, int ncomp)
{
    auto const& LocTags = *(thecmd.m_LocTags);
    int N_locs = LocTags.size();
    if (N_locs == 0) { return; }

    using TagType = Array4BoxTag<value_type>;
    Vector<TagType> loc_setval_tags;
    loc_setval_tags.reserve(N_locs);

    for (int i = 0; i < N_locs; ++i)
    {
        const CopyComTag& tag = LocTags[i];
        loc_setval_tags.push_back({this->array(tag.dstIndex), tag.dbox});
    }

    ParallelFor(loc_setval_tags, ncomp,
    [x,scomp] AMREX_GPU_DEVICE (int i, int j, int k, int n, TagType const& tag) noexcept
    {
        tag.dfab(i,j,k,n+scomp) = x;
    });
}

template <class FAB>
void
FabArray<FAB>::CMD_remote_setVal_gpu (typename FabArray<FAB>::value_type x,
                                      const CommMetaData& thecmd, int scomp, int ncomp)
{
    auto const& RcvTags = *(thecmd.m_RcvTags);

    using TagType = Array4BoxTag<value_type>;
    Vector<TagType> rcv_setval_tags;
    for (auto it = RcvTags.begin(); it != RcvTags.end(); ++it) {
        for (auto const& tag: it->second) {
            rcv_setval_tags.push_back({this->array(tag.dstIndex), tag.dbox});
        }
    }

    if (rcv_setval_tags.empty()) { return; }

    ParallelFor(rcv_setval_tags, ncomp,
    [x,scomp] AMREX_GPU_DEVICE (int i, int j, int k, int n, TagType const& tag) noexcept
    {
        tag.dfab(i,j,k,n+scomp) = x;
    });
}

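// Both setVal helpers reduce to the same pattern: gather (Array4, Box) pairs
// into a flat tag vector and run one ParallelFor over all of them, e.g.
//
//     Vector<Array4BoxTag<Real>> tags;  // filled as above
//     ParallelFor(tags, ncomp,
//     [x,scomp] AMREX_GPU_DEVICE (int i, int j, int k, int n,
//                                 Array4BoxTag<Real> const& t) noexcept
//     { t.dfab(i,j,k,n+scomp) = x; });
//
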
#if defined(__CUDACC__) && defined(AMREX_USE_CUDA)

template <class FAB>
void
FabArray<FAB>::FB_local_copy_cuda_graph_1 (const FB& TheFB, int scomp, int ncomp)
{
    const int N_locs = (*TheFB.m_LocTags).size();
    LayoutData<Vector<FBCopyTag> > loc_copy_tags(boxArray(),DistributionMap());
    for (int i = 0; i < N_locs; ++i)
    {
        const CopyComTag& tag = (*TheFB.m_LocTags)[i];

        BL_ASSERT(distributionMap[tag.dstIndex] == ParallelDescriptor::MyProc());
        BL_ASSERT(distributionMap[tag.srcIndex] == ParallelDescriptor::MyProc());

        loc_copy_tags[tag.dstIndex].push_back
            ({this->fabPtr(tag.srcIndex), tag.dbox, tag.sbox.smallEnd()-tag.dbox.smallEnd()});
    }

    // Record the graph on first use; afterwards only the CopyMemory
    // parameters are rewritten and the recorded graph is replayed.
    if ( !(TheFB.m_localCopy.ready()) )
    {
        const_cast<FB&>(TheFB).m_localCopy.resize(N_locs);

        int idx = 0;
        for (MFIter mfi(*this, MFItInfo().DisableDeviceSync()); mfi.isValid(); ++mfi)
        {
            amrex::Gpu::Device::startGraphRecording( (mfi.LocalIndex() == 0),
                                                     const_cast<FB&>(TheFB).m_localCopy.getHostPtr(0),
                                                     (TheFB).m_localCopy.getDevicePtr(0),
                                                     std::size_t(sizeof(CopyMemory)*N_locs) );

            const auto& tags = loc_copy_tags[mfi];
            for (auto const & tag : tags)
            {
                const auto offset = tag.offset.dim3();
                CopyMemory* cmem = TheFB.m_localCopy.getDevicePtr(idx++);
                AMREX_HOST_DEVICE_FOR_3D (tag.dbox, i, j, k,
                {
                    // The device-side parameters are read at execution time,
                    // so the same recorded kernel works for any src/dst pair.
                    auto const dst = cmem->getDst<value_type>();
                    auto const src = cmem->getSrc<value_type>();
                    for (int n = 0; n < cmem->ncomp; ++n) {
                        dst(i,j,k,(cmem->scomp)+n) = src(i+offset.x,j+offset.y,k+offset.z,(cmem->scomp)+n);
                    }
                });
            }

            bool last_iter = mfi.LocalIndex() == (this->local_size()-1);
            cudaGraphExec_t graphExec = amrex::Gpu::Device::stopGraphRecording(last_iter);
            if (last_iter) { const_cast<FB&>(TheFB).m_localCopy.setGraph( graphExec ); }
        }
    }

    // Update the launch parameters and replay the graph.
    int idx = 0;
    for (MFIter mfi(*this); mfi.isValid(); ++mfi)
    {
        auto const dst_array = this->array(mfi);
        const auto& tags = loc_copy_tags[mfi];
        for (auto const & tag : tags)
        {
            const_cast<FB&>(TheFB).m_localCopy.setParams(idx++, makeCopyMemory(tag.sfab->array(),
                                                                               dst_array,
                                                                               scomp, ncomp));
        }
    }

    TheFB.m_localCopy.executeGraph();
}

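// CUDA-graph flow shared by the *_cuda_graph_* routines: on the first call
// the per-box copy kernels are recorded into a graph whose launch parameters
// live in CopyMemory slots; on every later call only setParams() rewrites
// those slots and executeGraph() replays the whole recorded sequence,
// avoiding per-kernel launch overhead.
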
template <class FAB>
void
FabArray<FAB>::FB_local_copy_cuda_graph_n (const FB& TheFB, int scomp, int ncomp)
{
    const int N_locs = TheFB.m_LocTags->size();

    int launches = 0; // number of owned tags; each gets its own launch/stream
    LayoutData<Vector<FBCopyTag> > loc_copy_tags(boxArray(),DistributionMap());
    for (int i = 0; i < N_locs; ++i)
    {
        const CopyComTag& tag = (*TheFB.m_LocTags)[i];

        BL_ASSERT(ParallelDescriptor::sameTeam(distributionMap[tag.dstIndex]));
        BL_ASSERT(ParallelDescriptor::sameTeam(distributionMap[tag.srcIndex]));

        if (distributionMap[tag.dstIndex] == ParallelDescriptor::MyProc())
        {
            loc_copy_tags[tag.dstIndex].push_back
                ({this->fabPtr(tag.srcIndex), tag.dbox, tag.sbox.smallEnd()-tag.dbox.smallEnd()});
            ++launches;
        }
    }

    if ( !(TheFB.m_localCopy.ready()) )
    {
        const_cast<FB&>(TheFB).m_localCopy.resize(launches);

        int idx = 0;
        int cuda_stream = 0;
        for (MFIter mfi(*this, MFItInfo().DisableDeviceSync()); mfi.isValid(); ++mfi)
        {
            const auto& tags = loc_copy_tags[mfi];
            for (int t = 0; t<tags.size(); ++t)
            {
                Gpu::Device::setStreamIndex(cuda_stream++);
                amrex::Gpu::Device::startGraphRecording( (idx == 0),
                                                         const_cast<FB&>(TheFB).m_localCopy.getHostPtr(0),
                                                         (TheFB).m_localCopy.getDevicePtr(0),
                                                         std::size_t(sizeof(CopyMemory)*launches) );

                const auto& tag = tags[t];
                const Dim3 offset = tag.offset.dim3();

                CopyMemory* cmem = TheFB.m_localCopy.getDevicePtr(idx++);
                AMREX_HOST_DEVICE_FOR_3D (tag.dbox, i, j, k,
                {
                    auto const dst = cmem->getDst<value_type>();
                    auto const src = cmem->getSrc<value_type>();
                    for (int n = 0; n < cmem->ncomp; ++n) {
                        dst(i,j,k,(cmem->scomp)+n) = src(i+offset.x,j+offset.y,k+offset.z,(cmem->scomp)+n);
                    }
                });

                bool last_iter = idx == launches;
                cudaGraphExec_t graphExec = Gpu::Device::stopGraphRecording(last_iter);
                if (last_iter) { const_cast<FB&>(TheFB).m_localCopy.setGraph( graphExec ); }
            }
        }
    }

    int idx = 0;
    for (MFIter mfi(*this); mfi.isValid(); ++mfi)
    {
        const auto& dst_array = this->array(mfi);
        const auto& tags = loc_copy_tags[mfi];
        for (auto const & tag : tags)
        {
            const_cast<FB&>(TheFB).m_localCopy.setParams(idx++, makeCopyMemory(tag.sfab->array(),
                                                                               dst_array,
                                                                               scomp, ncomp));
        }
    }

    TheFB.m_localCopy.executeGraph(false);
}

#endif

#if defined(__CUDACC__) && defined(AMREX_USE_CUDA)

template <class FAB>
void
FabArray<FAB>::FB_pack_send_buffer_cuda_graph (const FB& TheFB, int scomp, int ncomp,
                                               Vector<char*>& send_data,
                                               Vector<std::size_t> const& send_size,
                                               Vector<typename FabArray<FAB>::CopyComTagsContainer const*> const& send_cctc)
{
    const int N_snds = send_data.size();
    if (N_snds == 0) { return; }

    if ( !(TheFB.m_copyToBuffer.ready()) )
    {
        // Count the total number of box copies so the graph can be sized.
        int launches = 0;
        for (int send = 0; send < N_snds; ++send) {
            if (send_size[send] > 0) {
                launches += send_cctc[send]->size();
            }
        }
        const_cast<FB&>(TheFB).m_copyToBuffer.resize(launches);

        int idx = 0;
        for (Gpu::StreamIter sit(N_snds,Gpu::StreamItInfo().DisableDeviceSync());
             sit.isValid(); ++sit)
        {
            amrex::Gpu::Device::startGraphRecording( (sit() == 0),
                                                     const_cast<FB&>(TheFB).m_copyToBuffer.getHostPtr(0),
                                                     (TheFB).m_copyToBuffer.getDevicePtr(0),
                                                     std::size_t(sizeof(CopyMemory)*launches) );

            const int j = sit();
            if (send_size[j] > 0)
            {
                auto const& cctc = *send_cctc[j];
                for (auto const& tag : cctc)
                {
                    const Box& bx = tag.sbox;
                    CopyMemory* cmem = TheFB.m_copyToBuffer.getDevicePtr(idx++);
                    AMREX_HOST_DEVICE_FOR_3D (bx, ii, jj, kk,
                    {
                        auto const pfab = cmem->getDst<value_type>();
                        auto const sfab = cmem->getSrc<value_type>();
                        for (int n = 0; n < cmem->ncomp; ++n)
                        {
                            pfab(ii,jj,kk,n) = sfab(ii,jj,kk,n+(cmem->scomp));
                        }
                    });
                }
            }

            bool last_iter = sit() == (N_snds-1);
            cudaGraphExec_t graphExec = amrex::Gpu::Device::stopGraphRecording(last_iter);
            if (last_iter) { const_cast<FB&>(TheFB).m_copyToBuffer.setGraph( graphExec ); }
        }
    }

    // Update the parameters and replay the recorded graph.
    int idx = 0;
    for (int send = 0; send < N_snds; ++send)
    {
        const int j = send;
        if (send_size[j] > 0)
        {
            char* dptr = send_data[j];
            auto const& cctc = *send_cctc[j];
            for (auto const& tag : cctc)
            {
                const_cast<FB&>(TheFB).m_copyToBuffer.setParams(idx++, makeCopyMemory(this->array(tag.srcIndex),
                                                                                      amrex::makeArray4((value_type*)(dptr), tag.sbox, ncomp),
                                                                                      scomp, ncomp));
                dptr += (tag.sbox.numPts() * ncomp * sizeof(value_type));
            }
            BL_ASSERT(dptr <= send_data[j] + send_size[j]);
        }
    }

    TheFB.m_copyToBuffer.executeGraph();
}

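// Send-buffer layout assumed by the packing above: the copies for one
// destination rank are laid out back to back, each occupying
// sbox.numPts()*ncomp*sizeof(value_type) bytes, so dptr advances by exactly
// that amount per tag and must never pass send_data[j] + send_size[j].
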
template <class FAB>
void
FabArray<FAB>::FB_unpack_recv_buffer_cuda_graph (const FB& TheFB, int dcomp, int ncomp,
                                                 Vector<char*> const& recv_data,
                                                 Vector<std::size_t> const& recv_size,
                                                 Vector<CopyComTagsContainer const*> const& recv_cctc,
                                                 bool /*is_thread_safe*/)
{
    const int N_rcvs = recv_cctc.size();
    if (N_rcvs == 0) { return; }

    int launches = 0;
    LayoutData<Vector<VoidCopyTag> > recv_copy_tags(boxArray(),DistributionMap());
    for (int k = 0; k < N_rcvs; ++k)
    {
        if (recv_size[k] > 0)
        {
            const char* dptr = recv_data[k];
            auto const& cctc = *recv_cctc[k];
            for (auto const& tag : cctc)
            {
                recv_copy_tags[tag.dstIndex].push_back({dptr,tag.dbox});
                dptr += tag.dbox.numPts() * ncomp * sizeof(value_type);
                ++launches;
            }
            BL_ASSERT(dptr <= recv_data[k] + recv_size[k]);
        }
    }

    if ( !(TheFB.m_copyFromBuffer.ready()) )
    {
        const_cast<FB&>(TheFB).m_copyFromBuffer.resize(launches);

        int idx = 0;
        for (MFIter mfi(*this, MFItInfo().DisableDeviceSync()); mfi.isValid(); ++mfi)
        {
            amrex::Gpu::Device::startGraphRecording( (mfi.LocalIndex() == 0),
                                                     const_cast<FB&>(TheFB).m_copyFromBuffer.getHostPtr(0),
                                                     (TheFB).m_copyFromBuffer.getDevicePtr(0),
                                                     std::size_t(sizeof(CopyMemory)*launches) );

            const auto& tags = recv_copy_tags[mfi];
            for (auto const & tag : tags)
            {
                CopyMemory* cmem = TheFB.m_copyFromBuffer.getDevicePtr(idx++);
                AMREX_HOST_DEVICE_FOR_3D (tag.dbox, i, j, k,
                {
                    auto const pfab = cmem->getSrc<value_type>();
                    auto const dfab = cmem->getDst<value_type>();
                    for (int n = 0; n < cmem->ncomp; ++n)
                    {
                        dfab(i,j,k,n+(cmem->scomp)) = pfab(i,j,k,n);
                    }
                });
            }

            bool last_iter = mfi.LocalIndex() == (this->local_size()-1);
            cudaGraphExec_t graphExec = amrex::Gpu::Device::stopGraphRecording(last_iter);
            if (last_iter) { const_cast<FB&>(TheFB).m_copyFromBuffer.setGraph( graphExec ); }
        }
    }

    int idx = 0;
    for (MFIter mfi(*this); mfi.isValid(); ++mfi)
    {
        auto dst_array = this->array(mfi);
        const auto & tags = recv_copy_tags[mfi];
        for (auto const & tag : tags)
        {
            const_cast<FB&>(TheFB).m_copyFromBuffer.setParams(idx++, makeCopyMemory(amrex::makeArray4((value_type*)(tag.p), tag.dbox, ncomp),
                                                                                    dst_array,
                                                                                    dcomp, ncomp));
        }
    }

    TheFB.m_copyFromBuffer.executeGraph();
}

#endif

template <class FAB>
template <typename BUF>
auto
FabArray<FAB>::get_send_copy_tag_vector (Vector<char*> const& send_data,
                                         Vector<std::size_t> const& send_size,
                                         Vector<CopyComTagsContainer const*> const& send_cctc,
                                         int ncomp, std::uint64_t id) const
{
    // Each tag bundles a source Array4, the byte offset of its slot in the
    // contiguous send buffer, and the box to pack.  (The tag type lives in
    // AMReX_TagParallelFor.H; the alias name used here is an assumption.)
    using TagType = SndBufTag<value_type>;

    TagVector<TagType>* tv = nullptr;

    auto kit = std::find_if(send_cctc.begin(), send_cctc.end(),
                            [] (CopyComTagsContainer const* p) { return p != nullptr; });
    if (kit == send_cctc.end()) { return tv; }

    auto get_tags = [&] () {
        Vector<TagType> snd_copy_tags;
        char* pbuf = send_data[0];
        const int N_snds = send_data.size();
        for (int j = 0; j < N_snds; ++j)
        {
            if (send_size[j] > 0)
            {
                char* dptr = send_data[j];
                auto const& cctc = *send_cctc[j];
                for (auto const& tag : cctc)
                {
                    snd_copy_tags.emplace_back
                        (TagType{this->const_array(tag.srcIndex), dptr-pbuf, tag.sbox});
                    dptr += (tag.sbox.numPts() * ncomp * sizeof(BUF));
                }
            }
        }
        return snd_copy_tags;
    };

    std::tuple<std::uint64_t,std::size_t,int> key{id, sizeof(BUF), ncomp};
    if (auto it = m_send_copy_handler.find(key); it != m_send_copy_handler.end()) {
        tv = it->second.get();
    } else {
        if (m_send_copy_handler.size() > 32) {
            // Crude eviction: drop the whole cache once it grows too large.
            m_send_copy_handler.clear();
        }
        auto snd_copy_tags = get_tags();
        auto utv = std::make_unique<TagVector<TagType>>(snd_copy_tags);
        tv = utv.get();
        m_send_copy_handler[key] = std::move(utv);
    }
    return tv;
}

template <class FAB>
template <typename BUF>
void
FabArray<FAB>::pack_send_buffer_gpu (FabArray<FAB> const& src, int scomp, int ncomp,
                                     Vector<char*> const& send_data,
                                     Vector<std::size_t> const& send_size,
                                     Vector<CopyComTagsContainer const*> const& send_cctc,
                                     std::uint64_t id)
{
    const int N_snds = send_data.size();
    if (N_snds == 0) { return; }

    auto* tv = src.template get_send_copy_tag_vector<BUF>
        (send_data, send_size, send_cctc, ncomp, id);
    if (tv == nullptr) { return; }

    char* pbuffer = send_data[0];

    using TagType = SndBufTag<value_type>; // see get_send_copy_tag_vector
    detail::ParallelFor_doit(*tv,
    [=] AMREX_GPU_DEVICE (int icell, int ncells, int i, int j, int k,
                          TagType const& tag) noexcept
    {
        if (icell < ncells) {
            auto dfab = amrex::makeArray4((BUF*)(pbuffer+tag.offset), tag.sbox, ncomp);
            for (int n = 0; n < ncomp; ++n) {
                dfab(i,j,k,n) = (BUF)tag.sfab(i,j,k,n+scomp);
            }
        }
    });

    Gpu::streamSynchronize();
}

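// BUF is the wire type: packing casts value_type to BUF, unpacking casts
// back, so messages can travel in reduced precision.  A hypothetical caller
// packing double-valued data as float (argument list per the sketch above):
//
//     FabArray<FArrayBox>::pack_send_buffer_gpu<float>(mf, scomp, ncomp,
//         send_data, send_size, send_cctc, id);
//
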
template <class FAB>
template <typename BUF>
auto
FabArray<FAB>::get_recv_copy_tag_vector (Vector<char*> const& recv_data,
                                         Vector<std::size_t> const& recv_size,
                                         Vector<CopyComTagsContainer const*> const& recv_cctc,
                                         int ncomp, std::uint64_t id)
{
    // Each tag bundles a destination Array4, the byte offset of its slot in
    // the contiguous receive buffer, and the box to unpack.  (The tag type
    // lives in AMReX_TagParallelFor.H; the alias name here is an assumption.)
    using TagType = RcvBufTag<value_type>;

    TagVector<TagType>* tv = nullptr;

    auto kit = std::find_if(recv_cctc.begin(), recv_cctc.end(),
                            [] (CopyComTagsContainer const* p) { return p != nullptr; });
    if (kit == recv_cctc.end()) { return tv; }

    auto get_tags = [&] () {
        Vector<TagType> recv_copy_tags;
        char* pbuf = recv_data[0];
        const int N_rcvs = recv_cctc.size();
        for (int k = 0; k < N_rcvs; ++k)
        {
            if (recv_size[k] > 0)
            {
                char* dptr = recv_data[k];
                auto const& cctc = *recv_cctc[k];
                for (auto const& tag : cctc)
                {
                    const int li = this->localindex(tag.dstIndex);
                    recv_copy_tags.emplace_back
                        (TagType{this->atLocalIdx(li).array(), dptr-pbuf, tag.dbox});
                    dptr += tag.dbox.numPts() * ncomp * sizeof(BUF);
                }
            }
        }
        return recv_copy_tags;
    };

    std::tuple<std::uint64_t,std::size_t,int> key{id, sizeof(BUF), ncomp};
    if (auto it = m_recv_copy_handler.find(key); it != m_recv_copy_handler.end()) {
        tv = it->second.get();
    } else {
        if (m_recv_copy_handler.size() > 32) {
            m_recv_copy_handler.clear();
        }
        auto recv_copy_tags = get_tags();
        auto utv = std::make_unique<TagVector<TagType>>(recv_copy_tags);
        tv = utv.get();
        m_recv_copy_handler[key] = std::move(utv);
    }
    return tv;
}

template <class FAB>
template <typename BUF>
void
FabArray<FAB>::unpack_recv_buffer_gpu (FabArray<FAB>& dst, int dcomp, int ncomp,
                                       Vector<char*> const& recv_data,
                                       Vector<std::size_t> const& recv_size,
                                       Vector<CopyComTagsContainer const*> const& recv_cctc,
                                       CpOp op, bool is_thread_safe, std::uint64_t id,
                                       bool deterministic) // (last parameter name assumed)
{
    const int N_rcvs = recv_cctc.size();
    if (N_rcvs == 0) { return; }

    bool use_mask = false;
    if (!is_thread_safe)
    {
        if ((op == FabArrayBase::COPY && !amrex::IsStoreAtomic<value_type>::value) ||
            (op == FabArrayBase::ADD && !amrex::HasAtomicAdd <value_type>::value))
        {
            use_mask = true;
        }
    }

    if (deterministic && op == FabArrayBase::ADD)
    {
        // Reproducible ADD: funnel everything through the tile-sorted
        // deterministic path so additions happen in a fixed order.
        using TagType = Array4CopyTag<value_type, BUF>;
        Vector<TagType> tags;
        tags.reserve(N_rcvs);
        for (int k = 0; k < N_rcvs; ++k) {
            if (recv_size[k] > 0) {
                char const* dptr = recv_data[k];
                auto const& cctc = *recv_cctc[k];
                for (auto const& tag : cctc) {
                    tags.emplace_back(
                        TagType{dst.array(tag.dstIndex), tag.dstIndex,
                                amrex::makeArray4((BUF const*)(dptr), tag.dbox, ncomp),
                                tag.dbox, Dim3{0,0,0}});
                    dptr += tag.dbox.numPts() * ncomp * sizeof(BUF);
                }
            }
        }
        detail::deterministic_fab_to_fab<value_type,BUF>
            (tags, 0, dcomp, ncomp, detail::CellAdd<value_type,BUF>());
        return;
    }

    if (!use_mask)
    {
        auto* tv = dst.template get_recv_copy_tag_vector<BUF>
            (recv_data, recv_size, recv_cctc, ncomp, id);
        if (tv == nullptr) { return; }

        char* pbuffer = recv_data[0];

        using TagType = RcvBufTag<value_type>; // see get_recv_copy_tag_vector
        if (op == FabArrayBase::COPY)
        {
            detail::ParallelFor_doit(*tv,
            [=] AMREX_GPU_DEVICE (int icell, int ncells, int i, int j, int k,
                                  TagType const& tag) noexcept
            {
                if (icell < ncells) {
                    auto const sfab = amrex::makeArray4((BUF const*)(pbuffer+tag.offset),
                                                        tag.dbox, ncomp);
                    for (int n = 0; n < ncomp; ++n) {
                        tag.dfab(i,j,k,n+dcomp) = (value_type)sfab(i,j,k,n);
                    }
                }
            });
        }
        else if (op == FabArrayBase::ADD)
        {
            if (is_thread_safe) {
                detail::ParallelFor_doit(*tv,
                [=] AMREX_GPU_DEVICE (int icell, int ncells, int i, int j, int k,
                                      TagType const& tag) noexcept
                {
                    if (icell < ncells) {
                        auto const sfab = amrex::makeArray4((BUF const*)(pbuffer+tag.offset),
                                                            tag.dbox, ncomp);
                        for (int n = 0; n < ncomp; ++n) {
                            tag.dfab(i,j,k,n+dcomp) += (value_type)sfab(i,j,k,n);
                        }
                    }
                });
            } else {
                detail::ParallelFor_doit(*tv,
                [=] AMREX_GPU_DEVICE (int icell, int ncells, int i, int j, int k,
                                      TagType const& tag) noexcept
                {
                    if (icell < ncells) {
                        auto const sfab = amrex::makeArray4((BUF const*)(pbuffer+tag.offset),
                                                            tag.dbox, ncomp);
                        for (int n = 0; n < ncomp; ++n) {
                            Gpu::Atomic::AddNoRet(tag.dfab.ptr(i,j,k,n+dcomp),
                                                  (value_type)sfab(i,j,k,n));
                        }
                    }
                });
            }
        }
        else
        {
            amrex::Abort("unpack_recv_buffer_gpu: should NOT get here");
        }
        Gpu::streamSynchronize();
    }
    else
    {
        using TagType = Array4CopyTag<value_type, BUF>;
        char* pbuffer = recv_data[0];

        Vector<TagType> recv_copy_tags;
        recv_copy_tags.reserve(N_rcvs);

        Vector<BaseFab<int> > maskfabs(dst.local_size());
        Vector<Array4Tag<int> > masks_unique;
        masks_unique.reserve(dst.local_size());
        Vector<Array4Tag<int> > masks;

        for (int k = 0; k < N_rcvs; ++k)
        {
            if (recv_size[k] > 0)
            {
                std::size_t offset = recv_data[k]-recv_data[0];
                const char* dptr = pbuffer + offset;
                auto const& cctc = *recv_cctc[k];
                for (auto const& tag : cctc)
                {
                    const int li = dst.localindex(tag.dstIndex);
                    recv_copy_tags.emplace_back(TagType{
                        dst.atLocalIdx(li).array(), tag.dstIndex,
                        amrex::makeArray4((BUF const*)(dptr), tag.dbox, ncomp),
                        tag.dbox, Dim3{0,0,0}});
                    dptr += tag.dbox.numPts() * ncomp * sizeof(BUF);

                    if (!maskfabs[li].isAllocated()) {
                        maskfabs[li].resize(dst.atLocalIdx(li).box());
                        masks_unique.emplace_back(Array4Tag<int>{maskfabs[li].array()});
                    }
                    masks.emplace_back(Array4Tag<int>{maskfabs[li].array()});
                }
            }
        }

        if (!masks_unique.empty()) {
            // The masks act as per-cell flags/locks and must start at zero.
            amrex::ParallelFor(masks_unique,
            [=] AMREX_GPU_DEVICE (int i, int j, int k, Array4Tag<int> const& msk) noexcept
            {
                msk.dfab(i,j,k) = 0;
            });
        }

        if (op == FabArrayBase::COPY)
        {
            detail::fab_to_fab_atomic_cpy<value_type, BUF>(
                recv_copy_tags, 0, dcomp, ncomp, masks);
        }
        else
        {
            detail::fab_to_fab_atomic_add<value_type, BUF>(
                recv_copy_tags, 0, dcomp, ncomp, masks);
        }
        Gpu::streamSynchronize();
    }
}

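// Unpack decision tree, in the order checked above: (1) deterministic ADD
// goes through the tile-sorted path; (2) thread-safe or atomic-friendly data
// uses the cached TagVector kernels; (3) everything else falls back to the
// mask-based atomic copy/add, which tolerates overlapping destinations at
// the cost of extra mask traffic.
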
template <class FAB>
template <typename BUF>
void
FabArray<FAB>::pack_send_buffer_cpu (FabArray<FAB> const& src, int scomp, int ncomp,
                                     Vector<char*> const& send_data,
                                     Vector<std::size_t> const& send_size,
                                     Vector<CopyComTagsContainer const*> const& send_cctc)
{
    amrex::ignore_unused(send_size);

    auto const N_snds = static_cast<int>(send_data.size());
    if (N_snds == 0) { return; }

#ifdef AMREX_USE_OMP
#pragma omp parallel for
#endif
    for (int j = 0; j < N_snds; ++j)
    {
        if (send_size[j] > 0)
        {
            char* dptr = send_data[j];
            auto const& cctc = *send_cctc[j];
            for (auto const& tag : cctc)
            {
                const Box& bx = tag.sbox;
                auto const sfab = src.array(tag.srcIndex);
                auto pfab = amrex::makeArray4((BUF*)(dptr), bx, ncomp);
                amrex::LoopConcurrentOnCpu(bx, ncomp,
                [=] (int ii, int jj, int kk, int n) noexcept
                {
                    pfab(ii,jj,kk,n) = static_cast<BUF>(sfab(ii,jj,kk,n+scomp));
                });
                dptr += (bx.numPts() * ncomp * sizeof(BUF));
            }
            BL_ASSERT(dptr <= send_data[j] + send_size[j]);
        }
    }
}

template <class FAB>
template <typename BUF>
void
FabArray<FAB>::unpack_recv_buffer_cpu (FabArray<FAB>& dst, int dcomp, int ncomp,
                                       Vector<char*> const& recv_data,
                                       Vector<std::size_t> const& recv_size,
                                       Vector<CopyComTagsContainer const*> const& recv_cctc,
                                       CpOp op, bool is_thread_safe)
{
    amrex::ignore_unused(recv_size);

    auto const N_rcvs = static_cast<int>(recv_cctc.size());
    if (N_rcvs == 0) { return; }

    if (is_thread_safe)
    {
#ifdef AMREX_USE_OMP
#pragma omp parallel for
#endif
        for (int k = 0; k < N_rcvs; ++k)
        {
            if (recv_size[k] > 0)
            {
                const char* dptr = recv_data[k];
                auto const& cctc = *recv_cctc[k];
                for (auto const& tag : cctc)
                {
                    const Box& bx = tag.dbox;
                    FAB& dfab = dst[tag.dstIndex];
                    if (op == FabArrayBase::COPY)
                    {
                        dfab.template copyFromMem<RunOn::Host, BUF>(bx, dcomp, ncomp, dptr);
                    }
                    else
                    {
                        dfab.template addFromMem<RunOn::Host, BUF>(tag.dbox, dcomp, ncomp, dptr);
                    }
                    dptr += bx.numPts() * ncomp * sizeof(BUF);
                }
                BL_ASSERT(dptr <= recv_data[k] + recv_size[k]);
            }
        }
    }
    else
    {
        LayoutData<Vector<VoidCopyTag> > recv_copy_tags(dst.boxArray(),dst.DistributionMap());
        for (int k = 0; k < N_rcvs; ++k)
        {
            if (recv_size[k] > 0)
            {
                const char* dptr = recv_data[k];
                auto const& cctc = *recv_cctc[k];
                for (auto const& tag : cctc)
                {
                    recv_copy_tags[tag.dstIndex].push_back({dptr,tag.dbox});
                    dptr += tag.dbox.numPts() * ncomp * sizeof(BUF);
                }
                BL_ASSERT(dptr <= recv_data[k] + recv_size[k]);
            }
        }

#ifdef AMREX_USE_OMP
#pragma omp parallel
#endif
        for (MFIter mfi(dst); mfi.isValid(); ++mfi)
        {
            const auto& tags = recv_copy_tags[mfi];
            auto dfab = dst.array(mfi);
            for (auto const & tag : tags)
            {
                auto pfab = amrex::makeArray4((BUF const*)(tag.p), tag.dbox, ncomp);
                if (op == FabArrayBase::COPY)
                {
                    amrex::LoopConcurrentOnCpu(tag.dbox, ncomp,
                    [=] (int i, int j, int k, int n) noexcept
                    {
                        dfab(i,j,k,n+dcomp) = pfab(i,j,k,n);
                    });
                }
                else
                {
                    amrex::LoopConcurrentOnCpu(tag.dbox, ncomp,
                    [=] (int i, int j, int k, int n) noexcept
                    {
                        dfab(i,j,k,n+dcomp) += pfab(i,j,k,n);
                    });
                }
            }
        }
    }
}

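// End-to-end CPU round trip, schematically (hedged sketch; buffer setup and
// the MPI exchange are handled by the callers in AMReX_FabArrayCommI.H):
//
//     pack_send_buffer_cpu<Real>(src, scomp, ncomp, send_data, send_size, send_cctc);
//     // ... MPI exchange of send_data -> recv_data ...
//     unpack_recv_buffer_cpu<Real>(dst, dcomp, ncomp, recv_data, recv_size,
//                                  recv_cctc, FabArrayBase::COPY, true);
//
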