template <class T0, class T1>
struct CellStore
{
    AMREX_GPU_DEVICE AMREX_FORCE_INLINE
    void operator() (T0* d, T1 s) const noexcept
    {
        *d = static_cast<T0>(s);
    }
};
template <class T0, class T1>
struct CellAdd
{
    AMREX_GPU_DEVICE AMREX_FORCE_INLINE
    void operator() (T0* d, T1 s) const noexcept
    {
        *d += static_cast<T0>(s);
    }
};
template <class T0, class T1>
struct CellAtomicAdd
{
    template <class U0=T0,
              std::enable_if_t<amrex::HasAtomicAdd<U0>::value, int> = 0>
    AMREX_GPU_DEVICE AMREX_FORCE_INLINE
    void operator() (U0* d, T1 s) const noexcept
    {
        Gpu::Atomic::AddNoRet(d, static_cast<U0>(s));
    }
};
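
// fab_to_fab applies one of the cell functors above to every cell of every
// copy tag in a single fused kernel launch (detail::ParallelFor_doit walks a
// flattened TagVector). Overlapping destination regions are only safe when
// the functor itself is (atomic store or atomic add); otherwise the masked
// overloads further below must be used.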
template <class T0, class T1, class F>
void
fab_to_fab (Vector<Array4CopyTag<T0, T1> > const& copy_tags, int scomp, int dcomp,
            int ncomp, F && f)
{
    TagVector<Array4CopyTag<T0, T1>> tv{copy_tags};

    detail::ParallelFor_doit(tv,
    [=] AMREX_GPU_DEVICE (
        int icell, int ncells, int i, int j, int k, Array4CopyTag<T0, T1> const& tag) noexcept
    {
        if (icell < ncells) {
            for (int n = 0; n < ncomp; ++n) {
                f(&(tag.dfab(i,j,k,n+dcomp)),
                  tag.sfab(i+tag.offset.x,j+tag.offset.y,k+tag.offset.z,n+scomp));
            }
        }
    });
}
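
// Masked store for types whose plain stores are not atomic: each thread
// atomically increments the destination cell's mask entry, and only the
// first arrival (m == 0) writes the cell, so non-atomic stores never race.
// The caller is responsible for zeroing the masks first.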
template <class TagType, class F>
void
fab_to_fab_store (Vector<TagType> const& tags, int scomp, int dcomp,
                  int ncomp, F&& f)
{
    amrex::ParallelFor(tags,
    [=] AMREX_GPU_DEVICE (int i, int j, int k, TagType const& tag) noexcept
    {
        int m = Gpu::Atomic::Add(&(tag.mask(i,j,k)), 1);
        if (m == 0) {
            for (int n = 0; n < ncomp; ++n) {
                f(&(tag.dfab(i,j,k,n+dcomp)),
                  tag.sfab(i+tag.offset.x,j+tag.offset.y,k+tag.offset.z,n+scomp));
            }
        }
    });
}
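
// Masked update for operations without hardware atomics: the mask entry acts
// as a per-cell spin lock. A thread acquires it with an atomic exchange/CAS,
// applies f, releases it, and losing threads back off briefly before retrying.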
template <class TagType, class F>
void
fab_to_fab_other (Vector<TagType> const& tags, int scomp, int dcomp,
                  int ncomp, F&& f)
{
    amrex::ParallelFor(tags,
    [=] AMREX_GPU_DEVICE (int i, int j, int k, TagType const& tag) noexcept
    {
        int* m = &(tag.mask(i,j,k));
        bool my_turn = false;
        while (!my_turn) {
#if defined(AMREX_USE_SYCL)
            my_turn = (Gpu::Atomic::Exch(m, 1) == 0);
#else
            my_turn = (Gpu::Atomic::CAS(m, 0, 1) == 0);
#endif
            if (my_turn) {
#if defined(AMREX_USE_SYCL)
                sycl::atomic_fence(sycl::memory_order::acq_rel, sycl::memory_scope::device);
#else
                __threadfence();
#endif
                for (int n = 0; n < ncomp; ++n) {
                    f(&(tag.dfab(i,j,k,n+dcomp)),
                      tag.sfab(i+tag.offset.x,j+tag.offset.y,k+tag.offset.z,n+scomp));
                }
#if defined(AMREX_USE_SYCL)
                sycl::atomic_fence(sycl::memory_order::acq_rel, sycl::memory_scope::device);
#else
                __threadfence();
#endif
                Gpu::Atomic::Exch(m, 0); // release the lock
            } else {
                // Lost the race: idle briefly before trying again.
#if defined(AMREX_USE_CUDA)
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 700)
                for (int c = 0; c < 2; ++c) {
                    /* ... */
                }
#else
                for (int c = 0; c < 2; ++c) {
                    __asm__ volatile("");
                }
#endif
#elif defined(AMREX_USE_HIP)
                __builtin_amdgcn_s_sleep(1);
#elif defined(AMREX_USE_SYCL)
                for (int c = 0; c < 2; ++c) {
                    __asm__ volatile("");
                }
#endif
            }
        }
    });
}
template <class T0, class T1, class F>
void
fab_to_fab (Vector<Array4CopyTag<T0, T1> > const& copy_tags, int scomp, int dcomp,
            int ncomp, F && f, Vector<Array4Tag<int> > const& masks)
{
    using TagType = Array4MaskCopyTag<T0, T1>;
    Vector<TagType> tags;
    const int N = copy_tags.size();
    tags.reserve(N);
    for (int i = 0; i < N; ++i) {
        tags.push_back(TagType{copy_tags[i].dfab, copy_tags[i].sfab, masks[i].dfab,
                               copy_tags[i].dbox, copy_tags[i].offset});
    }

    if constexpr (std::is_same_v<F, CellStore<T0,T1>>)
    {
        fab_to_fab_store(tags, scomp, dcomp, ncomp, std::forward<F>(f));
    }
    else
    {
        fab_to_fab_other(tags, scomp, dcomp, ncomp, std::forward<F>(f));
    }
}
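
// The *_atomic_cpy and *_atomic_add pairs below choose an implementation at
// compile time: when the value type supports atomic stores (resp. atomic
// adds) the unmasked fab_to_fab is used and the masks argument is ignored;
// otherwise the masked path supplies the needed mutual exclusion.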
template <typename T0, typename T1,
          std::enable_if_t<amrex::IsStoreAtomic<T0>::value, int> = 0>
void
fab_to_fab_atomic_cpy (Vector<Array4CopyTag<T0, T1> > const& copy_tags, int scomp,
                       int dcomp, int ncomp, Vector<Array4Tag<int> > const&)
{
    fab_to_fab<T0, T1>(copy_tags, scomp, dcomp, ncomp, CellStore<T0, T1>());
}
template <typename T0, typename T1,
          std::enable_if_t<!amrex::IsStoreAtomic<T0>::value, int> = 0>
void
fab_to_fab_atomic_cpy (Vector<Array4CopyTag<T0, T1> > const& copy_tags, int scomp,
                       int dcomp, int ncomp, Vector<Array4Tag<int> > const& masks)
{
    fab_to_fab(copy_tags, scomp, dcomp, ncomp, CellStore<T0, T1>(), masks);
}
template <typename T0, typename T1,
          std::enable_if_t<amrex::HasAtomicAdd<T0>::value, int> = 0>
void
fab_to_fab_atomic_add (Vector<Array4CopyTag<T0, T1> > const& copy_tags, int scomp,
                       int dcomp, int ncomp, Vector<Array4Tag<int> > const&)
{
    fab_to_fab(copy_tags, scomp, dcomp, ncomp, CellAtomicAdd<T0, T1>());
}
template <typename T0, typename T1,
          std::enable_if_t<!amrex::HasAtomicAdd<T0>::value, int> = 0>
void
fab_to_fab_atomic_add (Vector<Array4CopyTag<T0, T1> > const& copy_tags, int scomp,
                       int dcomp, int ncomp, Vector<Array4Tag<int> > const& masks)
{
    fab_to_fab(copy_tags, scomp, dcomp, ncomp, CellAdd<T0, T1>(), masks);
}
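
// deterministic_fab_to_fab makes the result of combining overlapping tags
// independent of GPU scheduling, which matters when f is a floating-point
// add. The algorithm:
//   1. chop each destination box into tile_size-wide tiles, keying every
//      piece by (destination fab index, tile box);
//   2. sort the pieces, so pieces touching the same destination tile form
//      one contiguous group;
//   3. run one GPU block per group, applying the group's pieces one after
//      another with block-wide barriers in between, so shared cells are
//      always updated in the same (sorted) order.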
template <typename T0, typename T1, class F>
void deterministic_fab_to_fab (Vector<Array4CopyTag<T0,T1>> const& a_tags, int scomp,
                               int dcomp, int ncomp, F const& f)
{
    if (a_tags.empty()) { return; }

    using TagType = Array4CopyTag<T0,T1>;

    struct TiledTag {
        int tag_index;
        std::pair<int,Box> dindex_tilebox;
        bool operator< (TiledTag const& rhs) const noexcept {
            return this->dindex_tilebox < rhs.dindex_tilebox;
        }
        bool operator!= (TiledTag const& rhs) const noexcept {
            return this->dindex_tilebox != rhs.dindex_tilebox;
        }
    };

    Vector<TiledTag> tiled_tags;

    auto const ixtype = a_tags[0].dbox.ixType();

    constexpr int tile_size = 64;
    for (int itag = 0; itag < a_tags.size(); ++itag) {
        auto const& tag = a_tags[itag];
        auto const& dlo = tag.dbox.smallEnd();
        auto const& dhi = tag.dbox.bigEnd();
        IntVect tlo(AMREX_D_DECL(amrex::coarsen<tile_size>(dlo[0]),
                                 amrex::coarsen<tile_size>(dlo[1]),
                                 amrex::coarsen<tile_size>(dlo[2])));
        IntVect thi(AMREX_D_DECL(amrex::coarsen<tile_size>(dhi[0]),
                                 amrex::coarsen<tile_size>(dhi[1]),
                                 amrex::coarsen<tile_size>(dhi[2])));
#if (AMREX_SPACEDIM == 3)
        for (int kt = tlo[2]; kt <= thi[2]; ++kt)
        {
#endif
#if (AMREX_SPACEDIM >= 2)
        for (int jt = tlo[1]; jt <= thi[1]; ++jt)
        {
#endif
        for (int it = tlo[0]; it <= thi[0]; ++it)
        {
            IntVect lo(AMREX_D_DECL(it*tile_size, jt*tile_size, kt*tile_size));
            tiled_tags.push_back(TiledTag{itag, std::make_pair
                                 (tag.dindex, Box(lo, lo+(tile_size-1), ixtype))});
        }
#if (AMREX_SPACEDIM >= 2)
        }
#endif
#if (AMREX_SPACEDIM == 3)
        }
#endif
    }

    std::sort(tiled_tags.begin(), tiled_tags.end());

    // h_ntags holds the group boundaries: tags [h_ntags[b], h_ntags[b+1])
    // share a destination tile and are handled by one GPU block.
    Gpu::HostVector<unsigned int> h_ntags;
    Gpu::HostVector<TagType> h_tags;
    h_tags.reserve(tiled_tags.size());

    for (unsigned int itag = 0; itag < tiled_tags.size(); ++itag) {
        if (itag == 0) {
            h_ntags.push_back(0);
        } else if (tiled_tags[itag-1] != tiled_tags[itag]) {
            h_ntags.push_back(itag);
        }
        auto const& ttag = tiled_tags[itag];
        auto const& btag = a_tags[ttag.tag_index];
        h_tags.push_back({btag.dfab, btag.dindex, btag.sfab,
                          btag.dbox & ttag.dindex_tilebox.second, btag.offset});
    }
    h_ntags.push_back((unsigned int)tiled_tags.size());

    Gpu::DeviceVector<TagType> d_tags(h_tags.size());
    Gpu::DeviceVector<unsigned int> d_ntags(h_ntags.size());
    Gpu::copyAsync(Gpu::hostToDevice,h_tags.begin(),h_tags.end(),d_tags.begin());
    Gpu::copyAsync(Gpu::hostToDevice,h_ntags.begin(),h_ntags.end(),d_ntags.begin());
    auto const* ptag = d_tags.data();
    auto const* pntags = d_ntags.data();
    auto const nblocks = int(h_ntags.size()-1);
    constexpr auto nthreads = 256;
    amrex::launch<nthreads>(nblocks, Gpu::gpuStream(),
#ifdef AMREX_USE_SYCL
    [=] (sycl::nd_item<1> const& item)
    [[sycl::reqd_work_group_size(nthreads)]]
#else
    [=] AMREX_GPU_DEVICE ()
#endif
    {
#ifdef AMREX_USE_SYCL
        Dim1 blockIdx{item.get_group_linear_id()};
        Dim1 threadIdx{item.get_local_linear_id()};
#endif
        for (unsigned int itag = pntags[blockIdx.x]; itag < pntags[blockIdx.x+1]; ++itag) {
            auto const tag = ptag[itag];
            auto ncells = int(tag.dbox.numPts());
            auto const len = amrex::length(tag.dbox);
            auto const lo = amrex::lbound(tag.dbox);
            for (int icell = int(threadIdx.x); icell < ncells; icell += nthreads) {
                int k = icell / (len.x*len.y);
                int j = (icell - k*(len.x*len.y)) / len.x;
                int i = (icell - k*(len.x*len.y)) - j*len.x;
                i += lo.x;
                j += lo.y;
                k += lo.z;
                for (int n = 0; n < ncomp; ++n) {
                    f(tag.dfab.ptr(i,j,k,n+dcomp),
                      tag.sfab(i + tag.offset.x,
                               j + tag.offset.y,
                               k + tag.offset.z, n+scomp));
                }
            }
            // Synchronize the block before the next tag so that cells shared
            // by several tags are updated strictly in sorted order.
            if (itag+1 < pntags[blockIdx.x+1]) {
#ifdef AMREX_USE_SYCL
                sycl::group_barrier(item.get_group());
#else
                __syncthreads();
#endif
            }
        }
    });

    Gpu::streamSynchronize();
}

} // namespace detail
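
// FB_local_copy_cpu performs the intra-process part of FillBoundary on the
// host: thread-safe tag sets are copied directly in an OpenMP loop; otherwise
// tags are grouped per destination fab so each fab is written by one thread.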
template <class FAB>
void
FabArray<FAB>::FB_local_copy_cpu (const FB& TheFB, int scomp, int ncomp)
{
    auto const& LocTags = *(TheFB.m_LocTags);
    auto N_locs = static_cast<int>(LocTags.size());
    if (N_locs == 0) { return; }

    if (TheFB.m_threadsafe_loc)
    {
#ifdef AMREX_USE_OMP
#pragma omp parallel for
#endif
        for (int i = 0; i < N_locs; ++i)
        {
            const CopyComTag& tag = LocTags[i];

            BL_ASSERT(distributionMap[tag.dstIndex] == ParallelDescriptor::MyProc());
            BL_ASSERT(distributionMap[tag.srcIndex] == ParallelDescriptor::MyProc());

            const FAB* sfab = &(this->operator[](tag.srcIndex));
                  FAB* dfab = &(this->operator[](tag.dstIndex));
            dfab->template copy<RunOn::Host>(*sfab, tag.sbox, scomp, tag.dbox, scomp, ncomp);
        }
    }
    else
    {
        LayoutData<Vector<FabCopyTag<FAB> > > loc_copy_tags(boxArray(),DistributionMap());
        for (int i = 0; i < N_locs; ++i)
        {
            const CopyComTag& tag = LocTags[i];
            loc_copy_tags[tag.dstIndex].push_back
                ({this->fabPtr(tag.srcIndex), tag.dbox, tag.sbox.smallEnd()-tag.dbox.smallEnd()});
        }

#ifdef AMREX_USE_OMP
#pragma omp parallel
#endif
        for (MFIter mfi(*this); mfi.isValid(); ++mfi)
        {
            const auto& tags = loc_copy_tags[mfi];
            auto dfab = this->array(mfi);
            for (auto const & tag : tags)
            {
                auto const sfab = tag.sfab->array();
                const auto offset = tag.offset.dim3();
                amrex::LoopConcurrentOnCpu(tag.dbox, ncomp,
                [=] (int i, int j, int k, int n) noexcept
                {
                    dfab(i,j,k,n+scomp) = sfab(i+offset.x,j+offset.y,k+offset.z,n+scomp);
                });
            }
        }
    }
}
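
// FB_local_add_cpu is the additive variant. Sources are first copied into
// per-tag temporaries, then added fab by fab in a fixed order, so the result
// does not depend on thread scheduling.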
template <class FAB>
void
FabArray<FAB>::FB_local_add_cpu (const FB& TheFB, int scomp, int ncomp)
{
    auto const& LocTags = *(TheFB.m_LocTags);
    auto N_locs = static_cast<int>(LocTags.size());
    if (N_locs == 0) { return; }

    LayoutData<Vector<FabCopyTag<FAB> > > loc_copy_tags(boxArray(),DistributionMap());
    std::vector<FAB> src_fabs(N_locs);
    for (int itag = 0; itag < N_locs; ++itag) {
        const CopyComTag& tag = LocTags[itag];
        src_fabs[itag].resize(tag.sbox,ncomp);
        loc_copy_tags[tag.dstIndex].push_back
            ({&(src_fabs[itag]), tag.dbox, tag.sbox.smallEnd()-tag.dbox.smallEnd()});
    }

#ifdef AMREX_USE_OMP
#pragma omp parallel for
#endif
    for (int itag = 0; itag < N_locs; ++itag) {
        const CopyComTag& tag = LocTags[itag];
        src_fabs[itag].template copy<RunOn::Host>(this->operator[](tag.srcIndex), scomp, 0, ncomp);
    }

#ifdef AMREX_USE_OMP
#pragma omp parallel
#endif
    for (MFIter mfi(*this); mfi.isValid(); ++mfi)
    {
        const auto& tags = loc_copy_tags[mfi];
        const auto& dfab = this->array(mfi);
        for (auto const & tag : tags)
        {
            auto const sfab = tag.sfab->array();
            const auto offset = tag.offset.dim3();
            amrex::LoopConcurrentOnCpu(tag.dbox, ncomp,
            [&] (int i, int j, int k, int n) noexcept
            {
                dfab(i,j,k,n+scomp) += sfab(i+offset.x,j+offset.y,k+offset.z,n);
            });
        }
    }
}
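
// FB_get_local_copy_tag_vector flattens the local copy tags into a device
// TagVector and caches it keyed by the FillBoundary pattern id (TheFB.m_id),
// so repeated exchanges on the same BoxArray reuse the device-side tag data.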
template <class FAB>
TagVector<Array4CopyTag<typename FabArray<FAB>::value_type>> const*
FabArray<FAB>::FB_get_local_copy_tag_vector (const FB& TheFB)
{
    using TagType = Array4CopyTag<value_type>;

    auto const& LocTags = *(TheFB.m_LocTags);
    int N_locs = LocTags.size();

    TagVector<TagType> const* tv = nullptr;
    if (auto it = m_fb_local_copy_handler.find(TheFB.m_id);
        it != m_fb_local_copy_handler.end())
    {
        tv = it->second.get();
    }
    else
    {
        Vector<TagType> loc_copy_tags;
        loc_copy_tags.reserve(N_locs);
        for (int i = 0; i < N_locs; ++i)
        {
            const CopyComTag& tag = LocTags[i];
            int li = this->localindex(tag.dstIndex);
            loc_copy_tags.push_back
                ({this->atLocalIdx(li).array(), tag.dstIndex,
                  this->fabPtr(tag.srcIndex)->const_array(),
                  tag.dbox,
                  (tag.sbox.smallEnd()-tag.dbox.smallEnd()).dim3()});
        }
        auto utv = std::make_unique<TagVector<TagType>>(loc_copy_tags);
        tv = utv.get();
        m_fb_local_copy_handler[TheFB.m_id] = std::move(utv);
    }
    return tv;
}
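
// FB_local_copy_gpu: thread-safe tag sets go through the cached TagVector in
// one kernel; otherwise integer mask fabs are allocated over the destination
// fabs and the masked atomic-copy path provides mutual exclusion.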
template <class FAB>
void
FabArray<FAB>::FB_local_copy_gpu (const FB& TheFB, int scomp, int ncomp)
{
    auto const& LocTags = *(TheFB.m_LocTags);
    int N_locs = LocTags.size();
    if (N_locs == 0) { return; }

    using TagType = Array4CopyTag<value_type>;

    if (TheFB.m_threadsafe_loc)
    {
        auto* tv = FB_get_local_copy_tag_vector(TheFB);

        detail::ParallelFor_doit(*tv,
        [=] AMREX_GPU_DEVICE (
            int icell, int ncells, int i, int j, int k, TagType const& tag) noexcept
        {
            if (icell < ncells) {
                for (int n = 0; n < ncomp; ++n) {
                    tag.dfab(i,j,k,n+scomp) = tag.sfab(i+tag.offset.x,
                                                       j+tag.offset.y,
                                                       k+tag.offset.z,n+scomp);
                }
            }
        });
    }
    else
    {
        Vector<TagType> loc_copy_tags;
        loc_copy_tags.reserve(N_locs);

        Vector<BaseFab<int> > maskfabs(this->local_size());
        Vector<Array4Tag<int> > masks_unique;
        masks_unique.reserve(this->local_size());
        Vector<Array4Tag<int> > masks;
        masks.reserve(N_locs);

        for (int i = 0; i < N_locs; ++i)
        {
            const CopyComTag& tag = LocTags[i];
            int li = this->localindex(tag.dstIndex);
            loc_copy_tags.push_back
                ({this->atLocalIdx(li).array(), tag.dstIndex,
                  this->fabPtr(tag.srcIndex)->const_array(),
                  tag.dbox,
                  (tag.sbox.smallEnd()-tag.dbox.smallEnd()).dim3()});

            if (!maskfabs[li].isAllocated()) {
                maskfabs[li].resize(this->atLocalIdx(li).box());
                masks_unique.push_back({maskfabs[li].array()});
            }
            masks.push_back({maskfabs[li].array()});
        }

        amrex::ParallelFor(masks_unique,
        [=] AMREX_GPU_DEVICE (int i, int j, int k, Array4Tag<int> const& msk) noexcept
        {
            msk.dfab(i,j,k) = 0;
        });

        detail::fab_to_fab_atomic_cpy<value_type, value_type>(
            loc_copy_tags, scomp, scomp, ncomp, masks);

        // maskfabs are local; wait for the kernels before they are freed.
        Gpu::streamSynchronize();
    }
}
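
// FB_local_add_gpu stages the update in two passes: pass 1 copies each source
// region into a temporary fab; pass 2 adds the temporaries into the
// destinations, either tile-ordered (deterministic) or with atomic adds.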
template <class FAB>
void
FabArray<FAB>::FB_local_add_gpu (const FB& TheFB, int scomp, int ncomp, bool deterministic)
{
    auto const& LocTags = *(TheFB.m_LocTags);
    int N_locs = LocTags.size();
    if (N_locs == 0) { return; }

    using TagType = Array4CopyTag<value_type>;
    Vector<TagType> loc_copy_tags_1;
    Vector<TagType> loc_copy_tags_2;
    loc_copy_tags_1.reserve(N_locs);
    loc_copy_tags_2.reserve(N_locs);

    std::vector<FAB> src_fabs(N_locs);
    for (int itag = 0; itag < N_locs; ++itag) {
        const CopyComTag& tag = LocTags[itag];
        src_fabs[itag].resize(tag.sbox,ncomp);
        // Pass 1: copy the source region into the temporary fab. The
        // component shift is baked into the Array4s so that both passes run
        // with scomp = dcomp = 0.
        loc_copy_tags_1.push_back(
            TagType{src_fabs[itag].array(), -1,
                    this->const_array(tag.srcIndex, scomp),
                    tag.sbox, Dim3{0,0,0}});
        // Pass 2: add the temporary fab into the destination region.
        loc_copy_tags_2.push_back(
            TagType{this->array(tag.dstIndex, scomp), tag.dstIndex,
                    src_fabs[itag].const_array(), tag.dbox,
                    (tag.sbox.smallEnd()-tag.dbox.smallEnd()).dim3()});
    }

    detail::fab_to_fab(loc_copy_tags_1, 0, 0, ncomp,
                       detail::CellStore<value_type, value_type>{});

    if (deterministic) {
        detail::deterministic_fab_to_fab(loc_copy_tags_2, 0, 0, ncomp,
                                         detail::CellAdd<value_type,value_type>{});
    } else {
        detail::fab_to_fab(loc_copy_tags_2, 0, 0, ncomp,
                           detail::CellAtomicAdd<value_type, value_type>{});
    }

    // src_fabs are local; wait for the kernels before they are freed.
    Gpu::streamSynchronize();
}
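
// CMD_local_setVal_gpu and CMD_remote_setVal_gpu assign a constant to the
// regions described by the communication metadata (the local tags and the
// would-be receive tags), e.g. to fill ghost cells instead of communicating.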
template <class FAB>
void
FabArray<FAB>::CMD_local_setVal_gpu (typename FabArray<FAB>::value_type x,
                                     const CommMetaData& thecmd, int scomp, int ncomp)
{
    auto const& LocTags = *(thecmd.m_LocTags);
    int N_locs = LocTags.size();
    if (N_locs == 0) { return; }

    using TagType = Array4BoxTag<value_type>;
    Vector<TagType> loc_setval_tags;
    loc_setval_tags.reserve(N_locs);

    for (int i = 0; i < N_locs; ++i)
    {
        const CopyComTag& tag = LocTags[i];
        loc_setval_tags.push_back({this->array(tag.dstIndex), tag.dbox});
    }

    amrex::ParallelFor(loc_setval_tags, ncomp,
    [x,scomp] AMREX_GPU_DEVICE (int i, int j, int k, int n, TagType const& tag) noexcept
    {
        tag.dfab(i,j,k,n+scomp) = x;
    });
}
template <class FAB>
void
FabArray<FAB>::CMD_remote_setVal_gpu (typename FabArray<FAB>::value_type x,
                                      const CommMetaData& thecmd, int scomp, int ncomp)
{
    auto const& RcvTags = *(thecmd.m_RcvTags);

    using TagType = Array4BoxTag<value_type>;
    Vector<TagType> rcv_setval_tags;

    for (auto it = RcvTags.begin(); it != RcvTags.end(); ++it) {
        for (auto const& tag: it->second) {
            rcv_setval_tags.push_back({this->array(tag.dstIndex), tag.dbox});
        }
    }

    if (rcv_setval_tags.empty()) { return; }

    amrex::ParallelFor(rcv_setval_tags, ncomp,
    [x,scomp] AMREX_GPU_DEVICE (int i, int j, int k, int n, TagType const& tag) noexcept
    {
        tag.dfab(i,j,k,n+scomp) = x;
    });
}
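
// The cuda-graph implementations below record the per-tag copy kernels into
// a CUDA graph the first time a communication pattern is seen and replay the
// stored graph afterwards, updating only the CopyMemory parameters.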
#if defined(__CUDACC__) && defined (AMREX_USE_CUDA)

template <class FAB>
void
FabArray<FAB>::FB_local_copy_cuda_graph_1 (const FB& TheFB, int scomp, int ncomp)
{
    const int N_locs = (*TheFB.m_LocTags).size();
    LayoutData<Vector<FabCopyTag<FAB> > > loc_copy_tags(boxArray(),DistributionMap());
    for (int i = 0; i < N_locs; ++i)
    {
        const CopyComTag& tag = (*TheFB.m_LocTags)[i];

        BL_ASSERT(distributionMap[tag.dstIndex] == ParallelDescriptor::MyProc());
        BL_ASSERT(distributionMap[tag.srcIndex] == ParallelDescriptor::MyProc());

        loc_copy_tags[tag.dstIndex].push_back
            ({this->fabPtr(tag.srcIndex), tag.dbox, tag.sbox.smallEnd()-tag.dbox.smallEnd()});
    }

    // Record the graph if it has not been built yet.
    if ( !(TheFB.m_localCopy.ready()) )
    {
        const_cast<FB&>(TheFB).m_localCopy.resize(N_locs);

        int idx = 0;
        for (MFIter mfi(*this, MFItInfo().DisableDeviceSync()); mfi.isValid(); ++mfi)
        {
            amrex::Gpu::Device::startGraphRecording( (mfi.LocalIndex() == 0),
                                                     const_cast<FB&>(TheFB).m_localCopy.getHostPtr(0),
                                                     (TheFB).m_localCopy.getDevicePtr(0),
                                                     std::size_t(sizeof(CopyMemory)*N_locs) );

            const auto& tags = loc_copy_tags[mfi];
            for (auto const & tag : tags)
            {
                const auto offset = tag.offset.dim3();
                CopyMemory* cmem = TheFB.m_localCopy.getDevicePtr(idx++);
                AMREX_HOST_DEVICE_FOR_3D (tag.dbox, i, j, k,
                {
                    // Build the Array4s from the recorded CopyMemory.
                    auto const dst = cmem->getDst<value_type>();
                    auto const src = cmem->getSrc<value_type>();
                    for (int n = 0; n < cmem->ncomp; ++n) {
                        dst(i,j,k,(cmem->scomp)+n) = src(i+offset.x,j+offset.y,k+offset.z,(cmem->scomp)+n);
                    }
                });
            }

            bool last_iter = mfi.LocalIndex() == (this->local_size()-1);
            cudaGraphExec_t graphExec = amrex::Gpu::Device::stopGraphRecording(last_iter);
            if (last_iter) { const_cast<FB&>(TheFB).m_localCopy.setGraph( graphExec ); }
        }
    }

    // Update the graph's copy parameters for this invocation.
    int idx = 0;
    for (MFIter mfi(*this); mfi.isValid(); ++mfi)
    {
        auto const dst_array = this->array(mfi);
        const auto& tags = loc_copy_tags[mfi];
        for (auto const & tag : tags)
        {
            const_cast<FB&>(TheFB).m_localCopy.setParams(idx++, makeCopyMemory(tag.sfab->array(),
                                                                               dst_array,
                                                                               scomp, ncomp));
        }
    }

    // Launch the graph.
    TheFB.m_localCopy.executeGraph();
}
template <class FAB>
void
FabArray<FAB>::FB_local_copy_cuda_graph_n (const FB& TheFB, int scomp, int ncomp)
{
    const int N_locs = TheFB.m_LocTags->size();

    int launches = 0;
    LayoutData<Vector<FabCopyTag<FAB> > > loc_copy_tags(boxArray(),DistributionMap());
    for (int i = 0; i < N_locs; ++i)
    {
        const CopyComTag& tag = (*TheFB.m_LocTags)[i];

        BL_ASSERT(ParallelDescriptor::sameTeam(distributionMap[tag.dstIndex]));
        BL_ASSERT(ParallelDescriptor::sameTeam(distributionMap[tag.srcIndex]));

        if (distributionMap[tag.dstIndex] == ParallelDescriptor::MyProc())
        {
            loc_copy_tags[tag.dstIndex].push_back
                ({this->fabPtr(tag.srcIndex), tag.dbox, tag.sbox.smallEnd()-tag.dbox.smallEnd()});
            ++launches;
        }
    }

    if ( !(TheFB.m_localCopy.ready()) )
    {
        const_cast<FB&>(TheFB).m_localCopy.resize(launches);

        int idx = 0;
        int cuda_stream = 0;
        for (MFIter mfi(*this, MFItInfo().DisableDeviceSync()); mfi.isValid(); ++mfi)
        {
            const auto& tags = loc_copy_tags[mfi];
            for (int t = 0; t<tags.size(); ++t)
            {
                Gpu::Device::setStreamIndex(cuda_stream++);
                amrex::Gpu::Device::startGraphRecording( (idx == 0),
                                                         const_cast<FB&>(TheFB).m_localCopy.getHostPtr(0),
                                                         (TheFB).m_localCopy.getDevicePtr(0),
                                                         std::size_t(sizeof(CopyMemory)*launches) );

                const auto& tag = tags[t];
                const Dim3 offset = tag.offset.dim3();

                CopyMemory* cmem = TheFB.m_localCopy.getDevicePtr(idx++);
                AMREX_HOST_DEVICE_FOR_3D (tag.dbox, i, j, k,
                {
                    auto const dst = cmem->getDst<value_type>();
                    auto const src = cmem->getSrc<value_type>();
                    for (int n = 0; n < cmem->ncomp; ++n) {
                        dst(i,j,k,(cmem->scomp)+n) = src(i+offset.x,j+offset.y,k+offset.z,(cmem->scomp)+n);
                    }
                });

                bool last_iter = idx == launches;
                cudaGraphExec_t graphExec = Gpu::Device::stopGraphRecording(last_iter);
                if (last_iter) { const_cast<FB&>(TheFB).m_localCopy.setGraph( graphExec ); }
            }
        }
        Gpu::Device::resetStreamIndex();
    }

    // Update the graph's copy parameters for this invocation.
    int idx = 0;
    for (MFIter mfi(*this); mfi.isValid(); ++mfi)
    {
        const auto& dst_array = this->array(mfi);
        const auto& tags = loc_copy_tags[mfi];
        for (auto const & tag : tags)
        {
            const_cast<FB&>(TheFB).m_localCopy.setParams(idx++, makeCopyMemory(tag.sfab->array(),
                                                                               dst_array,
                                                                               scomp, ncomp));
        }
    }

    // Launch the graph without synchronizing; the local work is independent.
    TheFB.m_localCopy.executeGraph(false);
}

#endif /* __CUDACC__ && AMREX_USE_CUDA */
#if defined(__CUDACC__) && defined(AMREX_USE_CUDA)

template <class FAB>
void
FabArray<FAB>::FB_pack_send_buffer_cuda_graph (const FB& TheFB, int scomp, int ncomp,
                                               Vector<char*>& send_data,
                                               Vector<std::size_t> const& send_size,
                                               Vector<typename FabArray<FAB>::CopyComTagsContainer const*> const& send_cctc)
{
    const int N_snds = send_data.size();
    if (N_snds == 0) { return; }

    if ( !(TheFB.m_copyToBuffer.ready()) )
    {
        // Count the launches to size the graph.
        int launches = 0;
        for (int send = 0; send < N_snds; ++send) {
            if (send_size[send] > 0) {
                launches += send_cctc[send]->size();
            }
        }
        const_cast<FB&>(TheFB).m_copyToBuffer.resize(launches);

        int idx = 0;
        for (Gpu::StreamIter sit(N_snds,Gpu::StreamItInfo().DisableDeviceSync());
             sit.isValid(); ++sit)
        {
            amrex::Gpu::Device::startGraphRecording( (sit() == 0),
                                                     const_cast<FB&>(TheFB).m_copyToBuffer.getHostPtr(0),
                                                     (TheFB).m_copyToBuffer.getDevicePtr(0),
                                                     std::size_t(sizeof(CopyMemory)*launches) );

            const int j = sit();
            if (send_size[j] > 0)
            {
                auto const& cctc = *send_cctc[j];
                for (auto const& tag : cctc)
                {
                    const Box& bx = tag.sbox;
                    CopyMemory* cmem = TheFB.m_copyToBuffer.getDevicePtr(idx++);
                    AMREX_HOST_DEVICE_FOR_3D (bx, ii, jj, kk,
                    {
                        auto const pfab = cmem->getDst<value_type>();
                        auto const sfab = cmem->getSrc<value_type>();
                        for (int n = 0; n < cmem->ncomp; ++n)
                        {
                            pfab(ii,jj,kk,n) = sfab(ii,jj,kk,n+(cmem->scomp));
                        }
                    });
                }
            }

            bool last_iter = sit() == (N_snds-1);
            cudaGraphExec_t graphExec = amrex::Gpu::Device::stopGraphRecording(last_iter);
            if (last_iter) { const_cast<FB&>(TheFB).m_copyToBuffer.setGraph( graphExec ); }
        }
    }

    // Update the graph's copy parameters for this invocation.
    int idx = 0;
    for (int send = 0; send < N_snds; ++send)
    {
        const int j = send;
        if (send_size[j] > 0)
        {
            char* dptr = send_data[j];
            auto const& cctc = *send_cctc[j];
            for (auto const& tag : cctc)
            {
                const_cast<FB&>(TheFB).m_copyToBuffer.setParams(idx++, makeCopyMemory(this->array(tag.srcIndex),
                                                                                      amrex::makeArray4((value_type*)(dptr),
                                                                                                        tag.sbox, ncomp),
                                                                                      scomp, ncomp));
                dptr += (tag.sbox.numPts() * ncomp * sizeof(value_type));
            }
            BL_ASSERT(dptr <= send_data[j] + send_size[j]);
        }
    }

    // Launch the graph synchronously so the buffer is complete before the
    // sends are posted.
    TheFB.m_copyToBuffer.executeGraph();
}
template <class FAB>
void
FabArray<FAB>::FB_unpack_recv_buffer_cuda_graph (const FB& TheFB, int dcomp, int ncomp,
                                                 Vector<char*> const& recv_data,
                                                 Vector<std::size_t> const& recv_size,
                                                 Vector<CopyComTagsContainer const*> const& recv_cctc,
                                                 bool /*is_thread_safe*/)
{
    const int N_rcvs = recv_cctc.size();
    if (N_rcvs == 0) { return; }

    int launches = 0;
    LayoutData<Vector<VoidCopyTag> > recv_copy_tags(boxArray(),DistributionMap());
    for (int k = 0; k < N_rcvs; ++k)
    {
        if (recv_size[k] > 0)
        {
            const char* dptr = recv_data[k];
            auto const& cctc = *recv_cctc[k];
            for (auto const& tag : cctc)
            {
                recv_copy_tags[tag.dstIndex].push_back({dptr,tag.dbox});
                dptr += tag.dbox.numPts() * ncomp * sizeof(value_type);
                ++launches;
            }
            BL_ASSERT(dptr <= recv_data[k] + recv_size[k]);
        }
    }

    if ( !(TheFB.m_copyFromBuffer.ready()) )
    {
        const_cast<FB&>(TheFB).m_copyFromBuffer.resize(launches);

        int idx = 0;
        for (MFIter mfi(*this, MFItInfo().DisableDeviceSync()); mfi.isValid(); ++mfi)
        {
            amrex::Gpu::Device::startGraphRecording( (mfi.LocalIndex() == 0),
                                                     const_cast<FB&>(TheFB).m_copyFromBuffer.getHostPtr(0),
                                                     (TheFB).m_copyFromBuffer.getDevicePtr(0),
                                                     std::size_t(sizeof(CopyMemory)*launches) );

            const auto& tags = recv_copy_tags[mfi];
            for (auto const & tag : tags)
            {
                CopyMemory* cmem = TheFB.m_copyFromBuffer.getDevicePtr(idx++);
                AMREX_HOST_DEVICE_FOR_3D (tag.dbox, i, j, k,
                {
                    auto const pfab = cmem->getSrc<value_type>();
                    auto const dfab = cmem->getDst<value_type>();
                    for (int n = 0; n < cmem->ncomp; ++n)
                    {
                        dfab(i,j,k,n+(cmem->scomp)) = pfab(i,j,k,n);
                    }
                });
            }

            bool last_iter = mfi.LocalIndex() == (this->local_size()-1);
            cudaGraphExec_t graphExec = amrex::Gpu::Device::stopGraphRecording(last_iter);
            if (last_iter) { const_cast<FB&>(TheFB).m_copyFromBuffer.setGraph( graphExec ); }
        }
    }

    // Update the graph's copy parameters for this invocation.
    int idx = 0;
    for (MFIter mfi(*this); mfi.isValid(); ++mfi)
    {
        auto dst_array = this->array(mfi);
        const auto & tags = recv_copy_tags[mfi];
        for (auto const & tag : tags)
        {
            const_cast<FB&>(TheFB).m_copyFromBuffer.setParams(idx++, makeCopyMemory(amrex::makeArray4((value_type*)(tag.p),
                                                                                                      tag.dbox, ncomp),
                                                                                    dst_array,
                                                                                    dcomp, ncomp));
        }
    }

    // Launch the graph synchronously; the receive buffer is freed next.
    TheFB.m_copyFromBuffer.executeGraph();
}

#endif /* __CUDACC__ && AMREX_USE_CUDA */
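
// get_send_copy_tag_vector builds the device tag vector that maps each send
// box to its byte offset in the packed message buffer, and caches it keyed by
// (pattern id, sizeof(BUF), ncomp).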
template <class FAB>
template <typename BUF>
auto
FabArray<FAB>::get_send_copy_tag_vector (Vector<char*> const& send_data,
                                         Vector<std::size_t> const& send_size,
                                         Vector<CopyComTagsContainer const*> const& send_cctc,
                                         int ncomp, std::uint64_t id) const
{
    // TagType couples the source fab's Array4 with the tag's byte offset in
    // the message buffer and the box to pack.
    auto kit = std::find_if(send_cctc.begin(), send_cctc.end(),
                            [] (auto const* x) { return x != nullptr; });
    if (kit == send_cctc.end()) {
        return static_cast<TagVector<TagType> const*>(nullptr); // nothing to send
    }

    auto get_tags = [&] () -> Vector<TagType>
    {
        Vector<TagType> snd_copy_tags;
        char* pbuf = send_data[0];
        const int N_snds = send_data.size();
        for (int j = 0; j < N_snds; ++j)
        {
            if (send_size[j] > 0)
            {
                char* dptr = send_data[j];
                auto const& cctc = *send_cctc[j];
                for (auto const& tag : cctc)
                {
                    snd_copy_tags.emplace_back
                        (TagType{this->const_array(tag.srcIndex), dptr-pbuf, tag.sbox});
                    dptr += (tag.sbox.numPts() * ncomp * sizeof(BUF));
                }
            }
        }
        return snd_copy_tags;
    };

    TagVector<TagType> const* tv = nullptr;
    std::tuple<std::uint64_t,std::size_t,int> key{id, sizeof(BUF), ncomp};
    if (auto it = m_send_copy_handler.find(key); it != m_send_copy_handler.end()) {
        tv = it->second.get();
    } else {
        if (m_send_copy_handler.size() > 32) {
            // Keep the cache bounded.
            m_send_copy_handler.clear();
        }
        auto snd_copy_tags = get_tags();
        auto utv = std::make_unique<TagVector<TagType>>(snd_copy_tags);
        tv = utv.get();
        m_send_copy_handler[key] = std::move(utv);
    }
    return tv;
}
template <class FAB>
template <typename BUF>
void
FabArray<FAB>::pack_send_buffer_gpu (FabArray<FAB> const& src, int scomp, int ncomp,
                                     Vector<char*> const& send_data,
                                     Vector<std::size_t> const& send_size,
                                     Vector<CopyComTagsContainer const*> const& send_cctc,
                                     std::uint64_t id)
{
    const int N_snds = send_data.size();
    if (N_snds == 0) { return; }

    auto* tv = src.template get_send_copy_tag_vector<BUF>
        (send_data, send_size, send_cctc, ncomp, id);
    if (tv == nullptr) { return; }

    char* pbuffer = send_data[0];

    detail::ParallelFor_doit(*tv,
    [=] AMREX_GPU_DEVICE (
        int icell, int ncells, int i, int j, int k, TagType const& tag) noexcept
    {
        if (icell < ncells) {
            // Array4 view of this tag's slot in the packed send buffer.
            auto dfab = amrex::makeArray4((BUF*)(pbuffer+tag.offset), tag.dbox, ncomp);
            for (int n = 0; n < ncomp; ++n) {
                dfab(i,j,k,n) = (BUF)tag.sfab(i,j,k,n+scomp);
            }
        }
    });

    // The buffer must be fully packed before the sends are posted.
    Gpu::streamSynchronize();
}
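
// get_recv_copy_tag_vector is the receive-side counterpart: each tag couples
// a destination fab with the offset of its data in the receive buffer.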
template <class FAB>
template <typename BUF>
auto
FabArray<FAB>::get_recv_copy_tag_vector (Vector<char*> const& recv_data,
                                         Vector<std::size_t> const& recv_size,
                                         Vector<CopyComTagsContainer const*> const& recv_cctc,
                                         int ncomp, std::uint64_t id)
{
    // TagType couples the destination fab's Array4 with the tag's byte offset
    // in the message buffer and the box to unpack.
    auto kit = std::find_if(recv_cctc.begin(), recv_cctc.end(),
                            [] (auto const* x) { return x != nullptr; });
    if (kit == recv_cctc.end()) {
        return static_cast<TagVector<TagType> const*>(nullptr); // nothing to unpack
    }

    auto get_tags = [&] () -> Vector<TagType>
    {
        Vector<TagType> recv_copy_tags;
        char* pbuf = recv_data[0];
        const int N_rcvs = recv_cctc.size();
        for (int k = 0; k < N_rcvs; ++k)
        {
            if (recv_size[k] > 0)
            {
                char* dptr = recv_data[k];
                auto const& cctc = *recv_cctc[k];
                for (auto const& tag : cctc)
                {
                    const int li = this->localindex(tag.dstIndex);
                    recv_copy_tags.emplace_back
                        (TagType{this->atLocalIdx(li).array(), dptr-pbuf, tag.dbox});
                    dptr += tag.dbox.numPts() * ncomp * sizeof(BUF);
                }
            }
        }
        return recv_copy_tags;
    };

    TagVector<TagType> const* tv = nullptr;
    std::tuple<std::uint64_t,std::size_t,int> key{id, sizeof(BUF), ncomp};
    if (auto it = m_recv_copy_handler.find(key); it != m_recv_copy_handler.end()) {
        tv = it->second.get();
    } else {
        if (m_recv_copy_handler.size() > 32) {
            // Keep the cache bounded.
            m_recv_copy_handler.clear();
        }
        auto recv_copy_tags = get_tags();
        auto utv = std::make_unique<TagVector<TagType>>(recv_copy_tags);
        tv = utv.get();
        m_recv_copy_handler[key] = std::move(utv);
    }
    return tv;
}
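
// unpack_recv_buffer_gpu has three paths: a deterministic tile-ordered add,
// a direct path over the cached tag vector (plain stores/adds or atomic adds
// depending on thread safety), and a masked fallback for value types without
// the required atomics.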
template <class FAB>
template <typename BUF>
void
FabArray<FAB>::unpack_recv_buffer_gpu (FabArray<FAB>& dst, int dcomp, int ncomp,
                                       Vector<char*> const& recv_data,
                                       Vector<std::size_t> const& recv_size,
                                       Vector<CopyComTagsContainer const*> const& recv_cctc,
                                       CpOp op, bool is_thread_safe, std::uint64_t id,
                                       bool deterministic) // assumed name for the last parameter
{
    const int N_rcvs = recv_cctc.size();
    if (N_rcvs == 0) { return; }

    bool use_mask = false;
    if (!is_thread_safe)
    {
        if ((op == FabArrayBase::COPY && !amrex::IsStoreAtomic<value_type>::value) ||
            (op == FabArrayBase::ADD  && !amrex::HasAtomicAdd <value_type>::value))
        {
            use_mask = true;
        }
    }

    if (deterministic && op == FabArrayBase::ADD)
    {
        // Tile-ordered adds: the result is independent of GPU scheduling.
        using TagType = Array4CopyTag<value_type, BUF>;
        Vector<TagType> tags;
        tags.reserve(N_rcvs);
        for (int k = 0; k < N_rcvs; ++k) {
            if (recv_size[k] > 0) {
                char const* dptr = recv_data[k];
                auto const& cctc = *recv_cctc[k];
                for (auto const& tag : cctc) {
                    tags.emplace_back(
                        TagType{dst.array(tag.dstIndex), tag.dstIndex,
                                amrex::makeArray4((BUF const*)(dptr), tag.dbox, ncomp),
                                tag.dbox, Dim3{0,0,0}});
                    dptr += tag.dbox.numPts() * ncomp * sizeof(BUF);
                }
            }
        }
        detail::deterministic_fab_to_fab<value_type,BUF>
            (tags, 0, dcomp, ncomp, detail::CellAdd<value_type,BUF>{});
    }
    else if (!use_mask)
    {
        auto* tv = dst.template get_recv_copy_tag_vector<BUF>
            (recv_data, recv_size, recv_cctc, ncomp, id);
        if (tv == nullptr) { return; }

        char* pbuffer = recv_data[0];

        if (op == FabArrayBase::COPY)
        {
            detail::ParallelFor_doit(*tv,
            [=] AMREX_GPU_DEVICE (
                int icell, int ncells, int i, int j, int k, TagType const& tag) noexcept
            {
                if (icell < ncells) {
                    auto const sfab = amrex::makeArray4((BUF const*)(pbuffer+tag.offset),
                                                        tag.dbox, ncomp);
                    for (int n = 0; n < ncomp; ++n) {
                        tag.dfab(i,j,k,n+dcomp) = (value_type)sfab(i,j,k,n);
                    }
                }
            });
        }
        else if (op == FabArrayBase::ADD)
        {
            if (is_thread_safe) {
                detail::ParallelFor_doit(*tv,
                [=] AMREX_GPU_DEVICE (
                    int icell, int ncells, int i, int j, int k, TagType const& tag) noexcept
                {
                    if (icell < ncells) {
                        auto const sfab = amrex::makeArray4((BUF const*)(pbuffer+tag.offset),
                                                            tag.dbox, ncomp);
                        for (int n = 0; n < ncomp; ++n) {
                            tag.dfab(i,j,k,n+dcomp) += (value_type)sfab(i,j,k,n);
                        }
                    }
                });
            } else {
                detail::ParallelFor_doit(*tv,
                [=] AMREX_GPU_DEVICE (
                    int icell, int ncells, int i, int j, int k, TagType const& tag) noexcept
                {
                    if (icell < ncells) {
                        auto const sfab = amrex::makeArray4((BUF const*)(pbuffer+tag.offset),
                                                            tag.dbox, ncomp);
                        for (int n = 0; n < ncomp; ++n) {
                            Gpu::Atomic::AddNoRet(tag.dfab.ptr(i,j,k,n+dcomp),
                                                  (value_type)sfab(i,j,k,n));
                        }
                    }
                });
            }
        }
        else
        {
            amrex::Abort("unpack_recv_buffer_gpu: should NOT get here");
        }

        Gpu::streamSynchronize();
    }
    else
    {
        char* pbuffer = recv_data[0];

        using TagType = Array4CopyTag<value_type, BUF>;
        Vector<TagType> recv_copy_tags;
        recv_copy_tags.reserve(N_rcvs);

        Vector<BaseFab<int> > maskfabs(dst.local_size());
        Vector<Array4Tag<int> > masks_unique;
        masks_unique.reserve(dst.local_size());
        Vector<Array4Tag<int> > masks;
        masks.reserve(N_rcvs);

        for (int k = 0; k < N_rcvs; ++k)
        {
            if (recv_size[k] > 0)
            {
                std::size_t offset = recv_data[k]-recv_data[0];
                const char* dptr = pbuffer + offset;
                auto const& cctc = *recv_cctc[k];
                for (auto const& tag : cctc)
                {
                    const int li = dst.localindex(tag.dstIndex);
                    recv_copy_tags.emplace_back(TagType{
                        dst.atLocalIdx(li).array(), tag.dstIndex,
                        amrex::makeArray4((BUF const*)(dptr), tag.dbox, ncomp),
                        tag.dbox, Dim3{0,0,0}});
                    dptr += tag.dbox.numPts() * ncomp * sizeof(BUF);

                    if (!maskfabs[li].isAllocated()) {
                        maskfabs[li].resize(dst.atLocalIdx(li).box());
                        masks_unique.push_back({maskfabs[li].array()});
                    }
                    masks.push_back({maskfabs[li].array()});
                }
            }
        }

        amrex::ParallelFor(masks_unique,
        [=] AMREX_GPU_DEVICE (int i, int j, int k, Array4Tag<int> const& msk) noexcept
        {
            msk.dfab(i,j,k) = 0;
        });

        if (op == FabArrayBase::COPY)
        {
            detail::fab_to_fab_atomic_cpy<value_type, BUF>(
                recv_copy_tags, 0, dcomp, ncomp, masks);
        }
        else
        {
            detail::fab_to_fab_atomic_add<value_type, BUF>(
                recv_copy_tags, 0, dcomp, ncomp, masks);
        }

        // maskfabs are local; wait for the kernels before they are freed.
        Gpu::streamSynchronize();
    }
}
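
// The CPU analogues below pack and unpack message buffers with host loops.
// When unpacking is not thread safe, tags are grouped per destination fab so
// each fab is updated by exactly one thread.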
template <class FAB>
template <typename BUF>
void
FabArray<FAB>::pack_send_buffer_cpu (FabArray<FAB> const& src, int scomp, int ncomp,
                                     Vector<char*> const& send_data,
                                     Vector<std::size_t> const& send_size,
                                     Vector<CopyComTagsContainer const*> const& send_cctc)
{
    auto const N_snds = static_cast<int>(send_data.size());
    if (N_snds == 0) { return; }

#ifdef AMREX_USE_OMP
#pragma omp parallel for
#endif
    for (int j = 0; j < N_snds; ++j)
    {
        if (send_size[j] > 0)
        {
            char* dptr = send_data[j];
            auto const& cctc = *send_cctc[j];
            for (auto const& tag : cctc)
            {
                const Box& bx = tag.sbox;
                auto const sfab = src.array(tag.srcIndex);
                auto pfab = amrex::makeArray4((BUF*)(dptr), bx, ncomp);
                amrex::LoopConcurrentOnCpu(bx, ncomp,
                [=] (int ii, int jj, int kk, int n) noexcept
                {
                    pfab(ii,jj,kk,n) = static_cast<BUF>(sfab(ii,jj,kk,n+scomp));
                });
                dptr += (bx.numPts() * ncomp * sizeof(BUF));
            }
            BL_ASSERT(dptr <= send_data[j] + send_size[j]);
        }
    }
}
template <class FAB>
template <typename BUF>
void
FabArray<FAB>::unpack_recv_buffer_cpu (FabArray<FAB>& dst, int dcomp, int ncomp,
                                       Vector<char*> const& recv_data,
                                       Vector<std::size_t> const& recv_size,
                                       Vector<CopyComTagsContainer const*> const& recv_cctc,
                                       CpOp op, bool is_thread_safe)
{
    auto const N_rcvs = static_cast<int>(recv_cctc.size());
    if (N_rcvs == 0) { return; }

    if (is_thread_safe)
    {
#ifdef AMREX_USE_OMP
#pragma omp parallel for
#endif
        for (int k = 0; k < N_rcvs; ++k)
        {
            if (recv_size[k] > 0)
            {
                const char* dptr = recv_data[k];
                auto const& cctc = *recv_cctc[k];
                for (auto const& tag : cctc)
                {
                    const Box& bx = tag.dbox;
                    FAB& dfab = dst[tag.dstIndex];
                    if (op == FabArrayBase::COPY)
                    {
                        dfab.template copyFromMem<RunOn::Host, BUF>(bx, dcomp, ncomp, dptr);
                    }
                    else
                    {
                        dfab.template addFromMem<RunOn::Host, BUF>(tag.dbox, dcomp, ncomp, dptr);
                    }
                    dptr += bx.numPts() * ncomp * sizeof(BUF);
                }
                BL_ASSERT(dptr <= recv_data[k] + recv_size[k]);
            }
        }
    }
    else
    {
        LayoutData<Vector<VoidCopyTag> > recv_copy_tags;
        recv_copy_tags.define(dst.boxArray(),dst.DistributionMap());
        for (int k = 0; k < N_rcvs; ++k)
        {
            if (recv_size[k] > 0)
            {
                const char* dptr = recv_data[k];
                auto const& cctc = *recv_cctc[k];
                for (auto const& tag : cctc)
                {
                    recv_copy_tags[tag.dstIndex].push_back({dptr,tag.dbox});
                    dptr += tag.dbox.numPts() * ncomp * sizeof(BUF);
                }
                BL_ASSERT(dptr <= recv_data[k] + recv_size[k]);
            }
        }

#ifdef AMREX_USE_OMP
#pragma omp parallel
#endif
        for (MFIter mfi(dst); mfi.isValid(); ++mfi)
        {
            const auto& tags = recv_copy_tags[mfi];
            auto dfab = dst.array(mfi);
            for (auto const & tag : tags)
            {
                auto pfab = amrex::makeArray4((BUF const*)(tag.p), tag.dbox, ncomp);
                if (op == FabArrayBase::COPY)
                {
                    amrex::LoopConcurrentOnCpu(tag.dbox, ncomp,
                    [=] (int i, int j, int k, int n) noexcept
                    {
                        dfab(i,j,k,n+dcomp) = pfab(i,j,k,n);
                    });
                }
                else
                {
                    amrex::LoopConcurrentOnCpu(tag.dbox, ncomp,
                    [=] (int i, int j, int k, int n) noexcept
                    {
                        dfab(i,j,k,n+dcomp) += pfab(i,j,k,n);
                    });
                }
            }
        }
    }
}