AMReX: Block-Structured AMR Software Framework

AMReX_ParticleTransformation.H
#ifndef AMREX_PARTICLETRANSFORMATION_H_
#define AMREX_PARTICLETRANSFORMATION_H_
#include <AMReX_Config.H>

#include <AMReX_IntVect.H>
#include <AMReX_Box.H>
#include <AMReX_Gpu.H>
#include <AMReX_Print.H>
#include <AMReX_ParticleTile.H>
#include <AMReX_ParticleUtil.H>

namespace amrex
{

/**
 * \brief A general single particle copying routine that can run on the GPU.
 *
 * \param dst the destination tile data
 * \param src the source tile data
 * \param src_i the index in the source to read from
 * \param dst_i the index in the destination to write to
 */
template <typename T_ParticleType, int NAR, int NAI>
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
void copyParticle (const ParticleTileData<T_ParticleType, NAR, NAI>& dst,
                   const ConstParticleTileData<T_ParticleType, NAR, NAI>& src,
                   int src_i, int dst_i) noexcept
{
    AMREX_ASSERT(dst.m_num_runtime_real == src.m_num_runtime_real);
    AMREX_ASSERT(dst.m_num_runtime_int  == src.m_num_runtime_int );

    if constexpr (!T_ParticleType::is_soa_particle) {
        dst.m_aos[dst_i] = src.m_aos[src_i];
    } else {
        dst.m_idcpu[dst_i] = src.m_idcpu[src_i];
    }
    if constexpr (NAR > 0) {
        for (int j = 0; j < NAR; ++j) {
            dst.m_rdata[j][dst_i] = src.m_rdata[j][src_i];
        }
    }
    for (int j = 0; j < dst.m_num_runtime_real; ++j) {
        dst.m_runtime_rdata[j][dst_i] = src.m_runtime_rdata[j][src_i];
    }
    if constexpr (NAI > 0) {
        for (int j = 0; j < NAI; ++j) {
            dst.m_idata[j][dst_i] = src.m_idata[j][src_i];
        }
    }
    for (int j = 0; j < dst.m_num_runtime_int; ++j) {
        dst.m_runtime_idata[j][dst_i] = src.m_runtime_idata[j][src_i];
    }
}

/**
 * \brief A general single particle copying routine that can run on the GPU.
 * This overload reads from a non-const source tile.
 *
 * \param dst the destination tile data
 * \param src the source tile data
 * \param src_i the index in the source to read from
 * \param dst_i the index in the destination to write to
 */
template <typename T_ParticleType, int NAR, int NAI>
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
void copyParticle (const ParticleTileData<T_ParticleType, NAR, NAI>& dst,
                   const ParticleTileData<T_ParticleType, NAR, NAI>& src,
                   int src_i, int dst_i) noexcept
{
    AMREX_ASSERT(dst.m_num_runtime_real == src.m_num_runtime_real);
    AMREX_ASSERT(dst.m_num_runtime_int  == src.m_num_runtime_int );

    if constexpr (T_ParticleType::is_soa_particle) {
        dst.m_idcpu[dst_i] = src.m_idcpu[src_i];
    } else {
        dst.m_aos[dst_i] = src.m_aos[src_i];
    }
    for (int j = 0; j < NAR; ++j) {
        dst.m_rdata[j][dst_i] = src.m_rdata[j][src_i];
    }
    for (int j = 0; j < dst.m_num_runtime_real; ++j) {
        dst.m_runtime_rdata[j][dst_i] = src.m_runtime_rdata[j][src_i];
    }
    for (int j = 0; j < NAI; ++j) {
        dst.m_idata[j][dst_i] = src.m_idata[j][src_i];
    }
    for (int j = 0; j < dst.m_num_runtime_int; ++j) {
        dst.m_runtime_idata[j][dst_i] = src.m_runtime_idata[j][src_i];
    }
}

/**
 * \brief A general single particle swapping routine that can run on the GPU.
 *
 * \param dst the destination tile data
 * \param src the source tile data
 * \param src_i the index in the source to swap
 * \param dst_i the index in the destination to swap
 */
template <typename T_ParticleType, int NAR, int NAI>
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
void swapParticle (const ParticleTileData<T_ParticleType, NAR, NAI>& dst,
                   const ParticleTileData<T_ParticleType, NAR, NAI>& src,
                   int src_i, int dst_i) noexcept
{
    AMREX_ASSERT(dst.m_num_runtime_real == src.m_num_runtime_real);
    AMREX_ASSERT(dst.m_num_runtime_int  == src.m_num_runtime_int );

    if constexpr (T_ParticleType::is_soa_particle) {
        amrex::Swap(src.m_idcpu[src_i], dst.m_idcpu[dst_i]);
    } else {
        amrex::Swap(src.m_aos[src_i], dst.m_aos[dst_i]);
    }
    if constexpr (NAR > 0) {
        for (int j = 0; j < NAR; ++j) {
            amrex::Swap(dst.m_rdata[j][dst_i], src.m_rdata[j][src_i]);
        }
    }
    for (int j = 0; j < dst.m_num_runtime_real; ++j) {
        amrex::Swap(dst.m_runtime_rdata[j][dst_i], src.m_runtime_rdata[j][src_i]);
    }
    if constexpr (NAI > 0) {
        for (int j = 0; j < NAI; ++j) {
            amrex::Swap(dst.m_idata[j][dst_i], src.m_idata[j][src_i]);
        }
    }
    for (int j = 0; j < dst.m_num_runtime_int; ++j) {
        amrex::Swap(dst.m_runtime_idata[j][dst_i], src.m_runtime_idata[j][src_i]);
    }
}

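// --- Usage sketch (editorial addition, not part of the upstream header) ---
// The single-particle routines above are building blocks meant to be called
// from device kernels. A minimal sketch, assuming `tile` is a ParticleTile
// whose contents we reverse in place (`np` is its particle count):
//
//   auto ptd = tile.getParticleTileData();
//   amrex::ParallelFor(np/2, [=] AMREX_GPU_DEVICE (int i) noexcept
//   {
//       // swap particle i with its mirror image at the end of the tile
//       amrex::swapParticle(ptd, ptd, i, np-1-i);
//   });
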
/**
 * \brief Copy particles from src to dst. This version copies all the
 * particles, writing them to the beginning of dst.
 */
template <typename DstTile, typename SrcTile>
void copyParticles (DstTile& dst, const SrcTile& src) noexcept
{
    auto np = src.numParticles();
    copyParticles(dst, src, 0, 0, np);
}

/**
 * \brief Copy particles from src to dst. This version copies n particles
 * starting at index src_start, writing the result starting at dst_start.
 */
template <typename DstTile, typename SrcTile, typename Index, typename N,
          std::enable_if_t<std::is_integral_v<Index>, int> foo = 0>
void copyParticles (DstTile& dst, const SrcTile& src,
                    Index src_start, Index dst_start, N n) noexcept
{
    const auto src_data = src.getConstParticleTileData();
    auto dst_data = dst.getParticleTileData();

    AMREX_HOST_DEVICE_FOR_1D( n, i,
    {
        copyParticle(dst_data, src_data, src_start+i, dst_start+i);
    });

    Gpu::streamSynchronize();
}

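// Usage sketch (editorial addition): copying one tile into another. The tile
// names are assumed for illustration; `dst_tile` must be resized before the
// copy so the destination indices are valid.
//
//   dst_tile.resize(src_tile.numParticles());
//   amrex::copyParticles(dst_tile, src_tile);           // full copy
//   amrex::copyParticles(dst_tile, src_tile, 5, 0, 10); // 10 particles from src index 5
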
/**
 * \brief Apply the function f to all the particles in src, writing the
 * results to dst. This version does all the particles in src.
 */
template <typename DstTile, typename SrcTile, typename F>
void transformParticles (DstTile& dst, const SrcTile& src, F&& f) noexcept
{
    auto np = src.numParticles();
    transformParticles(dst, src, 0, 0, np, std::forward<F>(f));
}

/**
 * \brief Apply the function f to n particles in src, starting at src_start,
 * writing the results to dst starting at dst_start.
 */
template <typename DstTile, typename SrcTile, typename Index, typename N, typename F,
          std::enable_if_t<std::is_integral_v<Index>, int> foo = 0>
void transformParticles (DstTile& dst, const SrcTile& src,
                         Index src_start, Index dst_start, N n, F const& f) noexcept
{
    const auto src_data = src.getConstParticleTileData();
    auto dst_data = dst.getParticleTileData();

    AMREX_HOST_DEVICE_FOR_1D( n, i,
    {
        f(dst_data, src_data, src_start+i, dst_start+i);
    });

    Gpu::streamSynchronize();
}

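// Usage sketch (editorial addition): the callable receives the destination
// tile data, the (const) source tile data, and the source and destination
// indices. A minimal transform that copies each particle and then rescales
// its first real component (component index 0 is assumed for illustration):
//
//   amrex::transformParticles(dst_tile, src_tile,
//       [=] AMREX_GPU_DEVICE (const auto& dst, const auto& src,
//                             int src_i, int dst_i) noexcept
//       {
//           amrex::copyParticle(dst, src, src_i, dst_i);
//           dst.m_rdata[0][dst_i] *= 2.0;
//       });
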
/**
 * \brief Apply the function f to all the particles in src, writing the
 * results to dst1 and dst2. This version does all the particles in src.
 */
template <typename DstTile1, typename DstTile2, typename SrcTile, typename F>
void transformParticles (DstTile1& dst1, DstTile2& dst2, const SrcTile& src, F&& f) noexcept
{
    auto np = src.numParticles();
    transformParticles(dst1, dst2, src, 0, 0, 0, np, std::forward<F>(f));
}

/**
 * \brief Apply the function f to n particles in src, starting at src_start,
 * writing the results to dst1 (starting at dst1_start) and dst2 (starting
 * at dst2_start).
 */
template <typename DstTile1, typename DstTile2, typename SrcTile,
          typename Index, typename N, typename F,
          std::enable_if_t<std::is_integral_v<Index>, int> foo = 0>
void transformParticles (DstTile1& dst1, DstTile2& dst2, const SrcTile& src,
                         Index src_start, Index dst1_start, Index dst2_start, N n, F const& f) noexcept
{
    const auto src_data = src.getConstParticleTileData();
    auto dst1_data = dst1.getParticleTileData();
    auto dst2_data = dst2.getParticleTileData();

    AMREX_HOST_DEVICE_FOR_1D( n, i,
    {
        f(dst1_data, dst2_data, src_data, src_start+i, dst1_start+i, dst2_start+i);
    });

    Gpu::streamSynchronize();
}

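// Usage sketch (editorial addition): a two-destination transform, e.g.
// writing the same source particle into two tiles at once (tile names
// assumed for illustration):
//
//   amrex::transformParticles(dstA, dstB, src_tile,
//       [=] AMREX_GPU_DEVICE (const auto& d1, const auto& d2, const auto& src,
//                             int si, int d1i, int d2i) noexcept
//       {
//           amrex::copyParticle(d1, src, si, d1i);
//           amrex::copyParticle(d2, src, si, d2i);
//       });
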
/**
 * \brief Conditionally copy particles from src to dst based on the value of
 * mask (1 means copy, 0 means skip). Returns the number of particles copied.
 */
template <typename DstTile, typename SrcTile, typename Index,
          std::enable_if_t<std::is_integral_v<Index>, int> foo = 0>
Index filterParticles (DstTile& dst, const SrcTile& src, const Index* mask) noexcept
{
    return filterParticles(dst, src, mask, 0, 0, src.numParticles());
}

/**
 * \brief Conditionally copy n particles from src to dst based on the value
 * of mask, reading from src starting at src_start and writing the result
 * starting at dst_start. Returns the number of particles copied.
 */
template <typename DstTile, typename SrcTile, typename Index, typename N,
          std::enable_if_t<std::is_integral_v<Index>, int> foo = 0>
Index filterParticles (DstTile& dst, const SrcTile& src, const Index* mask,
                       Index src_start, Index dst_start, N n) noexcept
{
    Gpu::DeviceVector<Index> offsets(n);
    Gpu::exclusive_scan(mask, mask+n, offsets.begin());

    Index last_mask=0, last_offset=0;
    Gpu::copyAsync(Gpu::deviceToHost, mask+n-1, mask + n, &last_mask);
    Gpu::copyAsync(Gpu::deviceToHost, offsets.data()+n-1, offsets.data()+n, &last_offset);

    auto* p_offsets = offsets.dataPtr();

    const auto src_data = src.getConstParticleTileData();
    auto dst_data = dst.getParticleTileData();

    AMREX_HOST_DEVICE_FOR_1D( n, i,
    {
        if (mask[i]) { copyParticle(dst_data, src_data, src_start+i, dst_start+p_offsets[i]); }
    });

    Gpu::streamSynchronize();
    return last_mask + last_offset;
}

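// Usage sketch (editorial addition): mask-based filtering. The mask holds one
// 0/1 flag per source particle; the exclusive scan of the mask assigns each
// kept particle its output slot, and the return value is the number kept:
//
//   amrex::Gpu::DeviceVector<int> mask(src_tile.numParticles());
//   // ... fill mask on the device: 1 to keep, 0 to drop ...
//   dst_tile.resize(src_tile.numParticles());
//   auto n_kept = amrex::filterParticles(dst_tile, src_tile, mask.dataPtr());
//   dst_tile.resize(n_kept); // shrink to the particles actually kept
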
/**
 * \brief Conditionally copy particles from src to dst based on a predicate.
 * Returns the number of particles copied.
 */
template <typename DstTile, typename SrcTile, typename Pred,
          std::enable_if_t<!std::is_pointer_v<std::decay_t<Pred>>,int> foo = 0>
int filterParticles (DstTile& dst, const SrcTile& src, Pred&& p) noexcept
{
    return filterParticles(dst, src, std::forward<Pred>(p), 0, 0, src.numParticles());
}

/**
 * \brief Conditionally copy n particles from src to dst based on a predicate,
 * reading from src starting at src_start and writing the result starting at
 * dst_start. Returns the number of particles copied.
 */
template <typename DstTile, typename SrcTile, typename Pred, typename Index, typename N,
          std::enable_if_t<!std::is_pointer_v<std::decay_t<Pred>>,Index> nvccfoo = 0>
Index filterParticles (DstTile& dst, const SrcTile& src, Pred const& p,
                       Index src_start, Index dst_start, N n) noexcept
{
    Gpu::DeviceVector<Index> mask(n);

    auto* p_mask = mask.dataPtr();
    const auto src_data = src.getConstParticleTileData();

    ParallelForRNG(n,
    [p, p_mask, src_data, src_start] AMREX_GPU_DEVICE (int i, amrex::RandomEngine const& engine) noexcept
    {
        amrex::ignore_unused(p, p_mask, src_data, src_start, engine);
        if constexpr (IsCallable<Pred,decltype(src_data),Index,RandomEngine>::value) {
            p_mask[i] = p(src_data, src_start+i, engine);
        } else {
            p_mask[i] = p(src_data, src_start+i);
        }
    });
    return filterParticles(dst, src, mask.dataPtr(), src_start, dst_start, n);
}

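// Usage sketch (editorial addition): predicate-based filtering. The predicate
// is called as p(src_data, i) or, if it accepts a RandomEngine, as
// p(src_data, i, engine) for randomized selection. Keeping particles whose
// first integer component (index 0, assumed for illustration) equals 1:
//
//   auto n_kept = amrex::filterParticles(dst_tile, src_tile,
//       [=] AMREX_GPU_DEVICE (const auto& src, int i) noexcept -> int
//       {
//           return src.m_idata[0][i] == 1;
//       });
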
/**
 * \brief Conditionally copy particles from src to dst based on the value of
 * mask, applying the transformation f to the particles that are copied.
 * Reads from src starting at src_start and writes to dst starting at
 * dst_start. Returns the number of particles written to dst.
 */
template <typename DstTile, typename SrcTile, typename Index, typename F,
          std::enable_if_t<std::is_integral_v<Index>, int> foo = 0>
Index filterAndTransformParticles (DstTile& dst, const SrcTile& src, Index* mask, F const& f,
                                   Index src_start, Index dst_start) noexcept
{
    auto np = src.numParticles();
    Gpu::DeviceVector<Index> offsets(np);
    Gpu::exclusive_scan(mask, mask+np, offsets.begin());

    Index last_mask=0, last_offset=0;
    Gpu::copyAsync(Gpu::deviceToHost, mask+np-1, mask + np, &last_mask);
    Gpu::copyAsync(Gpu::deviceToHost, offsets.data()+np-1, offsets.data()+np, &last_offset);

    auto const* p_offsets = offsets.dataPtr();

    const auto src_data = src.getConstParticleTileData();
    auto dst_data = dst.getParticleTileData();

    AMREX_HOST_DEVICE_FOR_1D( np, i,
    {
        if (mask[i]) {
            f(dst_data, src_data, src_start+i,
              dst_start+p_offsets[src_start+i]);
        }
    });

    Gpu::streamSynchronize();
    return last_mask + last_offset;
}

/**
 * \brief Conditionally copy particles from src to dst based on the value of
 * mask, applying the transformation f to the particles that are copied.
 */
template <typename DstTile, typename SrcTile, typename Index, typename F,
          std::enable_if_t<std::is_integral_v<Index>, int> foo = 0>
Index filterAndTransformParticles (DstTile& dst, const SrcTile& src, Index* mask, F&& f) noexcept
{
    return filterAndTransformParticles(dst, src, mask, std::forward<F>(f), 0, 0);
}

/**
 * \brief Conditionally copy particles from src to dst based on a predicate,
 * applying the transformation f to the particles that are copied.
 */
template <typename DstTile, typename SrcTile, typename Pred, typename F,
          std::enable_if_t<!std::is_pointer_v<std::decay_t<Pred>>,int> foo = 0>
int filterAndTransformParticles (DstTile& dst, const SrcTile& src, Pred&& p, F&& f) noexcept
{
    return filterAndTransformParticles(dst, src, std::forward<Pred>(p), std::forward<F>(f), 0, 0);
}

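// Usage sketch (editorial addition): filter and transform in one pass. Here
// the predicate keeps particles with a positive first real component and the
// transform copies each kept particle while flipping that component's sign
// (component index 0 is assumed for illustration):
//
//   auto n_kept = amrex::filterAndTransformParticles(dst_tile, src_tile,
//       [=] AMREX_GPU_DEVICE (const auto& src, int i) noexcept -> int
//       { return src.m_rdata[0][i] > 0.0; },
//       [=] AMREX_GPU_DEVICE (const auto& dst, const auto& src,
//                             int src_i, int dst_i) noexcept
//       {
//           amrex::copyParticle(dst, src, src_i, dst_i);
//           dst.m_rdata[0][dst_i] *= -1.0;
//       });
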
/**
 * \brief Conditionally copy particles from src to dst1 and dst2 based on the
 * value of mask, applying the transformation f to the particles that are
 * copied. Returns the number of particles written to each destination.
 */
template <typename DstTile1, typename DstTile2, typename SrcTile, typename Index, typename F,
          std::enable_if_t<std::is_integral_v<Index>, int> foo = 0>
Index filterAndTransformParticles (DstTile1& dst1, DstTile2& dst2,
                                   const SrcTile& src, Index* mask, F const& f) noexcept
{
    auto np = src.numParticles();
    Gpu::DeviceVector<Index> offsets(np);
    Gpu::exclusive_scan(mask, mask+np, offsets.begin());

    Index last_mask=0, last_offset=0;
    Gpu::copyAsync(Gpu::deviceToHost, mask+np-1, mask + np, &last_mask);
    Gpu::copyAsync(Gpu::deviceToHost, offsets.data()+np-1, offsets.data()+np, &last_offset);

    auto* p_offsets = offsets.dataPtr();

    const auto src_data = src.getConstParticleTileData();
    auto dst_data1 = dst1.getParticleTileData();
    auto dst_data2 = dst2.getParticleTileData();

    AMREX_HOST_DEVICE_FOR_1D( np, i,
    {
        if (mask[i]) { f(dst_data1, dst_data2, src_data, i, p_offsets[i], p_offsets[i]); }
    });

    Gpu::streamSynchronize();
    return last_mask + last_offset;
}

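// Usage sketch (editorial addition): the two-destination variant calls
// f(dst1, dst2, src, i, dst1_i, dst2_i) for every particle the mask keeps,
// so one pass can populate two tiles at once (tile names assumed for
// illustration):
//
//   auto n_kept = amrex::filterAndTransformParticles(tileA, tileB,
//       src_tile, mask.dataPtr(),
//       [=] AMREX_GPU_DEVICE (const auto& d1, const auto& d2, const auto& src,
//                             int si, int d1i, int d2i) noexcept
//       {
//           amrex::copyParticle(d1, src, si, d1i);
//           amrex::copyParticle(d2, src, si, d2i);
//       });
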
/**
 * \brief Conditionally copy particles from src to dst1 and dst2 based on a
 * predicate, applying the transformation f to the particles that are copied.
 */
template <typename DstTile1, typename DstTile2, typename SrcTile, typename Pred, typename F,
          std::enable_if_t<!std::is_pointer_v<std::decay_t<Pred>>, int> foo = 0>
int filterAndTransformParticles (DstTile1& dst1, DstTile2& dst2, const SrcTile& src,
                                 Pred const& p, F&& f) noexcept
{
    auto np = src.numParticles();
    Gpu::DeviceVector<int> mask(np);

    auto* p_mask = mask.dataPtr();
    const auto src_data = src.getConstParticleTileData();

    ParallelForRNG(np,
    [p, p_mask, src_data] AMREX_GPU_DEVICE (int i, amrex::RandomEngine const& engine) noexcept
    {
        amrex::ignore_unused(p, p_mask, src_data, engine);
        if constexpr (IsCallable<Pred,decltype(src_data),int,RandomEngine>::value) {
            p_mask[i] = p(src_data, i, engine);
        } else {
            p_mask[i] = p(src_data, i);
        }
    });
    return filterAndTransformParticles(dst1, dst2, src, mask.dataPtr(), std::forward<F>(f));
}

/**
 * \brief Conditionally copy particles from src to dst based on a predicate,
 * applying the transformation f to the particles that are copied. Reads from
 * src starting at src_start and writes to dst starting at dst_start.
 */
template <typename DstTile, typename SrcTile, typename Pred, typename F, typename Index,
          std::enable_if_t<!std::is_pointer_v<std::decay_t<Pred>>,Index> nvccfoo = 0>
Index filterAndTransformParticles (DstTile& dst, const SrcTile& src, Pred const& p, F&& f,
                                   Index src_start, Index dst_start) noexcept
{
    auto np = src.numParticles();
    Gpu::DeviceVector<Index> mask(np);

    auto* p_mask = mask.dataPtr();
    const auto src_data = src.getConstParticleTileData();

    ParallelForRNG(np,
    [p, p_mask, src_data, src_start] AMREX_GPU_DEVICE (int i, amrex::RandomEngine const& engine) noexcept
    {
        amrex::ignore_unused(p, p_mask, src_data, src_start, engine);
        if constexpr (IsCallable<Pred,decltype(src_data),Index,RandomEngine>::value) {
            p_mask[i] = p(src_data, src_start+i, engine);
        } else {
            p_mask[i] = p(src_data, src_start+i);
        }
    });
    return filterAndTransformParticles(dst, src, mask.dataPtr(), std::forward<F>(f), src_start, dst_start);
}

/**
 * \brief Gather copies particles into contiguous order from an arbitrary
 * order. Specifically, the particle at index inds[i] in src is written to
 * slot i in dst.
 */
template <typename PTile, typename N, typename Index,
          std::enable_if_t<std::is_integral_v<Index>, int> foo = 0>
void gatherParticles (PTile& dst, const PTile& src, N np, const Index* inds)
{
    const auto src_data = src.getConstParticleTileData();
    auto dst_data = dst.getParticleTileData();

    AMREX_HOST_DEVICE_FOR_1D( np, i,
    {
        copyParticle(dst_data, src_data, inds[i], i);
    });

    Gpu::streamSynchronize();
}

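// Usage sketch (editorial addition): gather is typically used with a
// permutation produced by a sort, pulling particles into sorted order:
//
//   // `perm` is assumed to hold a device permutation of 0..np-1,
//   // e.g. computed from a cell-sorting key
//   amrex::Gpu::DeviceVector<unsigned int> perm(np);
//   sorted_tile.resize(np);
//   amrex::gatherParticles(sorted_tile, tile, np, perm.dataPtr());
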
/**
 * \brief Scatter copies particles from contiguous order into an arbitrary
 * order. Specifically, the particle at slot i in src is written to index
 * inds[i] in dst.
 */
template <typename PTile, typename N, typename Index,
          std::enable_if_t<std::is_integral_v<Index>, int> foo = 0>
void scatterParticles (PTile& dst, const PTile& src, N np, const Index* inds)
{
    const auto src_data = src.getConstParticleTileData();
    auto dst_data = dst.getParticleTileData();

    AMREX_HOST_DEVICE_FOR_1D( np, i,
    {
        copyParticle(dst_data, src_data, i, inds[i]);
    });

    Gpu::streamSynchronize();
}

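// Usage sketch (editorial addition): scatter inverts the gather above,
// returning contiguously processed particles to their original slots:
//
//   amrex::scatterParticles(tile, sorted_tile, np, perm.dataPtr());
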
} // namespace amrex

#endif // include guard