AMReX_ParticleCommunication.H
#ifndef AMREX_PARTICLECOMMUNICATION_H_
#define AMREX_PARTICLECOMMUNICATION_H_
#include <AMReX_Config.H>

#include <AMReX_Gpu.H>
#include <AMReX_GpuContainers.H>
#include <AMReX_IntVect.H>
#include <AMReX_ParticleBufferMap.H>
#include <AMReX_MFIter.H>
#include <AMReX_Scan.H>
#include <AMReX_TypeTraits.H>
#include <AMReX_MakeParticle.H>

#include <map>

namespace amrex {

struct NeighborUnpackPolicy
{
    template <class PTile>
    void resizeTiles (std::vector<PTile*>& tiles, const std::vector<int>& sizes, std::vector<int>& offsets) const
    {
        for (int i = 0; i < static_cast<int>(sizes.size()); ++i)
        {
            int offset = tiles[i]->numTotalParticles();
            int nn = tiles[i]->getNumNeighbors();
            tiles[i]->setNumNeighbors(nn + sizes[i]);
            offsets.push_back(offset);
        }
    }
};

struct RedistributeUnpackPolicy
{
    template <class PTile>
    void resizeTiles (std::vector<PTile*>& tiles, const std::vector<int>& sizes, std::vector<int>& offsets) const
    {
        int N = static_cast<int>(sizes.size());

        std::map<PTile*, int> tile_sizes;
        for (int i = 0; i < N; ++i) {
            tile_sizes[tiles[i]] = tiles[i]->numParticles();
        }

        for (int i = 0; i < N; ++i)
        {
            offsets.push_back(tile_sizes[tiles[i]]);
            tile_sizes[tiles[i]] += sizes[i];
        }

        for (auto& kv : tile_sizes) {
            kv.first->resize(kv.second);
        }
    }
};
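
// A minimal usage sketch (illustrative only, not part of this header): an
// "unpack policy" is any type exposing resizeTiles(tiles, sizes, offsets).
// Given the number of incoming particles per destination tile in `sizes`, it
// grows each tile and records in `offsets` the index at which the first new
// particle for that tile should be written, e.g.
//
//     std::vector<PTile*> tiles;   // one entry per destination tile
//     std::vector<int>    sizes;   // incoming particle count per tile
//     std::vector<int>    offsets; // filled by the policy
//     RedistributeUnpackPolicy{}.resizeTiles(tiles, sizes, offsets);
//     // particle j destined for tiles[i] is unpacked at index offsets[i] + j
//
// RedistributeUnpackPolicy appends to a tile's valid particles, while
// NeighborUnpackPolicy appends to its neighbor (ghost) particles.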

struct ParticleCopyOp
{
    Vector<std::map<int, Gpu::DeviceVector<int>>> m_boxes;
    Vector<std::map<int, Gpu::DeviceVector<int>>> m_levels;
    Vector<std::map<int, Gpu::DeviceVector<int>>> m_src_indices;
    Vector<std::map<int, Gpu::DeviceVector<IntVect>>> m_periodic_shift;

    void clear ();

    void setNumLevels (int num_levels);

    void resize (int gid, int lev, int size);

    [[nodiscard]] int numCopies (int gid, int lev) const
    {
        if (m_boxes.size() <= lev) { return 0; }
        auto mit = m_boxes[lev].find(gid);
        return mit == m_boxes[lev].end() ? 0 : int(mit->second.size());
    }

    [[nodiscard]] int numLevels () const { return int(m_boxes.size()); }
};
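
// Note that the four containers above are indexed in parallel: for a given
// level and grid id, entry i of m_boxes, m_levels, m_src_indices and
// m_periodic_shift together describes one copy, i.e. source particle
// m_src_indices[i] of that tile is sent to box m_boxes[i] on level
// m_levels[i], shifted by m_periodic_shift[i] if the copy crosses a periodic
// boundary (see packBuffer below).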

struct ParticleCopyPlan
{
    Vector<std::map<int, Gpu::DeviceVector<int>>> m_dst_indices;

    Gpu::DeviceVector<unsigned int> m_box_counts_d;
    Gpu::HostVector<unsigned int> m_box_counts_h;
    Gpu::DeviceVector<unsigned int> m_box_offsets;

    Vector<int> m_rcv_box_counts;
    Vector<int> m_rcv_box_offsets;
    Vector<int> m_rcv_box_ids;
    Vector<int> m_rcv_box_pids;
    Vector<int> m_rcv_box_levs;

    Long m_NumSnds = 0;
    int m_nrcvs = 0;
    Vector<MPI_Status> m_build_stats;
    Vector<MPI_Request> m_build_rreqs;

    Vector<MPI_Status> m_particle_stats;
    Vector<MPI_Request> m_particle_rreqs;

    Vector<Long> m_snd_num_particles;
    Vector<Long> m_rcv_num_particles;

    Vector<int> m_neighbor_procs;

    Vector<Long> m_Snds;
    Vector<Long> m_Rcvs;
    Vector<int> m_RcvProc;
    Vector<std::size_t> m_rOffset;
    Gpu::HostVector<int> m_rcv_data;

    Vector<std::size_t> m_snd_offsets;
    Vector<std::size_t> m_snd_counts;

    Vector<std::size_t> m_snd_pad_correction_h;
    Gpu::DeviceVector<std::size_t> m_snd_pad_correction_d;

    Vector<std::size_t> m_rcv_pad_correction_h;
    Gpu::DeviceVector<std::size_t> m_rcv_pad_correction_d;

    Gpu::DeviceVector<int> d_int_comp_mask, d_real_comp_mask;
    Long m_superparticle_size;

    Long superParticleSize () const { return m_superparticle_size; }

    template <class PC, std::enable_if_t<IsParticleContainer<PC>::value, int> foo = 0>
    void build (const PC& pc,
                const ParticleCopyOp& op,
                const Vector<int>& int_comp_mask,
                const Vector<int>& real_comp_mask,
                bool local)
    {
        BL_PROFILE("ParticleCopyPlan::build");

        m_local = local;

        const int ngrow = 1; // note - fix

        const int num_levels = op.numLevels();
        const int num_buckets = pc.BufferMap().numBuckets();

        if (m_local)
        {
            m_neighbor_procs = pc.NeighborProcs(ngrow);
        }
        else
        {
            m_neighbor_procs.resize(ParallelContext::NProcsSub());
            std::iota(m_neighbor_procs.begin(), m_neighbor_procs.end(), 0);
        }

        m_box_counts_d.resize(0);
        m_box_counts_d.resize(num_buckets+1, 0);
        m_box_offsets.resize(num_buckets+1);
        auto* p_dst_box_counts = m_box_counts_d.dataPtr();
        auto getBucket = pc.BufferMap().getBucketFunctor();

        m_dst_indices.resize(num_levels);
        for (int lev = 0; lev < num_levels; ++lev)
        {
            for (const auto& kv : pc.GetParticles(lev))
            {
                int gid = kv.first.first;
                int num_copies = op.numCopies(gid, lev);
                if (num_copies == 0) { continue; }
                m_dst_indices[lev][gid].resize(num_copies);

                const auto* p_boxes = op.m_boxes[lev].at(gid).dataPtr();
                const auto* p_levs = op.m_levels[lev].at(gid).dataPtr();
                auto* p_dst_indices = m_dst_indices[lev][gid].dataPtr();

                AMREX_FOR_1D ( num_copies, i,
                {
                    int dst_box = p_boxes[i];
                    if (dst_box >= 0)
                    {
                        int dst_lev = p_levs[i];
                        int index = static_cast<int>(Gpu::Atomic::Add(
                            &p_dst_box_counts[getBucket(dst_lev, dst_box)], 1U));
                        p_dst_indices[i] = index;
                    }
                });
            }
        }

        Gpu::exclusive_scan(m_box_counts_d.begin(), m_box_counts_d.end(),
                            m_box_offsets.begin());

        m_box_counts_h.resize(m_box_counts_d.size());
        Gpu::copyAsync(Gpu::deviceToHost, m_box_counts_d.begin(), m_box_counts_d.end(),
                       m_box_counts_h.begin());

        m_snd_pad_correction_h.resize(0);
        m_snd_pad_correction_h.resize(ParallelContext::NProcsSub()+1, 0);

        m_snd_pad_correction_d.resize(m_snd_pad_correction_h.size());
        Gpu::copyAsync(Gpu::hostToDevice, m_snd_pad_correction_h.begin(), m_snd_pad_correction_h.end(),
                       m_snd_pad_correction_d.begin());

        d_int_comp_mask.resize(int_comp_mask.size());
        Gpu::copyAsync(Gpu::hostToDevice, int_comp_mask.begin(), int_comp_mask.end(),
                       d_int_comp_mask.begin());
        d_real_comp_mask.resize(real_comp_mask.size());
        Gpu::copyAsync(Gpu::hostToDevice, real_comp_mask.begin(), real_comp_mask.end(),
                       d_real_comp_mask.begin());

        int NStructReal = PC::ParticleContainerType::NStructReal;
        int NStructInt = PC::ParticleContainerType::NStructInt;

        int num_real_comm_comp = 0;
        for (int i = AMREX_SPACEDIM + NStructReal; i < real_comp_mask.size(); ++i) {
            if (real_comp_mask[i]) { ++num_real_comm_comp; }
        }

        int num_int_comm_comp = 0;
        for (int i = 2 + NStructInt; i < int_comp_mask.size(); ++i) {
            if (int_comp_mask[i]) { ++num_int_comm_comp; }
        }

        if constexpr (PC::ParticleType::is_soa_particle) {
            m_superparticle_size = sizeof(uint64_t); // idcpu
        } else {
            m_superparticle_size = sizeof(typename PC::ParticleType);
        }
        m_superparticle_size += num_real_comm_comp * sizeof(typename PC::ParticleType::RealType)
                              + num_int_comm_comp * sizeof(int);

        buildMPIStart(pc.BufferMap(), m_superparticle_size);
    }

    void clear ();

    void buildMPIFinish (const ParticleBufferMap& map);

private:

    void buildMPIStart (const ParticleBufferMap& map, Long psize);

    //
    // Snds - a Vector with the number of bytes that this process will send to each proc.
    // Rcvs - a Vector that, after calling this method, will contain the
    //        number of bytes this process will receive from each proc.
    //
    void doHandShake (const Vector<Long>& Snds, Vector<Long>& Rcvs) const;

    //
    // In the local version of this method, each proc knows which other
    // procs it could possibly receive messages from, meaning we can do
    // this purely with point-to-point communication.
    //
    void doHandShakeLocal (const Vector<Long>& Snds, Vector<Long>& Rcvs) const;

    //
    // In the global version, we don't know who we'll receive from, so we
    // need to do some collective communication first.
    //
    static void doHandShakeGlobal (const Vector<Long>& Snds, Vector<Long>& Rcvs);

    //
    // Another version of the above that is implemented using MPI All-to-All.
    //
    static void doHandShakeAllToAll (const Vector<Long>& Snds, Vector<Long>& Rcvs);

    bool m_local;
};
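
// Rough sketch of how the pieces in this header are typically driven; the
// actual sequence lives in the ParticleContainer redistribution code, so the
// variable names below (op, masks, buffers) are placeholders supplied by the
// caller:
//
//     ParticleCopyOp   op;    // filled with destination box/level per particle
//     ParticleCopyPlan plan;
//     plan.build(pc, op, int_comp_mask, real_comp_mask, local);
//     packBuffer(pc, op, plan, snd_buffer);
//     plan.buildMPIFinish(pc.BufferMap());
//     communicateParticlesStart(pc, plan, snd_buffer, rcv_buffer);
//     unpackBuffer(pc, plan, snd_buffer, RedistributeUnpackPolicy{});  // local copies
//     communicateParticlesFinish(plan);
//     unpackRemotes(pc, plan, rcv_buffer, RedistributeUnpackPolicy{}); // remote copies
//     plan.clear();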

struct GetSendBufferOffset
{
    const unsigned int* m_box_offsets;
    const std::size_t* m_pad_correction;

    GetPID m_get_pid;
    GetBucket m_get_bucket;

    GetSendBufferOffset (const ParticleCopyPlan& plan, const ParticleBufferMap& map)
        : m_box_offsets(plan.m_box_offsets.dataPtr()),
          m_pad_correction(plan.m_snd_pad_correction_d.dataPtr()),
          m_get_pid(map.getPIDFunctor()),
          m_get_bucket(map.getBucketFunctor())
    {}

    AMREX_FORCE_INLINE AMREX_GPU_DEVICE
    Long operator() (int dst_box, int dst_lev, std::size_t psize, int i) const
    {
        int dst_pid = m_get_pid(dst_lev, dst_box);
        Long dst_offset = Long(psize)*(m_box_offsets[m_get_bucket(dst_lev, dst_box)] + i);
        dst_offset += Long(m_pad_correction[dst_pid]);
        return dst_offset;
    }
};
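
// Illustration of the offset computed above: particles headed for the same
// destination bucket are packed contiguously, psize bytes apart, starting at
// m_box_offsets[bucket]*psize, and m_pad_correction[dst_pid] accounts for the
// padding inserted so that each destination rank's chunk of the send buffer
// starts at an MPI-friendly alignment. For example, with psize = 64 bytes,
// m_box_offsets[bucket] = 10 and a pad correction of 32 bytes for dst_pid,
// the i-th particle for that bucket lands at byte 64*(10 + i) + 32.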

template <class PC, class Buffer,
          std::enable_if_t<IsParticleContainer<PC>::value &&
                           std::is_base_of_v<PolymorphicArenaAllocator<typename Buffer::value_type>,
                                             Buffer>, int> foo = 0>
void packBuffer (const PC& pc, const ParticleCopyOp& op, const ParticleCopyPlan& plan,
                 Buffer& snd_buffer)
{
    BL_PROFILE("amrex::packBuffer");

    Long psize = plan.superParticleSize();

    int num_levels = op.numLevels();
    int num_buckets = pc.BufferMap().numBuckets();

    std::size_t total_buffer_size = 0;
    if (plan.m_snd_offsets.empty())
    {
        unsigned int np = 0;
        Gpu::copy(Gpu::deviceToHost, plan.m_box_offsets.begin() + num_buckets,
                  plan.m_box_offsets.begin() + num_buckets + 1, &np);
        total_buffer_size = np*psize;
    }
    else
    {
        total_buffer_size = plan.m_snd_offsets.back();
    }

    if (! snd_buffer.arena()->hasFreeDeviceMemory(total_buffer_size)) {
        snd_buffer.clear();
        snd_buffer.setArena(The_Pinned_Arena());
    }
    snd_buffer.resize(total_buffer_size);

    const auto* p_comm_real = plan.d_real_comp_mask.dataPtr();
    const auto* p_comm_int = plan.d_int_comp_mask.dataPtr();

    const auto plo = pc.Geom(0).ProbLoArray();
    const auto phi = pc.Geom(0).ProbHiArray();
    const auto is_per = pc.Geom(0).isPeriodicArray();
    for (int lev = 0; lev < num_levels; ++lev)
    {
        auto& plev = pc.GetParticles(lev);
        for (auto& kv : plev)
        {
            int gid = kv.first.first;
            int tid = kv.first.second;
            auto index = std::make_pair(gid, tid);

            auto& src_tile = plev.at(index);
            const auto& ptd = src_tile.getConstParticleTileData();

            int num_copies = op.numCopies(gid, lev);
            if (num_copies == 0) { continue; }

            const auto* p_boxes = op.m_boxes[lev].at(gid).dataPtr();
            const auto* p_levels = op.m_levels[lev].at(gid).dataPtr();
            const auto* p_src_indices = op.m_src_indices[lev].at(gid).dataPtr();
            const auto* p_periodic_shift = op.m_periodic_shift[lev].at(gid).dataPtr();
            const auto* p_dst_indices = plan.m_dst_indices[lev].at(gid).dataPtr();
            auto* p_snd_buffer = snd_buffer.dataPtr();
            GetSendBufferOffset get_offset(plan, pc.BufferMap());

            AMREX_FOR_1D ( num_copies, i,
            {
                int dst_box = p_boxes[i];
                if (dst_box >= 0)
                {
                    int dst_lev = p_levels[i];
                    auto dst_offset = get_offset(dst_box, dst_lev, psize, p_dst_indices[i]);
                    int src_index = p_src_indices[i];
                    ptd.packParticleData(p_snd_buffer, src_index, dst_offset, p_comm_real, p_comm_int);

                    const IntVect& pshift = p_periodic_shift[i];
                    bool do_periodic_shift =
                        AMREX_D_TERM( (is_per[0] && pshift[0] != 0),
                                   || (is_per[1] && pshift[1] != 0),
                                   || (is_per[2] && pshift[2] != 0) );

                    if (do_periodic_shift)
                    {
                        ParticleReal pos[AMREX_SPACEDIM];
                        amrex::Gpu::memcpy(&pos[0], &p_snd_buffer[dst_offset],
                                           AMREX_SPACEDIM*sizeof(ParticleReal));
                        for (int idim = 0; idim < AMREX_SPACEDIM; ++idim)
                        {
                            if (! is_per[idim]) { continue; }
                            if (pshift[idim] > 0) {
                                pos[idim] += phi[idim] - plo[idim];
                            } else if (pshift[idim] < 0) {
                                pos[idim] -= phi[idim] - plo[idim];
                            }
                        }
                        amrex::Gpu::memcpy(&p_snd_buffer[dst_offset], &pos[0],
                                           AMREX_SPACEDIM*sizeof(ParticleReal));
                    }
                }
            });
        }
    }
}
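
// Worked example of the periodic shift applied while packing above: if
// direction 0 is periodic with plo[0] = 0 and phi[0] = 1, a copy recorded with
// pshift[0] = -1 has its packed x position reduced by (phi[0] - plo[0]) = 1,
// so a particle at x = 1.05 is stored as x = 0.05 in the send buffer; the
// particle data in the source tile is left unchanged.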

template <class PC, class Buffer, class UnpackPolicy,
          std::enable_if_t<IsParticleContainer<PC>::value, int> foo = 0>
void unpackBuffer (PC& pc, const ParticleCopyPlan& plan, const Buffer& snd_buffer, UnpackPolicy const& policy)
{
    BL_PROFILE("amrex::unpackBuffer");

    using PTile = typename PC::ParticleTileType;

    int num_levels = pc.BufferMap().numLevels();
    Long psize = plan.superParticleSize();

    // count how many particles we have to add to each tile
    std::vector<int> sizes;
    std::vector<PTile*> tiles;
    for (int lev = 0; lev < num_levels; ++lev)
    {
        for (MFIter mfi = pc.MakeMFIter(lev); mfi.isValid(); ++mfi)
        {
            int gid = mfi.index();
            int tid = mfi.LocalTileIndex();
            auto& tile = pc.DefineAndReturnParticleTile(lev, gid, tid);
            int num_copies = plan.m_box_counts_h[pc.BufferMap().gridAndLevToBucket(gid, lev)];
            sizes.push_back(num_copies);
            tiles.push_back(&tile);
        }
    }

    // resize the tiles and compute offsets
    std::vector<int> offsets;
    policy.resizeTiles(tiles, sizes, offsets);

    const auto* p_comm_real = plan.d_real_comp_mask.dataPtr();
    const auto* p_comm_int = plan.d_int_comp_mask.dataPtr();

    // local unpack
    int uindex = 0;
    for (int lev = 0; lev < num_levels; ++lev)
    {
        auto& plev = pc.GetParticles(lev);
        for (MFIter mfi = pc.MakeMFIter(lev); mfi.isValid(); ++mfi)
        {
            int gid = mfi.index();
            int tid = mfi.LocalTileIndex();
            auto index = std::make_pair(gid, tid);

            auto& tile = plev[index];

            GetSendBufferOffset get_offset(plan, pc.BufferMap());
            auto p_snd_buffer = snd_buffer.dataPtr();

            int offset = offsets[uindex];
            int size = sizes[uindex];
            ++uindex;

            auto ptd = tile.getParticleTileData();
            AMREX_FOR_1D ( size, i,
            {
                auto src_offset = get_offset(gid, lev, psize, i);
                int dst_index = offset + i;
                ptd.unpackParticleData(p_snd_buffer, src_offset, dst_index, p_comm_real, p_comm_int);
            });
        }
    }
}

template <class PC, class SndBuffer, class RcvBuffer,
          std::enable_if_t<IsParticleContainer<PC>::value, int> foo = 0>
void communicateParticlesStart (const PC& pc, ParticleCopyPlan& plan, const SndBuffer& snd_buffer, RcvBuffer& rcv_buffer)
{
    BL_PROFILE("amrex::communicateParticlesStart");

#ifdef AMREX_USE_MPI
    Long psize = plan.superParticleSize();
    const int NProcs = ParallelContext::NProcsSub();
    const int MyProc = ParallelContext::MyProcSub();

    if (NProcs == 1) { return; }

    Vector<int> RcvProc;
    Vector<Long> rOffset;

    plan.m_rcv_pad_correction_h.resize(0);
    plan.m_rcv_pad_correction_h.push_back(0);

    Long TotRcvBytes = 0;
    for (int i = 0; i < NProcs; ++i) {
        if (plan.m_rcv_num_particles[i] > 0) {
            RcvProc.push_back(i);
            Long nbytes = plan.m_rcv_num_particles[i]*psize;
            std::size_t acd = ParallelDescriptor::sizeof_selected_comm_data_type(nbytes);
            TotRcvBytes = Long(amrex::aligned_size(acd, TotRcvBytes));
            rOffset.push_back(TotRcvBytes);
            TotRcvBytes += Long(amrex::aligned_size(acd, nbytes));
            plan.m_rcv_pad_correction_h.push_back(plan.m_rcv_pad_correction_h.back() + nbytes);
        }
    }

    for (int i = 0; i < plan.m_nrcvs; ++i)
    {
        plan.m_rcv_pad_correction_h[i] = rOffset[i] - plan.m_rcv_pad_correction_h[i];
    }

    plan.m_rcv_pad_correction_d.resize(plan.m_rcv_pad_correction_h.size());
    Gpu::copyAsync(Gpu::hostToDevice, plan.m_rcv_pad_correction_h.begin(), plan.m_rcv_pad_correction_h.end(),
                   plan.m_rcv_pad_correction_d.begin());

    rcv_buffer.resize(TotRcvBytes);

    plan.m_nrcvs = int(RcvProc.size());

    plan.m_particle_stats.resize(0);
    plan.m_particle_stats.resize(plan.m_nrcvs);

    plan.m_particle_rreqs.resize(0);
    plan.m_particle_rreqs.resize(plan.m_nrcvs);

    const int SeqNum = ParallelDescriptor::SeqNum();

    // Post receives.
    for (int i = 0; i < plan.m_nrcvs; ++i) {
        const auto Who = RcvProc[i];
        const auto offset = rOffset[i];
        Long nbytes = plan.m_rcv_num_particles[Who]*psize;
        std::size_t acd = ParallelDescriptor::sizeof_selected_comm_data_type(nbytes);
        const auto Cnt = amrex::aligned_size(acd, nbytes);

        AMREX_ASSERT(Cnt > 0);
        AMREX_ASSERT(Who >= 0 && Who < NProcs);
        AMREX_ASSERT(amrex::aligned_size(acd, nbytes) % acd == 0);

        plan.m_particle_rreqs[i] =
            ParallelDescriptor::Arecv((char*) (rcv_buffer.dataPtr() + offset), Cnt, Who, SeqNum, ParallelContext::CommunicatorSub()).req();
    }

    if (plan.m_NumSnds == 0) { return; }

    // Send.
    for (int i = 0; i < NProcs; ++i)
    {
        if (i == MyProc) { continue; }
        const auto Who = i;
        const auto Cnt = plan.m_snd_counts[i];
        if (Cnt == 0) { continue; }

        auto snd_offset = plan.m_snd_offsets[i];
        AMREX_ASSERT(plan.m_snd_counts[i] % ParallelDescriptor::sizeof_selected_comm_data_type(plan.m_snd_num_particles[i]*psize) == 0);
        AMREX_ASSERT(Who >= 0 && Who < NProcs);

        ParallelDescriptor::Send((char const*)(snd_buffer.dataPtr()+snd_offset), Cnt, Who, SeqNum,
                                 ParallelContext::CommunicatorSub());
    }

    amrex::ignore_unused(pc);
#else
    amrex::ignore_unused(pc,plan,snd_buffer,rcv_buffer);
#endif // MPI
}

void communicateParticlesFinish (const ParticleCopyPlan& plan);

template <class PC, class Buffer, class UnpackPolicy,
          std::enable_if_t<IsParticleContainer<PC>::value, int> foo = 0>
void unpackRemotes (PC& pc, const ParticleCopyPlan& plan, Buffer& rcv_buffer, UnpackPolicy const& policy)
{
    BL_PROFILE("amrex::unpackRemotes");

#ifdef AMREX_USE_MPI
    const int NProcs = ParallelContext::NProcsSub();
    if (NProcs == 1) { return; }

    const int MyProc = ParallelContext::MyProcSub();
    amrex::ignore_unused(MyProc);
    using PTile = typename PC::ParticleTileType;

    if (plan.m_nrcvs > 0)
    {
        const auto* p_comm_real = plan.d_real_comp_mask.dataPtr();
        const auto* p_comm_int = plan.d_int_comp_mask.dataPtr();
        auto* p_rcv_buffer = rcv_buffer.dataPtr();

        std::vector<int> sizes;
        std::vector<PTile*> tiles;
        for (int i = 0, N = int(plan.m_rcv_box_counts.size()); i < N; ++i)
        {
            int copy_size = plan.m_rcv_box_counts[i];
            int lev = plan.m_rcv_box_levs[i];
            int gid = plan.m_rcv_box_ids[i];
            int tid = 0;
            auto& tile = pc.DefineAndReturnParticleTile(lev, gid, tid);
            sizes.push_back(copy_size);
            tiles.push_back(&tile);
        }

        Vector<int> offsets;
        policy.resizeTiles(tiles, sizes, offsets);
        int uindex = 0;
        int procindex = 0, rproc = plan.m_rcv_box_pids[0];
        for (int i = 0, N = int(plan.m_rcv_box_counts.size()); i < N; ++i)
        {
            int lev = plan.m_rcv_box_levs[i];
            int gid = plan.m_rcv_box_ids[i];
            int tid = 0;
            auto offset = plan.m_rcv_box_offsets[i];
            procindex = (rproc == plan.m_rcv_box_pids[i]) ? procindex : procindex+1;
            rproc = plan.m_rcv_box_pids[i];

            auto& tile = pc.DefineAndReturnParticleTile(lev, gid, tid);
            auto ptd = tile.getParticleTileData();

            AMREX_ASSERT(MyProc ==
                ParallelContext::global_to_local_rank(pc.ParticleDistributionMap(lev)[gid]));

            int dst_offset = offsets[uindex];
            int size = sizes[uindex];
            ++uindex;

            Long psize = plan.superParticleSize();
            const auto* p_pad_adjust = plan.m_rcv_pad_correction_d.dataPtr();

            AMREX_FOR_1D ( size, ip, {
                Long src_offset = psize*(offset + ip) + p_pad_adjust[procindex];
                int dst_index = dst_offset + ip;
                ptd.unpackParticleData(p_rcv_buffer, src_offset, dst_index,
                                       p_comm_real, p_comm_int);
            });

        }
    }
#else
    amrex::ignore_unused(pc,plan,rcv_buffer,policy);
#endif // MPI
}

} // namespace amrex

#endif // AMREX_PARTICLECOMMUNICATION_H_