// Block-Structured AMR Software Framework
// AMReX_NeighborParticlesCPUImpl.H
// (Header reconstructed from the Doxygen "documentation of this file" page.)

#ifndef AMREX_NEIGHBORPARTICLESCPUIMPL_H_
#define AMREX_NEIGHBORPARTICLESCPUIMPL_H_
#include <AMReX_Config.H>
5 template <int NStructReal, int NStructInt, int NArrayReal, int NArrayInt>
6 void
7 NeighborParticleContainer<NStructReal, NStructInt, NArrayReal, NArrayInt>
8 ::fillNeighborsCPU () {
9  BL_PROFILE("NeighborParticleContainer::fillNeighborsCPU");
10  if (!areMasksValid()) {
11  BuildMasks();
12  GetNeighborCommTags();
13  }
14  cacheNeighborInfo();
15  updateNeighborsCPU(false);
16 }
17 
18 template <int NStructReal, int NStructInt, int NArrayReal, int NArrayInt>
19 void
20 NeighborParticleContainer<NStructReal, NStructInt, NArrayReal, NArrayInt>
21 ::sumNeighborsCPU (int real_start_comp, int real_num_comp,
22  int int_start_comp, int int_num_comp)
23 {
24  BL_PROFILE("NeighborParticleContainer::sumNeighborsCPU");
25 
26  if ( ! enableInverse() )
27  {
28  amrex::Abort("Need to enable inverse to true to use sumNeighbors. \n");
29  }
30 
31  const int MyProc = ParallelContext::MyProcSub();
32 
33  std::map<int, Vector<char> > isend_data;
34 
35  for (int lev = 0; lev < this->numLevels(); ++lev)
36  {
37  for (MyParIter pti(*this, lev); pti.isValid(); ++pti)
38  {
39  PairIndex src_index(pti.index(), pti.LocalTileIndex());
40  const auto& tags = inverse_tags[lev][src_index];
41  const auto& neighbs = neighbors[lev][src_index].GetArrayOfStructs();
42  AMREX_ASSERT(tags.size() == neighbs.size());
43 
44  const int num_neighbs = neighbs.size();
45  for (int i = 0; i < num_neighbs; ++i)
46  {
47  const auto& neighb = neighbs[i];
48  const auto& tag = tags[i];
49  const int dst_grid = tag.src_grid;
50  const int global_rank = this->ParticleDistributionMap(lev)[dst_grid];
51  const int dst_proc = ParallelContext::global_to_local_rank(global_rank);
52  const int dst_tile = tag.src_tile;
53  const int dst_index = tag.src_index;
54  const int dst_level = tag.src_level;
55 
56  if (dst_proc == MyProc)
57  {
58  auto pair = std::make_pair(dst_grid, dst_tile);
59  auto& dst_ptile = this->GetParticles(dst_level)[pair];
60  auto& dst_parts = dst_ptile.GetArrayOfStructs();
61  auto& p = dst_parts[dst_index];
62 
63  for (int comp = real_start_comp; comp < real_start_comp + real_num_comp; ++comp)
64  {
65  p.rdata(comp) += neighb.rdata(comp);
66  }
67 
68  for (int comp = int_start_comp; comp < int_start_comp + int_num_comp; ++comp)
69  {
70  p.idata(comp) += neighb.idata(comp);
71  }
72  }
73 
74  else
75  {
76  auto& sdata = isend_data[dst_proc];
77  auto old_size = sdata.size();
78  auto new_size = old_size + real_num_comp*sizeof(Real) + int_num_comp*sizeof(int) + 4*sizeof(int);
79  sdata.resize(new_size);
80  char* dst = &sdata[old_size];
81  std::memcpy(dst, &dst_grid, sizeof(int)); dst += sizeof(int);
82  std::memcpy(dst, &dst_tile, sizeof(int)); dst += sizeof(int);
83  std::memcpy(dst, &dst_index, sizeof(int)); dst += sizeof(int);
84  std::memcpy(dst, &dst_level, sizeof(int)); dst += sizeof(int);
85  for (int comp = real_start_comp; comp < real_start_comp + real_num_comp; ++comp)
86  {
87  Real data = neighb.rdata(comp);
88  std::memcpy(dst, &data, sizeof(Real));
89  dst += sizeof(Real);
90  }
91  for (int comp = int_start_comp; comp < int_start_comp + int_num_comp; ++comp)
92  {
93  int data = neighb.idata(comp);
94  std::memcpy(dst, &data, sizeof(int));
95  dst += sizeof(int);
96  }
97  }
98  }
99  }
100  }
101 
102  sumNeighborsMPI(isend_data, real_start_comp, real_num_comp, int_start_comp, int_num_comp);
103 }
104 
105 template <int NStructReal, int NStructInt, int NArrayReal, int NArrayInt>
106 void
108 sumNeighborsMPI (std::map<int, Vector<char> >& not_ours,
109  int real_start_comp, int real_num_comp,
110  int int_start_comp, int int_num_comp)
111 {
112  BL_PROFILE("NeighborParticleContainer::sumNeighborsMPI");
113 
114 #ifdef AMREX_USE_MPI
115  const int NProcs = ParallelContext::NProcsSub();
116 
117  Vector<Long> isnds(NProcs, 0);
118  Vector<Long> ircvs(NProcs, 0);
119  for (int i = 0; i < NProcs; ++i) {
120  ircvs[i] = 0;
121  }
122 
123  {
124  // each proc figures out how many bytes it will send, and how
125  // many it will receive
126 
127  Long num_isnds = 0;
128  for (const auto& kv : not_ours)
129  {
130  num_isnds += kv.second.size();
131  isnds[kv.first] = kv.second.size();
132  }
133 
135 
136  if (num_isnds == 0) { return; }
137 
138  const int num_ircvs = neighbor_procs.size();
139  Vector<MPI_Status> stats(num_ircvs);
140  Vector<MPI_Request> rreqs(num_ircvs);
141 
142  const int SeqNum = ParallelDescriptor::SeqNum();
143 
144  // Post receives
145  for (int i = 0; i < num_ircvs; ++i)
146  {
147  const int Who = neighbor_procs[i];
148  const Long Cnt = 1;
149 
150  AMREX_ASSERT(Who >= 0 && Who < NProcs);
151 
152  rreqs[i] = ParallelDescriptor::Arecv(&ircvs[Who], Cnt, Who, SeqNum,
154  }
155 
156  // Send.
157  for (int i = 0; i < num_ircvs; ++i) {
158  const int Who = neighbor_procs[i];
159  const Long Cnt = 1;
160 
161  AMREX_ASSERT(Who >= 0 && Who < NProcs);
162 
163  ParallelDescriptor::Send(&isnds[Who], Cnt, Who, SeqNum,
165  }
166 
167  if (num_ircvs > 0) { ParallelDescriptor::Waitall(rreqs, stats); }
168  }
169 
170  Vector<int> RcvProc;
171  Vector<std::size_t> rOffset; // Offset (in bytes) in the receive buffer
172  std::size_t TotRcvBytes = 0;
173  for (int i = 0; i < NProcs; ++i) {
174  if (ircvs[i] > 0) {
175  RcvProc.push_back(i);
176  rOffset.push_back(TotRcvBytes);
177  TotRcvBytes += ircvs[i];
178  }
179  }
180 
181  const auto nrcvs = int(RcvProc.size());
182  Vector<MPI_Status> stats(nrcvs);
183  Vector<MPI_Request> rreqs(nrcvs);
184 
185  const int SeqNum = ParallelDescriptor::SeqNum();
186 
187  // Allocate data for rcvs as one big chunk.
188  Vector<char> recvdata(TotRcvBytes);
189 
190  // Post receives.
191  for (int i = 0; i < nrcvs; ++i) {
192  const auto Who = RcvProc[i];
193  const auto offset = rOffset[i];
194  const auto Cnt = ircvs[Who];
195 
196  AMREX_ASSERT(Cnt > 0);
198  AMREX_ASSERT(Who >= 0 && Who < NProcs);
199 
200  rreqs[i] = ParallelDescriptor::Arecv(&recvdata[offset], Cnt, Who, SeqNum,
202  }
203 
204  // Send.
205  for (const auto& kv : not_ours) {
206  const auto Who = kv.first;
207  const auto Cnt = kv.second.size();
208 
209  AMREX_ASSERT(Cnt > 0);
210  AMREX_ASSERT(Who >= 0 && Who < NProcs);
212 
213  ParallelDescriptor::Send(kv.second.data(), Cnt, Who, SeqNum,
215  }
216 
217  // unpack the received data and put them into the proper neighbor buffers
218  if (nrcvs > 0)
219  {
220  ParallelDescriptor::Waitall(rreqs, stats);
221 
222  const size_t data_size = real_num_comp*sizeof(Real) + int_num_comp*sizeof(int) + 4 * sizeof(int);
223 
224  if (recvdata.size() % data_size != 0) {
225  amrex::Print() << recvdata.size() << " " << data_size << "\n";
226  if (this->m_verbose) {
227  amrex::AllPrint() << "NeighborParticles::sumNeighbors: sizes = "
228  << recvdata.size() << ", " << data_size << "\n";
229  }
230  amrex::Abort("NeighborParticles::sumNeighbors: How did this happen?");
231  }
232 
233  auto npart = int(recvdata.size() / data_size);
234 
235  char* buffer = recvdata.data();
236  for (int j = 0; j < npart; ++j)
237  {
238  int grid, tile, index, lev;
239  std::memcpy(&grid, buffer, sizeof(int)); buffer += sizeof(int);
240  std::memcpy(&tile, buffer, sizeof(int)); buffer += sizeof(int);
241  std::memcpy(&index, buffer, sizeof(int)); buffer += sizeof(int);
242  std::memcpy(&lev, buffer, sizeof(int)); buffer += sizeof(int);
243 
244  auto pair = std::make_pair(grid, tile);
245  auto& ptile = this->GetParticles(lev)[pair];
246  auto& parts = ptile.GetArrayOfStructs();
247  auto& p = parts[index];
248 
249  for (int comp = real_start_comp; comp < real_start_comp + real_num_comp; ++comp)
250  {
251  Real data;
252  std::memcpy(&data, buffer, sizeof(Real));
253  p.rdata(comp) += data;
254  buffer += sizeof(Real);
255  }
256 
257  for (int comp = int_start_comp; comp < int_start_comp + int_num_comp; ++comp)
258  {
259  int data;
260  std::memcpy(&data, buffer, sizeof(int));
261  p.idata(comp) += data;
262  buffer += sizeof(int);
263  }
264  }
265  }
266 #else
267  amrex::ignore_unused(not_ours, real_start_comp, real_num_comp, int_start_comp, int_num_comp);
268 #endif
269 }
270 
271 template <int NStructReal, int NStructInt, int NArrayReal, int NArrayInt>
272 void
274 ::updateNeighborsCPU (bool reuse_rcv_counts) {
275 
276  BL_PROFILE_VAR("NeighborParticleContainer::updateNeighborsCPU", update);
277 
278  const int MyProc = ParallelContext::MyProcSub();
279 
280  for (int lev = 0; lev < this->numLevels(); ++lev) {
281  const Periodicity& periodicity = this->Geom(lev).periodicity();
282  const RealBox& prob_domain = this->Geom(lev).ProbDomain();
283 
284  int num_threads = OpenMP::get_max_threads();
285 
286  for (MyParIter pti(*this, lev); pti.isValid(); ++pti) {
287  PairIndex src_index(pti.index(), pti.LocalTileIndex());
288  auto src = pti.GetParticleTile().getParticleTileData();
289  for (int j = 0; j < num_threads; ++j) {
290  auto& tags = buffer_tag_cache[lev][src_index][j];
291  int num_tags = tags.size();
292 #ifdef AMREX_USE_OMP
293 #pragma omp parallel for
294 #endif
295  for (int i = 0; i < num_tags; ++i) {
296  const NeighborCopyTag& tag = tags[i];
297  const int global_who = this->ParticleDistributionMap(tag.level)[tag.grid];
298  const int who = ParallelContext::global_to_local_rank(global_who);
299  if (who == MyProc) {
300  PairIndex dst_index(tag.grid, tag.tile);
301  auto dst = neighbors[tag.level][dst_index].getParticleTileData();
302  copyParticle(dst, src, tag.src_index, tag.dst_index);
303  if (periodicity.isAnyPeriodic()) {
304  auto& aos = neighbors[tag.level][dst_index].GetArrayOfStructs();
305  ParticleType& p = aos[tag.dst_index];
306  for (int dir = 0; dir < AMREX_SPACEDIM; ++dir) {
307  if (! periodicity.isPeriodic(dir)) { continue; }
308  if (tag.periodic_shift[dir] < 0) {
309  p.pos(dir) += static_cast<ParticleReal> (prob_domain.length(dir));
310  } else if (tag.periodic_shift[dir] > 0) {
311  p.pos(dir) -= static_cast<ParticleReal> (prob_domain.length(dir));
312  }
313  }
314  }
315 
316  if ( enableInverse() )
317  {
318  auto& itags = inverse_tags[tag.level][dst_index];
319  AMREX_ASSERT(tag.dst_index < itags.size());
320  itags[tag.dst_index].src_grid = src_index.first;
321  itags[tag.dst_index].src_tile = src_index.second;
322  itags[tag.dst_index].src_index = tag.src_index;
323  itags[tag.dst_index].src_level = lev;
324  }
325  } else {
326  auto& aos = pti.GetArrayOfStructs();
327  auto& soa = pti.GetStructOfArrays();
328  ParticleType p = aos[tag.src_index]; // copy
329  if (periodicity.isAnyPeriodic()) {
330  for (int dir = 0; dir < AMREX_SPACEDIM; ++dir) {
331  if (! periodicity.isPeriodic(dir)) { continue; }
332  if (tag.periodic_shift[dir] < 0) {
333  p.pos(dir) += static_cast<ParticleReal> (prob_domain.length(dir));
334  } else if (tag.periodic_shift[dir] > 0) {
335  p.pos(dir) -= static_cast<ParticleReal> (prob_domain.length(dir));
336  }
337  }
338  }
339 
340  char* dst_ptr = &send_data[who][tag.dst_index];
341  char* src_ptr = (char *) &p;
342  for (int ii = 0; ii < AMREX_SPACEDIM + NStructReal; ++ii) {
343  if (ghost_real_comp[ii]) {
344  std::memcpy(dst_ptr, src_ptr, sizeof(typename ParticleType::RealType));
345  dst_ptr += sizeof(typename ParticleType::RealType);
346  }
347  src_ptr += sizeof(typename ParticleType::RealType);
348  }
349  for (int ii = 0; ii < this->NumRealComps(); ++ii) {
350  if (ghost_real_comp[ii+AMREX_SPACEDIM+NStructReal])
351  {
352  std::memcpy(dst_ptr, &(soa.GetRealData(ii)[tag.src_index]),
353  sizeof(typename ParticleType::RealType));
354  dst_ptr += sizeof(typename ParticleType::RealType);
355  }
356  }
357  for (int ii = 0; ii < 2 + NStructInt; ++ii) {
358  if (ghost_int_comp[ii]) {
359  std::memcpy(dst_ptr, src_ptr, sizeof(int));
360  dst_ptr += sizeof(int);
361  }
362  src_ptr += sizeof(int);
363  }
364  for (int ii = 0; ii < this->NumIntComps(); ++ii) {
365  if (ghost_int_comp[ii+2+NStructInt])
366  {
367  std::memcpy(dst_ptr, &(soa.GetIntData(ii)[tag.src_index]),
368  sizeof(int));
369  dst_ptr += sizeof(int);
370  }
371  }
372  if ( enableInverse() )
373  {
374  std::memcpy(dst_ptr,&(src_index.first),sizeof(int)); dst_ptr += sizeof(int);
375  std::memcpy(dst_ptr,&(src_index.second),sizeof(int)); dst_ptr += sizeof(int);
376  std::memcpy(dst_ptr,&(tag.src_index),sizeof(int)); dst_ptr += sizeof(int);
377  std::memcpy(dst_ptr,&(lev),sizeof(int)); dst_ptr += sizeof(int);
378  }
379  }
380  }
381  }
382  }
383 
384 #ifdef AMREX_USE_OMP
385 #pragma omp parallel
386 #endif
387  for (MFIter mfi = this->MakeMFIter(lev); mfi.isValid(); ++mfi) {
388  const int grid = mfi.index();
389  const int tile = mfi.LocalTileIndex();
390  PairIndex dst_index(grid, tile);
391  neighbors[lev][dst_index].resize(local_neighbor_sizes[lev][dst_index]);
392  if ( enableInverse() ) {
393  inverse_tags[lev][dst_index].resize(local_neighbor_sizes[lev][dst_index]);
394  }
395  }
396  }
397  BL_PROFILE_VAR_STOP(update);
398 
399  fillNeighborsMPI(reuse_rcv_counts);
400 
401  for (int lev = 0; lev < this->numLevels(); ++lev)
402  {
403  for(MFIter mfi = this->MakeMFIter(lev); mfi.isValid(); ++mfi)
404  {
405  int src_grid = mfi.index();
406  int src_tile = mfi.LocalTileIndex();
407  auto index = std::make_pair(src_grid, src_tile);
408  auto& ptile = this->GetParticles(lev)[index];
409  ptile.setNumNeighbors(neighbors[lev][index].size());
410  amrex::copyParticles(ptile, neighbors[lev][index], 0,
411  ptile.numRealParticles(), ptile.numNeighborParticles());
412  }
413  }
414 
415 }
416 
417 template <int NStructReal, int NStructInt, int NArrayReal, int NArrayInt>
418 void
419 NeighborParticleContainer<NStructReal, NStructInt, NArrayReal, NArrayInt>
420 ::clearNeighborsCPU ()
421 {
422  BL_PROFILE("NeighborParticleContainer::clearNeighborsCPU");
423 
424  resizeContainers(this->numLevels());
425  for (int lev = 0; lev < this->numLevels(); ++lev) {
426  neighbors[lev].clear();
427  if ( enableInverse() ) { inverse_tags[lev].clear(); }
428  buffer_tag_cache[lev].clear();
429 
430  for(MFIter mfi = this->MakeMFIter(lev); mfi.isValid(); ++mfi)
431  {
432  int src_grid = mfi.index();
433  int src_tile = mfi.LocalTileIndex();
434  auto index = std::make_pair(src_grid, src_tile);
435  auto& ptile = this->GetParticles(lev)[index];
436  ptile.setNumNeighbors(0);
437  }
438  }
439 
440  send_data.clear();
441 }
442 
443 template <int NStructReal, int NStructInt, int NArrayReal, int NArrayInt>
444 void
446 getRcvCountsMPI () {
447 
448  BL_PROFILE("NeighborParticleContainer::getRcvCountsMPI");
449 
450 #ifdef AMREX_USE_MPI
451  const int NProcs = ParallelContext::NProcsSub();
452 
453  // each proc figures out how many bytes it will send, and how
454  // many it will receive
455  Vector<Long> snds(NProcs, 0);
456  rcvs.resize(NProcs);
457  for (int i = 0; i < NProcs; ++i) {
458  rcvs[i] = 0;
459  }
460 
461  num_snds = 0;
462  for (const auto& kv : send_data) {
463  num_snds += kv.second.size();
464  snds[kv.first] = kv.second.size();
465  }
466 
468 
469  if (num_snds == 0) { return; }
470 
471  const int num_rcvs = neighbor_procs.size();
472  Vector<MPI_Status> stats(num_rcvs);
473  Vector<MPI_Request> rreqs(num_rcvs);
474 
475  const int SeqNum = ParallelDescriptor::SeqNum();
476 
477  // Post receives
478  for (int i = 0; i < num_rcvs; ++i) {
479  const int Who = neighbor_procs[i];
480  const Long Cnt = 1;
481 
482  AMREX_ASSERT(Who >= 0 && Who < NProcs);
483 
484  rreqs[i] = ParallelDescriptor::Arecv(&rcvs[Who], Cnt, Who, SeqNum,
486  }
487 
488  // Send.
489  for (int i = 0; i < num_rcvs; ++i) {
490  const int Who = neighbor_procs[i];
491  const Long Cnt = 1;
492 
493  AMREX_ASSERT(Who >= 0 && Who < NProcs);
494 
495  ParallelDescriptor::Send(&snds[Who], Cnt, Who, SeqNum,
497  }
498 
499  if (num_rcvs > 0) { ParallelDescriptor::Waitall(rreqs, stats); }
500 
501 #endif // AMREX_USE_MPI
502 }
503 
504 template <int NStructReal, int NStructInt, int NArrayReal, int NArrayInt>
505 void
507 fillNeighborsMPI (bool reuse_rcv_counts) {
508 
509  BL_PROFILE("NeighborParticleContainer::fillNeighborsMPI");
510 
511 #ifdef AMREX_USE_MPI
512  const int NProcs = ParallelContext::NProcsSub();
513 
514  // each proc figures out how many bytes it will send, and how
515  // many it will receive
516  if (!reuse_rcv_counts) { getRcvCountsMPI(); }
517  if (num_snds == 0) { return; }
518 
519  Vector<int> RcvProc;
520  Vector<std::size_t> rOffset; // Offset (in bytes) in the receive buffer
521  std::size_t TotRcvBytes = 0;
522  for (int i = 0; i < NProcs; ++i) {
523  if (rcvs[i] > 0) {
524  RcvProc.push_back(i);
525  rOffset.push_back(TotRcvBytes);
526  TotRcvBytes += rcvs[i];
527  }
528  }
529 
530  const auto nrcvs = int(RcvProc.size());
531  Vector<MPI_Status> stats(nrcvs);
532  Vector<MPI_Request> rreqs(nrcvs);
533 
534  const int SeqNum = ParallelDescriptor::SeqNum();
535 
536  // Allocate data for rcvs as one big chunk.
537  Vector<char> recvdata(TotRcvBytes);
538 
539  // Post receives.
540  for (int i = 0; i < nrcvs; ++i) {
541  const auto Who = RcvProc[i];
542  const auto offset = rOffset[i];
543  const auto Cnt = rcvs[Who];
544 
545  AMREX_ASSERT(Cnt > 0);
547  AMREX_ASSERT(Who >= 0 && Who < NProcs);
548 
549  rreqs[i] = ParallelDescriptor::Arecv(&recvdata[offset], Cnt, Who, SeqNum,
551  }
552 
553  // Send.
554  for (const auto& kv : send_data) {
555  const auto Who = kv.first;
556  const auto Cnt = kv.second.size();
557 
558  AMREX_ASSERT(Cnt > 0);
559  AMREX_ASSERT(Who >= 0 && Who < NProcs);
561 
562  ParallelDescriptor::Send(kv.second.data(), Cnt, Who, SeqNum);
563  }
564 
565  // unpack the received data and put them into the proper neighbor buffers
566  if (nrcvs > 0) {
567  ParallelDescriptor::Waitall(rreqs, stats);
568  for (int i = 0; i < nrcvs; ++i) {
569  const auto offset = int(rOffset[i]);
570  char* buffer = &recvdata[offset];
571  int num_tiles, lev, gid, tid, size, np;
572  std::memcpy(&num_tiles, buffer, sizeof(int)); buffer += sizeof(int);
573  for (int j = 0; j < num_tiles; ++j) {
574  std::memcpy(&lev, buffer, sizeof(int)); buffer += sizeof(int);
575  std::memcpy(&gid, buffer, sizeof(int)); buffer += sizeof(int);
576  std::memcpy(&tid, buffer, sizeof(int)); buffer += sizeof(int);
577  std::memcpy(&size, buffer, sizeof(int)); buffer += sizeof(int);
578 
579  if (size == 0) { continue; }
580 
581  np = size / cdata_size;
582 
583  AMREX_ASSERT(size % cdata_size == 0);
584 
585  PairIndex dst_index(gid, tid);
586  size_t old_size = neighbors[lev][dst_index].size();
587  size_t new_size = neighbors[lev][dst_index].size() + np;
588  if ( enableInverse() )
589  {
590  AMREX_ASSERT(neighbors[lev][dst_index].size() ==
591  size_t(inverse_tags[lev][dst_index].size()));
592  inverse_tags[lev][dst_index].resize(new_size);
593  }
594  neighbors[lev][dst_index].resize(new_size);
595 
596  char* src = buffer;
597  for (int n = 0; n < np; ++n) {
598  char* dst_aos = (char*) &neighbors[lev][dst_index].GetArrayOfStructs()[old_size+n];
599  auto& dst_soa = neighbors[lev][dst_index].GetStructOfArrays();
600  for (int ii = 0; ii < AMREX_SPACEDIM + NStructReal; ++ii) {
601  if (ghost_real_comp[ii]) {
602  std::memcpy(dst_aos, src, sizeof(typename ParticleType::RealType));
603  src += sizeof(typename ParticleType::RealType);
604  }
605  dst_aos += sizeof(typename ParticleType::RealType);
606  }
607  for (int ii = 0; ii < this->NumRealComps(); ++ii) {
608  if (ghost_real_comp[ii+AMREX_SPACEDIM+NStructReal])
609  {
610  std::memcpy(&(dst_soa.GetRealData(ii)[old_size+n]),
611  src, sizeof(typename ParticleType::RealType));
612  src += sizeof(typename ParticleType::RealType);
613  }
614  }
615  for (int ii = 0; ii < 2 + NStructInt; ++ii) {
616  if (ghost_int_comp[ii]) {
617  std::memcpy(dst_aos, src, sizeof(int));
618  src += sizeof(int);
619  }
620  dst_aos += sizeof(int);
621  }
622  for (int ii = 0; ii < this->NumIntComps(); ++ii) {
623  if (ghost_int_comp[ii+2+NStructInt])
624  {
625  std::memcpy(&(dst_soa.GetIntData(ii)[old_size+n]),
626  src, sizeof(int));
627  src += sizeof(int);
628  }
629  }
630 
631  if ( enableInverse() )
632  {
633  auto& tag = inverse_tags[lev][dst_index][old_size+n];
634  std::memcpy(&(tag.src_grid),src,sizeof(int));
635  src += sizeof(int);
636 
637  std::memcpy(&(tag.src_tile),src,sizeof(int));
638  src += sizeof(int);
639 
640  std::memcpy(&(tag.src_index),src,sizeof(int));
641  src += sizeof(int);
642 
643  std::memcpy(&(tag.src_level),src,sizeof(int));
644  src += sizeof(int);
645  }
646  }
647  buffer += size;
648  }
649  }
650  }
651 #else
652  amrex::ignore_unused(reuse_rcv_counts);
653 #endif
654 }
655 
656 #endif
// NOTE(review): everything below is Doxygen cross-reference residue captured
// when this header was extracted from the generated documentation. It is not
// part of the original source; it is preserved here as comments so the file
// remains valid C++.
//
// #define BL_PROFILE(a)
// Definition: AMReX_BLProfiler.H:551
// #define BL_PROFILE_VAR_STOP(vname)
// Definition: AMReX_BLProfiler.H:563
// #define BL_PROFILE_VAR(fname, vname)
// Definition: AMReX_BLProfiler.H:560
// #define AMREX_ASSERT(EX)
// Definition: AMReX_BLassert.H:38
// Array4< int const > offset
// Definition: AMReX_HypreMLABecLap.cpp:1089
// Print on all processors of the default communicator.
// Definition: AMReX_Print.H:117
// Definition: AMReX_NeighborParticles.H:35
// std::pair< int, int > PairIndex
// Definition: AMReX_NeighborParticles.H:196
// MPI_Request req() const
// Definition: AMReX_ParallelDescriptor.H:74
// This provides length of period for periodic domains. 0 means it is not periodic in that direction....
// Definition: AMReX_Periodicity.H:17
// bool isAnyPeriodic() const noexcept
// Definition: AMReX_Periodicity.H:22
// bool isPeriodic(int dir) const noexcept
// Definition: AMReX_Periodicity.H:26
// This class provides the user with a few print options.
// Definition: AMReX_Print.H:35
// A Box with real dimensions. A RealBox is OK iff volume >= 0.
// Definition: AMReX_RealBox.H:21
// AMREX_GPU_HOST_DEVICE Real length(int dir) const noexcept
// Returns length in specified direction.
// Definition: AMReX_RealBox.H:62
// Long size() const noexcept
// Definition: AMReX_Vector.H:50
// AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE T Max(T *const m, T const value) noexcept
// Definition: AMReX_GpuAtomic.H:417
// AMREX_GPU_HOST_DEVICE Long size(T const &b) noexcept
// integer version
// Definition: AMReX_GpuRange.H:26
// AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void * memcpy(void *dest, const void *src, std::size_t count)
// Definition: AMReX_GpuUtility.H:214
// int NProcs()
// Process ID in MPI_COMM_WORLD.
// Definition: AMReX_MPMD.cpp:122
// int MyProc()
// Definition: AMReX_MPMD.cpp:117
// constexpr int get_max_threads()
// Definition: AMReX_OpenMP.H:36
// MPI_Comm CommunicatorSub() noexcept
// sub-communicator for current frame
// Definition: AMReX_ParallelContext.H:70
// int MyProcSub() noexcept
// my sub-rank in current frame
// Definition: AMReX_ParallelContext.H:76
// int global_to_local_rank(int rank) noexcept
// Definition: AMReX_ParallelContext.H:98
// int NProcsSub() noexcept
// number of ranks in current frame
// Definition: AMReX_ParallelContext.H:74
// void Waitall(Vector< MPI_Request > &, Vector< MPI_Status > &)
// Definition: AMReX_ParallelDescriptor.cpp:1295
// Message Send(const T *buf, size_t n, int dst_pid, int tag)
// Definition: AMReX_ParallelDescriptor.H:1109
// int SeqNum() noexcept
// Returns sequential message sequence numbers, usually used as tags for send/recv.
// Definition: AMReX_ParallelDescriptor.H:613
// Message Arecv(T *, size_t n, int pid, int tag)
// Definition: AMReX_ParallelDescriptor.H:1130
// @ max
// Definition: AMReX_ParallelReduce.H:17
// void copyParticles(DstTile &dst, const SrcTile &src) noexcept
// Copy particles from src to dst. This version copies all the particles, writing them to the beginning ...
// Definition: AMReX_ParticleTransformation.H:158
// AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void ignore_unused(const Ts &...)
// This shuts up the compiler about unused variables.
// Definition: AMReX.H:111
// void Abort(const std::string &msg)
// Print out message to cerr and exit via abort().
// Definition: AMReX.cpp:225
// const int[]
// Definition: AMReX_BLProfiler.cpp:1664
// AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void copyParticle(const ParticleTileData< T_ParticleType, NAR, NAI > &dst, const ConstParticleTileData< T_ParticleType, NAR, NAI > &src, int src_i, int dst_i) noexcept
// A general single particle copying routine that can run on the GPU.
// Definition: AMReX_ParticleTransformation.H:31