Block-Structured AMR Software Framework
Loading...
Searching...
No Matches
AMReX_WriteBinaryParticleData.H
Go to the documentation of this file.
1#ifndef AMREX_WRITE_BINARY_PARTICLE_DATA_H
2#define AMREX_WRITE_BINARY_PARTICLE_DATA_H
3#include <AMReX_Config.H>
4
5#include <AMReX_TypeTraits.H>
7#include <AMReX_GpuDevice.H>
8#include <AMReX_VisMF.H>
9
10namespace amrex {
11
{
    // Filter functor: returns 1 (keep) when particle i carries a valid id,
    // 0 otherwise.  Used to select only valid particles for output.
    // NOTE(review): the doxygen extraction dropped the preceding line
    // (presumably 'struct KeepValidFilter') and the line before operator()
    // (presumably an AMREX_GPU_HOST_DEVICE qualifier) -- confirm upstream.
    template <typename SrcData>
    int operator() (const SrcData& src, int i) const noexcept
    {
        return (src.id(i).is_valid());
    }
};
21
23namespace particle_detail {
24
25template <typename ParticleReal>
26std::size_t PSizeInFile (const Vector<int>& wrc, const Vector<int>& wic)
27{
28 std::size_t rsize = sizeof(ParticleReal)*std::accumulate(wrc.begin(), wrc.end(), 0);
29 std::size_t isize = sizeof(int)*std::accumulate(wic.begin(), wic.end(), 0);
30 return rsize + isize + AMREX_SPACEDIM*sizeof(ParticleReal) + 2*sizeof(int);
31}
32
template <class Container,
          class PTile,
          class F>
void
fillFlagsGpu (Container& pflags, const PTile& ptile, F const& f)
{
    // GPU path: evaluate the user-supplied filter f for every particle in
    // ptile and store the resulting flag in pflags (resized to the particle
    // count, zero-initialized).
    const auto& ptd = ptile.getConstParticleTileData();
    const auto np = ptile.numParticles();
    pflags.resize(np, 0);
    auto flag_ptr = pflags.data();
    // NOTE(review): the extraction dropped the line that launches this device
    // lambda (presumably 'amrex::ParallelForRNG(np,') -- confirm upstream.
    [=] AMREX_GPU_DEVICE (int k, amrex::RandomEngine const& engine) noexcept
    {
        const auto p = ptd.getSuperParticle(k);
        amrex::ignore_unused(flag_ptr, f, engine);
        // Dispatch to whichever call signature the filter supports, checked
        // in this order: f(particle, engine), f(particle),
        // f(tile_data, i, engine), f(tile_data, i).
        if constexpr (IsCallable<F,decltype(p),RandomEngine>::value) {
            flag_ptr[k] = f(p,engine);
        } else if constexpr (IsCallable<F,decltype(p)>::value) {
            flag_ptr[k] = f(p);
        } else if constexpr (IsCallable<F,decltype(ptd),int,RandomEngine>::value) {
            flag_ptr[k] = f(ptd,k,engine);
        } else {
            flag_ptr[k] = f(ptd,k);
        }
    });
}
59
60template <class Container,
61 class PTile,
62 class F>
63void
64fillFlagsCpu (Container& pflags, const PTile& ptile, F const& f)
65{
66 const auto& ptd = ptile.getConstParticleTileData();
67 const auto np = ptile.numParticles();
68 pflags.resize(np, 0);
69 auto flag_ptr = pflags.data();
70 for (int k = 0; k < np; ++k) {
71 const auto p = ptd.getSuperParticle(k);
72 if constexpr (IsCallable<F,decltype(p),RandomEngine>::value) {
73 flag_ptr[k] = f(p,getInvalidRandomEngine());
74 } else if constexpr (IsCallable<F,decltype(p)>::value) {
75 flag_ptr[k] = f(p);
76 } else if constexpr (IsCallable<F,decltype(ptd),int,RandomEngine>::value) {
77 flag_ptr[k] = f(ptd,k,getInvalidRandomEngine());
78 } else {
79 flag_ptr[k] = f(ptd,k);
80 }
81 }
82}
83
84template <template <class, class> class Container,
85 class Allocator,
86 class PTile,
87 class F>
88void
89fillFlags (Container<int, Allocator>& pflags, const PTile& ptile, F const& f)
90{
91 if constexpr (IsPolymorphicArenaAllocator<Allocator>::value) {
92 if (pflags.arena()->isManaged() || pflags.arena()->isDevice()) {
93 fillFlagsGpu(pflags, ptile, f);
94 } else {
95 fillFlagsCpu(pflags, ptile, f);
96 }
97 } else {
98 if constexpr (RunOnGpu<Allocator>::value) {
99 fillFlagsGpu(pflags, ptile, f);
100 } else {
101 fillFlagsCpu(pflags, ptile, f);
102 }
103 }
104}
105
template <class Container, class PC>
countFlagsGpu (const Vector<std::map<std::pair<int,int>,Container>>& particle_io_flags, const PC& pc)
{
    // GPU reduction counting, over all levels and tiles of pc, how many
    // particles have a nonzero io flag.
    // NOTE(review): the extraction dropped the return-type line (presumably
    // 'Long') between the template header and the function name.
    ReduceOps<ReduceOpSum> reduce_op;
    ReduceData<Long> reduce_data(reduce_op);
    using ReduceTuple = typename decltype(reduce_data)::Type;

    for (int lev = 0; lev < pc.GetParticles().size(); lev++)
    {
        const auto& pmap = pc.GetParticles(lev);
        for (const auto& kv : pmap)
        {
            // Flags for this (grid,tile) key; summed into one reduction
            // across all tiles and levels.
            const auto& pflags = particle_io_flags[lev].at(kv.first);
            const auto flag_ptr = pflags.data();
            reduce_op.eval(pflags.size(), reduce_data,
            [=] AMREX_GPU_DEVICE (const int i) -> ReduceTuple
            {
                return flag_ptr[i] ? 1 : 0;
            });
        }
    }
    ReduceTuple hv = reduce_data.value(reduce_op);
    return amrex::get<0>(hv);
}
131
template <class Container>
countFlagsGpu (const Container& pflags)
{
    // GPU reduction counting the nonzero entries of a single flag container.
    // NOTE(review): the extraction dropped the return-type line (presumably
    // 'Long') between the template header and the function name.
    ReduceOps<ReduceOpSum> reduce_op;
    ReduceData<Long> reduce_data(reduce_op);
    using ReduceTuple = typename decltype(reduce_data)::Type;

    const auto flag_ptr = pflags.data();
    reduce_op.eval(pflags.size(), reduce_data,
    [=] AMREX_GPU_DEVICE (const amrex::Long i) -> ReduceTuple
    {
        return flag_ptr[i] ? 1 : 0;
    });
    ReduceTuple hv = reduce_data.value(reduce_op);
    return amrex::get<0>(hv);
}
149
template <class Container, class PC>
countFlagsCpu (const Vector<std::map<std::pair<int,int>,Container>>& particle_io_flags, const PC& pc)
{
    // Host loop counting, over all levels and tiles of pc, how many
    // particles have a nonzero io flag.
    // NOTE(review): the extraction dropped the return-type line (presumably
    // 'Long') between the template header and the function name.
    amrex::Long nparticles = 0;
    for (int lev = 0; lev < pc.GetParticles().size(); lev++)
    {
        const auto& pmap = pc.GetParticles(lev);
        for (const auto& kv : pmap)
        {
            const auto& pflags = particle_io_flags[lev].at(kv.first);
            for (int k = 0; k < kv.second.numParticles(); ++k)
            {
                if (pflags[k]) { nparticles++; }
            }
        }
    }
    return nparticles;
}
169
template <class Container>
countFlagsCpu (const Container& pflags)
{
    // Host loop counting the nonzero entries of a single flag container.
    // NOTE(review): the extraction dropped the return-type line (presumably
    // 'Long') between the template header and the function name.
    amrex::Long nparticles = 0;
    for (std::size_t k = 0; k < pflags.size(); ++k)
    {
        if (pflags[k]) { nparticles++; }
    }
    return nparticles;
}
181
template <template <class, class> class Container, class Allocator, class PC>
countFlags (const Vector<std::map<std::pair<int,int>,Container<int,Allocator>>>& particle_io_flags, const PC& pc)
{
    // Dispatch the flag count to the GPU or CPU implementation: run-time
    // arena inspection for a polymorphic allocator, compile-time allocator
    // trait otherwise (same pattern as fillFlags).
    // NOTE(review): the extraction dropped the return-type line (presumably
    // 'Long') between the template header and the function name.
    if constexpr (IsPolymorphicArenaAllocator<Allocator>::value) {
        if (pc.arena()->isManaged() || pc.arena()->isDevice()) {
            return countFlagsGpu(particle_io_flags, pc);
        } else {
            return countFlagsCpu(particle_io_flags, pc);
        }
    } else {
        if constexpr (RunOnGpu<Allocator>::value) {
            return countFlagsGpu(particle_io_flags, pc);
        } else {
            return countFlagsCpu(particle_io_flags, pc);
        }
    }
}
200
template <template <class, class> class Container, class Allocator>
countFlags (const Container<int,Allocator>& pflags)
{
    // Single-container overload of the dispatcher above; the memory-location
    // query here uses the container's own arena rather than the particle
    // container's.
    // NOTE(review): the extraction dropped the return-type line (presumably
    // 'Long') between the template header and the function name.
    if constexpr (IsPolymorphicArenaAllocator<Allocator>::value) {
        if (pflags.arena()->isManaged() || pflags.arena()->isDevice()) {
            return countFlagsGpu(pflags);
        } else {
            return countFlagsCpu(pflags);
        }
    } else {
        if constexpr (RunOnGpu<Allocator>::value) {
            return countFlagsGpu(pflags);
        } else {
            return countFlagsCpu(pflags);
        }
    }
}
219
template <typename I>
void packParticleIDs (I* idata, const std::uint64_t idcpu, bool is_checkpoint) noexcept
{
    // Write the particle's 64-bit idcpu word into idata[0..1].
    // NOTE(review): the extraction dropped the line between the template
    // header and this declaration (presumably an AMREX_GPU_HOST_DEVICE
    // qualifier) -- confirm upstream.
    if (is_checkpoint) {
        // Checkpoint: preserve the raw bits exactly so restart can
        // reassemble the identical idcpu.  Split into high/low 32-bit halves
        // and bit-copy each unsigned half into a signed int.
        std::int32_t xi, yi;
        std::uint32_t xu, yu;
        xu = (std::uint32_t)((idcpu & 0xFFFFFFFF00000000LL) >> 32);
        yu = (std::uint32_t)( idcpu & 0xFFFFFFFFLL);
        amrex::Gpu::memcpy(&xi, &xu, sizeof(xu));
        amrex::Gpu::memcpy(&yi, &yu, sizeof(yu));
        idata[0] = xi;
        idata[1] = yi;
    } else {
        // Plotfile: store the decoded particle id and cpu number instead of
        // the raw bit pattern.
        idata[0] = ConstParticleIDWrapper{idcpu};
        idata[1] = ConstParticleCPUWrapper{idcpu};
    }
}
238
template<class PTD>
rPackParticleData (const PTD& ptd, int idx, std::remove_const_t<typename PTD::RealType> * rdata_ptr,
                   const int * write_real_comp)
{
    // Pack the real (floating-point) output record of particle idx into
    // rdata_ptr: positions first, then every component whose
    // write_real_comp flag is set, in component order.
    // NOTE(review): the extraction dropped the declaration line preceding
    // this one (presumably 'AMREX_GPU_HOST_DEVICE void') -- confirm upstream.
    std::size_t rout_index = 0;

    if constexpr (!PTD::ParticleType::is_rtsoa_particle) {
        // Positions are always written, unconditionally.
        for (int j = 0; j < AMREX_SPACEDIM; ++j) {
            rdata_ptr[rout_index] = ptd.pos(j, idx);
            rout_index++;
        }

        if constexpr (!PTD::ParticleType::is_soa_particle) {
            // Legacy AoS layout: struct-held real components come next.
            const auto& p = ptd[idx];

            for (int j = 0; j < PTD::ParticleType::NReal; ++j) {
                if (write_real_comp[j]) {
                    rdata_ptr[rout_index] = p.rdata(j);
                    rout_index++;
                }
            }
        }

        // For pure-SoA particles the first AMREX_SPACEDIM SoA components are
        // the positions, already written above, so the SoA loop and the flag
        // indexing are shifted by this offset.
        constexpr int real_start_offset = PTD::ParticleType::is_soa_particle ? AMREX_SPACEDIM : 0;

        if constexpr (PTD::NAR > 0) {
            for (int j = real_start_offset; j < PTD::NAR; ++j) {
                if (write_real_comp[PTD::ParticleType::NReal + j - real_start_offset]) {
                    rdata_ptr[rout_index] = ptd.rdata(j)[idx];
                    rout_index++;
                }
            }
        }

        // Runtime-added real components follow the compile-time ones.
        for (int j = 0; j < ptd.m_num_runtime_real; ++j) {
            if (write_real_comp[PTD::ParticleType::NReal + PTD::NAR + j - real_start_offset]) {
                rdata_ptr[rout_index] = ptd.m_runtime_rdata[j][idx];
                rout_index++;
            }
        }
    } else {
        // Runtime-SoA layout: one flat component array; the first
        // AMREX_SPACEDIM entries (positions) are always written, the rest
        // follow write_real_comp (indexed past the positions).
        for (int j = 0; j < ptd.m_n_real; ++j) {
            if ((j < AMREX_SPACEDIM) || write_real_comp[j - AMREX_SPACEDIM]) {
                rdata_ptr[rout_index] = ptd.rdata(j)[idx];
                rout_index++;
            }
        }
    }
}
289
template<class PTD>
iPackParticleData (const PTD& ptd, int idx, std::remove_const_t<typename PTD::IntType> * idata_ptr,
                   const int * write_int_comp, bool is_checkpoint)
{
    // Pack the integer output record of particle idx into idata_ptr:
    // the two-int id/cpu pair first, then every int component whose
    // write_int_comp flag is set, in component order.
    // NOTE(review): the extraction dropped the declaration line preceding
    // this one (presumably 'AMREX_GPU_HOST_DEVICE void') -- confirm upstream.
    std::size_t iout_index = 0;

    // id/cpu always occupy the first two slots (raw bits for checkpoints,
    // decoded values for plotfiles -- see packParticleIDs).
    packParticleIDs(&idata_ptr[iout_index], ptd.idcpu(idx), is_checkpoint);
    iout_index += 2;

    if constexpr (!PTD::ParticleType::is_rtsoa_particle) {
        if constexpr (!PTD::ParticleType::is_soa_particle) {
            // Legacy AoS layout: struct-held int components.
            const auto& p = ptd[idx];

            for (int j = 0; j < PTD::ParticleType::NInt; ++j) {
                if (write_int_comp[j]) {
                    idata_ptr[iout_index] = p.idata(j);
                    iout_index++;
                }
            }
        }

        // Compile-time SoA int components.
        if constexpr (PTD::NAI > 0) {
            for (int j = 0; j < PTD::NAI; ++j) {
                if (write_int_comp[PTD::ParticleType::NInt + j]) {
                    idata_ptr[iout_index] = ptd.idata(j)[idx];
                    iout_index++;
                }
            }
        }

        // Runtime-added int components follow the compile-time ones.
        for (int j = 0; j < ptd.m_num_runtime_int; ++j) {
            if (write_int_comp[PTD::ParticleType::NInt + PTD::NAI + j]) {
                idata_ptr[iout_index] = ptd.m_runtime_idata[j][idx];
                iout_index++;
            }
        }
    } else {
        // Runtime-SoA layout: one flat int component array, all selected by
        // write_int_comp.
        for (int j = 0; j < ptd.m_n_int; ++j) {
            if (write_int_comp[j]) {
                idata_ptr[iout_index] = ptd.idata(j)[idx];
                iout_index++;
            }
        }
    }
}
336
template <class PC>
void
packIODataGpu (Vector<int>& idata, Vector<ParticleReal>& rdata, const PC& pc, int lev, int grid,
               const Vector<int>& write_real_comp, const Vector<int>& write_int_comp,
               const Vector<std::map<std::pair<int, int>, typename PC::FlagsVector>>& particle_io_flags,
               const Vector<int>& tiles, int np, bool is_checkpoint)
{
    // GPU path: for all given tiles of (lev, grid), pack the np flagged
    // particles into the host vectors idata/rdata.  Packing runs on the
    // device into per-tile staging buffers which are then copied back.

    // Ints written per particle: 2 (id/cpu) + selected int components.
    int num_output_int = 0;
    for (int i = 0; i < pc.NumIntComps() + PC::NStructInt; ++i) {
        if (write_int_comp[i]) { ++num_output_int; }
    }

    const Long iChunkSize = 2 + num_output_int;
    idata.resize(np*iChunkSize);

    // Reals written per particle: AMREX_SPACEDIM positions + selected
    // real components.
    int num_output_real = 0;
    for (int i : write_real_comp) {
        if (i) { ++num_output_real; }
    }

    const Long rChunkSize = AMREX_SPACEDIM + num_output_real;
    rdata.resize(np*rChunkSize);

    // The component-selection flags must be visible on the device.
    Gpu::DeviceVector<int> write_int_comp_d(write_int_comp.size());
    Gpu::DeviceVector<int> write_real_comp_d(write_real_comp.size());
    Gpu::copyAsync(Gpu::hostToDevice, write_int_comp.begin(), write_int_comp.end(),
                   write_int_comp_d.begin());
    Gpu::copyAsync(Gpu::hostToDevice, write_real_comp.begin(), write_real_comp.end(),
                   write_real_comp_d.begin());

    const auto* write_int_comp_d_ptr = write_int_comp_d.data();
    const auto* write_real_comp_d_ptr = write_real_comp_d.data();

    // Running particle offset into the host output arrays across tiles.
    std::size_t poffset = 0;
    for (int tile : tiles) {
        const auto& ptile = pc.ParticlesAt(lev, grid, tile);
        const auto& pflags = particle_io_flags[lev].at(std::make_pair(grid, tile));
        int np_tile = ptile.numParticles();
        // Exclusive prefix sum of the 0/1 flags gives each flagged particle
        // its dense output slot; retSum returns the number of kept particles.
        Gpu::DeviceVector<int> offsets(np_tile);
        int num_copies = Scan::ExclusiveSum(np_tile, pflags.begin(), offsets.begin(), Scan::retSum);

        // Per-tile device staging buffers, sized for the kept particles only.
        Gpu::DeviceVector<int> idata_d(num_copies*iChunkSize);
        Gpu::DeviceVector<ParticleReal> rdata_d(num_copies*rChunkSize);

        const auto* flag_ptr = pflags.data();
        const auto* offset_ptr = offsets.data();

        auto* idata_d_ptr = idata_d.data();
        auto* rdata_d_ptr = rdata_d.data();

        const auto& ptd = ptile.getConstParticleTileData();
        amrex::ParallelFor(ptile.numParticles(),
        [=] AMREX_GPU_DEVICE (int pindex) noexcept
        {
            if (flag_ptr[pindex]) {
                const int out_indx = offset_ptr[pindex];
                iPackParticleData(ptd, pindex, idata_d_ptr + out_indx * iChunkSize,
                                  write_int_comp_d_ptr, is_checkpoint);

                rPackParticleData(ptd, pindex, rdata_d_ptr + out_indx * rChunkSize,
                                  write_real_comp_d_ptr);
            }
        });

        // Copy this tile's packed data into the host arrays at the running
        // offset; the synchronize must complete before the per-tile device
        // buffers go out of scope at the end of this iteration.
        Gpu::copyAsync(Gpu::deviceToHost, idata_d.begin(), idata_d.end(),
                       idata.begin() + static_cast<Long>(poffset * iChunkSize));
        Gpu::copyAsync(Gpu::deviceToHost, rdata_d.begin(), rdata_d.end(),
                       rdata.begin() + static_cast<Long>(poffset * rChunkSize));
        Gpu::Device::streamSynchronize();

        poffset += num_copies;
    }
}
410
411template <class PC>
412void
413packIODataCpu (Vector<int>& idata, Vector<ParticleReal>& rdata, const PC& pc, int lev, int grid,
414 const Vector<int>& write_real_comp, const Vector<int>& write_int_comp,
415 const Vector<std::map<std::pair<int, int>, typename PC::FlagsVector>>& particle_io_flags,
416 const Vector<int>& tiles, int np, bool is_checkpoint)
417{
418 int num_output_int = 0;
419 for (int i = 0; i < pc.NumIntComps() + PC::NStructInt; ++i) {
420 if (write_int_comp[i]) { ++num_output_int; }
421 }
422
423 const Long iChunkSize = 2 + num_output_int;
424 idata.resize(np*iChunkSize);
425
426 int num_output_real = 0;
427 for (int i : write_real_comp) {
428 if (i) { ++num_output_real; }
429 }
430
431 const Long rChunkSize = AMREX_SPACEDIM + num_output_real;
432 rdata.resize(np*rChunkSize);
433
434 int* iptr = idata.dataPtr();
435 ParticleReal* rptr = rdata.dataPtr();
436 for (int tile : tiles) {
437 const auto& ptile = pc.ParticlesAt(lev, grid, tile);
438 const auto& pflags = particle_io_flags[lev].at(std::make_pair(grid, tile));
439 const auto& ptd = ptile.getConstParticleTileData();
440
441 for (int pindex = 0; pindex < ptile.numParticles(); ++pindex) {
442 if (pflags[pindex]) {
443 iPackParticleData(ptd, pindex, iptr,
444 write_int_comp.dataPtr(), is_checkpoint);
445 iptr += iChunkSize;
446
447 rPackParticleData(ptd, pindex, rptr,
448 write_real_comp.dataPtr());
449 rptr += rChunkSize;
450 }
451 }
452 }
453}
454
455template <class PC>
456void
457packIOData (Vector<int>& idata, Vector<ParticleReal>& rdata, const PC& pc, int lev, int grid,
458 const Vector<int>& write_real_comp, const Vector<int>& write_int_comp,
459 const Vector<std::map<std::pair<int, int>, typename PC::FlagsVector>>& particle_io_flags,
460 const Vector<int>& tiles, int np, bool is_checkpoint)
461{
462 if constexpr (IsPolymorphicArenaAllocator<typename PC::FlagsVector::allocator_type>::value) {
463 if (pc.arena()->isManaged() || pc.arena()->isDevice()) {
464 packIODataGpu(idata, rdata, pc, lev, grid, write_real_comp, write_int_comp,
465 particle_io_flags, tiles, np, is_checkpoint);
466 } else {
467 packIODataCpu(idata, rdata, pc, lev, grid, write_real_comp, write_int_comp,
468 particle_io_flags, tiles, np, is_checkpoint);
469 }
470 } else {
471 if constexpr (RunOnGpu<typename PC::FlagsVector::allocator_type>::value) {
472 packIODataGpu(idata, rdata, pc, lev, grid, write_real_comp, write_int_comp,
473 particle_io_flags, tiles, np, is_checkpoint);
474 } else {
475 packIODataCpu(idata, rdata, pc, lev, grid, write_real_comp, write_int_comp,
476 particle_io_flags, tiles, np, is_checkpoint);
477 }
478 }
479}
480
481}
483
484template <class PC, class F, std::enable_if_t<IsParticleContainer<PC>::value, int> foo = 0>
486 const std::string& dir, const std::string& name,
487 const Vector<int>& write_real_comp,
488 const Vector<int>& write_int_comp,
489 const Vector<std::string>& real_comp_names,
490 const Vector<std::string>& int_comp_names,
491 F const& f, bool is_checkpoint)
492{
493 BL_PROFILE("WriteBinaryParticleData()");
494 AMREX_ASSERT(pc.OK());
495
496 AMREX_ASSERT(sizeof(typename PC::ParticleType::RealType) == 4 ||
497 sizeof(typename PC::ParticleType::RealType) == 8);
498
499 constexpr int NStructReal = PC::NStructReal;
500 constexpr int NStructInt = PC::NStructInt;
501
502 const int NProcs = ParallelDescriptor::NProcs();
503 const int IOProcNumber = ParallelDescriptor::IOProcessorNumber();
504
505 if constexpr(PC::ParticleType::is_soa_particle) {
506 AMREX_ALWAYS_ASSERT(real_comp_names.size() == pc.NumRealComps() + NStructReal - AMREX_SPACEDIM); // pure SoA: skip positions
507 } else {
508 AMREX_ALWAYS_ASSERT(real_comp_names.size() == pc.NumRealComps() + NStructReal);
509 }
510 AMREX_ALWAYS_ASSERT( int_comp_names.size() == pc.NumIntComps() + NStructInt);
511
512 std::string pdir = dir;
513 if ( ! pdir.empty() && pdir[pdir.size()-1] != '/') { pdir += '/'; }
514 pdir += name;
515
516 if ( ! pc.GetLevelDirectoriesCreated()) {
518 {
519 if ( ! amrex::UtilCreateDirectory(pdir, 0755))
520 {
522 }
523 }
525 }
526
527 std::ofstream HdrFile;
528
529 Long nparticles = 0;
530 Long maxnextid;
531
532 // evaluate f for every particle to determine which ones to output
533 Vector<std::map<std::pair<int, int>, typename PC::FlagsVector > >
534 particle_io_flags(pc.GetParticles().size());
535
536 for (int lev = 0; lev < pc.GetParticles().size(); lev++)
537 {
538 const auto& pmap = pc.GetParticles(lev);
539 for (const auto& kv : pmap)
540 {
541 auto& flags = particle_io_flags[lev][kv.first];
542 if constexpr (PC::has_polymorphic_allocator) {
543 flags.setArena(pc.arena());
544 }
545 particle_detail::fillFlags(flags, kv.second, f);
546 }
547 }
548
550
551 if(pc.GetUsePrePost())
552 {
553 nparticles = pc.GetNParticlesPrePost();
554 maxnextid = pc.GetMaxNextIDPrePost();
555 }
556 else
557 {
558 nparticles = particle_detail::countFlags(particle_io_flags, pc);
559 maxnextid = PC::ParticleType::NextID();
560 ParallelDescriptor::ReduceLongSum(nparticles, IOProcNumber);
561 PC::ParticleType::NextID(maxnextid);
562 ParallelDescriptor::ReduceLongMax(maxnextid, IOProcNumber);
563 }
564
566 {
567 std::string HdrFileName = pdir;
568
569 if ( ! HdrFileName.empty() && HdrFileName[HdrFileName.size()-1] != '/') {
570 HdrFileName += '/';
571 }
572
573 HdrFileName += "Header";
574 pc.HdrFileNamePrePost = HdrFileName;
575
576 HdrFile.open(HdrFileName.c_str(), std::ios::out|std::ios::trunc);
577
578 if ( ! HdrFile.good()) { amrex::FileOpenFailed(HdrFileName); }
579
580 //
581 // First thing written is our version string.
582 // We append "_single" or "_double" to the version string indicating
583 // whether we're using "float" or "double" floating point data.
584 //
585 std::string version_string = is_checkpoint ? PC::CheckpointVersion() : PC::PlotfileVersion();
586 if (sizeof(typename PC::ParticleType::RealType) == 4)
587 {
588 HdrFile << version_string << "_single" << '\n';
589 }
590 else
591 {
592 HdrFile << version_string << "_double" << '\n';
593 }
594
595 int num_output_real = 0;
596 for (int i : write_real_comp) {
597 if (i) { ++num_output_real; }
598 }
599
600 int num_output_int = 0;
601 for (int i = 0; i < pc.NumIntComps() + NStructInt; ++i) {
602 if (write_int_comp[i]) { ++num_output_int; }
603 }
604
605 // AMREX_SPACEDIM and N for sanity checking.
606 HdrFile << AMREX_SPACEDIM << '\n';
607
608 // The number of extra real parameters
609 HdrFile << num_output_real << '\n';
610
611 // Real component names
612 for (int i = 0; i < (int) real_comp_names.size(); ++i ) {
613 if (write_real_comp[i]) { HdrFile << real_comp_names[i] << '\n'; }
614 }
615
616 // The number of extra int parameters
617 HdrFile << num_output_int << '\n';
618
619 // int component names
620 for (int i = 0; i < NStructInt + pc.NumIntComps(); ++i ) {
621 if (write_int_comp[i]) { HdrFile << int_comp_names[i] << '\n'; }
622 }
623
624 bool is_checkpoint_legacy = true; // legacy
625 HdrFile << is_checkpoint_legacy << '\n';
626
627 // The total number of particles.
628 HdrFile << nparticles << '\n';
629
630 // The value of nextid that we need to restore on restart.
631 HdrFile << maxnextid << '\n';
632
633 // Then the finest level of the AMR hierarchy.
634 HdrFile << pc.finestLevel() << '\n';
635
636 // Then the number of grids at each level.
637 for (int lev = 0; lev <= pc.finestLevel(); lev++) {
638 HdrFile << pc.ParticleBoxArray(lev).size() << '\n';
639 }
640 }
641
642 // We want to write the data out in parallel.
643 // We'll allow up to nOutFiles active writers at a time.
644 int nOutFiles(256);
645
646 ParmParse pp("particles");
647 pp.queryAdd("particles_nfiles",nOutFiles);
648 if(nOutFiles == -1) { nOutFiles = NProcs; }
649 nOutFiles = std::max(1, std::min(nOutFiles,NProcs));
650 pc.nOutFilesPrePost = nOutFiles;
651
652 for (int lev = 0; lev <= pc.finestLevel(); lev++)
653 {
654 bool gotsome;
655 if(pc.usePrePost)
656 {
657 gotsome = (pc.nParticlesAtLevelPrePost[lev] > 0);
658 }
659 else
660 {
661 gotsome = (pc.NumberOfParticlesAtLevel(lev) > 0);
662 }
663
664 // We store the particles at each level in their own subdirectory.
665 std::string LevelDir = pdir;
666
667 if (gotsome)
668 {
669 if ( ! LevelDir.empty() && LevelDir[LevelDir.size()-1] != '/') { LevelDir += '/'; }
670
671 LevelDir = amrex::Concatenate(LevelDir.append("Level_"), lev, 1);
672
673 if ( ! pc.GetLevelDirectoriesCreated())
674 {
676 if ( ! amrex::UtilCreateDirectory(LevelDir, 0755)) {
678 }
679 }
681 }
682 }
683
684 // Write out the header for each particle
685 if (gotsome && ParallelDescriptor::IOProcessor()) {
686 std::string HeaderFileName = LevelDir;
687 HeaderFileName += "/Particle_H";
688 std::ofstream ParticleHeader(HeaderFileName);
689
690 pc.ParticleBoxArray(lev).writeOn(ParticleHeader);
691 ParticleHeader << '\n';
692
693 ParticleHeader.flush();
694 ParticleHeader.close();
695 }
696
697 MFInfo info;
698 info.SetAlloc(false);
699 MultiFab state(pc.ParticleBoxArray(lev),
700 pc.ParticleDistributionMap(lev),
701 1,0,info);
702
703 // We eventually want to write out the file name and the offset
704 // into that file into which each grid of particles is written.
705 Vector<int> which(state.size(),0);
706 Vector<int > count(state.size(),0);
707 Vector<Long> where(state.size(),0);
708
709 std::string filePrefix(LevelDir);
710 filePrefix += '/';
711 filePrefix += PC::DataPrefix();
712 if(pc.usePrePost) {
713 pc.filePrefixPrePost[lev] = filePrefix;
714 }
715 bool groupSets(false), setBuf(true);
716
717 if (gotsome)
718 {
719 for(NFilesIter nfi(nOutFiles, filePrefix, groupSets, setBuf); nfi.ReadyToWrite(); ++nfi)
720 {
721 auto& myStream = (std::ofstream&) nfi.Stream();
722 pc.WriteParticles(lev, myStream, nfi.FileNumber(), which, count, where,
723 write_real_comp, write_int_comp, particle_io_flags, is_checkpoint);
724 }
725
726 if(pc.usePrePost) {
727 pc.whichPrePost[lev] = which;
728 pc.countPrePost[lev] = count;
729 pc.wherePrePost[lev] = where;
730 } else {
731 ParallelDescriptor::ReduceIntSum (which.dataPtr(), static_cast<int>(which.size()), IOProcNumber);
732 ParallelDescriptor::ReduceIntSum (count.dataPtr(), static_cast<int>(count.size()), IOProcNumber);
733 ParallelDescriptor::ReduceLongSum(where.dataPtr(), static_cast<int>(where.size()), IOProcNumber);
734 }
735 }
736
738 {
739 if(pc.GetUsePrePost()) {
740 // ---- write to the header and unlink in CheckpointPost
741 } else {
742 for (int j = 0; j < state.size(); j++)
743 {
744 HdrFile << which[j] << ' ' << count[j] << ' ' << where[j] << '\n';
745 }
746
747 if (gotsome && pc.doUnlink)
748 {
749 // Unlink any zero-length data files.
750 Vector<Long> cnt(nOutFiles,0);
751
752 for (int i = 0, N=static_cast<int>(count.size()); i < N; i++) {
753 cnt[which[i]] += count[i];
754 }
755
756 for (int i = 0, N=static_cast<int>(cnt.size()); i < N; i++)
757 {
758 if (cnt[i] == 0)
759 {
760 std::string FullFileName = NFilesIter::FileName(i, filePrefix);
761 FileSystem::Remove(FullFileName);
762 }
763 }
764 }
765 }
766 }
769 }
770 }
771
773 {
774 HdrFile.flush();
775 HdrFile.close();
776 if ( ! HdrFile.good())
777 {
778 amrex::Abort("amrex::WriteBinaryParticleDataSync(): problem writing HdrFile");
779 }
780 }
781}
782
783template <class PC, std::enable_if_t<IsParticleContainer<PC>::value, int> foo = 0>
785 const std::string& dir, const std::string& name,
786 const Vector<int>& write_real_comp,
787 const Vector<int>& write_int_comp,
788 const Vector<std::string>& real_comp_names,
789 const Vector<std::string>& int_comp_names, bool is_checkpoint)
790{
791 BL_PROFILE("WriteBinaryParticleDataAsync");
792 AMREX_ASSERT(pc.OK());
793
794 AMREX_ASSERT(sizeof(typename PC::ParticleType::RealType) == 4 ||
795 sizeof(typename PC::ParticleType::RealType) == 8);
796
797 constexpr int NStructReal = PC::NStructReal;
798 constexpr int NStructInt = PC::NStructInt;
799 constexpr int NArrayReal = PC::NArrayReal;
800 constexpr int NArrayInt = PC::NArrayInt;
801
802 const int MyProc = ParallelDescriptor::MyProc();
803 const int NProcs = ParallelDescriptor::NProcs();
804 const int IOProcNumber = NProcs - 1;
805
806 if constexpr(PC::ParticleType::is_soa_particle) {
807 AMREX_ALWAYS_ASSERT(real_comp_names.size() == pc.NumRealComps() + NStructReal - AMREX_SPACEDIM); // pure SoA: skip positions
808 } else {
809 AMREX_ALWAYS_ASSERT(real_comp_names.size() == pc.NumRealComps() + NStructReal);
810 }
811 AMREX_ALWAYS_ASSERT( int_comp_names.size() == pc.NumIntComps() + NStructInt);
812
813 Vector<LayoutData<Long> > np_per_grid_local(pc.finestLevel()+1);
814 for (int lev = 0; lev <= pc.finestLevel(); lev++)
815 {
816 np_per_grid_local[lev].define(pc.ParticleBoxArray(lev), pc.ParticleDistributionMap(lev));
817 using ParIter = typename PC::ParConstIterType;
818 for (ParIter pti(pc, lev); pti.isValid(); ++pti)
819 {
820 int gid = pti.index();
821 const auto& ptile = pc.ParticlesAt(lev, pti);
822 const auto& ptd = ptile.getConstParticleTileData();
823 const int np = ptile.numParticles();
824
825 ReduceOps<ReduceOpSum> reduce_op;
826 ReduceData<int> reduce_data(reduce_op);
827 using ReduceTuple = typename decltype(reduce_data)::Type;
828
829 reduce_op.eval(np, reduce_data,
830 [=] AMREX_GPU_DEVICE (int i) -> ReduceTuple
831 {
832 return (ptd.id(i).is_valid()) ? 1 : 0;
833 });
834
835 int np_valid = amrex::get<0>(reduce_data.value(reduce_op));
836 np_per_grid_local[lev][gid] += np_valid;
837 }
838 }
839
840 Vector<Vector<Long> > np_per_grid_global(pc.finestLevel()+1);
841 Long total_np = 0;
842 Vector<Long> np_per_level(pc.finestLevel()+1);
843 for (int lev = 0; lev <= pc.finestLevel(); lev++)
844 {
845 np_per_grid_global[lev].resize(np_per_grid_local[lev].size());
847 np_per_grid_global[lev],
848 IOProcNumber);
849 np_per_level[lev] = std::accumulate(np_per_grid_global[lev].begin(),
850 np_per_grid_global[lev].end(), 0L);
851 total_np += np_per_level[lev];
852 }
853
854 std::string pdir = dir;
855 if ( ! pdir.empty() && pdir[pdir.size()-1] != '/') { pdir += '/'; }
856 pdir += name;
857
858 if (MyProc == IOProcNumber)
859 {
860 if ( ! pc.GetLevelDirectoriesCreated())
861 {
862 if ( ! amrex::UtilCreateDirectory(pdir, 0755))
863 {
865 }
866 }
867
868 for (int lev = 0; lev <= pc.finestLevel(); lev++)
869 {
870 std::string LevelDir = pdir;
871 bool gotsome = np_per_level[lev];
872
873 if (gotsome)
874 {
875 if ( ! LevelDir.empty() && LevelDir[LevelDir.size()-1] != '/') { LevelDir += '/'; }
876
877 LevelDir = amrex::Concatenate(LevelDir.append("Level_"), lev, 1);
878
879 if ( ! pc.GetLevelDirectoriesCreated())
880 {
881 if ( ! amrex::UtilCreateDirectory(LevelDir, 0755))
882 {
884 }
885 }
886
887 std::string HeaderFileName = LevelDir;
888 HeaderFileName += "/Particle_H";
889 std::ofstream ParticleHeader(HeaderFileName);
890
891 pc.ParticleBoxArray(lev).writeOn(ParticleHeader);
892 ParticleHeader << '\n';
893
894 ParticleHeader.flush();
895 ParticleHeader.close();
896 }
897 }
898 }
900
901 Long maxnextid = PC::ParticleType::NextID();
902 ParallelDescriptor::ReduceLongMax(maxnextid, IOProcNumber);
903
904 Vector<Long> np_on_rank(NProcs, 0L);
905 std::size_t psize = particle_detail::PSizeInFile<ParticleReal>(write_real_comp, write_int_comp);
906 Vector<int64_t> rank_start_offset(NProcs);
907 if (MyProc == IOProcNumber)
908 {
909 for (int lev = 0; lev <= pc.finestLevel(); lev++)
910 {
911 for (int k = 0; k < pc.ParticleBoxArray(lev).size(); ++k)
912 {
913 int rank = pc.ParticleDistributionMap(lev)[k];
914 np_on_rank[rank] += np_per_grid_global[lev][k];
915 }
916 }
917
918 for (int ip = 0; ip < NProcs; ++ip)
919 {
920 auto info = AsyncOut::GetWriteInfo(ip);
921 rank_start_offset[ip] = (info.ispot == 0) ? 0 : static_cast<int64_t>(rank_start_offset[ip-1] + np_on_rank[ip-1]*psize);
922 }
923 }
924
925 // make tmp particle tiles in pinned memory to write
926 using PinnedPTile = std::conditional_t<
927 PC::is_rtsoa_pc,
928 typename PC::ParticleTileType,
930 >;
931 auto myptiles = std::make_shared<Vector<std::map<std::pair<int, int>,PinnedPTile> > >();
932 myptiles->resize(pc.finestLevel()+1);
933 for (int lev = 0; lev <= pc.finestLevel(); lev++)
934 {
935 for (MFIter mfi = pc.MakeMFIter(lev); mfi.isValid(); ++mfi)
936 {
937 auto& new_ptile = (*myptiles)[lev][std::make_pair(mfi.index(),
938 mfi.LocalTileIndex())];
939
940 if (np_per_grid_local[lev][mfi.index()] > 0)
941 {
942 const auto& ptile = pc.ParticlesAt(lev, mfi);
943
944 const auto np = np_per_grid_local[lev][mfi.index()];
945
946 const auto runtime_real_comps = ptile.NumRuntimeRealComps();
947 const auto runtime_int_comps = ptile.NumRuntimeIntComps();
948
949 new_ptile.define(runtime_real_comps, runtime_int_comps,
950 nullptr, nullptr, The_Pinned_Arena());
951
952 new_ptile.resize(np);
953
954 amrex::filterParticles(new_ptile, ptile, KeepValidFilter());
955 }
956 }
957 }
958
959 int finest_level = pc.finestLevel();
962 for (int lev = 0; lev <= pc.finestLevel(); lev++)
963 {
964 bas.push_back(pc.ParticleBoxArray(lev));
965 dms.push_back(pc.ParticleDistributionMap(lev));
966 }
967
968 int nic = pc.NumIntComps();
969 int rnames_size = (int) real_comp_names.size();
970
971 auto RD = pc.ParticleRealDescriptor;
972
973 AsyncOut::Submit([=] ()
974#if defined(__GNUC__) && (__GNUC__ == 8) && (__GNUC_MINOR__ == 1)
975 mutable // workaround for bug in gcc 8.1
976#endif
977 {
978 if (MyProc == IOProcNumber)
979 {
980 std::string HdrFileName = pdir;
981 std::ofstream HdrFile;
982
983 if ( ! HdrFileName.empty() && HdrFileName[HdrFileName.size()-1] != '/') {
984 HdrFileName += '/';
985 }
986
987 HdrFileName += "Header";
988
989 HdrFile.open(HdrFileName.c_str(), std::ios::out|std::ios::trunc);
990
991 if ( ! HdrFile.good()) { amrex::FileOpenFailed(HdrFileName); }
992
993 std::string version_string = is_checkpoint ? PC::CheckpointVersion() : PC::PlotfileVersion();
994 if (sizeof(typename PC::ParticleType::RealType) == 4)
995 {
996 HdrFile << version_string << "_single" << '\n';
997 }
998 else
999 {
1000 HdrFile << version_string << "_double" << '\n';
1001 }
1002
1003 int num_output_real = 0;
1004 for (int i = 0; i < rnames_size; ++i) {
1005 if (write_real_comp[i]) { ++num_output_real; }
1006 }
1007
1008 int num_output_int = 0;
1009 for (int i = 0; i < nic + NStructInt; ++i) {
1010 if (write_int_comp[i]) { ++num_output_int; }
1011 }
1012
1013 // AMREX_SPACEDIM and N for sanity checking.
1014 HdrFile << AMREX_SPACEDIM << '\n';
1015
1016 // The number of extra real parameters
1017 HdrFile << num_output_real << '\n';
1018
1019 // Real component names
1020 for (int i = 0; i < rnames_size; ++i ) {
1021 if (write_real_comp[i]) { HdrFile << real_comp_names[i] << '\n'; }
1022 }
1023
1024 // The number of extra int parameters
1025 HdrFile << num_output_int << '\n';
1026
1027 // int component names
1028 for (int i = 0; i < NStructInt + nic; ++i ) {
1029 if (write_int_comp[i]) { HdrFile << int_comp_names[i] << '\n'; }
1030 }
1031
1032 bool is_checkpoint_legacy = true; // legacy
1033 HdrFile << is_checkpoint_legacy << '\n';
1034
1035 // The total number of particles.
1036 HdrFile << total_np << '\n';
1037
1038 // The value of nextid that we need to restore on restart.
1039 HdrFile << maxnextid << '\n';
1040
1041 // Then the finest level of the AMR hierarchy.
1042 HdrFile << finest_level << '\n';
1043
1044 // Then the number of grids at each level.
1045 for (int lev = 0; lev <= finest_level; lev++) {
1046 HdrFile << dms[lev].size() << '\n';
1047 }
1048
1049 for (int lev = 0; lev <= finest_level; lev++)
1050 {
1051 Vector<int64_t> grid_offset(NProcs, 0);
1052 for (int k = 0; k < bas[lev].size(); ++k)
1053 {
1054 int rank = dms[lev][k];
1055 auto info = AsyncOut::GetWriteInfo(rank);
1056 HdrFile << info.ifile << ' '
1057 << np_per_grid_global[lev][k] << ' '
1058 << grid_offset[rank] + rank_start_offset[rank] << '\n';
1059 grid_offset[rank] += static_cast<int64_t>(np_per_grid_global[lev][k]*psize);
1060 }
1061 }
1062
1063 HdrFile.flush();
1064 HdrFile.close();
1065 if ( ! HdrFile.good())
1066 {
1067 amrex::Abort("amrex::WriteBinaryParticleDataAsync(): problem writing HdrFile");
1068 }
1069 }
1070
1071 AsyncOut::Wait(); // Wait for my turn
1072
1073 for (int lev = 0; lev <= finest_level; lev++)
1074 {
1075 // For a each grid, the tiles it contains
1076 std::map<int, Vector<int> > tile_map;
1077
1078 for (const auto& kv : (*myptiles)[lev])
1079 {
1080 const int grid = kv.first.first;
1081 const int tile = kv.first.second;
1082 tile_map[grid].push_back(tile);
1083 }
1084
1085 std::string LevelDir = pdir;
1086 if ( ! LevelDir.empty() && LevelDir[LevelDir.size()-1] != '/') { LevelDir += '/'; }
1087 LevelDir = amrex::Concatenate(LevelDir.append("Level_"), lev, 1);
1088 std::string filePrefix(LevelDir);
1089 filePrefix += '/';
1090 filePrefix += PC::DataPrefix();
1091 auto info = AsyncOut::GetWriteInfo(MyProc);
1092 std::string file_name = amrex::Concatenate(filePrefix, info.ifile, 5);
1093 std::ofstream ofs;
1094 ofs.open(file_name.c_str(), (info.ispot == 0) ? (std::ios::binary | std::ios::trunc)
1095 : (std::ios::binary | std::ios::app));
1096
1097 for (int k = 0; k < bas[lev].size(); ++k)
1098 {
1099 int rank = dms[lev][k];
1100 if (rank != MyProc) { continue; }
1101 const int grid = k;
1102 if (np_per_grid_local[lev][grid] == 0) { continue; }
1103
1104 // First write out the integer data in binary.
1105 int num_output_int = 0;
1106 for (int i = 0; i < nic + NStructInt; ++i) {
1107 if (write_int_comp[i]) { ++num_output_int; }
1108 }
1109
1110 const Long iChunkSize = 2 + num_output_int;
1111 Vector<int> istuff(np_per_grid_local[lev][grid]*iChunkSize);
1112 int* iptr = istuff.dataPtr();
1113
1114 for (unsigned i = 0; i < tile_map[grid].size(); i++) {
1115 auto ptile_index = std::make_pair(grid, tile_map[grid][i]);
1116 const auto& pbox = (*myptiles)[lev][ptile_index];
1117 const auto& ptd = pbox.getConstParticleTileData();
1118 for (int pindex = 0; pindex < pbox.numParticles(); ++pindex)
1119 {
1120 if (!ptd.id(pindex).is_valid()) { continue; }
1121
1122 particle_detail::iPackParticleData(ptd, pindex, iptr,
1123 write_int_comp.dataPtr(), is_checkpoint);
1124 iptr += iChunkSize;
1125 }
1126 }
1127
1128 writeIntData(istuff.dataPtr(), istuff.size(), ofs);
1129 ofs.flush(); // Some systems require this flush() (probably due to a bug)
1130
1131 // Write the Real data in binary.
1132 int num_output_real = 0;
1133 for (int i = 0; i < rnames_size; ++i) {
1134 if (write_real_comp[i]) { ++num_output_real; }
1135 }
1136
1137 const Long rChunkSize = AMREX_SPACEDIM + num_output_real;
1138 Vector<typename PC::ParticleType::RealType> rstuff(np_per_grid_local[lev][grid]*rChunkSize);
1139 typename PC::ParticleType::RealType* rptr = rstuff.dataPtr();
1140
1141 for (unsigned i = 0; i < tile_map[grid].size(); i++) {
1142 auto ptile_index = std::make_pair(grid, tile_map[grid][i]);
1143 const auto& pbox = (*myptiles)[lev][ptile_index];
1144 const auto& ptd = pbox.getConstParticleTileData();
1145 for (int pindex = 0; pindex < pbox.numParticles(); ++pindex)
1146 {
1147 if (!ptd.id(pindex).is_valid()) { continue; }
1148
1149 particle_detail::rPackParticleData(ptd, pindex, rptr,
1150 write_real_comp.dataPtr());
1151 rptr += rChunkSize;
1152 }
1153 }
1154
1155 if (sizeof(typename PC::ParticleType::RealType) == 4) {
1156 writeFloatData((float*) rstuff.dataPtr(), rstuff.size(), ofs, RD);
1157 }
1158 else if (sizeof(typename PC::ParticleType::RealType) == 8) {
1159 writeDoubleData((double*) rstuff.dataPtr(), rstuff.size(), ofs, RD);
1160 }
1161
1162 ofs.flush(); // Some systems require this flush() (probably due to a bug)
1163 }
1164 }
1165 AsyncOut::Notify(); // Notify others I am done
1166 });
1167}
1168
1169}
1170
1171#ifdef AMREX_USE_HDF5
1173#endif
1174
1175#endif /*AMREX_WRITE_BINARY_PARTICLE_DATA_H*/
#define BL_PROFILE(a)
Definition AMReX_BLProfiler.H:551
#define AMREX_ASSERT(EX)
Definition AMReX_BLassert.H:38
#define AMREX_ALWAYS_ASSERT(EX)
Definition AMReX_BLassert.H:50
#define AMREX_GPU_DEVICE
Definition AMReX_GpuQualifiers.H:18
#define AMREX_GPU_HOST_DEVICE
Definition AMReX_GpuQualifiers.H:20
amrex::ParmParse pp
Input file parser instance for the given namespace.
Definition AMReX_HypreIJIface.cpp:15
int size() const noexcept
Return the number of FABs in the FabArray.
Definition AMReX_FabArrayBase.H:110
static void streamSynchronize() noexcept
Definition AMReX_GpuDevice.cpp:855
Iterator for looping over tiles and boxes of amrex::FabArray based containers.
Definition AMReX_MFIter.H:85
bool isValid() const noexcept
Is the iterator valid i.e. is it associated with a FAB?
Definition AMReX_MFIter.H:169
int index() const noexcept
The index into the underlying BoxArray of the current FAB.
Definition AMReX_MFIter.H:172
A collection (stored as an array) of FArrayBox objects.
Definition AMReX_MultiFab.H:40
This class encapsulates writing to nfiles.
Definition AMReX_NFiles.H:27
bool ReadyToWrite(bool appendFirst=false)
if appendFirst is true, the first set for this iterator will open the files in append mode
Definition AMReX_NFiles.cpp:204
const std::string & FileName() const
Definition AMReX_NFiles.H:160
Definition AMReX_ParIter.H:118
Parse Parameters From Command Line and Input Files.
Definition AMReX_ParmParse.H:348
int queryAdd(std::string_view name, T &ref)
If name is found, the value in the ParmParse database will be stored in the ref argument....
Definition AMReX_ParmParse.H:1044
Definition AMReX_Reduce.H:453
Type value()
Definition AMReX_Reduce.H:488
Definition AMReX_Reduce.H:612
std::enable_if_t< IsFabArray< MF >::value > eval(MF const &mf, IntVect const &nghost, D &reduce_data, F &&f)
Definition AMReX_Reduce.H:746
This class is a thin wrapper around std::vector. Unlike vector, Vector::operator[] provides bound che...
Definition AMReX_Vector.H:28
T * dataPtr() noexcept
get access to the underlying data pointer
Definition AMReX_Vector.H:49
Long size() const noexcept
Definition AMReX_Vector.H:53
static bool GetBarrierAfterLevel()
Definition AMReX_VisMF.H:286
amrex_particle_real ParticleReal
Floating Point Type for Particles.
Definition AMReX_REAL.H:90
amrex_long Long
Definition AMReX_INT.H:30
Arena * The_Pinned_Arena()
Definition AMReX_Arena.cpp:845
int MyProc() noexcept
Definition AMReX_ParallelDescriptor.H:128
void Barrier(const std::string &)
Definition AMReX_ParallelDescriptor.cpp:1215
void ReduceIntSum(int &)
Definition AMReX_ParallelDescriptor.cpp:1265
int NProcs() noexcept
Definition AMReX_ParallelDescriptor.H:255
void ReduceLongSum(Long &)
Definition AMReX_ParallelDescriptor.cpp:1236
int IOProcessorNumber() noexcept
The MPI rank number of the I/O Processor (probably rank 0). This rank is usually used to write to std...
Definition AMReX_ParallelDescriptor.H:279
void ReduceLongMax(Long &)
Definition AMReX_ParallelDescriptor.cpp:1237
bool IOProcessor() noexcept
Is this CPU the I/O Processor? To get the rank number, call IOProcessorNumber()
Definition AMReX_ParallelDescriptor.H:289
WriteInfo GetWriteInfo(int rank)
Definition AMReX_AsyncOut.cpp:72
void Wait()
Definition AMReX_AsyncOut.cpp:112
void Notify()
Definition AMReX_AsyncOut.cpp:127
void Submit(std::function< void()> &&a_f)
Definition AMReX_AsyncOut.cpp:95
bool Remove(std::string const &filename)
Definition AMReX_FileSystem.cpp:190
__host__ __device__ void * memcpy(void *dest, const void *src, std::size_t count)
Definition AMReX_GpuUtility.H:220
void GatherLayoutDataToVector(const LayoutData< T > &sendbuf, Vector< T > &recvbuf, int root)
Gather LayoutData values to a vector on root.
Definition AMReX_ParallelDescriptor.H:1295
Definition AMReX_Amr.cpp:49
void writeIntData(const From *data, std::size_t size, std::ostream &os, const amrex::IntDescriptor &id)
Definition AMReX_IntConv.H:23
__host__ __device__ void ignore_unused(const Ts &...)
This shuts up the compiler about unused variables.
Definition AMReX.H:139
void WriteBinaryParticleDataSync(PC const &pc, const std::string &dir, const std::string &name, const Vector< int > &write_real_comp, const Vector< int > &write_int_comp, const Vector< std::string > &real_comp_names, const Vector< std::string > &int_comp_names, F const &f, bool is_checkpoint)
Definition AMReX_WriteBinaryParticleData.H:485
void FileOpenFailed(const std::string &file)
Output a message and abort when couldn't open the file.
Definition AMReX_Utility.cpp:137
std::enable_if_t< std::is_integral_v< T > > ParallelFor(TypeList< CTOs... > ctos, std::array< int, sizeof...(CTOs)> const &runtime_options, T N, F &&f)
Definition AMReX_CTOParallelForImpl.H:193
void writeFloatData(const float *data, std::size_t size, std::ostream &os, const RealDescriptor &rd=FPC::Native32RealDescriptor())
Definition AMReX_VectorIO.cpp:114
__host__ __device__ Dim3 begin(BoxND< dim > const &box) noexcept
Definition AMReX_Box.H:2006
std::string Concatenate(const std::string &root, int num, int mindigits)
Returns rootNNNN where NNNN == num.
Definition AMReX_String.cpp:34
Index filterParticles(DstTile &dst, const SrcTile &src, const Index *mask) noexcept
Conditionally copy particles from src to dst based on the value of mask.
Definition AMReX_ParticleTransformation.H:393
void CreateDirectoryFailed(const std::string &dir)
Output a message and abort when couldn't create the directory.
Definition AMReX_Utility.cpp:129
RandomEngine getInvalidRandomEngine()
Definition AMReX_RandomEngine.H:86
bool UtilCreateDirectory(const std::string &path, mode_t mode, bool verbose=false)
Creates the specified directories. path may be either a full pathname or a relative pathname....
Definition AMReX_Utility.cpp:116
void writeDoubleData(const double *data, std::size_t size, std::ostream &os, const RealDescriptor &rd=FPC::Native64RealDescriptor())
Definition AMReX_VectorIO.cpp:126
void Abort(const std::string &msg)
Print out message to cerr and exit via abort().
Definition AMReX.cpp:240
const int[]
Definition AMReX_BLProfiler.cpp:1664
void WriteBinaryParticleDataAsync(PC const &pc, const std::string &dir, const std::string &name, const Vector< int > &write_real_comp, const Vector< int > &write_int_comp, const Vector< std::string > &real_comp_names, const Vector< std::string > &int_comp_names, bool is_checkpoint)
Definition AMReX_WriteBinaryParticleData.H:784
__host__ __device__ Dim3 end(BoxND< dim > const &box) noexcept
Definition AMReX_Box.H:2015
AMREX_ATTRIBUTE_FLATTEN_FOR void ParallelForRNG(T n, L const &f) noexcept
Definition AMReX_GpuLaunchFunctsC.H:1231
Definition AMReX_WriteBinaryParticleData.H:13
__host__ __device__ int operator()(const SrcData &src, int i) const noexcept
Definition AMReX_WriteBinaryParticleData.H:16
FabArray memory allocation information.
Definition AMReX_FabArray.H:66
MFInfo & SetAlloc(bool a) noexcept
Definition AMReX_FabArray.H:73
Definition AMReX_ParticleTile.H:723
void resize(std::size_t count, GrowthStrategy strategy=GrowthStrategy::Poisson)
Definition AMReX_ParticleTile.H:971
Definition AMReX_RandomEngine.H:72