AMReX: Block-Structured AMR Software Framework
AMReX_WriteBinaryParticleData.H
Go to the documentation of this file.
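This header provides the two binary particle I/O paths used by ParticleContainer checkpoints and plotfiles. WriteBinaryParticleDataSync has each rank write its grids through NFilesIter, with at most nOutFiles data files open per level, while the I/O processor writes the Header; WriteBinaryParticleDataAsync copies the selected particles into pinned-memory tiles and performs both the Header and data writes in a background task submitted through AsyncOut. In either case a particle record consists of the AMREX_SPACEDIM positions plus the selected real components, together with the id/cpu pair plus the selected integer components, as sized by particle_detail::PSizeInFile.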
1#ifndef AMREX_WRITE_BINARY_PARTICLE_DATA_H
2#define AMREX_WRITE_BINARY_PARTICLE_DATA_H
3#include <AMReX_Config.H>
4
5#include <AMReX_TypeTraits.H>
7#include <AMReX_GpuDevice.H>
8#include <AMReX_VisMF.H>
9
10namespace amrex {
11
12struct KeepValidFilter
13{
14 template <typename SrcData>
15 AMREX_GPU_HOST_DEVICE
16 int operator() (const SrcData& src, int i) const noexcept
17 {
18 return (src.id(i).is_valid());
19 }
20};
21
23namespace particle_detail {
24
25template <typename ParticleReal>
26std::size_t PSizeInFile (const Vector<int>& wrc, const Vector<int>& wic)
27{
28 std::size_t rsize = sizeof(ParticleReal)*std::accumulate(wrc.begin(), wrc.end(), 0);
29 std::size_t isize = sizeof(int)*std::accumulate(wic.begin(), wic.end(), 0);
30 return rsize + isize + AMREX_SPACEDIM*sizeof(ParticleReal) + 2*sizeof(int);
31}
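// Worked example (illustrative): in 3D with double-precision ParticleReal,
// two real components and one int component selected for output, each
// particle record occupies
//     3*sizeof(double) = 24 bytes   (positions)
//   + 2*sizeof(double) = 16 bytes   (selected real components)
//   + 2*sizeof(int)    =  8 bytes   (id and cpu)
//   + 1*sizeof(int)    =  4 bytes   (selected int components)
//   = 52 bytes in the data file.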
32
33template <class Container,
34 class PTile,
35 class F>
36void
37fillFlagsGpu (Container& pflags, const PTile& ptile, F const& f)
38{
39 const auto& ptd = ptile.getConstParticleTileData();
40 const auto np = ptile.numParticles();
41 pflags.resize(np, 0);
42 auto flag_ptr = pflags.data();
43 amrex::ParallelForRNG(np,
44 [=] AMREX_GPU_DEVICE (int k, amrex::RandomEngine const& engine) noexcept
45 {
46 const auto p = ptd.getSuperParticle(k);
47 amrex::ignore_unused(flag_ptr, f, engine);
48 if constexpr (IsCallable<F,decltype(p),RandomEngine>::value) {
49 flag_ptr[k] = f(p,engine);
50 } else if constexpr (IsCallable<F,decltype(p)>::value) {
51 flag_ptr[k] = f(p);
52 } else if constexpr (IsCallable<F,decltype(ptd),int,RandomEngine>::value) {
53 flag_ptr[k] = f(ptd,k,engine);
54 } else {
55 flag_ptr[k] = f(ptd,k);
56 }
57 });
58}
59
60template <class Container,
61 class PTile,
62 class F>
63void
64fillFlagsCpu (Container& pflags, const PTile& ptile, F const& f)
65{
66 const auto& ptd = ptile.getConstParticleTileData();
67 const auto np = ptile.numParticles();
68 pflags.resize(np, 0);
69 auto flag_ptr = pflags.data();
70 for (int k = 0; k < np; ++k) {
71 const auto p = ptd.getSuperParticle(k);
72 if constexpr (IsCallable<F,decltype(p),RandomEngine>::value) {
73 flag_ptr[k] = f(p,getInvalidRandomEngine());
74 } else if constexpr (IsCallable<F,decltype(p)>::value) {
75 flag_ptr[k] = f(p);
76 } else if constexpr (IsCallable<F,decltype(ptd),int,RandomEngine>::value) {
77 flag_ptr[k] = f(ptd,k,getInvalidRandomEngine());
78 } else {
79 flag_ptr[k] = f(ptd,k);
80 }
81 }
82}
83
84template <template <class, class> class Container,
85 class Allocator,
86 class PTile,
87 class F>
88void
89fillFlags (Container<int, Allocator>& pflags, const PTile& ptile, F const& f)
90{
91 if constexpr (IsPolymorphicArenaAllocator<Allocator>::value) {
92 if (pflags.arena()->isManaged() || pflags.arena()->isDevice()) {
93 fillFlagsGpu(pflags, ptile, f);
94 } else {
95 fillFlagsCpu(pflags, ptile, f);
96 }
97 } else {
98 if constexpr (RunOnGpu<Allocator>::value) {
99 fillFlagsGpu(pflags, ptile, f);
100 } else {
101 fillFlagsCpu(pflags, ptile, f);
102 }
103 }
104}
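// Illustrative sketch, not part of the original header: fillFlags accepts any
// functor whose operator() matches one of the four call signatures probed in
// fillFlagsGpu/fillFlagsCpu above. By analogy with KeepValidFilter, a functor
// like the following (hypothetical name, arbitrary component index) would keep
// particles whose SoA real component 0 is positive, assuming the container has
// at least one SoA real component:
struct SelectPositiveComp0
{
    template <typename SrcData>
    AMREX_GPU_HOST_DEVICE
    int operator() (const SrcData& ptd, int i) const noexcept
    {
        // dispatched as f(ptd, i) by fillFlags; return 1 to keep, 0 to drop
        return (ptd.rdata(0)[i] > ParticleReal(0.)) ? 1 : 0;
    }
};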
105
106template <class Container, class PC>
107Long
108countFlagsGpu (const Vector<std::map<std::pair<int,int>,Container>>& particle_io_flags, const PC& pc)
109{
110 ReduceOps<ReduceOpSum> reduce_op;
111 ReduceData<Long> reduce_data(reduce_op);
112 using ReduceTuple = typename decltype(reduce_data)::Type;
113
114 for (int lev = 0; lev < pc.GetParticles().size(); lev++)
115 {
116 const auto& pmap = pc.GetParticles(lev);
117 for (const auto& kv : pmap)
118 {
119 const auto& pflags = particle_io_flags[lev].at(kv.first);
120 const auto flag_ptr = pflags.data();
121 reduce_op.eval(pflags.size(), reduce_data,
122 [=] AMREX_GPU_DEVICE (const int i) -> ReduceTuple
123 {
124 return flag_ptr[i] ? 1 : 0;
125 });
126 }
127 }
128 ReduceTuple hv = reduce_data.value(reduce_op);
129 return amrex::get<0>(hv);
130}
131
132template <class Container>
133Long
134countFlagsGpu (const Container& pflags)
135{
136 ReduceOps<ReduceOpSum> reduce_op;
137 ReduceData<Long> reduce_data(reduce_op);
138 using ReduceTuple = typename decltype(reduce_data)::Type;
139
140 const auto flag_ptr = pflags.data();
141 reduce_op.eval(pflags.size(), reduce_data,
142 [=] AMREX_GPU_DEVICE (const amrex::Long i) -> ReduceTuple
143 {
144 return flag_ptr[i] ? 1 : 0;
145 });
146 ReduceTuple hv = reduce_data.value(reduce_op);
147 return amrex::get<0>(hv);
148}
149
150template <class Container, class PC>
151Long
152countFlagsCpu (const Vector<std::map<std::pair<int,int>,Container>>& particle_io_flags, const PC& pc)
153{
154 amrex::Long nparticles = 0;
155 for (int lev = 0; lev < pc.GetParticles().size(); lev++)
156 {
157 const auto& pmap = pc.GetParticles(lev);
158 for (const auto& kv : pmap)
159 {
160 const auto& pflags = particle_io_flags[lev].at(kv.first);
161 for (int k = 0; k < kv.second.numParticles(); ++k)
162 {
163 if (pflags[k]) { nparticles++; }
164 }
165 }
166 }
167 return nparticles;
168}
169
170template <class Container>
171Long
172countFlagsCpu (const Container& pflags)
173{
174 amrex::Long nparticles = 0;
175 for (std::size_t k = 0; k < pflags.size(); ++k)
176 {
177 if (pflags[k]) { nparticles++; }
178 }
179 return nparticles;
180}
181
182template <template <class, class> class Container, class Allocator, class PC>
183Long
184countFlags (const Vector<std::map<std::pair<int,int>,Container<int,Allocator>>>& particle_io_flags, const PC& pc)
185{
186 if constexpr (IsPolymorphicArenaAllocator<Allocator>::value) {
187 if (pc.arena()->isManaged() || pc.arena()->isDevice()) {
188 return countFlagsGpu(particle_io_flags, pc);
189 } else {
190 return countFlagsCpu(particle_io_flags, pc);
191 }
192 } else {
193 if constexpr (RunOnGpu<Allocator>::value) {
194 return countFlagsGpu(particle_io_flags, pc);
195 } else {
196 return countFlagsCpu(particle_io_flags, pc);
197 }
198 }
199}
200
201template <template <class, class> class Container, class Allocator>
202Long
203countFlags (const Container<int,Allocator>& pflags)
204{
205 if constexpr (IsPolymorphicArenaAllocator<Allocator>::value) {
206 if (pflags.arena()->isManaged() || pflags.arena()->isDevice()) {
207 return countFlagsGpu(pflags);
208 } else {
209 return countFlagsCpu(pflags);
210 }
211 } else {
212 if constexpr (RunOnGpu<Allocator>::value) {
213 return countFlagsGpu(pflags);
214 } else {
215 return countFlagsCpu(pflags);
216 }
217 }
218}
219
220template <typename I>
221AMREX_GPU_HOST_DEVICE
222void packParticleIDs (I* idata, const std::uint64_t idcpu, bool is_checkpoint) noexcept
223{
224 if (is_checkpoint) {
225 std::int32_t xi, yi;
226 std::uint32_t xu, yu;
227 xu = (std::uint32_t)((idcpu & 0xFFFFFFFF00000000LL) >> 32);
228 yu = (std::uint32_t)( idcpu & 0xFFFFFFFFLL);
229 amrex::Gpu::memcpy(&xi, &xu, sizeof(xu));
230 amrex::Gpu::memcpy(&yi, &yu, sizeof(yu));
231 idata[0] = xi;
232 idata[1] = yi;
233 } else {
234 idata[0] = ConstParticleIDWrapper{idcpu};
235 idata[1] = ConstParticleCPUWrapper{idcpu};
236 }
237}
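// Note on the layout above: for checkpoints the raw 64-bit idcpu word is split
// bit-for-bit into two 32-bit ints so a restart can reassemble it exactly; for
// plotfiles the decoded particle id and cpu numbers are written instead.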
238
239template<class PTD>
240AMREX_GPU_HOST_DEVICE void
241rPackParticleData (const PTD& ptd, int idx, typename PTD::RealType * rdata_ptr,
242 const int * write_real_comp)
243{
244 std::size_t rout_index = 0;
245
246 for (int j = 0; j < AMREX_SPACEDIM; ++j) {
247 rdata_ptr[rout_index] = ptd.pos(j, idx);
248 rout_index++;
249 }
250
251 if constexpr (!PTD::ParticleType::is_soa_particle) {
252 const auto& p = ptd[idx];
253
254 for (int j = 0; j < PTD::ParticleType::NReal; ++j) {
255 if (write_real_comp[j]) {
256 rdata_ptr[rout_index] = p.rdata(j);
257 rout_index++;
258 }
259 }
260 }
261
262 constexpr int real_start_offset = PTD::ParticleType::is_soa_particle ? AMREX_SPACEDIM : 0;
263
264 if constexpr (PTD::NAR > 0) {
265 for (int j = real_start_offset; j < PTD::NAR; ++j) {
266 if (write_real_comp[PTD::ParticleType::NReal + j - real_start_offset]) {
267 rdata_ptr[rout_index] = ptd.rdata(j)[idx];
268 rout_index++;
269 }
270 }
271 }
272
273 for (int j = 0; j < ptd.m_num_runtime_real; ++j) {
274 if (write_real_comp[PTD::ParticleType::NReal + PTD::NAR + j - real_start_offset]) {
275 rdata_ptr[rout_index] = ptd.m_runtime_rdata[j][idx];
276 rout_index++;
277 }
278 }
279}
280
281template<class PTD>
282AMREX_GPU_HOST_DEVICE void
283iPackParticleData (const PTD& ptd, int idx, typename PTD::IntType * idata_ptr,
284 const int * write_int_comp, bool is_checkpoint)
285{
286 std::size_t iout_index = 0;
287
288 packParticleIDs(&idata_ptr[iout_index], ptd.idcpu(idx), is_checkpoint);
289 iout_index += 2;
290
291 if constexpr (!PTD::ParticleType::is_soa_particle) {
292 const auto& p = ptd[idx];
293
294 for (int j = 0; j < PTD::ParticleType::NInt; ++j) {
295 if (write_int_comp[j]) {
296 idata_ptr[iout_index] = p.idata(j);
297 iout_index++;
298 }
299 }
300 }
301
302 if constexpr (PTD::NAI > 0) {
303 for (int j = 0; j < PTD::NAI; ++j) {
304 if (write_int_comp[PTD::ParticleType::NInt + j]) {
305 idata_ptr[iout_index] = ptd.idata(j)[idx];
306 iout_index++;
307 }
308 }
309 }
310
311 for (int j = 0; j < ptd.m_num_runtime_int; ++j) {
312 if (write_int_comp[PTD::ParticleType::NInt + PTD::NAI + j]) {
313 idata_ptr[iout_index] = ptd.m_runtime_idata[j][idx];
314 iout_index++;
315 }
316 }
317}
318
319template <class PC>
320void
321packIODataGpu (Vector<int>& idata, Vector<ParticleReal>& rdata, const PC& pc, int lev, int grid,
322 const Vector<int>& write_real_comp, const Vector<int>& write_int_comp,
323 const Vector<std::map<std::pair<int, int>, typename PC::IntVector>>& particle_io_flags,
324 const Vector<int>& tiles, int np, bool is_checkpoint)
325{
326 int num_output_int = 0;
327 for (int i = 0; i < pc.NumIntComps() + PC::NStructInt; ++i) {
328 if (write_int_comp[i]) { ++num_output_int; }
329 }
330
331 const Long iChunkSize = 2 + num_output_int;
332 idata.resize(np*iChunkSize);
333
334 int num_output_real = 0;
335 for (int i : write_real_comp) {
336 if (i) { ++num_output_real; }
337 }
338
339 const Long rChunkSize = AMREX_SPACEDIM + num_output_real;
340 rdata.resize(np*rChunkSize);
341
342 Gpu::DeviceVector<int> write_int_comp_d(write_int_comp.size());
343 Gpu::DeviceVector<int> write_real_comp_d(write_real_comp.size());
344 Gpu::copyAsync(Gpu::hostToDevice, write_int_comp.begin(), write_int_comp.end(),
345 write_int_comp_d.begin());
346 Gpu::copyAsync(Gpu::hostToDevice, write_real_comp.begin(), write_real_comp.end(),
347 write_real_comp_d.begin());
348
349 const auto* write_int_comp_d_ptr = write_int_comp_d.data();
350 const auto* write_real_comp_d_ptr = write_real_comp_d.data();
351
352 std::size_t poffset = 0;
353 for (int tile : tiles) {
354 const auto& ptile = pc.ParticlesAt(lev, grid, tile);
355 const auto& pflags = particle_io_flags[lev].at(std::make_pair(grid, tile));
356 int np_tile = ptile.numParticles();
357 Gpu::DeviceVector<int> offsets(np_tile);
358 int num_copies = Scan::ExclusiveSum(np_tile, pflags.begin(), offsets.begin(), Scan::retSum);
359
360 Gpu::DeviceVector<int> idata_d(num_copies*iChunkSize);
361 Gpu::DeviceVector<ParticleReal> rdata_d(num_copies*rChunkSize);
362
363 const auto* flag_ptr = pflags.data();
364 const auto* offset_ptr = offsets.data();
365
366 auto* idata_d_ptr = idata_d.data();
367 auto* rdata_d_ptr = rdata_d.data();
368
369 const auto& ptd = ptile.getConstParticleTileData();
370 amrex::ParallelFor(ptile.numParticles(),
371 [=] AMREX_GPU_DEVICE (int pindex) noexcept
372 {
373 if (flag_ptr[pindex]) {
374 const int out_indx = offset_ptr[pindex];
375 iPackParticleData(ptd, pindex, idata_d_ptr + out_indx * iChunkSize,
376 write_int_comp_d_ptr, is_checkpoint);
377
378 rPackParticleData(ptd, pindex, rdata_d_ptr + out_indx * rChunkSize,
379 write_real_comp_d_ptr);
380 }
381 });
382
383 Gpu::copyAsync(Gpu::deviceToHost, idata_d.begin(), idata_d.end(),
384 idata.begin() + static_cast<Long>(poffset)*iChunkSize);
385 Gpu::copyAsync(Gpu::deviceToHost, rdata_d.begin(), rdata_d.end(),
386 rdata.begin() + static_cast<Long>(poffset)*rChunkSize);
387 Gpu::Device::streamSynchronize();
388
389 poffset += num_copies;
390 }
391}
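// Summary of the GPU path above: per tile, the 0/1 flags are converted into
// output slots with an exclusive prefix sum, a ParallelFor packs the selected
// particles into device buffers at those slots, and the buffers are copied
// back into idata/rdata behind the chunks packed from earlier tiles.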
392
393template <class PC>
394void
395packIODataCpu (Vector<int>& idata, Vector<ParticleReal>& rdata, const PC& pc, int lev, int grid,
396 const Vector<int>& write_real_comp, const Vector<int>& write_int_comp,
397 const Vector<std::map<std::pair<int, int>, typename PC::IntVector>>& particle_io_flags,
398 const Vector<int>& tiles, int np, bool is_checkpoint)
399{
400 int num_output_int = 0;
401 for (int i = 0; i < pc.NumIntComps() + PC::NStructInt; ++i) {
402 if (write_int_comp[i]) { ++num_output_int; }
403 }
404
405 const Long iChunkSize = 2 + num_output_int;
406 idata.resize(np*iChunkSize);
407
408 int num_output_real = 0;
409 for (int i : write_real_comp) {
410 if (i) { ++num_output_real; }
411 }
412
413 const Long rChunkSize = AMREX_SPACEDIM + num_output_real;
414 rdata.resize(np*rChunkSize);
415
416 int* iptr = idata.dataPtr();
417 ParticleReal* rptr = rdata.dataPtr();
418 for (int tile : tiles) {
419 const auto& ptile = pc.ParticlesAt(lev, grid, tile);
420 const auto& pflags = particle_io_flags[lev].at(std::make_pair(grid, tile));
421 const auto& ptd = ptile.getConstParticleTileData();
422
423 for (int pindex = 0; pindex < ptile.numParticles(); ++pindex) {
424 if (pflags[pindex]) {
425 iPackParticleData(ptd, pindex, iptr,
426 write_int_comp.dataPtr(), is_checkpoint);
427 iptr += iChunkSize;
428
429 rPackParticleData(ptd, pindex, rptr,
430 write_real_comp.dataPtr());
431 rptr += rChunkSize;
432 }
433 }
434 }
435}
436
437template <class PC>
438void
439packIOData (Vector<int>& idata, Vector<ParticleReal>& rdata, const PC& pc, int lev, int grid,
440 const Vector<int>& write_real_comp, const Vector<int>& write_int_comp,
441 const Vector<std::map<std::pair<int, int>, typename PC::IntVector>>& particle_io_flags,
442 const Vector<int>& tiles, int np, bool is_checkpoint)
443{
444 if constexpr (IsPolymorphicArenaAllocator<typename PC::IntVector::allocator_type>::value) {
445 if (pc.arena()->isManaged() || pc.arena()->isDevice()) {
446 packIODataGpu(idata, rdata, pc, lev, grid, write_real_comp, write_int_comp,
447 particle_io_flags, tiles, np, is_checkpoint);
448 } else {
449 packIODataCpu(idata, rdata, pc, lev, grid, write_real_comp, write_int_comp,
450 particle_io_flags, tiles, np, is_checkpoint);
451 }
452 } else {
453 if constexpr (RunOnGpu<typename PC::IntVector::allocator_type>::value) {
454 packIODataGpu(idata, rdata, pc, lev, grid, write_real_comp, write_int_comp,
455 particle_io_flags, tiles, np, is_checkpoint);
456 } else {
457 packIODataCpu(idata, rdata, pc, lev, grid, write_real_comp, write_int_comp,
458 particle_io_flags, tiles, np, is_checkpoint);
459 }
460 }
461}
462
463}
465
466template <class PC, class F, std::enable_if_t<IsParticleContainer<PC>::value, int> foo = 0>
467void WriteBinaryParticleDataSync (PC const& pc,
468 const std::string& dir, const std::string& name,
469 const Vector<int>& write_real_comp,
470 const Vector<int>& write_int_comp,
471 const Vector<std::string>& real_comp_names,
472 const Vector<std::string>& int_comp_names,
473 F const& f, bool is_checkpoint)
474{
475 BL_PROFILE("WriteBinaryParticleData()");
476 AMREX_ASSERT(pc.OK());
477
478 AMREX_ASSERT(sizeof(typename PC::ParticleType::RealType) == 4 ||
479 sizeof(typename PC::ParticleType::RealType) == 8);
480
481 constexpr int NStructReal = PC::NStructReal;
482 constexpr int NStructInt = PC::NStructInt;
483
484 const int NProcs = ParallelDescriptor::NProcs();
485 const int IOProcNumber = ParallelDescriptor::IOProcessorNumber();
486
487 if constexpr(PC::ParticleType::is_soa_particle) {
488 AMREX_ALWAYS_ASSERT(real_comp_names.size() == pc.NumRealComps() + NStructReal - AMREX_SPACEDIM); // pure SoA: skip positions
489 } else {
490 AMREX_ALWAYS_ASSERT(real_comp_names.size() == pc.NumRealComps() + NStructReal);
491 }
492 AMREX_ALWAYS_ASSERT( int_comp_names.size() == pc.NumIntComps() + NStructInt);
493
494 std::string pdir = dir;
495 if ( ! pdir.empty() && pdir[pdir.size()-1] != '/') { pdir += '/'; }
496 pdir += name;
497
498 if ( ! pc.GetLevelDirectoriesCreated()) {
499 if (ParallelDescriptor::IOProcessor())
500 {
501 if ( ! amrex::UtilCreateDirectory(pdir, 0755))
502 {
503 amrex::CreateDirectoryFailed(pdir);
504 }
505 }
506 ParallelDescriptor::Barrier();
507 }
508
509 std::ofstream HdrFile;
510
511 Long nparticles = 0;
512 Long maxnextid;
513
514 // evaluate f for every particle to determine which ones to output
515 Vector<std::map<std::pair<int, int>, typename PC::IntVector > >
516 particle_io_flags(pc.GetParticles().size());
517 for (int lev = 0; lev < pc.GetParticles().size(); lev++)
518 {
519 const auto& pmap = pc.GetParticles(lev);
520 for (const auto& kv : pmap)
521 {
522 auto& flags = particle_io_flags[lev][kv.first];
523 if constexpr (PC::has_polymorphic_allocator) {
524 flags.setArena(pc.arena());
525 }
526 particle_detail::fillFlags(flags, kv.second, f);
527 }
528 }
529
531
532 if(pc.GetUsePrePost())
533 {
534 nparticles = pc.GetNParticlesPrePost();
535 maxnextid = pc.GetMaxNextIDPrePost();
536 }
537 else
538 {
539 nparticles = particle_detail::countFlags(particle_io_flags, pc);
540 maxnextid = PC::ParticleType::NextID();
541 ParallelDescriptor::ReduceLongSum(nparticles, IOProcNumber);
542 PC::ParticleType::NextID(maxnextid);
543 ParallelDescriptor::ReduceLongMax(maxnextid, IOProcNumber);
544 }
545
546 if (ParallelDescriptor::IOProcessor())
547 {
548 std::string HdrFileName = pdir;
549
550 if ( ! HdrFileName.empty() && HdrFileName[HdrFileName.size()-1] != '/') {
551 HdrFileName += '/';
552 }
553
554 HdrFileName += "Header";
555 pc.HdrFileNamePrePost = HdrFileName;
556
557 HdrFile.open(HdrFileName.c_str(), std::ios::out|std::ios::trunc);
558
559 if ( ! HdrFile.good()) { amrex::FileOpenFailed(HdrFileName); }
560
561 //
562 // First thing written is our version string.
563 // We append "_single" or "_double" to the version string indicating
564 // whether we're using "float" or "double" floating point data.
565 //
566 std::string version_string = is_checkpoint ? PC::CheckpointVersion() : PC::PlotfileVersion();
567 if (sizeof(typename PC::ParticleType::RealType) == 4)
568 {
569 HdrFile << version_string << "_single" << '\n';
570 }
571 else
572 {
573 HdrFile << version_string << "_double" << '\n';
574 }
575
576 int num_output_real = 0;
577 for (int i : write_real_comp) {
578 if (i) { ++num_output_real; }
579 }
580
581 int num_output_int = 0;
582 for (int i = 0; i < pc.NumIntComps() + NStructInt; ++i) {
583 if (write_int_comp[i]) { ++num_output_int; }
584 }
585
586 // AMREX_SPACEDIM and N for sanity checking.
587 HdrFile << AMREX_SPACEDIM << '\n';
588
589 // The number of extra real parameters
590 HdrFile << num_output_real << '\n';
591
592 // Real component names
593 for (int i = 0; i < (int) real_comp_names.size(); ++i ) {
594 if (write_real_comp[i]) { HdrFile << real_comp_names[i] << '\n'; }
595 }
596
597 // The number of extra int parameters
598 HdrFile << num_output_int << '\n';
599
600 // int component names
601 for (int i = 0; i < NStructInt + pc.NumIntComps(); ++i ) {
602 if (write_int_comp[i]) { HdrFile << int_comp_names[i] << '\n'; }
603 }
604
605 bool is_checkpoint_legacy = true; // legacy
606 HdrFile << is_checkpoint_legacy << '\n';
607
608 // The total number of particles.
609 HdrFile << nparticles << '\n';
610
611 // The value of nextid that we need to restore on restart.
612 HdrFile << maxnextid << '\n';
613
614 // Then the finest level of the AMR hierarchy.
615 HdrFile << pc.finestLevel() << '\n';
616
617 // Then the number of grids at each level.
618 for (int lev = 0; lev <= pc.finestLevel(); lev++) {
619 HdrFile << pc.ParticleBoxArray(lev).size() << '\n';
620 }
621 }
622
623 // We want to write the data out in parallel.
624 // We'll allow up to nOutFiles active writers at a time.
625 int nOutFiles(256);
626
627 ParmParse pp("particles");
628 pp.queryAdd("particles_nfiles",nOutFiles);
629 if(nOutFiles == -1) { nOutFiles = NProcs; }
630 nOutFiles = std::max(1, std::min(nOutFiles,NProcs));
631 pc.nOutFilesPrePost = nOutFiles;
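// Runtime control (inputs file), e.g. "particles.particles_nfiles = 64";
// a value of -1 requests one output file per MPI rank.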
632
633 for (int lev = 0; lev <= pc.finestLevel(); lev++)
634 {
635 bool gotsome;
636 if(pc.usePrePost)
637 {
638 gotsome = (pc.nParticlesAtLevelPrePost[lev] > 0);
639 }
640 else
641 {
642 gotsome = (pc.NumberOfParticlesAtLevel(lev) > 0);
643 }
644
645 // We store the particles at each level in their own subdirectory.
646 std::string LevelDir = pdir;
647
648 if (gotsome)
649 {
650 if ( ! LevelDir.empty() && LevelDir[LevelDir.size()-1] != '/') { LevelDir += '/'; }
651
652 LevelDir = amrex::Concatenate(LevelDir.append("Level_"), lev, 1);
653
654 if ( ! pc.GetLevelDirectoriesCreated())
655 {
656 if (ParallelDescriptor::IOProcessor()) {
657 if ( ! amrex::UtilCreateDirectory(LevelDir, 0755)) {
658 amrex::CreateDirectoryFailed(LevelDir);
659 }
660 }
661 ParallelDescriptor::Barrier();
662 }
663 }
664
665 // Write out the particle header (Particle_H) for this level
666 if (gotsome && ParallelDescriptor::IOProcessor()) {
667 std::string HeaderFileName = LevelDir;
668 HeaderFileName += "/Particle_H";
669 std::ofstream ParticleHeader(HeaderFileName);
670
671 pc.ParticleBoxArray(lev).writeOn(ParticleHeader);
672 ParticleHeader << '\n';
673
674 ParticleHeader.flush();
675 ParticleHeader.close();
676 }
677
678 MFInfo info;
679 info.SetAlloc(false);
680 MultiFab state(pc.ParticleBoxArray(lev),
681 pc.ParticleDistributionMap(lev),
682 1,0,info);
683
684 // We eventually want to write out the file name and the offset
685 // within that file at which each grid of particles is written.
686 Vector<int> which(state.size(),0);
687 Vector<int > count(state.size(),0);
688 Vector<Long> where(state.size(),0);
689
690 std::string filePrefix(LevelDir);
691 filePrefix += '/';
692 filePrefix += PC::DataPrefix();
693 if(pc.usePrePost) {
694 pc.filePrefixPrePost[lev] = filePrefix;
695 }
696 bool groupSets(false), setBuf(true);
697
698 if (gotsome)
699 {
700 for(NFilesIter nfi(nOutFiles, filePrefix, groupSets, setBuf); nfi.ReadyToWrite(); ++nfi)
701 {
702 auto& myStream = (std::ofstream&) nfi.Stream();
703 pc.WriteParticles(lev, myStream, nfi.FileNumber(), which, count, where,
704 write_real_comp, write_int_comp, particle_io_flags, is_checkpoint);
705 }
706
707 if(pc.usePrePost) {
708 pc.whichPrePost[lev] = which;
709 pc.countPrePost[lev] = count;
710 pc.wherePrePost[lev] = where;
711 } else {
712 ParallelDescriptor::ReduceIntSum (which.dataPtr(), static_cast<int>(which.size()), IOProcNumber);
713 ParallelDescriptor::ReduceIntSum (count.dataPtr(), static_cast<int>(count.size()), IOProcNumber);
714 ParallelDescriptor::ReduceLongSum(where.dataPtr(), static_cast<int>(where.size()), IOProcNumber);
715 }
716 }
717
718 if (ParallelDescriptor::IOProcessor())
719 {
720 if(pc.GetUsePrePost()) {
721 // ---- write to the header and unlink in CheckpointPost
722 } else {
723 for (int j = 0; j < state.size(); j++)
724 {
725 HdrFile << which[j] << ' ' << count[j] << ' ' << where[j] << '\n';
726 }
727
728 if (gotsome && pc.doUnlink)
729 {
730 // Unlink any zero-length data files.
731 Vector<Long> cnt(nOutFiles,0);
732
733 for (int i = 0, N=static_cast<int>(count.size()); i < N; i++) {
734 cnt[which[i]] += count[i];
735 }
736
737 for (int i = 0, N=static_cast<int>(cnt.size()); i < N; i++)
738 {
739 if (cnt[i] == 0)
740 {
741 std::string FullFileName = NFilesIter::FileName(i, filePrefix);
742 FileSystem::Remove(FullFileName);
743 }
744 }
745 }
746 }
747 }
750 }
751 }
752
753 if (ParallelDescriptor::IOProcessor())
754 {
755 HdrFile.flush();
756 HdrFile.close();
757 if ( ! HdrFile.good())
758 {
759 amrex::Abort("amrex::WriteBinaryParticleDataSync(): problem writing HdrFile");
760 }
761 }
762}
763
764template <class PC, std::enable_if_t<IsParticleContainer<PC>::value, int> foo = 0>
765void WriteBinaryParticleDataAsync (PC const& pc,
766 const std::string& dir, const std::string& name,
767 const Vector<int>& write_real_comp,
768 const Vector<int>& write_int_comp,
769 const Vector<std::string>& real_comp_names,
770 const Vector<std::string>& int_comp_names, bool is_checkpoint)
771{
772 BL_PROFILE("WriteBinaryParticleDataAsync");
773 AMREX_ASSERT(pc.OK());
774
775 AMREX_ASSERT(sizeof(typename PC::ParticleType::RealType) == 4 ||
776 sizeof(typename PC::ParticleType::RealType) == 8);
777
778 constexpr int NStructReal = PC::NStructReal;
779 constexpr int NStructInt = PC::NStructInt;
780 constexpr int NArrayReal = PC::NArrayReal;
781 constexpr int NArrayInt = PC::NArrayInt;
782
783 const int MyProc = ParallelDescriptor::MyProc();
784 const int NProcs = ParallelDescriptor::NProcs();
785 const int IOProcNumber = NProcs - 1;
786
787 if constexpr(PC::ParticleType::is_soa_particle) {
788 AMREX_ALWAYS_ASSERT(real_comp_names.size() == pc.NumRealComps() + NStructReal - AMREX_SPACEDIM); // pure SoA: skip positions
789 } else {
790 AMREX_ALWAYS_ASSERT(real_comp_names.size() == pc.NumRealComps() + NStructReal);
791 }
792 AMREX_ALWAYS_ASSERT( int_comp_names.size() == pc.NumIntComps() + NStructInt);
793
794 Vector<LayoutData<Long> > np_per_grid_local(pc.finestLevel()+1);
795 for (int lev = 0; lev <= pc.finestLevel(); lev++)
796 {
797 np_per_grid_local[lev].define(pc.ParticleBoxArray(lev), pc.ParticleDistributionMap(lev));
798 using ParIter = typename PC::ParConstIterType;
799 for (ParIter pti(pc, lev); pti.isValid(); ++pti)
800 {
801 int gid = pti.index();
802 const auto& ptile = pc.ParticlesAt(lev, pti);
803 const auto& ptd = ptile.getConstParticleTileData();
804 const int np = ptile.numParticles();
805
806 ReduceOps<ReduceOpSum> reduce_op;
807 ReduceData<int> reduce_data(reduce_op);
808 using ReduceTuple = typename decltype(reduce_data)::Type;
809
810 reduce_op.eval(np, reduce_data,
811 [=] AMREX_GPU_DEVICE (int i) -> ReduceTuple
812 {
813 return (ptd.id(i).is_valid()) ? 1 : 0;
814 });
815
816 int np_valid = amrex::get<0>(reduce_data.value(reduce_op));
817 np_per_grid_local[lev][gid] += np_valid;
818 }
819 }
820
821 Vector<Vector<Long> > np_per_grid_global(pc.finestLevel()+1);
822 Long total_np = 0;
823 Vector<Long> np_per_level(pc.finestLevel()+1);
824 for (int lev = 0; lev <= pc.finestLevel(); lev++)
825 {
826 np_per_grid_global[lev].resize(np_per_grid_local[lev].size());
827 ParallelDescriptor::GatherLayoutDataToVector(np_per_grid_local[lev],
828 np_per_grid_global[lev],
829 IOProcNumber);
830 np_per_level[lev] = std::accumulate(np_per_grid_global[lev].begin(),
831 np_per_grid_global[lev].end(), 0L);
832 total_np += np_per_level[lev];
833 }
834
835 std::string pdir = dir;
836 if ( ! pdir.empty() && pdir[pdir.size()-1] != '/') { pdir += '/'; }
837 pdir += name;
838
839 if (MyProc == IOProcNumber)
840 {
841 if ( ! pc.GetLevelDirectoriesCreated())
842 {
843 if ( ! amrex::UtilCreateDirectory(pdir, 0755))
844 {
845 amrex::CreateDirectoryFailed(pdir);
846 }
847 }
848
849 for (int lev = 0; lev <= pc.finestLevel(); lev++)
850 {
851 std::string LevelDir = pdir;
852 bool gotsome = np_per_level[lev];
853
854 if (gotsome)
855 {
856 if ( ! LevelDir.empty() && LevelDir[LevelDir.size()-1] != '/') { LevelDir += '/'; }
857
858 LevelDir = amrex::Concatenate(LevelDir.append("Level_"), lev, 1);
859
860 if ( ! pc.GetLevelDirectoriesCreated())
861 {
862 if ( ! amrex::UtilCreateDirectory(LevelDir, 0755))
863 {
864 amrex::CreateDirectoryFailed(LevelDir);
865 }
866 }
867
868 std::string HeaderFileName = LevelDir;
869 HeaderFileName += "/Particle_H";
870 std::ofstream ParticleHeader(HeaderFileName);
871
872 pc.ParticleBoxArray(lev).writeOn(ParticleHeader);
873 ParticleHeader << '\n';
874
875 ParticleHeader.flush();
876 ParticleHeader.close();
877 }
878 }
879 }
880 ParallelDescriptor::Barrier();
881
882 Long maxnextid = PC::ParticleType::NextID();
883 ParallelDescriptor::ReduceLongMax(maxnextid, IOProcNumber);
884
885 Vector<Long> np_on_rank(NProcs, 0L);
886 std::size_t psize = particle_detail::PSizeInFile<ParticleReal>(write_real_comp, write_int_comp);
887 Vector<int64_t> rank_start_offset(NProcs);
888 if (MyProc == IOProcNumber)
889 {
890 for (int lev = 0; lev <= pc.finestLevel(); lev++)
891 {
892 for (int k = 0; k < pc.ParticleBoxArray(lev).size(); ++k)
893 {
894 int rank = pc.ParticleDistributionMap(lev)[k];
895 np_on_rank[rank] += np_per_grid_global[lev][k];
896 }
897 }
898
899 for (int ip = 0; ip < NProcs; ++ip)
900 {
901 auto info = AsyncOut::GetWriteInfo(ip);
902 rank_start_offset[ip] = (info.ispot == 0) ? 0 : static_cast<int64_t>(rank_start_offset[ip-1] + np_on_rank[ip-1]*psize);
903 }
904 }
905
906 // make tmp particle tiles in pinned memory to write
907 using PinnedPTile = ParticleTile<typename PC::ParticleType, NArrayReal, NArrayInt,
908 PinnedArenaAllocator>;
909 auto myptiles = std::make_shared<Vector<std::map<std::pair<int, int>,PinnedPTile> > >();
910 myptiles->resize(pc.finestLevel()+1);
911 for (int lev = 0; lev <= pc.finestLevel(); lev++)
912 {
913 for (MFIter mfi = pc.MakeMFIter(lev); mfi.isValid(); ++mfi)
914 {
915 auto& new_ptile = (*myptiles)[lev][std::make_pair(mfi.index(),
916 mfi.LocalTileIndex())];
917
918 if (np_per_grid_local[lev][mfi.index()] > 0)
919 {
920 const auto& ptile = pc.ParticlesAt(lev, mfi);
921
922 const auto np = np_per_grid_local[lev][mfi.index()];
923
924 const auto runtime_real_comps = ptile.NumRuntimeRealComps();
925 const auto runtime_int_comps = ptile.NumRuntimeIntComps();
926
927 new_ptile.define(runtime_real_comps, runtime_int_comps,
928 nullptr, nullptr, The_Pinned_Arena());
929
930 new_ptile.resize(np);
931
932 amrex::filterParticles(new_ptile, ptile, KeepValidFilter());
933 }
934 }
935 }
936
937 int finest_level = pc.finestLevel();
938 Vector<BoxArray> bas;
939 Vector<DistributionMapping> dms;
940 for (int lev = 0; lev <= pc.finestLevel(); lev++)
941 {
942 bas.push_back(pc.ParticleBoxArray(lev));
943 dms.push_back(pc.ParticleDistributionMap(lev));
944 }
945
946 int nic = pc.NumIntComps();
947 int rnames_size = (int) real_comp_names.size();
948
949 auto RD = pc.ParticleRealDescriptor;
950
951 AsyncOut::Submit([=] ()
952#if defined(__GNUC__) && (__GNUC__ == 8) && (__GNUC_MINOR__ == 1)
953 mutable // workaround for bug in gcc 8.1
954#endif
955 {
956 if (MyProc == IOProcNumber)
957 {
958 std::string HdrFileName = pdir;
959 std::ofstream HdrFile;
960
961 if ( ! HdrFileName.empty() && HdrFileName[HdrFileName.size()-1] != '/') {
962 HdrFileName += '/';
963 }
964
965 HdrFileName += "Header";
966
967 HdrFile.open(HdrFileName.c_str(), std::ios::out|std::ios::trunc);
968
969 if ( ! HdrFile.good()) { amrex::FileOpenFailed(HdrFileName); }
970
971 std::string version_string = is_checkpoint ? PC::CheckpointVersion() : PC::PlotfileVersion();
972 if (sizeof(typename PC::ParticleType::RealType) == 4)
973 {
974 HdrFile << version_string << "_single" << '\n';
975 }
976 else
977 {
978 HdrFile << version_string << "_double" << '\n';
979 }
980
981 int num_output_real = 0;
982 for (int i = 0; i < rnames_size; ++i) {
983 if (write_real_comp[i]) { ++num_output_real; }
984 }
985
986 int num_output_int = 0;
987 for (int i = 0; i < nic + NStructInt; ++i) {
988 if (write_int_comp[i]) { ++num_output_int; }
989 }
990
991 // AMREX_SPACEDIM and N for sanity checking.
992 HdrFile << AMREX_SPACEDIM << '\n';
993
994 // The number of extra real parameters
995 HdrFile << num_output_real << '\n';
996
997 // Real component names
998 for (int i = 0; i < rnames_size; ++i ) {
999 if (write_real_comp[i]) { HdrFile << real_comp_names[i] << '\n'; }
1000 }
1001
1002 // The number of extra int parameters
1003 HdrFile << num_output_int << '\n';
1004
1005 // int component names
1006 for (int i = 0; i < NStructInt + nic; ++i ) {
1007 if (write_int_comp[i]) { HdrFile << int_comp_names[i] << '\n'; }
1008 }
1009
1010 bool is_checkpoint_legacy = true; // legacy
1011 HdrFile << is_checkpoint_legacy << '\n';
1012
1013 // The total number of particles.
1014 HdrFile << total_np << '\n';
1015
1016 // The value of nextid that we need to restore on restart.
1017 HdrFile << maxnextid << '\n';
1018
1019 // Then the finest level of the AMR hierarchy.
1020 HdrFile << finest_level << '\n';
1021
1022 // Then the number of grids at each level.
1023 for (int lev = 0; lev <= finest_level; lev++) {
1024 HdrFile << dms[lev].size() << '\n';
1025 }
1026
1027 for (int lev = 0; lev <= finest_level; lev++)
1028 {
1029 Vector<int64_t> grid_offset(NProcs, 0);
1030 for (int k = 0; k < bas[lev].size(); ++k)
1031 {
1032 int rank = dms[lev][k];
1033 auto info = AsyncOut::GetWriteInfo(rank);
1034 HdrFile << info.ifile << ' '
1035 << np_per_grid_global[lev][k] << ' '
1036 << grid_offset[rank] + rank_start_offset[rank] << '\n';
1037 grid_offset[rank] += static_cast<int64_t>(np_per_grid_global[lev][k]*psize);
1038 }
1039 }
1040
1041 HdrFile.flush();
1042 HdrFile.close();
1043 if ( ! HdrFile.good())
1044 {
1045 amrex::Abort("amrex::WriteBinaryParticleDataAsync(): problem writing HdrFile");
1046 }
1047 }
1048
1049 AsyncOut::Wait(); // Wait for my turn
1050
1051 for (int lev = 0; lev <= finest_level; lev++)
1052 {
1053 // For each grid, the tiles it contains
1054 std::map<int, Vector<int> > tile_map;
1055
1056 for (const auto& kv : (*myptiles)[lev])
1057 {
1058 const int grid = kv.first.first;
1059 const int tile = kv.first.second;
1060 tile_map[grid].push_back(tile);
1061 }
1062
1063 std::string LevelDir = pdir;
1064 if ( ! LevelDir.empty() && LevelDir[LevelDir.size()-1] != '/') { LevelDir += '/'; }
1065 LevelDir = amrex::Concatenate(LevelDir.append("Level_"), lev, 1);
1066 std::string filePrefix(LevelDir);
1067 filePrefix += '/';
1068 filePrefix += PC::DataPrefix();
1069 auto info = AsyncOut::GetWriteInfo(MyProc);
1070 std::string file_name = amrex::Concatenate(filePrefix, info.ifile, 5);
1071 std::ofstream ofs;
1072 ofs.open(file_name.c_str(), (info.ispot == 0) ? (std::ios::binary | std::ios::trunc)
1073 : (std::ios::binary | std::ios::app));
1074
1075 for (int k = 0; k < bas[lev].size(); ++k)
1076 {
1077 int rank = dms[lev][k];
1078 if (rank != MyProc) { continue; }
1079 const int grid = k;
1080 if (np_per_grid_local[lev][grid] == 0) { continue; }
1081
1082 // First write out the integer data in binary.
1083 int num_output_int = 0;
1084 for (int i = 0; i < nic + NStructInt; ++i) {
1085 if (write_int_comp[i]) { ++num_output_int; }
1086 }
1087
1088 const Long iChunkSize = 2 + num_output_int;
1089 Vector<int> istuff(np_per_grid_local[lev][grid]*iChunkSize);
1090 int* iptr = istuff.dataPtr();
1091
1092 for (unsigned i = 0; i < tile_map[grid].size(); i++) {
1093 auto ptile_index = std::make_pair(grid, tile_map[grid][i]);
1094 const auto& pbox = (*myptiles)[lev][ptile_index];
1095 const auto& ptd = pbox.getConstParticleTileData();
1096 for (int pindex = 0; pindex < pbox.numParticles(); ++pindex)
1097 {
1098 if (!ptd.id(pindex).is_valid()) { continue; }
1099
1100 particle_detail::iPackParticleData(ptd, pindex, iptr,
1101 write_int_comp.dataPtr(), is_checkpoint);
1102 iptr += iChunkSize;
1103 }
1104 }
1105
1106 writeIntData(istuff.dataPtr(), istuff.size(), ofs);
1107 ofs.flush(); // Some systems require this flush() (probably due to a bug)
1108
1109 // Write the Real data in binary.
1110 int num_output_real = 0;
1111 for (int i = 0; i < rnames_size; ++i) {
1112 if (write_real_comp[i]) { ++num_output_real; }
1113 }
1114
1115 const Long rChunkSize = AMREX_SPACEDIM + num_output_real;
1116 Vector<typename PC::ParticleType::RealType> rstuff(np_per_grid_local[lev][grid]*rChunkSize);
1117 typename PC::ParticleType::RealType* rptr = rstuff.dataPtr();
1118
1119 for (unsigned i = 0; i < tile_map[grid].size(); i++) {
1120 auto ptile_index = std::make_pair(grid, tile_map[grid][i]);
1121 const auto& pbox = (*myptiles)[lev][ptile_index];
1122 const auto& ptd = pbox.getConstParticleTileData();
1123 for (int pindex = 0; pindex < pbox.numParticles(); ++pindex)
1124 {
1125 if (!ptd.id(pindex).is_valid()) { continue; }
1126
1127 particle_detail::rPackParticleData(ptd, pindex, rptr,
1128 write_real_comp.dataPtr());
1129 rptr += rChunkSize;
1130 }
1131 }
1132
1133 if (sizeof(typename PC::ParticleType::RealType) == 4) {
1134 writeFloatData((float*) rstuff.dataPtr(), rstuff.size(), ofs, RD);
1135 }
1136 else if (sizeof(typename PC::ParticleType::RealType) == 8) {
1137 writeDoubleData((double*) rstuff.dataPtr(), rstuff.size(), ofs, RD);
1138 }
1139
1140 ofs.flush(); // Some systems require this flush() (probably due to a bug)
1141 }
1142 }
1143 AsyncOut::Notify(); // Notify others I am done
1144 });
1145}
1146
1147}
1148
1149#ifdef AMREX_USE_HDF5
1150#include <AMReX_WriteBinaryParticleDataHDF5.H>
1151#endif
1152
1153#endif /*AMREX_WRITE_BINARY_PARTICLE_DATA_H*/
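For orientation, below is a minimal sketch of driving the synchronous entry point directly. The container type, component counts, names, and flags are illustrative assumptions only; in typical use the higher-level ParticleContainer::Checkpoint and WritePlotFile interfaces call these routines on the application's behalf.

#include <AMReX_Particles.H>
#include <string>

// Hypothetical container: legacy particle with 2 extra SoA real components
// ("weight", "energy") and 1 SoA int component ("species").
void write_particles_example (amrex::ParticleContainer<0, 0, 2, 1>& pc,
                              const std::string& plotfile_dir)
{
    using namespace amrex;

    Vector<int> write_real_comp = {1, 1};          // keep both real components
    Vector<int> write_int_comp  = {1};             // keep the int component
    Vector<std::string> real_comp_names = {"weight", "energy"};
    Vector<std::string> int_comp_names  = {"species"};

    // KeepValidFilter (defined in this header) keeps every particle with a
    // valid id; any functor accepted by particle_detail::fillFlags works here.
    WriteBinaryParticleDataSync(pc, plotfile_dir, "particles",
                                write_real_comp, write_int_comp,
                                real_comp_names, int_comp_names,
                                KeepValidFilter{}, /*is_checkpoint=*/false);
}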