Block-Structured AMR Software Framework
 
All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Modules Pages
Loading...
Searching...
No Matches
AMReX_PODVector.H
Go to the documentation of this file.
1#ifndef AMREX_PODVECTOR_H_
2#define AMREX_PODVECTOR_H_
3#include <AMReX_Config.H>
4
5#include <AMReX.H>
6#include <AMReX_Arena.H>
7#include <AMReX_GpuLaunch.H>
9#include <AMReX_GpuDevice.H>
10#include <AMReX_MemPool.H>
11#include <AMReX_TypeTraits.H>
12
13#include <iterator>
14#include <type_traits>
15#include <utility>
16#include <memory>
17#include <cstring>
18
19namespace amrex
20{
21 namespace detail
22 {
23 template <typename T, typename Size, template<class> class Allocator>
24 FatPtr<T> allocate_in_place ([[maybe_unused]] T* p, [[maybe_unused]] Size nmin, Size nmax,
25 Allocator<T>& allocator)
26 {
27 if constexpr (IsArenaAllocator<Allocator<T>>::value) {
28 return allocator.allocate_in_place(p, nmin, nmax);
29 } else {
30 T* pnew = allocator.allocate(nmax);
31 return {pnew, nmax};
32 }
33 }
34
35 template <typename T, typename Size, template<class> class Allocator>
36 T* shrink_in_place ([[maybe_unused]] T* p, Size n, Allocator<T>& allocator)
37 {
38 if constexpr (IsArenaAllocator<Allocator<T>>::value) {
39 return allocator.shrink_in_place(p, n);
40 } else {
41 return allocator.allocate(n);
42 }
43 }
44
        // Fill `count` uninitialized elements at `data` with `value`.
        // If the allocator targets GPU memory (RunOnGpu), or is a
        // polymorphic arena whose arena is managed/device memory, the fill
        // runs as a device ParallelFor; otherwise it falls back to
        // std::uninitialized_fill_n on the host.  The _WIN32 variants use a
        // runtime `if` instead of `if constexpr` -- presumably an MSVC
        // workaround; confirm against upstream history.
        // NOTE(review): original lines 59 and 75 are missing from this
        // extraction -- presumably Gpu::streamSynchronize() calls after
        // each ParallelFor; confirm against the upstream header.
 45 template <typename T, typename Size, template<class> class Allocator>
 46 void uninitializedFillNImpl (T* data, Size count, const T& value,
 47 [[maybe_unused]] Allocator<T> const& allocator)
 48 {
 49#ifdef AMREX_USE_GPU
 50#ifdef _WIN32
 51 if (RunOnGpu<Allocator<T>>::value)
 52#else
 53 if constexpr (RunOnGpu<Allocator<T>>::value)
 54#endif
 55 {
 56 amrex::ParallelFor(count, [=] AMREX_GPU_DEVICE (Size i) noexcept {
 57 data[i] = value;
 58 });
 60 return;
 61 }
 62#ifdef _WIN32
 63 else if (IsPolymorphicArenaAllocator<Allocator<T>>::value)
 64#else
 65 else if constexpr (IsPolymorphicArenaAllocator<Allocator<T>>::value)
 66#endif
 67 {
 68 if (allocator.arena()->isManaged() ||
 69 allocator.arena()->isDevice())
 70 {
 71 amrex::ParallelFor(count, [=] AMREX_GPU_DEVICE (Size i) noexcept
 72 {
 73 data[i] = value;
 74 });
 76 return;
 77 }
 78 }
 79#endif
 80 std::uninitialized_fill_n(data, count, value);
 81 }
82
        // Copy the elements of an initializer_list (host memory) into
        // `data`.  Uses a host-to-device memcpy when the destination lives
        // in GPU (or managed/device arena) memory, plain std::memcpy
        // otherwise.  `count` is in bytes.
        // NOTE(review): original lines 92 and 102 region is garbled -- the
        // lines after each htod_memcpy_async are missing, presumably
        // Gpu::streamSynchronize() (the list's storage must not be read
        // after this function returns); confirm upstream.
 83 template <typename T, template<class> class Allocator>
 84 void initFromListImpl (T* data, std::initializer_list<T> const& list,
 85 [[maybe_unused]] Allocator<T> const & allocator)
 86 {
 87 auto count = list.size() * sizeof(T);
 88#ifdef AMREX_USE_GPU
 89 if constexpr (RunOnGpu<Allocator<T>>::value)
 90 {
 91 Gpu::htod_memcpy_async(data, std::data(list), count);
 93 return;
 94 }
 95 else if constexpr (IsPolymorphicArenaAllocator<Allocator<T>>::value)
 96 {
 97 if (allocator.arena()->isManaged() ||
 98 allocator.arena()->isDevice())
 99 {
 100 Gpu::htod_memcpy_async(data, std::data(list), count);
 102 return;
 103 }
 104 }
 105#endif
 106 std::memcpy(data, std::data(list), count);
 107 }
108
        // Element-wise copy of `count` values from `src` to `dst`.  On GPU
        // allocators (or managed/device arenas) this is a device
        // ParallelFor -- both pointers are assumed to be device-accessible.
        // Otherwise it is a plain host loop.  The final `if constexpr`
        // guard keeps the host loop out of instantiations where `dst`/`src`
        // are device pointers.
        // NOTE(review): original lines 123 and 139 are missing from this
        // extraction -- presumably Gpu::streamSynchronize() after each
        // ParallelFor; confirm upstream.
 109 template <typename T, typename Size, template<class> class Allocator>
 110 void fillValuesImpl (T* dst, T const* src, Size count,
 111 [[maybe_unused]] Allocator<T> const& allocator)
 112 {
 113#ifdef AMREX_USE_GPU
 114#ifdef _WIN32
 115 if (RunOnGpu<Allocator<T>>::value)
 116#else
 117 if constexpr (RunOnGpu<Allocator<T>>::value)
 118#endif
 119 {
 120 amrex::ParallelFor(count, [=] AMREX_GPU_DEVICE (Size i) noexcept {
 121 dst[i] = src[i];
 122 });
 124 return;
 125 }
 126#ifdef _WIN32
 127 else if (IsPolymorphicArenaAllocator<Allocator<T>>::value)
 128#else
 129 else if constexpr (IsPolymorphicArenaAllocator<Allocator<T>>::value)
 130#endif
 131 {
 132 if (allocator.arena()->isManaged() ||
 133 allocator.arena()->isDevice())
 134 {
 135 amrex::ParallelFor(count, [=] AMREX_GPU_DEVICE (Size i) noexcept
 136 {
 137 dst[i] = src[i];
 138 });
 140 return;
 141 }
 142 }
 143#else
 144 static_assert(RunOnGpu<Allocator<T>>::value == false);
 145#endif
 146 if constexpr (! RunOnGpu<Allocator<T>>::value) {
 147 for (Size i = 0; i < count; ++i) { dst[i] = src[i]; }
 148 }
 149 }
150
        // memcpy `count` bytes from `src` to `dst`, picking the transfer
        // direction (d2d / h2d / d2h) from where each allocator's memory
        // lives.  GPU copies are asynchronous; they are followed by a
        // streamSynchronize unless the caller passes sync=false (used when
        // batching several copies before one synchronization).
        // NOTE(review): original line 164 is missing from this extraction --
        // presumably "else if constexpr
        // (IsPolymorphicArenaAllocator<Allocator>::value)" guarding the
        // block below; confirm upstream.
 151 template <typename Allocator>
 152 void memCopyImpl (void* dst, const void* src, std::size_t count,
 153 [[maybe_unused]] Allocator const& dst_allocator,
 154 [[maybe_unused]] Allocator const& src_allocator,
 155 [[maybe_unused]] bool sync = true)
 156 {
 157#ifdef AMREX_USE_GPU
 158 if constexpr (RunOnGpu<Allocator>::value)
 159 {
 160 Gpu::dtod_memcpy_async(dst, src, count);
 161 if (sync) { Gpu::streamSynchronize(); }
 162 return;
 163 }
 165 {
 166 bool dst_on_device = dst_allocator.arena()->isManaged() ||
 167 dst_allocator.arena()->isDevice();
 168 bool src_on_device = src_allocator.arena()->isManaged() ||
 169 src_allocator.arena()->isDevice();
 170 if (dst_on_device || src_on_device)
 171 {
 172 if (dst_on_device && src_on_device) {
 173 Gpu::dtod_memcpy_async(dst, src, count);
 174 } else if (dst_on_device) {
 175 Gpu::htod_memcpy_async(dst, src, count);
 176 } else {
 177 Gpu::dtoh_memcpy_async(dst, src, count);
 178 }
 179 if (sync) { Gpu::streamSynchronize(); }
 180 return;
 181 }
 182 }
 183#endif
 184 std::memcpy(dst, src, count);
 185 }
186
        // memmove-equivalent for possibly-overlapping ranges.  On device
        // memory there is no memmove, so the bytes are staged through a
        // temporary buffer from The_Arena() (copy src->tmp, then tmp->dst)
        // before freeing the staging buffer.  Host memory uses
        // std::memmove directly.
        // NOTE(review): several original lines are missing from this
        // extraction: 197 and 209 (presumably Gpu::streamSynchronize()
        // before freeing tmp -- required, since the async copies must
        // finish first) and 201-202 (presumably "else if constexpr
        // (IsPolymorphicArenaAllocator<Allocator>::value)" guarding the
        // second block).  Confirm upstream.
 187 template <typename Allocator>
 188 void memMoveImpl (void* dst, const void* src, std::size_t count,
 189 [[maybe_unused]] Allocator const& allocator)
 190 {
 191#ifdef AMREX_USE_GPU
 192 if constexpr (RunOnGpu<Allocator>::value)
 193 {
 194 auto* tmp = The_Arena()->alloc(count);
 195 Gpu::dtod_memcpy_async(tmp, src, count);
 196 Gpu::dtod_memcpy_async(dst, tmp, count);
 198 The_Arena()->free(tmp);
 199 return;
 200 }
 202 {
 203 if (allocator.arena()->isManaged() ||
 204 allocator.arena()->isDevice())
 205 {
 206 auto* tmp = The_Arena()->alloc(count);
 207 Gpu::dtod_memcpy_async(tmp, src, count);
 208 Gpu::dtod_memcpy_async(dst, tmp, count);
 210 The_Arena()->free(tmp);
 211 return;
 212 }
 213 }
 214#endif
 215 std::memmove(dst, src, count);
 216 }
217
        // Debug aid: if T is float/double and amrex::InitSNaN() is enabled,
        // fill the fresh buffer with signaling NaNs so reads of
        // uninitialized memory trap.  Chooses device or host fill based on
        // the allocator, mirroring uninitializedFillNImpl.  No-op for
        // non-floating-point T or when InitSNaN() is off.
        // NOTE(review): original lines 228 and 235 are missing from this
        // extraction -- presumably Gpu::streamSynchronize() after each
        // device fill; confirm upstream.
 218 template <typename T, typename Size, template<class> class Allocator>
 219 void maybe_init_snan (T* data, Size count, Allocator<T> const& allocator)
 220 {
 221 amrex::ignore_unused(data, count, allocator);
 222 if constexpr (std::is_same_v<float, std::remove_cv_t<T>> ||
 223 std::is_same_v<double, std::remove_cv_t<T>>) {
 224 if (amrex::InitSNaN()) {
 225#ifdef AMREX_USE_GPU
 226 if constexpr (RunOnGpu<Allocator<T>>::value) {
 227 amrex::fill_snan<RunOn::Device>(data, count);
 229 return;
 230 } else if constexpr (IsPolymorphicArenaAllocator<Allocator<T>>::value) {
 231 if (allocator.arena()->isManaged() ||
 232 allocator.arena()->isDevice())
 233 {
 234 amrex::fill_snan<RunOn::Device>(data, count);
 236 return;
 237 }
 238 }
 239#endif
 240 amrex::fill_snan<RunOn::Host>(data, count);
 241 }
 242 }
 243 }
245
    // Runtime-tunable growth factor used by PODVector::GetNewCapacityForPush.
    // growth_factor is a global (defined in AMReX_PODVector.cpp); the
    // Get/Set pair reads/writes it, and detail::ValidateUserInput /
    // Initialize handle ParmParse-style setup -- definitions live in the
    // .cpp file.
 246 namespace VectorGrowthStrategy
 247 {
 248 extern AMREX_EXPORT Real growth_factor;
 249 inline Real GetGrowthFactor () { return growth_factor; }
 250 inline void SetGrowthFactor (Real a_factor);
 251
 252 namespace detail
 253 {
 254 void ValidateUserInput ();
 255 }
 256
 257 void Initialize ();
 258 }
259
 // PODVector: a std::vector-like container restricted to trivially
 // copyable element types.  Because elements need no construction or
 // destruction, all element movement is done with raw byte copies
 // (memcpy/memmove or GPU memcpy via the detail:: helpers), and the
 // Allocator (inherited via EBO) decides whether storage is host or
 // device memory.
 // NOTE(review): this listing is a Doxygen text dump and several original
 // source lines are absent (their embedded line numbers are missing
 // below).  Each gap is flagged with a NOTE(review) comment -- confirm
 // every flagged spot against the upstream AMReX_PODVector.H.
 260 template <class T, class Allocator = std::allocator<T> >
 261 class PODVector : public Allocator
 262 {
 263 // static_assert(std::is_standard_layout<T>(), "PODVector can only hold standard layout types");
 264 static_assert(std::is_trivially_copyable<T>(), "PODVector can only hold trivially copyable types");
 265 // static_assert(std::is_trivially_default_constructible<T>(), "PODVector can only hold trivial dc types");
 266
 267 using Allocator::allocate;
 268 using Allocator::deallocate;
 269
 270 public:
 // Standard container typedefs; iterators are plain pointers.
 271 using value_type = T;
 272 using allocator_type = Allocator;
 273 using size_type = std::size_t;
 274 using difference_type = std::ptrdiff_t;
 275
 276 using reference = T&;
 277 using pointer = T*;
 278 using iterator = T*;
 279 using reverse_iterator = std::reverse_iterator<iterator>;
 280
 281 using const_reference = const T&;
 282 using const_pointer = const T*;
 283 using const_iterator = const T*;
 284 using const_reverse_iterator = std::reverse_iterator<const_iterator>;
 285
 286 private:
 287 pointer m_data = nullptr;
 // NOTE(review): original line 288 is missing here; per the index at the
 // bottom of this dump it declared m_size and m_capacity (size_type,
 // presumably zero-initialized) -- confirm upstream.
 289
 290 public:
 // Default-construct an empty vector (null data, zero size/capacity).
 291 constexpr PODVector () noexcept = default;
 292
 // Empty vector using a caller-supplied allocator.
 293 constexpr explicit PODVector (const allocator_type& a_allocator) noexcept
 294 : Allocator(a_allocator)
 295 {}
 296
 // Size constructor: allocates but does NOT value-initialize; in debug
 // (InitSNaN) float/double storage is filled with signaling NaNs.
 297 explicit PODVector (size_type a_size)
 298 : m_size(a_size), m_capacity(a_size)
 299 {
 300 if (a_size != 0) {
 301 m_data = allocate(m_size);
 302 detail::maybe_init_snan(m_data, m_size, (Allocator const&)(*this));
 303 }
 304 }
 305
 // Size + fill-value constructor.
 306 PODVector (size_type a_size, const value_type& a_value,
 307 const allocator_type& a_allocator = Allocator())
 308 : Allocator(a_allocator), m_size(a_size), m_capacity(a_size)
 309 {
 310 if (a_size != 0) {
 311 m_data = allocate(m_size);
 312 detail::uninitializedFillNImpl(m_data, a_size, a_value,
 313 (Allocator const&)(*this));
 314 }
 315 }
 316
 // Construct from an initializer list (host data, copied possibly to
 // device memory by initFromListImpl).
 317 PODVector (std::initializer_list<T> a_initializer_list,
 318 const allocator_type& a_allocator = Allocator())
 319 : Allocator(a_allocator),
 320 m_size (a_initializer_list.size()),
 321 m_capacity(a_initializer_list.size())
 322 {
 323 if (a_initializer_list.size() != 0) {
 324 m_data = allocate(m_size);
 325 detail::initFromListImpl(m_data, a_initializer_list,
 326 (Allocator const&)(*this));
 327 }
 328 }
 329
 // Copy constructor: deep byte copy of the other vector's elements.
 // NOTE(review): the signature line (original line 330, presumably
 // "PODVector (const PODVector<T, Allocator>& a_vector)") is missing
 // from this dump.
 331 : Allocator(a_vector),
 332 m_size (a_vector.size()),
 333 m_capacity(a_vector.size())
 334 {
 335 if (a_vector.size() != 0) {
 336 m_data = allocate(m_size);
 337 detail::memCopyImpl(m_data, a_vector.m_data, a_vector.nBytes(),
 338 (Allocator const&)(*this),
 339 (Allocator const&)a_vector);
 340 }
 341 }
 342
 // Move constructor: steals the buffer and leaves the source empty.
 343 PODVector (PODVector<T, Allocator>&& a_vector) noexcept
 344 : Allocator(static_cast<Allocator&&>(a_vector)),
 345 m_data(a_vector.m_data),
 346 m_size(a_vector.m_size),
 347 m_capacity(a_vector.m_capacity)
 348 {
 349 a_vector.m_data = nullptr;
 350 a_vector.m_size = 0;
 351 a_vector.m_capacity = 0;
 352 }
 353
 // Destructor: frees the buffer.  No element destructors run (T is
 // trivially copyable).
 // NOTE(review): original line 354 ("~PODVector ()") is missing, and
 // line 358 (the remainder of the static_assert condition, presumably
 // listing the allowed amrex allocators) is missing too, leaving the
 // static_assert below truncated -- confirm upstream.
 355 {
 356 // let's not worry about other allocators
 357 static_assert(std::is_same<Allocator,std::allocator<T>>::value ||
 359 if (m_data != nullptr) {
 360 deallocate(m_data, capacity());
 361 }
 362 }
 363
 // Copy assignment.  Handles allocator mismatch by releasing the old
 // buffer first, reuses existing capacity when large enough.
 // NOTE(review): the signature line (original line 364, presumably
 // "PODVector& operator= (const PODVector<T, Allocator>& a_vector)")
 // is missing from this dump.
 365 {
 366 if (this == &a_vector) { return *this; }
 367
 // Unequal allocators cannot reuse each other's memory: drop ours and
 // adopt the source's allocator.
 368 if ((Allocator const&)(*this) != (Allocator const&)a_vector) {
 369 if (m_data != nullptr) {
 370 deallocate(m_data, m_capacity);
 371 m_data = nullptr;
 372 m_size = 0;
 373 m_capacity = 0;
 374 }
 375 (Allocator&)(*this) = (Allocator const&)a_vector;
 376 }
 377
 378 const auto other_size = a_vector.size();
 379 if ( other_size > m_capacity ) {
 380 clear();
 381 reserve(other_size);
 382 }
 383
 384 m_size = other_size;
 385 if (m_size > 0) {
 // NOTE(review): original line 386 is missing -- presumably
 // "detail::memCopyImpl(m_data, a_vector.m_data, a_vector.nBytes(),"
 // opening the call completed by the two argument lines below.
 387 (Allocator const&)(*this),
 388 (Allocator const&)a_vector);
 389 }
 390 return *this;
 391 }
 392
 // Move assignment: steal the buffer when allocators compare equal,
 // otherwise fall back to a deep copy.
 // NOTE(review): the signature line (original line 393, presumably
 // "PODVector& operator= (PODVector<T, Allocator>&& a_vector) noexcept")
 // is missing from this dump.
 394 {
 395 if (this == &a_vector) { return *this; }
 396
 397 if (static_cast<Allocator const&>(a_vector) ==
 398 static_cast<Allocator const&>(*this))
 399 {
 400 if (m_data != nullptr) {
 401 deallocate(m_data, m_capacity);
 402 }
 403
 404 m_data = a_vector.m_data;
 405 m_size = a_vector.m_size;
 406 m_capacity = a_vector.m_capacity;
 407
 408 a_vector.m_data = nullptr;
 409 a_vector.m_size = 0;
 410 a_vector.m_capacity = 0;
 411 }
 412 else
 413 {
 414 // if the allocators are not the same we give up and copy
 415 *this = a_vector; // must copy instead of move
 416 }
 417
 418 return *this;
 419 }
 420
 // Erase one element: shift the tail left by one byte-move.  After
 // --m_size, (end() - pos) is exactly the number of trailing elements.
 // NOTE(review): the signature line (original line 421, per the index
 // "iterator erase (const_iterator a_pos)") is missing from this dump.
 422 {
 423 auto* pos = const_cast<iterator>(a_pos);
 424 --m_size;
 425 detail::memMoveImpl(pos, a_pos+1, (end() - pos)*sizeof(T),
 426 (Allocator const&)(*this));
 427 return pos;
 428 }
 429
 // Erase a half-open range [a_first, a_last).
 // NOTE(review): the signature line (original line 430, per the index
 // "iterator erase (const_iterator a_first, const_iterator a_last)")
 // is missing from this dump.
 431 {
 432 size_type num_to_erase = a_last - a_first;
 433 auto* first = const_cast<iterator>(a_first);
 434 if (num_to_erase > 0) {
 435 m_size -= num_to_erase;
 436 detail::memMoveImpl(first, a_last, (end() - first)*sizeof(T),
 437 (Allocator const&)(*this));
 438 }
 439 return first;
 440 }
 441
 // Insert a single element before a_pos.
 442 iterator insert (const_iterator a_pos, const T& a_item)
 443 {
 444 return insert(a_pos, 1, a_item);
 445 }
 446
 // Insert a_count copies of a_value before a_pos.  If capacity is
 // insufficient, AllocateBufferForInsert reallocates and leaves the gap
 // in place; otherwise the tail is shifted right first.
 447 iterator insert (const_iterator a_pos, size_type a_count, const T& a_value)
 448 {
 449 auto* pos = const_cast<iterator>(a_pos);
 450 if (a_count > 0) {
 451 if (m_capacity < m_size + a_count)
 452 {
 453 std::size_t insert_index = std::distance(m_data, pos);
 454 AllocateBufferForInsert(insert_index, a_count);
 // Reallocation may have moved the buffer; recompute pos.
 455 pos = m_data + insert_index;
 456 }
 457 else
 458 {
 459 detail::memMoveImpl(pos+a_count, a_pos, (end() - pos) * sizeof(T),
 460 (Allocator const&)(*this));
 461 m_size += a_count;
 462 }
 463 detail::uninitializedFillNImpl(pos, a_count, a_value,
 464 (Allocator const&)(*this));
 465 }
 466 return pos;
 467 }
 468
 469 iterator insert (const_iterator a_pos, T&& a_item)
 470 {
 471 // This is *POD* vector after all
 472 return insert(a_pos, 1, std::move(a_item));
 473 }
 474
 // Insert an initializer list before a_pos; same gap-making logic as the
 // count/value overload.
 // NOTE(review): the first signature line (original line 475, presumably
 // "iterator insert (const_iterator a_pos,") is missing from this dump.
 476 std::initializer_list<T> a_initializer_list)
 477 {
 478 auto* pos = const_cast<iterator>(a_pos);
 479 size_type count = a_initializer_list.size();
 480 if (count > 0) {
 481 if (m_capacity < m_size + count)
 482 {
 483 std::size_t insert_index = std::distance(m_data, pos);
 484 AllocateBufferForInsert(insert_index, count);
 485 pos = m_data + insert_index;
 486 }
 487 else
 488 {
 489 detail::memMoveImpl(pos+count, a_pos, (end() - pos) * sizeof(T),
 490 (Allocator const&)(*this));
 491 m_size += count;
 492 }
 493 detail::initFromListImpl(pos, a_initializer_list,
 494 (Allocator const&)(*this));
 495 }
 496 return pos;
 497 }
 498
 // Insert an iterator range before a_pos.  The `bar` SFINAE parameter
 // restricts this overload to real iterators (so it does not shadow the
 // count/value overload).
 499 template <class InputIt, class bar = typename std::iterator_traits<InputIt>::difference_type>
 500 iterator insert (const_iterator a_pos, InputIt a_first, InputIt a_last)
 501 {
 502 auto* pos = const_cast<iterator>(a_pos);
 503 size_type count = std::distance(a_first, a_last);
 504 if (count > 0) {
 505 if (m_capacity < m_size + count)
 506 {
 507 std::size_t insert_index = std::distance(m_data, pos);
 508 AllocateBufferForInsert(insert_index, count);
 509 pos = m_data + insert_index;
 510 }
 511 else
 512 {
 513 detail::memMoveImpl(pos+count, a_pos, (end() - pos) * sizeof(T),
 514 (Allocator const&)(*this));
 515 m_size += count;
 516 }
 517 // Unfortunately we don't know whether InputIt points
 518 // GPU or CPU memory. We will assume it's the same as
 519 // the vector.
 520 detail::fillValuesImpl(pos, a_first, count,
 521 (Allocator const&)(*this));
 522 }
 523 return pos;
 524 }
 525
 // Replace the contents with a_count copies of a_value.
 526 void assign (size_type a_count, const T& a_value)
 527 {
 528 if ( a_count > m_capacity ) {
 529 clear();
 530 reserve(a_count);
 531 }
 532 m_size = a_count;
 533 detail::uninitializedFillNImpl(m_data, a_count, a_value,
 534 (Allocator const&)(*this));
 535 }
 536
 // Replace the contents with an initializer list.
 537 void assign (std::initializer_list<T> a_initializer_list)
 538 {
 539 if (a_initializer_list.size() > m_capacity) {
 540 clear();
 541 reserve(a_initializer_list.size());
 542 }
 543 m_size = a_initializer_list.size();
 544 detail::initFromListImpl(m_data, a_initializer_list,
 545 (Allocator const&)(*this));
 546 }
 547
 // Replace the contents with an iterator range (assumed to live in the
 // same memory space as the vector; see the range insert above).
 548 template <class InputIt, class bar = typename std::iterator_traits<InputIt>::difference_type>
 549 void assign (InputIt a_first, InputIt a_last)
 550 {
 551 std::size_t count = std::distance(a_first, a_last);
 552 if (count > m_capacity) {
 553 clear();
 554 reserve(count);
 555 }
 556 m_size = count;
 557 detail::fillValuesImpl(m_data, a_first, count,
 558 (Allocator const&)(*this));
 559 }
 560
 // Overwrite every current element with a_value (size unchanged).
 565 void assign (const T& a_value)
 566 {
 567 assign(m_size, a_value);
 568 }
 569
 570 [[nodiscard]] allocator_type get_allocator () const noexcept { return *this; }
 571
 // Append one element, growing capacity via GetNewCapacityForPush when
 // full.
 572 void push_back (const T& a_value)
 573 {
 574 if (m_size == m_capacity) {
 575 auto new_capacity = GetNewCapacityForPush();
 576 AllocateBufferForPush(new_capacity);
 577 }
 // NOTE(review): original line 578 is missing -- presumably
 // "detail::uninitializedFillNImpl(m_data+m_size, 1, a_value," opening
 // the call completed by the argument line below; confirm upstream.
 579 (Allocator const&)(*this));
 580 ++m_size;
 581 }
 582
 583 // Because T is trivial, there is no need for push_back(T&&)
 584
 585 // Don't have the emplace methods, but not sure how often we use those.
 586
 // Note: pop_back/clear only adjust m_size; no destructors run and no
 // memory is released (use shrink_to_fit for that).
 587 void pop_back () noexcept { --m_size; }
 588
 589 void clear () noexcept { m_size = 0; }
 590
 // Size queries and unchecked element access / iterators.  All return
 // raw pointers/references into the (possibly device) buffer.
 591 [[nodiscard]] size_type size () const noexcept { return m_size; }
 592
 593 [[nodiscard]] size_type capacity () const noexcept { return m_capacity; }
 594
 595 [[nodiscard]] bool empty () const noexcept { return m_size == 0; }
 596
 597 [[nodiscard]] T& operator[] (size_type a_index) noexcept { return m_data[a_index]; }
 598
 599 [[nodiscard]] const T& operator[] (size_type a_index) const noexcept { return m_data[a_index]; }
 600
 601 [[nodiscard]] T& front () noexcept { return *m_data; }
 602
 603 [[nodiscard]] const T& front () const noexcept { return *m_data; }
 604
 605 [[nodiscard]] T& back () noexcept { return *(m_data + m_size - 1); }
 606
 607 [[nodiscard]] const T& back () const noexcept { return *(m_data + m_size - 1); }
 608
 609 [[nodiscard]] T* data () noexcept { return m_data; }
 610
 611 [[nodiscard]] const T* data () const noexcept { return m_data; }
 612
 613 [[nodiscard]] T* dataPtr () noexcept { return m_data; }
 614
 615 [[nodiscard]] const T* dataPtr () const noexcept { return m_data; }
 616
 617 [[nodiscard]] iterator begin () noexcept { return m_data; }
 618
 619 [[nodiscard]] const_iterator begin () const noexcept { return m_data; }
 620
 621 [[nodiscard]] iterator end () noexcept { return m_data + m_size; }
 622
 623 [[nodiscard]] const_iterator end () const noexcept { return m_data + m_size; }
 624
 625 [[nodiscard]] reverse_iterator rbegin () noexcept { return reverse_iterator(end()); }
 626
 627 [[nodiscard]] const_reverse_iterator rbegin () const noexcept { return const_reverse_iterator(end()); }
 628
 629 [[nodiscard]] reverse_iterator rend () noexcept { return reverse_iterator(begin()); }
 630
 631 [[nodiscard]] const_reverse_iterator rend () const noexcept { return const_reverse_iterator(begin()); }
 632
 633 [[nodiscard]] const_iterator cbegin () const noexcept { return m_data; }
 634
 635 [[nodiscard]] const_iterator cend () const noexcept { return m_data + m_size; }
 636
 637 [[nodiscard]] const_reverse_iterator crbegin () const noexcept { return const_reverse_iterator(end()); }
 638
 639 [[nodiscard]] const_reverse_iterator crend () const noexcept { return const_reverse_iterator(begin()); }
 640
 // Resize; newly exposed float/double elements get signaling NaNs when
 // InitSNaN debugging is on, otherwise stay uninitialized.
 641 void resize (size_type a_new_size)
 642 {
 643 auto old_size = m_size;
 644 resize_without_init_snan(a_new_size);
 645 if (old_size < a_new_size) {
 // NOTE(review): original line 646 is missing -- presumably
 // "detail::maybe_init_snan(m_data + old_size," opening the call
 // completed by the argument line below; confirm upstream.
 647 m_size - old_size, (Allocator const&)(*this));
 648 }
 649 }
 650
 // Resize, filling any newly created elements with a_val.
 651 void resize (size_type a_new_size, const T& a_val)
 652 {
 653 size_type old_size = m_size;
 654 resize_without_init_snan(a_new_size);
 655 if (old_size < a_new_size)
 656 {
 // NOTE(review): original line 657 is missing -- presumably
 // "detail::uninitializedFillNImpl(m_data + old_size," opening the call
 // completed by the argument lines below; confirm upstream.
 658 m_size - old_size, a_val,
 659 (Allocator const&)(*this));
 660 }
 661 }
 662
 // Grow capacity to at least a_capacity (never shrinks).
 663 void reserve (size_type a_capacity)
 664 {
 665 if (m_capacity < a_capacity) {
 666 auto fp = detail::allocate_in_place(m_data, a_capacity, a_capacity,
 667 (Allocator&)(*this));
 668 UpdateDataPtr(fp);
 669 }
 670 }
 671
 // Release unused capacity: free everything when empty, otherwise try an
 // in-place shrink, copying to a smaller buffer if the allocator moved
 // the data.
 // NOTE(review): the signature line (original line 672,
 // "void shrink_to_fit ()") is missing from this dump.
 673 {
 674 if (m_data != nullptr) {
 675 if (m_size == 0) {
 676 deallocate(m_data, m_capacity);
 677 m_data = nullptr;
 678 m_capacity = 0;
 679 } else if (m_size < m_capacity) {
 680 auto* new_data = detail::shrink_in_place(m_data, m_size,
 681 (Allocator&)(*this));
 682 if (new_data != m_data) {
 683 detail::memCopyImpl(new_data, m_data, nBytes(),
 684 (Allocator const&)(*this),
 685 (Allocator const&)(*this));
 686 deallocate(m_data, m_capacity);
 687 }
 // NOTE(review): original line 688 is missing -- presumably reassigns
 // m_data to new_data and/or sets m_capacity = m_size; confirm upstream.
 689 }
 690 }
 691 }
 692
 // O(1) swap of buffers, bookkeeping, and allocators.
 693 void swap (PODVector<T, Allocator>& a_vector) noexcept
 694 {
 695 std::swap(m_data, a_vector.m_data);
 696 std::swap(m_size, a_vector.m_size);
 697 std::swap(m_capacity, a_vector.m_capacity);
 698 std::swap(static_cast<Allocator&>(a_vector), static_cast<Allocator&>(*this));
 699 }
 700
 701 private:
 702
 // Size of the live elements in bytes (not the capacity).
 703 [[nodiscard]] size_type nBytes () const noexcept
 704 {
 705 return m_size*sizeof(T);
 706 }
 707
 708 // this is where we would change the growth strategy for push_back
 // First allocation targets ~64 bytes; afterwards capacity grows by the
 // configurable VectorGrowthStrategy factor (with an exact integer
 // formula for the default 1.5).
 709 [[nodiscard]] size_type GetNewCapacityForPush () const noexcept
 710 {
 711 if (m_capacity == 0) {
 712 return std::max(64/sizeof(T), size_type(1));
 713 } else {
 // NOTE(review): original line 714 is missing -- presumably
 // "Real const gf = VectorGrowthStrategy::GetGrowthFactor();" which
 // defines the `gf` used below; confirm upstream.
 715 if (amrex::almostEqual(gf, Real(1.5))) {
 716 return (m_capacity*3+1)/2;
 717 } else {
 718 return size_type(gf*Real(m_capacity+1));
 719 }
 720 }
 721 }
 722
 // Adopt a freshly (re)allocated buffer: copy live bytes over if the
 // pointer changed, free the old buffer, and record the new capacity.
 723 void UpdateDataPtr (FatPtr<T> const& fp)
 724 {
 725 auto* new_data = fp.ptr();
 726 auto new_capacity = fp.size();
 727 if (m_data != nullptr && m_data != new_data) {
 728 if (m_size > 0) {
 729 detail::memCopyImpl(new_data, m_data, nBytes(),
 730 (Allocator const&)(*this),
 731 (Allocator const&)(*this));
 732 }
 733 deallocate(m_data, capacity());
 734 }
 735 m_data = new_data;
 736 m_capacity = new_capacity;
 737 }
 738
 739 // This is where we play games with the allocator. This function
 740 // updates m_data and m_capacity, but not m_size.
 741 void AllocateBufferForPush (size_type target_capacity)
 742 {
 743 auto fp = detail::allocate_in_place(m_data, m_size+1, target_capacity,
 744 (Allocator&)(*this));
 745 UpdateDataPtr(fp);
 746 }
 747
 748 // This is where we play games with the allocator and the growth
 749 // strategy for insert. This function updates m_data, m_size and
 750 // m_capacity.
 // Reallocates (possibly in place) leaving an uninitialized gap of
 // a_count elements at a_index for the caller to fill.
 // NOTE(review): the signature line (original line 751, per the index
 // "void AllocateBufferForInsert (size_type a_index, size_type a_count)")
 // is missing from this dump.
 752 {
 753 size_type new_size = m_size + a_count;
 754 size_type new_capacity = std::max(new_size, GetNewCapacityForPush());
 755 auto fp = detail::allocate_in_place(m_data, new_size, new_capacity,
 756 (Allocator&)(*this));
 757 auto* new_data = fp.ptr();
 758 new_capacity = fp.size();
 759
 760 if (m_data != nullptr) {
 761 if (m_data == new_data) {
 // In-place growth: just shift the tail right to open the gap.
 762 if (m_size > a_index) {
 763 detail::memMoveImpl(m_data+a_index+a_count, m_data+a_index,
 764 (m_size-a_index)*sizeof(T),
 765 (Allocator const&)(*this));
 766 }
 767 } else {
 // New buffer: copy the prefix and suffix around the gap.  The copies
 // pass sync=false so both can be batched before one synchronization.
 768 if (m_size > 0) {
 769 if (a_index > 0) {
 770 detail::memCopyImpl(new_data, m_data, a_index*sizeof(T),
 771 (Allocator const&)(*this),
 772 (Allocator const&)(*this), false);
 773 }
 774 if (m_size > a_index) {
 775 detail::memCopyImpl(new_data+a_index+a_count, m_data+a_index,
 776 (m_size-a_index)*sizeof(T),
 777 (Allocator const&)(*this),
 778 (Allocator const&)(*this), false);
 779 }
 // NOTE(review): original line 780 is missing -- presumably
 // "Gpu::streamSynchronize();" to complete the async copies before the
 // old buffer is freed below; confirm upstream.
 781 }
 782 deallocate(m_data, m_capacity);
 783 }
 784 }
 785 m_data = new_data;
 786 m_size = new_size;
 787 m_capacity = new_capacity;
 788 }
 789
 // Resize bookkeeping shared by both resize overloads; never initializes
 // the new elements.
 // NOTE(review): the signature line (original line 790, per the index
 // "void resize_without_init_snan (size_type a_new_size)") is missing
 // from this dump.
 791 {
 792 if (m_capacity < a_new_size) {
 793 reserve(a_new_size);
 794 }
 795 m_size = a_new_size;
 796 }
 797 };
798}
799
800#endif
#define AMREX_EXPORT
Definition AMReX_Extension.H:191
#define AMREX_GPU_DEVICE
Definition AMReX_GpuQualifiers.H:18
virtual void free(void *pt)=0
A pure virtual function for deleting the arena pointed to by pt.
virtual void * alloc(std::size_t sz)=0
static void streamSynchronize() noexcept
Definition AMReX_GpuDevice.cpp:681
Definition AMReX_PODVector.H:262
PODVector(std::initializer_list< T > a_initializer_list, const allocator_type &a_allocator=Allocator())
Definition AMReX_PODVector.H:317
iterator insert(const_iterator a_pos, T &&a_item)
Definition AMReX_PODVector.H:469
const_iterator begin() const noexcept
Definition AMReX_PODVector.H:619
void resize_without_init_snan(size_type a_new_size)
Definition AMReX_PODVector.H:790
const_iterator cbegin() const noexcept
Definition AMReX_PODVector.H:633
iterator insert(const_iterator a_pos, const T &a_item)
Definition AMReX_PODVector.H:442
PODVector & operator=(const PODVector< T, Allocator > &a_vector)
Definition AMReX_PODVector.H:364
iterator erase(const_iterator a_pos)
Definition AMReX_PODVector.H:421
void reserve(size_type a_capacity)
Definition AMReX_PODVector.H:663
iterator insert(const_iterator a_pos, size_type a_count, const T &a_value)
Definition AMReX_PODVector.H:447
size_type size() const noexcept
Definition AMReX_PODVector.H:591
const T * const_pointer
Definition AMReX_PODVector.H:282
void swap(PODVector< T, Allocator > &a_vector) noexcept
Definition AMReX_PODVector.H:693
void UpdateDataPtr(FatPtr< T > const &fp)
Definition AMReX_PODVector.H:723
const_reverse_iterator crbegin() const noexcept
Definition AMReX_PODVector.H:637
PODVector(const PODVector< T, Allocator > &a_vector)
Definition AMReX_PODVector.H:330
std::reverse_iterator< iterator > reverse_iterator
Definition AMReX_PODVector.H:279
T * pointer
Definition AMReX_PODVector.H:277
void shrink_to_fit()
Definition AMReX_PODVector.H:672
void assign(const T &a_value)
Definition AMReX_PODVector.H:565
void pop_back() noexcept
Definition AMReX_PODVector.H:587
size_type nBytes() const noexcept
Definition AMReX_PODVector.H:703
iterator insert(const_iterator a_pos, std::initializer_list< T > a_initializer_list)
Definition AMReX_PODVector.H:475
iterator insert(const_iterator a_pos, InputIt a_first, InputIt a_last)
Definition AMReX_PODVector.H:500
T * iterator
Definition AMReX_PODVector.H:278
void AllocateBufferForPush(size_type target_capacity)
Definition AMReX_PODVector.H:741
size_type GetNewCapacityForPush() const noexcept
Definition AMReX_PODVector.H:709
reverse_iterator rend() noexcept
Definition AMReX_PODVector.H:629
PODVector(size_type a_size)
Definition AMReX_PODVector.H:297
allocator_type get_allocator() const noexcept
Definition AMReX_PODVector.H:570
iterator begin() noexcept
Definition AMReX_PODVector.H:617
void assign(std::initializer_list< T > a_initializer_list)
Definition AMReX_PODVector.H:537
iterator end() noexcept
Definition AMReX_PODVector.H:621
T value_type
Definition AMReX_PODVector.H:271
const T * dataPtr() const noexcept
Definition AMReX_PODVector.H:615
constexpr PODVector() noexcept=default
void assign(size_type a_count, const T &a_value)
Definition AMReX_PODVector.H:526
const_reverse_iterator rbegin() const noexcept
Definition AMReX_PODVector.H:627
std::size_t size_type
Definition AMReX_PODVector.H:273
const_reverse_iterator rend() const noexcept
Definition AMReX_PODVector.H:631
size_type m_size
Definition AMReX_PODVector.H:288
void assign(InputIt a_first, InputIt a_last)
Definition AMReX_PODVector.H:549
T & front() noexcept
Definition AMReX_PODVector.H:601
pointer m_data
Definition AMReX_PODVector.H:287
const_reverse_iterator crend() const noexcept
Definition AMReX_PODVector.H:639
reverse_iterator rbegin() noexcept
Definition AMReX_PODVector.H:625
iterator erase(const_iterator a_first, const_iterator a_last)
Definition AMReX_PODVector.H:430
void clear() noexcept
Definition AMReX_PODVector.H:589
size_type capacity() const noexcept
Definition AMReX_PODVector.H:593
const_iterator cend() const noexcept
Definition AMReX_PODVector.H:635
size_type m_capacity
Definition AMReX_PODVector.H:288
T * dataPtr() noexcept
Definition AMReX_PODVector.H:613
PODVector(PODVector< T, Allocator > &&a_vector) noexcept
Definition AMReX_PODVector.H:343
T & reference
Definition AMReX_PODVector.H:276
const T * data() const noexcept
Definition AMReX_PODVector.H:611
const T * const_iterator
Definition AMReX_PODVector.H:283
void resize(size_type a_new_size)
Definition AMReX_PODVector.H:641
~PODVector()
Definition AMReX_PODVector.H:354
const_iterator end() const noexcept
Definition AMReX_PODVector.H:623
const T & const_reference
Definition AMReX_PODVector.H:281
std::reverse_iterator< const_iterator > const_reverse_iterator
Definition AMReX_PODVector.H:284
T & back() noexcept
Definition AMReX_PODVector.H:605
void resize(size_type a_new_size, const T &a_val)
Definition AMReX_PODVector.H:651
std::ptrdiff_t difference_type
Definition AMReX_PODVector.H:274
const T & back() const noexcept
Definition AMReX_PODVector.H:607
T & operator[](size_type a_index) noexcept
Definition AMReX_PODVector.H:597
PODVector(size_type a_size, const value_type &a_value, const allocator_type &a_allocator=Allocator())
Definition AMReX_PODVector.H:306
T * data() noexcept
Definition AMReX_PODVector.H:609
const T & front() const noexcept
Definition AMReX_PODVector.H:603
bool empty() const noexcept
Definition AMReX_PODVector.H:595
void AllocateBufferForInsert(size_type a_index, size_type a_count)
Definition AMReX_PODVector.H:751
void push_back(const T &a_value)
Definition AMReX_PODVector.H:572
Allocator allocator_type
Definition AMReX_PODVector.H:272
void dtod_memcpy_async(void *p_d_dst, const void *p_d_src, const std::size_t sz) noexcept
Definition AMReX_GpuDevice.H:279
void streamSynchronize() noexcept
Definition AMReX_GpuDevice.H:237
void dtoh_memcpy_async(void *p_h, const void *p_d, const std::size_t sz) noexcept
Definition AMReX_GpuDevice.H:265
void htod_memcpy_async(void *p_d, const void *p_h, const std::size_t sz) noexcept
Definition AMReX_GpuDevice.H:251
void ValidateUserInput()
Definition AMReX_PODVector.cpp:15
Real growth_factor
Definition AMReX_PODVector.cpp:7
void Initialize()
Definition AMReX_PODVector.cpp:34
Real GetGrowthFactor()
Definition AMReX_PODVector.H:249
void SetGrowthFactor(Real a_factor)
Definition AMReX_PODVector.cpp:41
void memCopyImpl(void *dst, const void *src, std::size_t count, Allocator const &dst_allocator, Allocator const &src_allocator, bool sync=true)
Definition AMReX_PODVector.H:152
void fillValuesImpl(T *dst, T const *src, Size count, Allocator< T > const &allocator)
Definition AMReX_PODVector.H:110
void uninitializedFillNImpl(T *data, Size count, const T &value, Allocator< T > const &allocator)
Definition AMReX_PODVector.H:46
T * shrink_in_place(T *p, Size n, Allocator< T > &allocator)
Definition AMReX_PODVector.H:36
void memMoveImpl(void *dst, const void *src, std::size_t count, Allocator const &allocator)
Definition AMReX_PODVector.H:188
FatPtr< T > allocate_in_place(T *p, Size nmin, Size nmax, Allocator< T > &allocator)
Definition AMReX_PODVector.H:24
void maybe_init_snan(T *data, Size count, Allocator< T > const &allocator)
Definition AMReX_PODVector.H:219
void initFromListImpl(T *data, std::initializer_list< T > const &list, Allocator< T > const &allocator)
Definition AMReX_PODVector.H:84
Definition AMReX_Amr.cpp:49
std::enable_if_t< std::is_integral_v< T > > ParallelFor(TypeList< CTOs... > ctos, std::array< int, sizeof...(CTOs)> const &runtime_options, T N, F &&f)
Definition AMReX_CTOParallelForImpl.H:191
bool InitSNaN() noexcept
Definition AMReX.cpp:173
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void ignore_unused(const Ts &...)
This shuts up the compiler about unused variables.
Definition AMReX.H:127
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE std::enable_if_t< std::is_floating_point_v< T >, bool > almostEqual(T x, T y, int ulp=2)
Definition AMReX_Algorithm.H:93
Arena * The_Arena()
Definition AMReX_Arena.cpp:616
Definition AMReX_FabArrayCommI.H:896
Definition AMReX_GpuAllocators.H:24
constexpr T * ptr() const noexcept
Definition AMReX_GpuAllocators.H:27
constexpr std::size_t size() const noexcept
Definition AMReX_GpuAllocators.H:28
Definition AMReX_GpuAllocators.H:161
Definition AMReX_GpuAllocators.H:172
Definition AMReX_GpuAllocators.H:158