AMReX: Block-Structured AMR Software Framework

AMReX_PODVector.H — annotated source listing.
Go to the documentation of this file.
1#ifndef AMREX_PODVECTOR_H_
2#define AMREX_PODVECTOR_H_
3#include <AMReX_Config.H>
4
5#include <AMReX.H>
6#include <AMReX_Arena.H>
7#include <AMReX_Enum.H>
8#include <AMReX_GpuLaunch.H>
10#include <AMReX_GpuDevice.H>
11#include <AMReX_MemPool.H>
12#include <AMReX_TypeTraits.H>
13
14#include <cmath>
15#include <iterator>
16#include <type_traits>
17#include <utility>
18#include <memory>
19#include <cstring>
20
21namespace amrex
22{
23 namespace detail
24 {
25 template <typename T, typename Size, template<class> class Allocator>
26 FatPtr<T> allocate_in_place ([[maybe_unused]] T* p, [[maybe_unused]] Size nmin, Size nmax,
27 Allocator<T>& allocator)
28 {
29 if constexpr (IsArenaAllocator<Allocator<T>>::value) {
30 return allocator.allocate_in_place(p, nmin, nmax);
31 } else {
32 T* pnew = allocator.allocate(nmax);
33 return {pnew, nmax};
34 }
35 }
36
37 template <typename T, typename Size, template<class> class Allocator>
38 T* shrink_in_place ([[maybe_unused]] T* p, Size n, Allocator<T>& allocator)
39 {
40 if constexpr (IsArenaAllocator<Allocator<T>>::value) {
41 return allocator.shrink_in_place(p, n);
42 } else {
43 return allocator.allocate(n);
44 }
45 }
46
// Fill `count` uninitialized elements at `data` with `value`.
// Device-resident storage (RunOnGpu allocators, or polymorphic arena
// allocators whose arena is managed/device memory) is filled by a GPU
// ParallelFor kernel; everything else falls through to
// std::uninitialized_fill_n on the host.
// NOTE(review): _WIN32 uses a runtime `if` where other platforms use
// `if constexpr` -- presumably an MSVC workaround for device lambdas in
// discarded constexpr branches; confirm against upstream.
47 template <typename T, typename Size, template<class> class Allocator>
48 void uninitializedFillNImpl (T* data, Size count, const T& value,
49 [[maybe_unused]] Allocator<T> const& allocator)
50 {
51#ifdef AMREX_USE_GPU
52#ifdef _WIN32
53 if (RunOnGpu<Allocator<T>>::value)
54#else
55 if constexpr (RunOnGpu<Allocator<T>>::value)
56#endif
57 {
58 amrex::ParallelFor(count, [=] AMREX_GPU_DEVICE (Size i) noexcept {
59 data[i] = value;
60 });
// NOTE(review): original line 61 was dropped by the doxygen extraction
// (likely a stream synchronization after the kernel launch); confirm
// against the upstream source.
62 return;
63 }
64#ifdef _WIN32
65 else if (IsPolymorphicArenaAllocator<Allocator<T>>::value)
66#else
67 else if constexpr (IsPolymorphicArenaAllocator<Allocator<T>>::value)
68#endif
69 {
70 if (allocator.arena()->isManaged() ||
71 allocator.arena()->isDevice())
72 {
73 amrex::ParallelFor(count, [=] AMREX_GPU_DEVICE (Size i) noexcept
74 {
75 data[i] = value;
76 });
// NOTE(review): original line 77 also missing here (extraction gap).
78 return;
79 }
80 }
81#endif
// Host fallback: plain CPU fill of raw storage.
82 std::uninitialized_fill_n(data, count, value);
83 }
84
// Copy the elements of an initializer_list (always host memory) into
// `data`.  If the destination is device-resident, use an async
// host-to-device memcpy; otherwise a plain std::memcpy.  `count` is in
// bytes, not elements.
85 template <typename T, template<class> class Allocator>
86 void initFromListImpl (T* data, std::initializer_list<T> const& list,
87 [[maybe_unused]] Allocator<T> const & allocator)
88 {
89 auto count = list.size() * sizeof(T);
90#ifdef AMREX_USE_GPU
91 if constexpr (RunOnGpu<Allocator<T>>::value)
92 {
93 Gpu::htod_memcpy_async(data, std::data(list), count);
// NOTE(review): original line 94 missing (doxygen extraction gap);
// likely a stream synchronization -- the initializer_list's host
// buffer dies when the caller returns, so the async copy must finish
// first.  Confirm against upstream.
95 return;
96 }
97 else if constexpr (IsPolymorphicArenaAllocator<Allocator<T>>::value)
98 {
99 if (allocator.arena()->isManaged() ||
100 allocator.arena()->isDevice())
101 {
102 Gpu::htod_memcpy_async(data, std::data(list), count);
// NOTE(review): original line 103 missing here as well (extraction gap).
104 return;
105 }
106 }
107#endif
108 std::memcpy(data, std::data(list), count);
109 }
110
// Element-wise copy of `count` values from `src` to `dst`, where both
// pointers are assumed to live in the memory space implied by the
// allocator: a GPU kernel for device-resident storage, a plain loop on
// the host otherwise.
111 template <typename T, typename Size, template<class> class Allocator>
112 void fillValuesImpl (T* dst, T const* src, Size count,
113 [[maybe_unused]] Allocator<T> const& allocator)
114 {
115#ifdef AMREX_USE_GPU
116#ifdef _WIN32
117 if (RunOnGpu<Allocator<T>>::value)
118#else
119 if constexpr (RunOnGpu<Allocator<T>>::value)
120#endif
121 {
122 amrex::ParallelFor(count, [=] AMREX_GPU_DEVICE (Size i) noexcept {
123 dst[i] = src[i];
124 });
// NOTE(review): original line 125 missing (extraction gap; likely a
// stream synchronization).
126 return;
127 }
128#ifdef _WIN32
129 else if (IsPolymorphicArenaAllocator<Allocator<T>>::value)
130#else
131 else if constexpr (IsPolymorphicArenaAllocator<Allocator<T>>::value)
132#endif
133 {
134 if (allocator.arena()->isManaged() ||
135 allocator.arena()->isDevice())
136 {
137 amrex::ParallelFor(count, [=] AMREX_GPU_DEVICE (Size i) noexcept
138 {
139 dst[i] = src[i];
140 });
// NOTE(review): original line 141 missing here as well.
142 return;
143 }
144 }
145#else
// Without GPU support, a RunOnGpu allocator cannot exist.
146 static_assert(RunOnGpu<Allocator<T>>::value == false);
147#endif
// Host path; the constexpr guard keeps the loop out of device builds
// where the allocator runs on the GPU.
148 if constexpr (! RunOnGpu<Allocator<T>>::value) {
149 for (Size i = 0; i < count; ++i) { dst[i] = src[i]; }
150 }
151 }
152
// Raw byte copy between two (possibly device-resident) buffers.  The
// two allocators determine which direction of async GPU memcpy is
// needed (d2d, h2d, or d2h); `sync` selects whether to block on the
// stream afterwards.  Host-to-host falls through to std::memcpy.
153 template <typename Allocator>
154 void memCopyImpl (void* dst, const void* src, std::size_t count,
155 [[maybe_unused]] Allocator const& dst_allocator,
156 [[maybe_unused]] Allocator const& src_allocator,
157 [[maybe_unused]] bool sync = true)
158 {
159#ifdef AMREX_USE_GPU
160 if constexpr (RunOnGpu<Allocator>::value)
161 {
162 Gpu::dtod_memcpy_async(dst, src, count);
163 if (sync) { Gpu::streamSynchronize(); }
164 return;
165 }
// NOTE(review): original line 166 missing (doxygen extraction gap);
// following the pattern of the sibling helpers it is almost certainly
// `else if constexpr (IsPolymorphicArenaAllocator<Allocator>::value)`,
// and the bare `{` below belonged to that branch.  Confirm upstream.
167 {
168 bool dst_on_device = dst_allocator.arena()->isManaged() ||
169 dst_allocator.arena()->isDevice();
170 bool src_on_device = src_allocator.arena()->isManaged() ||
171 src_allocator.arena()->isDevice();
172 if (dst_on_device || src_on_device)
173 {
// Pick the copy direction from where each buffer lives.
174 if (dst_on_device && src_on_device) {
175 Gpu::dtod_memcpy_async(dst, src, count);
176 } else if (dst_on_device) {
177 Gpu::htod_memcpy_async(dst, src, count);
178 } else {
179 Gpu::dtoh_memcpy_async(dst, src, count);
180 }
181 if (sync) { Gpu::streamSynchronize(); }
182 return;
183 }
184 }
185#endif
186 std::memcpy(dst, src, count);
187 }
188
// memmove-equivalent for possibly overlapping ranges.  GPU memcpys do
// not support overlap, so device-resident data is staged through a
// temporary buffer from The_Arena(); host data uses std::memmove.
189 template <typename Allocator>
190 void memMoveImpl (void* dst, const void* src, std::size_t count,
191 [[maybe_unused]] Allocator const& allocator)
192 {
193#ifdef AMREX_USE_GPU
194 if constexpr (RunOnGpu<Allocator>::value)
195 {
196 auto* tmp = The_Arena()->alloc(count);
197 Gpu::dtod_memcpy_async(tmp, src, count);
198 Gpu::dtod_memcpy_async(dst, tmp, count);
// NOTE(review): original line 199 missing (extraction gap); likely a
// stream synchronization -- freeing tmp while the async copies are in
// flight would be unsafe.  Confirm against upstream.
200 The_Arena()->free(tmp);
201 return;
202 }
// NOTE(review): original line 203 missing; per the sibling helpers it
// is presumably the
// `else if constexpr (IsPolymorphicArenaAllocator<Allocator>::value)`
// branch header for the block below.
204 {
205 if (allocator.arena()->isManaged() ||
206 allocator.arena()->isDevice())
207 {
208 auto* tmp = The_Arena()->alloc(count);
209 Gpu::dtod_memcpy_async(tmp, src, count);
210 Gpu::dtod_memcpy_async(dst, tmp, count);
// NOTE(review): original line 211 missing here as well (extraction gap).
212 The_Arena()->free(tmp);
213 return;
214 }
215 }
216#endif
217 std::memmove(dst, src, count);
218 }
219
// Debug aid: when InitSNaN() is enabled and T is float or double, fill
// freshly allocated storage with signaling NaNs so reads of
// uninitialized data trap.  No-op for other element types.
220 template <typename T, typename Size, template<class> class Allocator>
221 void maybe_init_snan (T* data, Size count, Allocator<T> const& allocator)
222 {
223 amrex::ignore_unused(data, count, allocator);
224 if constexpr (std::is_same_v<float, std::remove_cv_t<T>> ||
225 std::is_same_v<double, std::remove_cv_t<T>>) {
226 if (amrex::InitSNaN()) {
227#ifdef AMREX_USE_GPU
228 if constexpr (RunOnGpu<Allocator<T>>::value) {
229 amrex::fill_snan<RunOn::Device>(data, count);
// NOTE(review): original line 230 missing (doxygen extraction gap;
// likely a stream synchronization).
231 return;
232 } else if constexpr (IsPolymorphicArenaAllocator<Allocator<T>>::value) {
233 if (allocator.arena()->isManaged() ||
234 allocator.arena()->isDevice())
235 {
236 amrex::fill_snan<RunOn::Device>(data, count);
// NOTE(review): original line 237 missing here as well.
238 return;
239 }
240 }
241#endif
242 amrex::fill_snan<RunOn::Host>(data, count);
243 }
244 }
245 }
246 }
247
249
// NOTE(review): original line 248, which defines the GrowthStrategy
// enum via AMREX_ENUM (per the cross-reference index at the bottom of
// this listing), was dropped by the doxygen extraction.
// Runtime-configurable growth factor used by grow_podvector_capacity's
// growth-factor strategy.  growth_factor and the out-of-line functions
// are defined in AMReX_PODVector.cpp (per the index below).
250 namespace VectorGrowthStrategy
251 {
252 extern AMREX_EXPORT Real growth_factor;
253 inline Real GetGrowthFactor () { return growth_factor; }
254 inline void SetGrowthFactor (Real a_factor);
255
256 namespace detail
257 {
// Presumably sanity-checks the user-supplied growth factor; defined in
// AMReX_PODVector.cpp -- confirm there.
258 void ValidateUserInput ();
259 }
260
// Reads runtime configuration; defined in AMReX_PODVector.cpp.
261 void Initialize ();
262 }
263
// Compute a new capacity (in elements) for a PODVector that must hold
// at least `new_size` elements, according to the requested
// GrowthStrategy.
// NOTE(review): the doxygen extraction dropped this switch's `case`
// labels (original lines 268, 278, 280) and the declaration of `gf`
// (original line 284, presumably
// `auto gf = VectorGrowthStrategy::GetGrowthFactor();`); the branch
// annotations below are inferred from the surviving bodies -- confirm
// against upstream.
264 inline std::size_t grow_podvector_capacity (GrowthStrategy strategy, std::size_t new_size,
265 std::size_t old_capacity, std::size_t sizeof_T)
266 {
267 switch (strategy) {
// [Poisson-style growth] ~10% headroom for small sizes, ~3*sqrt(n)
// headroom beyond 900 elements.
269 if (new_size <= 900) {
270 // 3*sqrt(900) = 900/10. Note that we don't need to be precise
271 // here. Even if later we change the else block to
272 // 4*std::sqrt(s), it's not really an issue to still use 900
273 // here.
274 return new_size + new_size/10;
275 } else {
276 return new_size + std::size_t(3*std::sqrt(new_size));
277 }
// [Exact] no headroom at all.
279 return new_size;
// [Growth-factor] start at ~64 bytes, then multiply by gf.
281 if (old_capacity == 0) {
282 return std::max(64/sizeof_T, new_size);
283 } else {
285 if (amrex::almostEqual(gf, Real(1.5))) {
// Pure-integer arithmetic for the common 1.5x growth factor.
286 return std::max((old_capacity*3+1)/2, new_size);
287 } else {
288 return std::max(std::size_t(gf*Real(old_capacity+1)), new_size);
289 }
290 }
291 }
292 return 0; // unreachable
293 }
294
// PODVector<T, Allocator>: a std::vector-like container restricted to
// trivially copyable element types.  Because elements need no
// constructors/destructors, all resizing, copying and insertion is done
// with raw memory operations (the detail:: helpers above, which pick
// memcpy/memmove or async GPU copies based on the allocator), and the
// allocator -- inherited via EBO -- decides whether storage lives on
// the host or the device.
// NOTE(review): this listing is a doxygen extraction and several
// original source lines are missing (the embedded line numbers skip);
// signatures and initializer lists that are visibly truncated below are
// flagged where they occur.  Confirm all such spots against upstream.
295 template <class T, class Allocator = std::allocator<T> >
296 class PODVector : public Allocator
297 {
298 // static_assert(std::is_standard_layout<T>(), "PODVector can only hold standard layout types");
299 static_assert(std::is_trivially_copyable<T>(), "PODVector can only hold trivially copyable types");
300 // static_assert(std::is_trivially_default_constructible<T>(), "PODVector can only hold trivial dc types");
301
302 using Allocator::allocate;
303 using Allocator::deallocate;
304
305 public:
// Standard container typedefs; iterators are plain pointers.
306 using value_type = T;
307 using allocator_type = Allocator;
308 using size_type = std::size_t;
309 using difference_type = std::ptrdiff_t;
310
311 using reference = T&;
312 using pointer = T*;
313 using iterator = T*;
314 using reverse_iterator = std::reverse_iterator<iterator>;
315
316 using const_reference = const T&;
317 using const_pointer = const T*;
318 using const_iterator = const T*;
319 using const_reverse_iterator = std::reverse_iterator<const_iterator>;
320
321 private:
322 pointer m_data = nullptr;
// NOTE(review): original line 323, declaring m_size and m_capacity
// (both size_type, per the member index at the bottom of this page),
// was dropped by the extraction.
324
325 public:
// Empty vector, default-constructed allocator.
326 constexpr PODVector () noexcept = default;
327
// Empty vector with a caller-supplied allocator.
328 constexpr explicit PODVector (const allocator_type& a_allocator) noexcept
329 : Allocator(a_allocator)
330 {}
331
// Sized constructor; float/double contents may be filled with
// signaling NaNs (see detail::maybe_init_snan).
// NOTE(review): initializer-list line 334 (the m_capacity initializer)
// is missing below.
332 explicit PODVector (size_type a_size)
333 : m_size(a_size),
335 {
336 if (m_capacity != 0) {
337 m_data = allocate(m_capacity);
338 if (a_size != 0) {
339 detail::maybe_init_snan(m_data, m_size, (Allocator const&)(*this));
340 }
341 }
342 }
343
// Sized constructor with fill value.
// NOTE(review): initializer-list line 347 (m_capacity) missing below.
344 PODVector (size_type a_size, const value_type& a_value,
345 const allocator_type& a_allocator = Allocator())
346 : Allocator(a_allocator), m_size(a_size),
348 {
349 if (m_capacity != 0) {
350 m_data = allocate(m_capacity);
351 if (a_size != 0) {
352 detail::uninitializedFillNImpl(m_data, a_size, a_value,
353 (Allocator const&)(*this));
354 }
355 }
356 }
357
// Initializer-list constructor; capacity comes from the Poisson growth
// strategy.  NOTE(review): line 362 (start of the m_capacity
// initializer) missing below.
358 PODVector (std::initializer_list<T> a_initializer_list,
359 const allocator_type& a_allocator = Allocator())
360 : Allocator(a_allocator),
361 m_size (a_initializer_list.size()),
363 GrowthStrategy::Poisson, a_initializer_list.size(), 0, sizeof(T)))
364 {
365 if (m_capacity != 0) {
366 m_data = allocate(m_capacity);
367 if (a_initializer_list.size() != 0) {
368 detail::initFromListImpl(m_data, a_initializer_list,
369 (Allocator const&)(*this));
370 }
371 }
372 }
373
// Copy constructor -- its signature line (374,
// `PODVector (const PODVector<T, Allocator>& a_vector)` per the index
// below) was lost in extraction.  Deep-copies via memCopyImpl.
375 : Allocator(a_vector),
376 m_size (a_vector.size()),
377 m_capacity(a_vector.capacity())
378 {
379 if (m_capacity != 0) {
380 m_data = allocate(m_capacity);
381 if (a_vector.size() != 0) {
382 detail::memCopyImpl(m_data, a_vector.m_data, a_vector.nBytes(),
383 (Allocator const&)(*this),
384 (Allocator const&)a_vector);
385 }
386 }
387 }
388
// Move constructor: steals the buffer and leaves the source empty.
389 PODVector (PODVector<T, Allocator>&& a_vector) noexcept
390 : Allocator(static_cast<Allocator&&>(a_vector)),
391 m_data(a_vector.m_data),
392 m_size(a_vector.m_size),
393 m_capacity(a_vector.m_capacity)
394 {
395 a_vector.m_data = nullptr;
396 a_vector.m_size = 0;
397 a_vector.m_capacity = 0;
398 }
399
// Destructor (signature line 400, `~PODVector ()`, lost in extraction).
// NOTE(review): the static_assert condition is truncated -- line 404
// (the rest of the allowed-allocator disjunction and the message) is
// missing.
401 {
402 // let's not worry about other allocators
403 static_assert(std::is_same<Allocator,std::allocator<T>>::value ||
405 if (m_data != nullptr) {
406 deallocate(m_data, capacity());
407 }
408 }
409
// Copy assignment (signature line 410 lost; see index).  If the
// allocators differ, the old buffer is released and the allocator is
// copied before reallocating and deep-copying.
411 {
412 if (this == &a_vector) { return *this; }
413
414 if ((Allocator const&)(*this) != (Allocator const&)a_vector) {
415 if (m_data != nullptr) {
416 deallocate(m_data, m_capacity);
417 m_data = nullptr;
418 m_size = 0;
419 m_capacity = 0;
420 }
421 (Allocator&)(*this) = (Allocator const&)a_vector;
422 }
423
424 const auto other_size = a_vector.size();
425 if ( other_size > m_capacity ) {
426 clear();
427 reserve_doit(other_size);
428 }
429
430 m_size = other_size;
431 if (m_size > 0) {
// NOTE(review): line 432 (the opening of the detail::memCopyImpl call)
// is missing below.
433 (Allocator const&)(*this),
434 (Allocator const&)a_vector);
435 }
436 return *this;
437 }
438
// Move assignment (signature line 439 lost).  Steals the buffer when
// the allocators compare equal; otherwise falls back to a copy.
440 {
441 if (this == &a_vector) { return *this; }
442
443 if (static_cast<Allocator const&>(a_vector) ==
444 static_cast<Allocator const&>(*this))
445 {
446 if (m_data != nullptr) {
447 deallocate(m_data, m_capacity);
448 }
449
450 m_data = a_vector.m_data;
451 m_size = a_vector.m_size;
452 m_capacity = a_vector.m_capacity;
453
454 a_vector.m_data = nullptr;
455 a_vector.m_size = 0;
456 a_vector.m_capacity = 0;
457 }
458 else
459 {
460 // if the allocators are not the same we give up and copy
461 *this = a_vector; // must copy instead of move
462 }
463
464 return *this;
465 }
466
// Single-element erase (signature line 467 lost): shifts the tail left
// one slot with memMoveImpl.  No element destruction needed -- POD.
468 {
469 auto* pos = const_cast<iterator>(a_pos);
470 --m_size;
471 detail::memMoveImpl(pos, a_pos+1, (end() - pos)*sizeof(T),
472 (Allocator const&)(*this));
473 return pos;
474 }
475
// Range erase (signature line 476 lost): shifts the tail left over the
// erased span.
477 {
478 size_type num_to_erase = a_last - a_first;
479 auto* first = const_cast<iterator>(a_first);
480 if (num_to_erase > 0) {
481 m_size -= num_to_erase;
482 detail::memMoveImpl(first, a_last, (end() - first)*sizeof(T),
483 (Allocator const&)(*this));
484 }
485 return first;
486 }
487
// Single-element insert delegates to the count overload.
488 iterator insert (const_iterator a_pos, const T& a_item)
489 {
490 return insert(a_pos, 1, a_item);
491 }
492
// Insert a_count copies of a_value before a_pos.  Either grows the
// buffer with the gap already in place (AllocateBufferForInsert) or
// shifts the tail right, then fills the gap.
493 iterator insert (const_iterator a_pos, size_type a_count, const T& a_value)
494 {
495 auto* pos = const_cast<iterator>(a_pos);
496 if (a_count > 0) {
497 if (m_capacity < m_size + a_count)
498 {
499 std::size_t insert_index = std::distance(m_data, pos);
500 AllocateBufferForInsert(insert_index, a_count);
// Reallocation may have moved the buffer; recompute pos.
501 pos = m_data + insert_index;
502 }
503 else
504 {
505 detail::memMoveImpl(pos+a_count, a_pos, (end() - pos) * sizeof(T),
506 (Allocator const&)(*this));
507 m_size += a_count;
508 }
509 detail::uninitializedFillNImpl(pos, a_count, a_value,
510 (Allocator const&)(*this));
511 }
512 return pos;
513 }
514
515 iterator insert (const_iterator a_pos, T&& a_item)
516 {
517 // This is *POD* vector after all
518 return insert(a_pos, 1, std::move(a_item));
519 }
520
// Initializer-list insert (first signature line 521 lost); same
// gap-then-fill pattern as the count overload.
522 std::initializer_list<T> a_initializer_list)
523 {
524 auto* pos = const_cast<iterator>(a_pos);
525 size_type count = a_initializer_list.size();
526 if (count > 0) {
527 if (m_capacity < m_size + count)
528 {
529 std::size_t insert_index = std::distance(m_data, pos);
530 AllocateBufferForInsert(insert_index, count);
531 pos = m_data + insert_index;
532 }
533 else
534 {
535 detail::memMoveImpl(pos+count, a_pos, (end() - pos) * sizeof(T),
536 (Allocator const&)(*this));
537 m_size += count;
538 }
539 detail::initFromListImpl(pos, a_initializer_list,
540 (Allocator const&)(*this));
541 }
542 return pos;
543 }
544
// Iterator-range insert.  `bar` SFINAE-restricts this overload to real
// iterators so it does not shadow insert(pos, count, value).
545 template <class InputIt, class bar = typename std::iterator_traits<InputIt>::difference_type>
546 iterator insert (const_iterator a_pos, InputIt a_first, InputIt a_last)
547 {
548 auto* pos = const_cast<iterator>(a_pos);
549 size_type count = std::distance(a_first, a_last);
550 if (count > 0) {
551 if (m_capacity < m_size + count)
552 {
553 std::size_t insert_index = std::distance(m_data, pos);
554 AllocateBufferForInsert(insert_index, count);
555 pos = m_data + insert_index;
556 }
557 else
558 {
559 detail::memMoveImpl(pos+count, a_pos, (end() - pos) * sizeof(T),
560 (Allocator const&)(*this));
561 m_size += count;
562 }
563 // Unfortunately we don't know whether InputIt points
564 // GPU or CPU memory. We will assume it's the same as
565 // the vector.
566 detail::fillValuesImpl(pos, a_first, count,
567 (Allocator const&)(*this));
568 }
569 return pos;
570 }
571
// Replace contents with a_count copies of a_value.
572 void assign (size_type a_count, const T& a_value)
573 {
574 if ( a_count > m_capacity ) {
575 clear();
576 reserve(a_count);
577 }
578 m_size = a_count;
579 detail::uninitializedFillNImpl(m_data, a_count, a_value,
580 (Allocator const&)(*this));
581 }
582
// Replace contents with an initializer list.
583 void assign (std::initializer_list<T> a_initializer_list)
584 {
585 if (a_initializer_list.size() > m_capacity) {
586 clear();
587 reserve(a_initializer_list.size());
588 }
589 m_size = a_initializer_list.size();
590 detail::initFromListImpl(m_data, a_initializer_list,
591 (Allocator const&)(*this));
592 }
593
// Replace contents with an iterator range (same memory-space caveat as
// range insert above).
594 template <class InputIt, class bar = typename std::iterator_traits<InputIt>::difference_type>
595 void assign (InputIt a_first, InputIt a_last)
596 {
597 std::size_t count = std::distance(a_first, a_last);
598 if (count > m_capacity) {
599 clear();
600 reserve(count);
601 }
602 m_size = count;
603 detail::fillValuesImpl(m_data, a_first, count,
604 (Allocator const&)(*this));
605 }
606
// Overwrite all current elements with a_value (size unchanged).
// NOTE(review): original lines 607-610 (doxygen comment block) missing
// above this declaration.
611 void assign (const T& a_value)
612 {
613 assign(m_size, a_value);
614 }
615
616 [[nodiscard]] allocator_type get_allocator () const noexcept { return *this; }
617
// Append one element, growing per GetNewCapacityForPush when full.
// NOTE(review): original line 624 (the opening of the
// detail::uninitializedFillNImpl call writing a_value at the end) is
// missing below; confirm against upstream.
618 void push_back (const T& a_value)
619 {
620 if (m_size == m_capacity) {
621 auto new_capacity = GetNewCapacityForPush();
622 AllocateBufferForPush(new_capacity);
623 }
625 (Allocator const&)(*this));
626 ++m_size;
627 }
628
629 // Because T is trivial, there is no need for push_back(T&&)
630
631 // Don't have the emplace methods, but not sure how often we use those.
632
// pop_back/clear only adjust the size; no destruction, no deallocation.
633 void pop_back () noexcept { --m_size; }
634
635 void clear () noexcept { m_size = 0; }
636
637 [[nodiscard]] size_type size () const noexcept { return m_size; }
638
639 [[nodiscard]] size_type capacity () const noexcept { return m_capacity; }
640
641 [[nodiscard]] bool empty () const noexcept { return m_size == 0 || m_data == nullptr; } // test m_data to avoid compiler warning
642
// Unchecked element access (data may be device-resident, depending on
// the allocator -- dereferencing on the host is then invalid).
643 [[nodiscard]] T& operator[] (size_type a_index) noexcept { return m_data[a_index]; }
644
645 [[nodiscard]] const T& operator[] (size_type a_index) const noexcept { return m_data[a_index]; }
646
647 [[nodiscard]] T& front () noexcept { return *m_data; }
648
649 [[nodiscard]] const T& front () const noexcept { return *m_data; }
650
651 [[nodiscard]] T& back () noexcept { return *(m_data + m_size - 1); }
652
653 [[nodiscard]] const T& back () const noexcept { return *(m_data + m_size - 1); }
654
655 [[nodiscard]] T* data () noexcept { return m_data; }
656
657 [[nodiscard]] const T* data () const noexcept { return m_data; }
658
// dataPtr() is the historical AMReX spelling of data().
659 [[nodiscard]] T* dataPtr () noexcept { return m_data; }
660
661 [[nodiscard]] const T* dataPtr () const noexcept { return m_data; }
662
663 [[nodiscard]] iterator begin () noexcept { return m_data; }
664
665 [[nodiscard]] const_iterator begin () const noexcept { return m_data; }
666
667 [[nodiscard]] iterator end () noexcept { return m_data + m_size; }
668
669 [[nodiscard]] const_iterator end () const noexcept { return m_data + m_size; }
670
671 [[nodiscard]] reverse_iterator rbegin () noexcept { return reverse_iterator(end()); }
672
673 [[nodiscard]] const_reverse_iterator rbegin () const noexcept { return const_reverse_iterator(end()); }
674
675 [[nodiscard]] reverse_iterator rend () noexcept { return reverse_iterator(begin()); }
676
677 [[nodiscard]] const_reverse_iterator rend () const noexcept { return const_reverse_iterator(begin()); }
678
679 [[nodiscard]] const_iterator cbegin () const noexcept { return m_data; }
680
681 [[nodiscard]] const_iterator cend () const noexcept { return m_data + m_size; }
682
683 [[nodiscard]] const_reverse_iterator crbegin () const noexcept { return const_reverse_iterator(end()); }
684
685 [[nodiscard]] const_reverse_iterator crend () const noexcept { return const_reverse_iterator(begin()); }
686
// resize without a fill value; new float/double elements may get
// signaling NaNs via the maybe_init_snan call whose opening line (723)
// is missing below.  NOTE(review): original lines 687-716 (doxygen
// comment) and 718 (second parameter,
// `GrowthStrategy strategy = GrowthStrategy::Poisson` per the index)
// were dropped by the extraction.
717 void resize (size_type a_new_size,
719 {
720 auto old_size = m_size;
721 resize_without_init_snan(a_new_size, strategy);
722 if (old_size < a_new_size) {
724 m_size - old_size, (Allocator const&)(*this));
725 }
726 }
727
// resize with a fill value for the newly added elements.
// NOTE(review): lines 728-757 (doxygen comment), 759 (strategy
// parameter) and 765 (opening of the uninitializedFillNImpl call) are
// missing.
758 void resize (size_type a_new_size, const T& a_val,
760 {
761 size_type old_size = m_size;
762 resize_without_init_snan(a_new_size, strategy);
763 if (old_size < a_new_size)
764 {
766 m_size - old_size, a_val,
767 (Allocator const&)(*this));
768 }
769 }
770
// reserve -- signature line 800
// (`void reserve (size_type a_capacity, GrowthStrategy strategy = ...)`
// per the index below) lost in extraction.  Grows via
// grow_podvector_capacity; never shrinks.
801 {
802 if (m_capacity < a_capacity) {
803 reserve_doit(grow_podvector_capacity(strategy, a_capacity, m_capacity, sizeof(T)));
804 }
805 }
806
// shrink_to_fit -- signature line 807 lost.  Frees everything when
// empty; otherwise asks the allocator to shrink in place and copies to
// a smaller buffer if it could not.
// NOTE(review): line 824 (presumably `m_capacity = m_size;`) is
// missing below; confirm against upstream.
808 {
809 if (m_data != nullptr) {
810 if (m_size == 0) {
811 deallocate(m_data, m_capacity);
812 m_data = nullptr;
813 m_capacity = 0;
814 } else if (m_size < m_capacity) {
815 auto* new_data = detail::shrink_in_place(m_data, m_size,
816 (Allocator&)(*this));
817 if (new_data != m_data) {
818 detail::memCopyImpl(new_data, m_data, nBytes(),
819 (Allocator const&)(*this),
820 (Allocator const&)(*this));
821 deallocate(m_data, m_capacity);
822 }
823 m_data = new_data;
825 }
826 }
827 }
828
// O(1) swap of buffers, bookkeeping, and allocators.
829 void swap (PODVector<T, Allocator>& a_vector) noexcept
830 {
831 std::swap(m_data, a_vector.m_data);
832 std::swap(m_size, a_vector.m_size);
833 std::swap(m_capacity, a_vector.m_capacity);
834 std::swap(static_cast<Allocator&>(a_vector), static_cast<Allocator&>(*this));
835 }
836
// Release the buffer, deferring the actual free for arena-backed
// storage (Gpu::freeAsync).  NOTE(review): lines 837-841 (doxygen
// comment) and 845 (the branch condition selecting the freeAsync path,
// presumably an `if constexpr` on the arena-allocator trait) are
// missing; confirm against upstream.
842 void free_async () noexcept
843 {
844 if (m_data != nullptr) {
846 Gpu::freeAsync(Allocator::arena(), m_data);
847 } else {
848 deallocate(m_data, capacity());
849 }
850 m_data = nullptr;
851 m_size = 0;
852 m_capacity = 0;
853 }
854 }
855
856 private:
857
// Unconditional grow to exactly a_capacity (no growth policy applied).
858 void reserve_doit (size_type a_capacity) {
859 if (m_capacity < a_capacity) {
860 auto fp = detail::allocate_in_place(m_data, a_capacity, a_capacity,
861 (Allocator&)(*this));
862 UpdateDataPtr(fp);
863 }
864 }
865
866 [[nodiscard]] size_type nBytes () const noexcept
867 {
868 return m_size*sizeof(T);
869 }
870
871 // this is where we would change the growth strategy for push_back
// NOTE(review): line 874 (the opening of the return statement, a call
// to grow_podvector_capacity) is missing below.
872 [[nodiscard]] size_type GetNewCapacityForPush () const noexcept
873 {
875 m_capacity, sizeof(T));
876 }
877
// Adopt a freshly (re)allocated buffer: copy old contents over if the
// allocator gave us a different pointer, then free the old buffer.
// Updates m_data/m_capacity but not m_size.
878 void UpdateDataPtr (FatPtr<T> const& fp)
879 {
880 auto* new_data = fp.ptr();
881 auto new_capacity = fp.size();
882 if (m_data != nullptr && m_data != new_data) {
883 if (m_size > 0) {
884 detail::memCopyImpl(new_data, m_data, nBytes(),
885 (Allocator const&)(*this),
886 (Allocator const&)(*this));
887 }
888 deallocate(m_data, capacity());
889 }
890 m_data = new_data;
891 m_capacity = new_capacity;
892 }
893
894 // This is where we play games with the allocator. This function
895 // updates m_data and m_capacity, but not m_size.
896 void AllocateBufferForPush (size_type target_capacity)
897 {
898 auto fp = detail::allocate_in_place(m_data, m_size+1, target_capacity,
899 (Allocator&)(*this));
900 UpdateDataPtr(fp);
901 }
902
903 // This is where we play games with the allocator and the growth
904 // strategy for insert. This function updates m_data, m_size and
905 // m_capacity.
// NOTE(review): signature line 906
// (`void AllocateBufferForInsert (size_type a_index, size_type a_count)`
// per the index) and line 935 (likely a stream synchronization before
// the old buffer is freed) are missing; confirm against upstream.
// Grows the buffer while leaving an a_count-wide gap at a_index, using
// in-place reallocation when possible and a split copy otherwise.
907 {
908 size_type new_size = m_size + a_count;
909 size_type new_capacity = std::max(new_size, GetNewCapacityForPush());
910 auto fp = detail::allocate_in_place(m_data, new_size, new_capacity,
911 (Allocator&)(*this));
912 auto* new_data = fp.ptr();
913 new_capacity = fp.size();
914
915 if (m_data != nullptr) {
916 if (m_data == new_data) {
// Grown in place: just shift the tail right to open the gap.
917 if (m_size > a_index) {
918 detail::memMoveImpl(m_data+a_index+a_count, m_data+a_index,
919 (m_size-a_index)*sizeof(T),
920 (Allocator const&)(*this));
921 }
922 } else {
// New buffer: copy the prefix and the (gap-shifted) suffix separately.
923 if (m_size > 0) {
924 if (a_index > 0) {
925 detail::memCopyImpl(new_data, m_data, a_index*sizeof(T),
926 (Allocator const&)(*this),
927 (Allocator const&)(*this), false);
928 }
929 if (m_size > a_index) {
930 detail::memCopyImpl(new_data+a_index+a_count, m_data+a_index,
931 (m_size-a_index)*sizeof(T),
932 (Allocator const&)(*this),
933 (Allocator const&)(*this), false);
934 }
936 }
937 deallocate(m_data, m_capacity);
938 }
939 }
940 m_data = new_data;
941 m_size = new_size;
942 m_capacity = new_capacity;
943 }
944
// resize helper that skips the signaling-NaN initialization.
// NOTE(review): signature line 945
// (`void resize_without_init_snan (size_type, GrowthStrategy)` per the
// index) missing below.
946 {
947 if (m_capacity < a_new_size) {
948 reserve(a_new_size, strategy);
949 }
950 m_size = a_new_size;
951 }
952 };
953}
954
955#endif
#define AMREX_ENUM(CLASS,...)
Definition AMReX_Enum.H:206
#define AMREX_EXPORT
Definition AMReX_Extension.H:191
#define AMREX_GPU_DEVICE
Definition AMReX_GpuQualifiers.H:18
virtual void free(void *pt)=0
A pure virtual function for deleting the arena pointed to by pt.
virtual void * alloc(std::size_t sz)=0
static void streamSynchronize() noexcept
Definition AMReX_GpuDevice.cpp:750
Definition AMReX_PODVector.H:297
PODVector(std::initializer_list< T > a_initializer_list, const allocator_type &a_allocator=Allocator())
Definition AMReX_PODVector.H:358
iterator insert(const_iterator a_pos, T &&a_item)
Definition AMReX_PODVector.H:515
void reserve(size_type a_capacity, GrowthStrategy strategy=GrowthStrategy::Poisson)
Definition AMReX_PODVector.H:800
const_iterator begin() const noexcept
Definition AMReX_PODVector.H:665
const_iterator cbegin() const noexcept
Definition AMReX_PODVector.H:679
iterator insert(const_iterator a_pos, const T &a_item)
Definition AMReX_PODVector.H:488
PODVector & operator=(const PODVector< T, Allocator > &a_vector)
Definition AMReX_PODVector.H:410
iterator erase(const_iterator a_pos)
Definition AMReX_PODVector.H:467
iterator insert(const_iterator a_pos, size_type a_count, const T &a_value)
Definition AMReX_PODVector.H:493
size_type size() const noexcept
Definition AMReX_PODVector.H:637
const T * const_pointer
Definition AMReX_PODVector.H:317
void swap(PODVector< T, Allocator > &a_vector) noexcept
Definition AMReX_PODVector.H:829
void UpdateDataPtr(FatPtr< T > const &fp)
Definition AMReX_PODVector.H:878
const_reverse_iterator crbegin() const noexcept
Definition AMReX_PODVector.H:683
PODVector(const PODVector< T, Allocator > &a_vector)
Definition AMReX_PODVector.H:374
std::reverse_iterator< iterator > reverse_iterator
Definition AMReX_PODVector.H:314
T * pointer
Definition AMReX_PODVector.H:312
void shrink_to_fit()
Definition AMReX_PODVector.H:807
void assign(const T &a_value)
Definition AMReX_PODVector.H:611
void pop_back() noexcept
Definition AMReX_PODVector.H:633
size_type nBytes() const noexcept
Definition AMReX_PODVector.H:866
iterator insert(const_iterator a_pos, std::initializer_list< T > a_initializer_list)
Definition AMReX_PODVector.H:521
iterator insert(const_iterator a_pos, InputIt a_first, InputIt a_last)
Definition AMReX_PODVector.H:546
T * iterator
Definition AMReX_PODVector.H:313
void AllocateBufferForPush(size_type target_capacity)
Definition AMReX_PODVector.H:896
size_type GetNewCapacityForPush() const noexcept
Definition AMReX_PODVector.H:872
reverse_iterator rend() noexcept
Definition AMReX_PODVector.H:675
PODVector(size_type a_size)
Definition AMReX_PODVector.H:332
allocator_type get_allocator() const noexcept
Definition AMReX_PODVector.H:616
iterator begin() noexcept
Definition AMReX_PODVector.H:663
void resize(size_type a_new_size, GrowthStrategy strategy=GrowthStrategy::Poisson)
Definition AMReX_PODVector.H:717
void free_async() noexcept
Definition AMReX_PODVector.H:842
void reserve_doit(size_type a_capacity)
Definition AMReX_PODVector.H:858
void assign(std::initializer_list< T > a_initializer_list)
Definition AMReX_PODVector.H:583
iterator end() noexcept
Definition AMReX_PODVector.H:667
T value_type
Definition AMReX_PODVector.H:306
const T * dataPtr() const noexcept
Definition AMReX_PODVector.H:661
constexpr PODVector() noexcept=default
void assign(size_type a_count, const T &a_value)
Definition AMReX_PODVector.H:572
const_reverse_iterator rbegin() const noexcept
Definition AMReX_PODVector.H:673
void resize_without_init_snan(size_type a_new_size, GrowthStrategy strategy)
Definition AMReX_PODVector.H:945
std::size_t size_type
Definition AMReX_PODVector.H:308
const_reverse_iterator rend() const noexcept
Definition AMReX_PODVector.H:677
size_type m_size
Definition AMReX_PODVector.H:323
void assign(InputIt a_first, InputIt a_last)
Definition AMReX_PODVector.H:595
T & front() noexcept
Definition AMReX_PODVector.H:647
pointer m_data
Definition AMReX_PODVector.H:322
const_reverse_iterator crend() const noexcept
Definition AMReX_PODVector.H:685
reverse_iterator rbegin() noexcept
Definition AMReX_PODVector.H:671
iterator erase(const_iterator a_first, const_iterator a_last)
Definition AMReX_PODVector.H:476
void clear() noexcept
Definition AMReX_PODVector.H:635
size_type capacity() const noexcept
Definition AMReX_PODVector.H:639
const_iterator cend() const noexcept
Definition AMReX_PODVector.H:681
size_type m_capacity
Definition AMReX_PODVector.H:323
T * dataPtr() noexcept
Definition AMReX_PODVector.H:659
PODVector(PODVector< T, Allocator > &&a_vector) noexcept
Definition AMReX_PODVector.H:389
T & reference
Definition AMReX_PODVector.H:311
const T * data() const noexcept
Definition AMReX_PODVector.H:657
const T * const_iterator
Definition AMReX_PODVector.H:318
~PODVector()
Definition AMReX_PODVector.H:400
const_iterator end() const noexcept
Definition AMReX_PODVector.H:669
void resize(size_type a_new_size, const T &a_val, GrowthStrategy strategy=GrowthStrategy::Poisson)
Definition AMReX_PODVector.H:758
const T & const_reference
Definition AMReX_PODVector.H:316
std::reverse_iterator< const_iterator > const_reverse_iterator
Definition AMReX_PODVector.H:319
T & back() noexcept
Definition AMReX_PODVector.H:651
std::ptrdiff_t difference_type
Definition AMReX_PODVector.H:309
const T & back() const noexcept
Definition AMReX_PODVector.H:653
T & operator[](size_type a_index) noexcept
Definition AMReX_PODVector.H:643
PODVector(size_type a_size, const value_type &a_value, const allocator_type &a_allocator=Allocator())
Definition AMReX_PODVector.H:344
T * data() noexcept
Definition AMReX_PODVector.H:655
const T & front() const noexcept
Definition AMReX_PODVector.H:649
bool empty() const noexcept
Definition AMReX_PODVector.H:641
void AllocateBufferForInsert(size_type a_index, size_type a_count)
Definition AMReX_PODVector.H:906
void push_back(const T &a_value)
Definition AMReX_PODVector.H:618
Allocator allocator_type
Definition AMReX_PODVector.H:307
void dtod_memcpy_async(void *p_d_dst, const void *p_d_src, const std::size_t sz) noexcept
Definition AMReX_GpuDevice.H:317
void freeAsync(Arena *arena, void *mem) noexcept
Definition AMReX_GpuDevice.H:281
void streamSynchronize() noexcept
Definition AMReX_GpuDevice.H:260
void dtoh_memcpy_async(void *p_h, const void *p_d, const std::size_t sz) noexcept
Definition AMReX_GpuDevice.H:303
void htod_memcpy_async(void *p_d, const void *p_h, const std::size_t sz) noexcept
Definition AMReX_GpuDevice.H:289
void ValidateUserInput()
Definition AMReX_PODVector.cpp:15
void Initialize()
Definition AMReX_PODVector.cpp:34
Real growth_factor
Definition AMReX_PODVector.cpp:7
Real GetGrowthFactor()
Definition AMReX_PODVector.H:253
void SetGrowthFactor(Real a_factor)
Definition AMReX_PODVector.cpp:41
void memCopyImpl(void *dst, const void *src, std::size_t count, Allocator const &dst_allocator, Allocator const &src_allocator, bool sync=true)
Definition AMReX_PODVector.H:154
void fillValuesImpl(T *dst, T const *src, Size count, Allocator< T > const &allocator)
Definition AMReX_PODVector.H:112
void uninitializedFillNImpl(T *data, Size count, const T &value, Allocator< T > const &allocator)
Definition AMReX_PODVector.H:48
T * shrink_in_place(T *p, Size n, Allocator< T > &allocator)
Definition AMReX_PODVector.H:38
void memMoveImpl(void *dst, const void *src, std::size_t count, Allocator const &allocator)
Definition AMReX_PODVector.H:190
FatPtr< T > allocate_in_place(T *p, Size nmin, Size nmax, Allocator< T > &allocator)
Definition AMReX_PODVector.H:26
void maybe_init_snan(T *data, Size count, Allocator< T > const &allocator)
Definition AMReX_PODVector.H:221
void initFromListImpl(T *data, std::initializer_list< T > const &list, Allocator< T > const &allocator)
Definition AMReX_PODVector.H:86
Definition AMReX_Amr.cpp:49
__host__ __device__ void ignore_unused(const Ts &...)
This shuts up the compiler about unused variables.
Definition AMReX.H:138
std::enable_if_t< std::is_integral_v< T > > ParallelFor(TypeList< CTOs... > ctos, std::array< int, sizeof...(CTOs)> const &runtime_options, T N, F &&f)
Definition AMReX_CTOParallelForImpl.H:191
bool InitSNaN() noexcept
Definition AMReX.cpp:173
GrowthStrategy
Definition AMReX_PODVector.H:248
__host__ __device__ std::enable_if_t< std::is_floating_point_v< T >, bool > almostEqual(T x, T y, int ulp=2)
Definition AMReX_Algorithm.H:93
std::size_t grow_podvector_capacity(GrowthStrategy strategy, std::size_t new_size, std::size_t old_capacity, std::size_t sizeof_T)
Definition AMReX_PODVector.H:264
Arena * The_Arena()
Definition AMReX_Arena.cpp:705
Definition AMReX_FabArrayCommI.H:1000
Definition AMReX_GpuAllocators.H:24
constexpr T * ptr() const noexcept
Definition AMReX_GpuAllocators.H:27
constexpr std::size_t size() const noexcept
Definition AMReX_GpuAllocators.H:28
Definition AMReX_GpuAllocators.H:161
Definition AMReX_GpuAllocators.H:172
Definition AMReX_GpuAllocators.H:158