Block-Structured AMR Software Framework
Loading...
Searching...
No Matches
AMReX_PODVector.H
Go to the documentation of this file.
1#ifndef AMREX_PODVECTOR_H_
2#define AMREX_PODVECTOR_H_
3#include <AMReX_Config.H>
4
5#include <AMReX.H>
6#include <AMReX_Arena.H>
7#include <AMReX_Enum.H>
8#include <AMReX_GpuLaunch.H>
10#include <AMReX_GpuDevice.H>
11#include <AMReX_MemPool.H>
12#include <AMReX_TypeTraits.H>
13
14#include <cmath>
15#include <iterator>
16#include <type_traits>
17#include <utility>
18#include <memory>
19#include <cstring>
20
21namespace amrex
22{
24 namespace detail
25 {
26 template <typename T, typename Size, template<class> class Allocator>
27 FatPtr<T> allocate_in_place ([[maybe_unused]] T* p, [[maybe_unused]] Size nmin, Size nmax,
28 Allocator<T>& allocator)
29 {
30 if constexpr (IsArenaAllocator<Allocator<T>>::value) {
31 return allocator.allocate_in_place(p, nmin, nmax);
32 } else {
33 T* pnew = allocator.allocate(nmax);
34 return {pnew, nmax};
35 }
36 }
37
38 template <typename T, typename Size, template<class> class Allocator>
39 T* shrink_in_place ([[maybe_unused]] T* p, Size n, Allocator<T>& allocator)
40 {
41 if constexpr (IsArenaAllocator<Allocator<T>>::value) {
42 return allocator.shrink_in_place(p, n);
43 } else {
44 return allocator.allocate(n);
45 }
46 }
47
48 template <typename T, typename Size, template<class> class Allocator>
49 void uninitializedFillNImpl (T* data, Size count, const T& value,
50 [[maybe_unused]] Allocator<T> const& allocator)
51 {
52#ifdef AMREX_USE_GPU
53#ifdef _WIN32
54 if (RunOnGpu<Allocator<T>>::value)
55#else
56 if constexpr (RunOnGpu<Allocator<T>>::value)
57#endif
58 {
59 amrex::ParallelFor(count, [=] AMREX_GPU_DEVICE (Size i) noexcept {
60 data[i] = value;
61 });
63 return;
64 }
65#ifdef _WIN32
66 else if (IsPolymorphicArenaAllocator<Allocator<T>>::value)
67#else
68 else if constexpr (IsPolymorphicArenaAllocator<Allocator<T>>::value)
69#endif
70 {
71 if (allocator.arena()->isManaged() ||
72 allocator.arena()->isDevice())
73 {
74 amrex::ParallelFor(count, [=] AMREX_GPU_DEVICE (Size i) noexcept
75 {
76 data[i] = value;
77 });
79 return;
80 }
81 }
82#endif
83 std::uninitialized_fill_n(data, count, value);
84 }
85
86 template <typename T, template<class> class Allocator>
87 void initFromListImpl (T* data, std::initializer_list<T> const& list,
88 [[maybe_unused]] Allocator<T> const & allocator)
89 {
90 auto count = list.size() * sizeof(T);
91#ifdef AMREX_USE_GPU
92 if constexpr (RunOnGpu<Allocator<T>>::value)
93 {
94 Gpu::htod_memcpy_async(data, std::data(list), count);
96 return;
97 }
98 else if constexpr (IsPolymorphicArenaAllocator<Allocator<T>>::value)
99 {
100 if (allocator.arena()->isManaged() ||
101 allocator.arena()->isDevice())
102 {
103 Gpu::htod_memcpy_async(data, std::data(list), count);
105 return;
106 }
107 }
108#endif
109 std::memcpy(data, std::data(list), count);
110 }
111
112 template <typename T, typename Size, template<class> class Allocator>
113 void fillValuesImpl (T* dst, T const* src, Size count,
114 [[maybe_unused]] Allocator<T> const& allocator)
115 {
116#ifdef AMREX_USE_GPU
117#ifdef _WIN32
118 if (RunOnGpu<Allocator<T>>::value)
119#else
120 if constexpr (RunOnGpu<Allocator<T>>::value)
121#endif
122 {
123 amrex::ParallelFor(count, [=] AMREX_GPU_DEVICE (Size i) noexcept {
124 dst[i] = src[i];
125 });
127 return;
128 }
129#ifdef _WIN32
130 else if (IsPolymorphicArenaAllocator<Allocator<T>>::value)
131#else
132 else if constexpr (IsPolymorphicArenaAllocator<Allocator<T>>::value)
133#endif
134 {
135 if (allocator.arena()->isManaged() ||
136 allocator.arena()->isDevice())
137 {
138 amrex::ParallelFor(count, [=] AMREX_GPU_DEVICE (Size i) noexcept
139 {
140 dst[i] = src[i];
141 });
143 return;
144 }
145 }
146#else
147 static_assert(RunOnGpu<Allocator<T>>::value == false);
148#endif
149 if constexpr (! RunOnGpu<Allocator<T>>::value) {
150 for (Size i = 0; i < count; ++i) { dst[i] = src[i]; }
151 }
152 }
153
        // Copy `count` bytes from src to dst, picking the copy primitive that
        // matches the memory spaces implied by the two allocators.  When a
        // device copy is issued and `sync` is true, the GPU stream is
        // synchronized before returning; callers pass sync=false to batch
        // several copies and synchronize once themselves.
        template <typename Allocator>
        void memCopyImpl (void* dst, const void* src, std::size_t count,
                          [[maybe_unused]] Allocator const& dst_allocator,
                          [[maybe_unused]] Allocator const& src_allocator,
                          [[maybe_unused]] bool sync = true)
        {
#ifdef AMREX_USE_GPU
            if constexpr (RunOnGpu<Allocator>::value)
            {
                // Allocator statically guarantees device-accessible memory.
                Gpu::dtod_memcpy_async(dst, src, count);
                if (sync) { Gpu::streamSynchronize(); }
                return;
            }
            else if constexpr (IsPolymorphicArenaAllocator<Allocator>::value)
            {
                // Memory space is only known at run time: query both arenas
                // and pick dtod / htod / dtoh accordingly.
                bool dst_on_device = dst_allocator.arena()->isManaged() ||
                                     dst_allocator.arena()->isDevice();
                bool src_on_device = src_allocator.arena()->isManaged() ||
                                     src_allocator.arena()->isDevice();
                if (dst_on_device || src_on_device)
                {
                    if (dst_on_device && src_on_device) {
                        Gpu::dtod_memcpy_async(dst, src, count);
                    } else if (dst_on_device) {
                        Gpu::htod_memcpy_async(dst, src, count);
                    } else {
                        Gpu::dtoh_memcpy_async(dst, src, count);
                    }
                    if (sync) { Gpu::streamSynchronize(); }
                    return;
                }
            }
#endif
            // Both sides on the host: plain memcpy.
            std::memcpy(dst, src, count);
        }
189
190 template <typename Allocator>
191 void memMoveImpl (void* dst, const void* src, std::size_t count,
192 [[maybe_unused]] Allocator const& allocator)
193 {
194#ifdef AMREX_USE_GPU
195 if constexpr (RunOnGpu<Allocator>::value)
196 {
197 auto* tmp = The_Arena()->alloc(count);
198 Gpu::dtod_memcpy_async(tmp, src, count);
199 Gpu::dtod_memcpy_async(dst, tmp, count);
201 The_Arena()->free(tmp);
202 return;
203 }
204 else if constexpr (IsPolymorphicArenaAllocator<Allocator>::value)
205 {
206 if (allocator.arena()->isManaged() ||
207 allocator.arena()->isDevice())
208 {
209 auto* tmp = The_Arena()->alloc(count);
210 Gpu::dtod_memcpy_async(tmp, src, count);
211 Gpu::dtod_memcpy_async(dst, tmp, count);
213 The_Arena()->free(tmp);
214 return;
215 }
216 }
217#endif
218 std::memmove(dst, src, count);
219 }
220
221 template <typename T, typename Size, template<class> class Allocator>
222 void maybe_init_snan (T* data, Size count, Allocator<T> const& allocator)
223 {
224 amrex::ignore_unused(data, count, allocator);
225 if constexpr (std::is_same_v<float, std::remove_cv_t<T>> ||
226 std::is_same_v<double, std::remove_cv_t<T>>) {
227 if (amrex::InitSNaN()) {
228#ifdef AMREX_USE_GPU
229 if constexpr (RunOnGpu<Allocator<T>>::value) {
230 amrex::fill_snan<RunOn::Device>(data, count);
232 return;
233 } else if constexpr (IsPolymorphicArenaAllocator<Allocator<T>>::value) {
234 if (allocator.arena()->isManaged() ||
235 allocator.arena()->isDevice())
236 {
237 amrex::fill_snan<RunOn::Device>(data, count);
239 return;
240 }
241 }
242#endif
243 amrex::fill_snan<RunOn::Host>(data, count);
244 }
245 }
246 }
247 }
249
251
    // Runtime-configurable growth factor used by the std::vector-like growth
    // strategy in grow_podvector_capacity.
    // NOTE(review): the declaration of `growth_factor` itself is not visible
    // in this extract; it is presumably declared here as an extern variable
    // and defined in AMReX_PODVector.cpp -- confirm.
    namespace VectorGrowthStrategy
    {
        inline Real GetGrowthFactor () { return growth_factor; }
        inline void SetGrowthFactor (Real a_factor);

        namespace detail
        {
            // Validates the user-provided growth factor (defined in the .cpp).
            void ValidateUserInput ();
        }

        // Reads runtime parameters for the growth factor at initialization.
        void Initialize ();
    }
267
    // Map (strategy, requested size, current capacity, element size) to the
    // new capacity PODVector should allocate; always returns >= new_size.
    // NOTE(review): the `case` labels of this switch were lost in extraction;
    // each region below corresponds to one GrowthStrategy enumerator
    // (Poisson-style headroom / exact / std::vector-style geometric growth)
    // -- confirm against the GrowthStrategy declaration.
    inline std::size_t grow_podvector_capacity (GrowthStrategy strategy, std::size_t new_size,
                                                std::size_t old_capacity, std::size_t sizeof_T)
    {
        switch (strategy) {
            // Poisson-style growth: ~10% headroom for small vectors,
            // ~3*sqrt(n) headroom beyond 900 elements.
            if (new_size <= 900) {
                // 3*sqrt(900) = 900/10. Note that we don't need to be precise
                // here. Even if later we change the else block to
                // 4*std::sqrt(s), it's not really an issue to still use 900
                // here.
                return new_size + new_size/10;
            } else {
                return new_size + std::size_t(3*std::sqrt(new_size));
            }
            // Exact growth: allocate precisely what was asked for.
            return new_size;
            // std::vector-style geometric growth with a runtime factor.
            if (old_capacity == 0) {
                // First allocation: at least 64 bytes worth of elements.
                return std::max(64/sizeof_T, new_size);
            } else {
                // NOTE(review): `gf` is presumably obtained from
                // VectorGrowthStrategy::GetGrowthFactor() on a line lost in
                // extraction -- confirm.
                if (amrex::almostEqual(gf, Real(1.5))) {
                    // Integer arithmetic for the default 1.5x factor.
                    return std::max((old_capacity*3+1)/2, new_size);
                } else {
                    return std::max(std::size_t(gf*Real(old_capacity+1)), new_size);
                }
            }
        }
        return 0; // unreachable
    }
298
306 template <class T, class Allocator = std::allocator<T> >
307 class PODVector : public Allocator
308 {
309 // static_assert(std::is_standard_layout<T>(), "PODVector can only hold standard layout types");
310 static_assert(std::is_trivially_copyable<T>(), "PODVector can only hold trivially copyable types");
311 // static_assert(std::is_trivially_default_constructible<T>(), "PODVector can only hold trivial dc types");
312
313 using Allocator::allocate;
314 using Allocator::deallocate;
315
316 public:
317 using value_type = T;
318 using allocator_type = Allocator;
319 using size_type = std::size_t;
320 using difference_type = std::ptrdiff_t;
321
322 using reference = T&;
323 using pointer = T*;
324 using iterator = T*;
325 using reverse_iterator = std::reverse_iterator<iterator>;
326
327 using const_reference = const T&;
328 using const_pointer = const T*;
329 using const_iterator = const T*;
330 using const_reverse_iterator = std::reverse_iterator<const_iterator>;
331
332 private:
333 pointer m_data = nullptr;
334 size_type m_size{0}, m_capacity{0};
335
336 public:
337 constexpr PODVector () noexcept = default;
338
339 constexpr explicit PODVector (const allocator_type& a_allocator) noexcept
340 : Allocator(a_allocator)
341 {}
342
343 explicit PODVector (size_type a_size)
344 : m_size(a_size),
345 m_capacity(grow_podvector_capacity(GrowthStrategy::Poisson, a_size, 0, sizeof(T)))
346 {
347 if (m_capacity != 0) {
348 m_data = allocate(m_capacity);
349 if (a_size != 0) {
350 detail::maybe_init_snan(m_data, m_size, (Allocator const&)(*this));
351 }
352 }
353 }
354
355 PODVector (size_type a_size, const value_type& a_value,
356 const allocator_type& a_allocator = Allocator())
357 : Allocator(a_allocator), m_size(a_size),
358 m_capacity(grow_podvector_capacity(GrowthStrategy::Poisson, a_size, 0, sizeof(T)))
359 {
360 if (m_capacity != 0) {
361 m_data = allocate(m_capacity);
362 if (a_size != 0) {
363 detail::uninitializedFillNImpl(m_data, a_size, a_value,
364 (Allocator const&)(*this));
365 }
366 }
367 }
368
369 PODVector (std::initializer_list<T> a_initializer_list,
370 const allocator_type& a_allocator = Allocator())
371 : Allocator(a_allocator),
372 m_size (a_initializer_list.size()),
373 m_capacity(grow_podvector_capacity(
374 GrowthStrategy::Poisson, a_initializer_list.size(), 0, sizeof(T)))
375 {
376 if (m_capacity != 0) {
377 m_data = allocate(m_capacity);
378 if (a_initializer_list.size() != 0) {
379 detail::initFromListImpl(m_data, a_initializer_list,
380 (Allocator const&)(*this));
381 }
382 }
383 }
384
386 : Allocator(a_vector),
387 m_size (a_vector.size()),
388 m_capacity(a_vector.capacity())
389 {
390 if (m_capacity != 0) {
391 m_data = allocate(m_capacity);
392 if (a_vector.size() != 0) {
393 detail::memCopyImpl(m_data, a_vector.m_data, a_vector.nBytes(),
394 (Allocator const&)(*this),
395 (Allocator const&)a_vector);
396 }
397 }
398 }
399
400 PODVector (PODVector<T, Allocator>&& a_vector) noexcept
401 : Allocator(static_cast<Allocator&&>(a_vector)),
402 m_data(a_vector.m_data),
403 m_size(a_vector.m_size),
404 m_capacity(a_vector.m_capacity)
405 {
406 a_vector.m_data = nullptr;
407 a_vector.m_size = 0;
408 a_vector.m_capacity = 0;
409 }
410
412 {
413 // let's not worry about other allocators
414 static_assert(std::is_same<Allocator,std::allocator<T>>::value ||
416 if (m_data != nullptr) {
417 deallocate(m_data, capacity());
418 }
419 }
420
422 {
423 if (this == &a_vector) { return *this; }
424
425 if ((Allocator const&)(*this) != (Allocator const&)a_vector) {
426 if (m_data != nullptr) {
427 deallocate(m_data, m_capacity);
428 m_data = nullptr;
429 m_size = 0;
430 m_capacity = 0;
431 }
432 (Allocator&)(*this) = (Allocator const&)a_vector;
433 }
434
435 const auto other_size = a_vector.size();
436 if ( other_size > m_capacity ) {
437 clear();
438 reserve_doit(other_size);
439 }
440
441 m_size = other_size;
442 if (m_size > 0) {
443 detail::memCopyImpl(m_data, a_vector.m_data, nBytes(),
444 (Allocator const&)(*this),
445 (Allocator const&)a_vector);
446 }
447 return *this;
448 }
449
451 {
452 if (this == &a_vector) { return *this; }
453
454 if (static_cast<Allocator const&>(a_vector) ==
455 static_cast<Allocator const&>(*this))
456 {
457 if (m_data != nullptr) {
458 deallocate(m_data, m_capacity);
459 }
460
461 m_data = a_vector.m_data;
462 m_size = a_vector.m_size;
463 m_capacity = a_vector.m_capacity;
464
465 a_vector.m_data = nullptr;
466 a_vector.m_size = 0;
467 a_vector.m_capacity = 0;
468 }
469 else
470 {
471 // if the allocators are not the same we give up and copy
472 *this = a_vector; // must copy instead of move
473 }
474
475 return *this;
476 }
477
479 {
480 auto* pos = const_cast<iterator>(a_pos);
481 --m_size;
482 detail::memMoveImpl(pos, a_pos+1, (end() - pos)*sizeof(T),
483 (Allocator const&)(*this));
484 return pos;
485 }
486
488 {
489 size_type num_to_erase = a_last - a_first;
490 auto* first = const_cast<iterator>(a_first);
491 if (num_to_erase > 0) {
492 m_size -= num_to_erase;
493 detail::memMoveImpl(first, a_last, (end() - first)*sizeof(T),
494 (Allocator const&)(*this));
495 }
496 return first;
497 }
498
499 iterator insert (const_iterator a_pos, const T& a_item)
500 {
501 return insert(a_pos, 1, a_item);
502 }
503
504 iterator insert (const_iterator a_pos, size_type a_count, const T& a_value)
505 {
506 auto* pos = const_cast<iterator>(a_pos);
507 if (a_count > 0) {
508 if (m_capacity < m_size + a_count)
509 {
510 std::size_t insert_index = std::distance(m_data, pos);
511 AllocateBufferForInsert(insert_index, a_count);
512 pos = m_data + insert_index;
513 }
514 else
515 {
516 detail::memMoveImpl(pos+a_count, a_pos, (end() - pos) * sizeof(T),
517 (Allocator const&)(*this));
518 m_size += a_count;
519 }
520 detail::uninitializedFillNImpl(pos, a_count, a_value,
521 (Allocator const&)(*this));
522 }
523 return pos;
524 }
525
526 iterator insert (const_iterator a_pos, T&& a_item)
527 {
528 // This is *POD* vector after all
529 return insert(a_pos, 1, std::move(a_item));
530 }
531
533 std::initializer_list<T> a_initializer_list)
534 {
535 auto* pos = const_cast<iterator>(a_pos);
536 size_type count = a_initializer_list.size();
537 if (count > 0) {
538 if (m_capacity < m_size + count)
539 {
540 std::size_t insert_index = std::distance(m_data, pos);
541 AllocateBufferForInsert(insert_index, count);
542 pos = m_data + insert_index;
543 }
544 else
545 {
546 detail::memMoveImpl(pos+count, a_pos, (end() - pos) * sizeof(T),
547 (Allocator const&)(*this));
548 m_size += count;
549 }
550 detail::initFromListImpl(pos, a_initializer_list,
551 (Allocator const&)(*this));
552 }
553 return pos;
554 }
555
556 template <class InputIt, class bar = typename std::iterator_traits<InputIt>::difference_type>
557 iterator insert (const_iterator a_pos, InputIt a_first, InputIt a_last)
558 {
559 auto* pos = const_cast<iterator>(a_pos);
560 size_type count = std::distance(a_first, a_last);
561 if (count > 0) {
562 if (m_capacity < m_size + count)
563 {
564 std::size_t insert_index = std::distance(m_data, pos);
565 AllocateBufferForInsert(insert_index, count);
566 pos = m_data + insert_index;
567 }
568 else
569 {
570 detail::memMoveImpl(pos+count, a_pos, (end() - pos) * sizeof(T),
571 (Allocator const&)(*this));
572 m_size += count;
573 }
574 // Unfortunately we don't know whether InputIt points
575 // GPU or CPU memory. We will assume it's the same as
576 // the vector.
577 detail::fillValuesImpl(pos, a_first, count,
578 (Allocator const&)(*this));
579 }
580 return pos;
581 }
582
583 void assign (size_type a_count, const T& a_value)
584 {
585 if ( a_count > m_capacity ) {
586 clear();
587 reserve(a_count);
588 }
589 m_size = a_count;
590 detail::uninitializedFillNImpl(m_data, a_count, a_value,
591 (Allocator const&)(*this));
592 }
593
594 void assign (std::initializer_list<T> a_initializer_list)
595 {
596 if (a_initializer_list.size() > m_capacity) {
597 clear();
598 reserve(a_initializer_list.size());
599 }
600 m_size = a_initializer_list.size();
601 detail::initFromListImpl(m_data, a_initializer_list,
602 (Allocator const&)(*this));
603 }
604
605 template <class InputIt, class bar = typename std::iterator_traits<InputIt>::difference_type>
606 void assign (InputIt a_first, InputIt a_last)
607 {
608 std::size_t count = std::distance(a_first, a_last);
609 if (count > m_capacity) {
610 clear();
611 reserve(count);
612 }
613 m_size = count;
614 detail::fillValuesImpl(m_data, a_first, count,
615 (Allocator const&)(*this));
616 }
617
622 void assign (const T& a_value)
623 {
624 assign(m_size, a_value);
625 }
626
627 [[nodiscard]] allocator_type get_allocator () const noexcept { return *this; }
628
629 void push_back (const T& a_value)
630 {
631 if (m_size == m_capacity) {
632 auto new_capacity = GetNewCapacityForPush();
633 AllocateBufferForPush(new_capacity);
634 }
635 detail::uninitializedFillNImpl(m_data+m_size, 1, a_value,
636 (Allocator const&)(*this));
637 ++m_size;
638 }
639
640 // Because T is trivial, there is no need for push_back(T&&)
641
642 // Don't have the emplace methods, but not sure how often we use those.
643
644 void pop_back () noexcept { --m_size; }
645
646 void clear () noexcept { m_size = 0; }
647
648 [[nodiscard]] size_type size () const noexcept { return m_size; }
649
650 [[nodiscard]] size_type capacity () const noexcept { return m_capacity; }
651
652 [[nodiscard]] bool empty () const noexcept { return m_size == 0 || m_data == nullptr; } // test m_data to avoid compiler warning
653
654 [[nodiscard]] T& operator[] (size_type a_index) noexcept { return m_data[a_index]; }
655
656 [[nodiscard]] const T& operator[] (size_type a_index) const noexcept { return m_data[a_index]; }
657
658 [[nodiscard]] T& front () noexcept { return *m_data; }
659
660 [[nodiscard]] const T& front () const noexcept { return *m_data; }
661
662 [[nodiscard]] T& back () noexcept { return *(m_data + m_size - 1); }
663
664 [[nodiscard]] const T& back () const noexcept { return *(m_data + m_size - 1); }
665
666 [[nodiscard]] T* data () noexcept { return m_data; }
667
668 [[nodiscard]] const T* data () const noexcept { return m_data; }
669
670 [[nodiscard]] T* dataPtr () noexcept { return m_data; }
671
672 [[nodiscard]] const T* dataPtr () const noexcept { return m_data; }
673
674 [[nodiscard]] iterator begin () noexcept { return m_data; }
675
676 [[nodiscard]] const_iterator begin () const noexcept { return m_data; }
677
678 [[nodiscard]] iterator end () noexcept { return m_data + m_size; }
679
680 [[nodiscard]] const_iterator end () const noexcept { return m_data + m_size; }
681
682 [[nodiscard]] reverse_iterator rbegin () noexcept { return reverse_iterator(end()); }
683
684 [[nodiscard]] const_reverse_iterator rbegin () const noexcept { return const_reverse_iterator(end()); }
685
686 [[nodiscard]] reverse_iterator rend () noexcept { return reverse_iterator(begin()); }
687
688 [[nodiscard]] const_reverse_iterator rend () const noexcept { return const_reverse_iterator(begin()); }
689
690 [[nodiscard]] const_iterator cbegin () const noexcept { return m_data; }
691
692 [[nodiscard]] const_iterator cend () const noexcept { return m_data + m_size; }
693
694 [[nodiscard]] const_reverse_iterator crbegin () const noexcept { return const_reverse_iterator(end()); }
695
696 [[nodiscard]] const_reverse_iterator crend () const noexcept { return const_reverse_iterator(begin()); }
697
728 void resize (size_type a_new_size,
730 {
731 auto old_size = m_size;
732 resize_without_init_snan(a_new_size, strategy);
733 if (old_size < a_new_size) {
734 detail::maybe_init_snan(m_data + old_size,
735 m_size - old_size, (Allocator const&)(*this));
736 }
737 }
738
769 void resize (size_type a_new_size, const T& a_val,
771 {
772 size_type old_size = m_size;
773 resize_without_init_snan(a_new_size, strategy);
774 if (old_size < a_new_size)
775 {
776 detail::uninitializedFillNImpl(m_data + old_size,
777 m_size - old_size, a_val,
778 (Allocator const&)(*this));
779 }
780 }
781
812 {
813 if (m_capacity < a_capacity) {
814 reserve_doit(grow_podvector_capacity(strategy, a_capacity, m_capacity, sizeof(T)));
815 }
816 }
817
819 {
820 if (m_data != nullptr) {
821 if (m_size == 0) {
822 deallocate(m_data, m_capacity);
823 m_data = nullptr;
824 m_capacity = 0;
825 } else if (m_size < m_capacity) {
826 auto* new_data = detail::shrink_in_place(m_data, m_size,
827 (Allocator&)(*this));
828 if (new_data != m_data) {
829 detail::memCopyImpl(new_data, m_data, nBytes(),
830 (Allocator const&)(*this),
831 (Allocator const&)(*this));
832 deallocate(m_data, m_capacity);
833 }
834 m_data = new_data;
835 m_capacity = m_size;
836 }
837 }
838 }
839
840 void swap (PODVector<T, Allocator>& a_vector) noexcept
841 {
842 std::swap(m_data, a_vector.m_data);
843 std::swap(m_size, a_vector.m_size);
844 std::swap(m_capacity, a_vector.m_capacity);
845 std::swap(static_cast<Allocator&>(a_vector), static_cast<Allocator&>(*this));
846 }
847
853 void free_async () noexcept
854 {
855 if (m_data != nullptr) {
857 Gpu::freeAsync(Allocator::arena(), m_data);
858 } else {
859 deallocate(m_data, capacity());
860 }
861 m_data = nullptr;
862 m_size = 0;
863 m_capacity = 0;
864 }
865 }
866
867 private:
868
869 void reserve_doit (size_type a_capacity) {
870 if (m_capacity < a_capacity) {
871 auto fp = detail::allocate_in_place(m_data, a_capacity, a_capacity,
872 (Allocator&)(*this));
873 UpdateDataPtr(fp);
874 }
875 }
876
877 [[nodiscard]] size_type nBytes () const noexcept
878 {
879 return m_size*sizeof(T);
880 }
881
882 // this is where we would change the growth strategy for push_back
883 [[nodiscard]] size_type GetNewCapacityForPush () const noexcept
884 {
886 m_capacity, sizeof(T));
887 }
888
889 void UpdateDataPtr (FatPtr<T> const& fp)
890 {
891 auto* new_data = fp.ptr();
892 auto new_capacity = fp.size();
893 if (m_data != nullptr && m_data != new_data) {
894 if (m_size > 0) {
895 detail::memCopyImpl(new_data, m_data, nBytes(),
896 (Allocator const&)(*this),
897 (Allocator const&)(*this));
898 }
899 deallocate(m_data, capacity());
900 }
901 m_data = new_data;
902 m_capacity = new_capacity;
903 }
904
905 // This is where we play games with the allocator. This function
906 // updates m_data and m_capacity, but not m_size.
907 void AllocateBufferForPush (size_type target_capacity)
908 {
909 auto fp = detail::allocate_in_place(m_data, m_size+1, target_capacity,
910 (Allocator&)(*this));
911 UpdateDataPtr(fp);
912 }
913
914 // This is where we play games with the allocator and the growth
915 // strategy for insert. This function updates m_data, m_size and
916 // m_capacity.
917 void AllocateBufferForInsert (size_type a_index, size_type a_count)
918 {
919 size_type new_size = m_size + a_count;
920 size_type new_capacity = std::max(new_size, GetNewCapacityForPush());
921 auto fp = detail::allocate_in_place(m_data, new_size, new_capacity,
922 (Allocator&)(*this));
923 auto* new_data = fp.ptr();
924 new_capacity = fp.size();
925
926 if (m_data != nullptr) {
927 if (m_data == new_data) {
928 if (m_size > a_index) {
929 detail::memMoveImpl(m_data+a_index+a_count, m_data+a_index,
930 (m_size-a_index)*sizeof(T),
931 (Allocator const&)(*this));
932 }
933 } else {
934 if (m_size > 0) {
935 if (a_index > 0) {
936 detail::memCopyImpl(new_data, m_data, a_index*sizeof(T),
937 (Allocator const&)(*this),
938 (Allocator const&)(*this), false);
939 }
940 if (m_size > a_index) {
941 detail::memCopyImpl(new_data+a_index+a_count, m_data+a_index,
942 (m_size-a_index)*sizeof(T),
943 (Allocator const&)(*this),
944 (Allocator const&)(*this), false);
945 }
947 }
948 deallocate(m_data, m_capacity);
949 }
950 }
951 m_data = new_data;
952 m_size = new_size;
953 m_capacity = new_capacity;
954 }
955
956 void resize_without_init_snan (size_type a_new_size, GrowthStrategy strategy)
957 {
958 if (m_capacity < a_new_size) {
959 reserve(a_new_size, strategy);
960 }
961 m_size = a_new_size;
962 }
963 };
964}
965
966#endif
#define AMREX_ENUM(CLASS,...)
Definition AMReX_Enum.H:208
#define AMREX_EXPORT
Definition AMReX_Extension.H:191
#define AMREX_GPU_DEVICE
Definition AMReX_GpuQualifiers.H:18
virtual void free(void *pt)=0
A pure virtual function for deleting the arena pointed to by pt.
virtual void * alloc(std::size_t sz)=0
static void streamSynchronize() noexcept
Definition AMReX_GpuDevice.cpp:757
Dynamically allocated vector for trivially copyable data.
Definition AMReX_PODVector.H:308
PODVector(std::initializer_list< T > a_initializer_list, const allocator_type &a_allocator=Allocator())
Definition AMReX_PODVector.H:369
iterator insert(const_iterator a_pos, T &&a_item)
Definition AMReX_PODVector.H:526
void reserve(size_type a_capacity, GrowthStrategy strategy=GrowthStrategy::Poisson)
Definition AMReX_PODVector.H:811
const_iterator begin() const noexcept
Definition AMReX_PODVector.H:676
const_iterator cbegin() const noexcept
Definition AMReX_PODVector.H:690
iterator insert(const_iterator a_pos, const T &a_item)
Definition AMReX_PODVector.H:499
PODVector & operator=(const PODVector< T, Allocator > &a_vector)
Definition AMReX_PODVector.H:421
iterator erase(const_iterator a_pos)
Definition AMReX_PODVector.H:478
iterator insert(const_iterator a_pos, size_type a_count, const T &a_value)
Definition AMReX_PODVector.H:504
size_type size() const noexcept
Definition AMReX_PODVector.H:648
const T * const_pointer
Definition AMReX_PODVector.H:328
void swap(PODVector< T, Allocator > &a_vector) noexcept
Definition AMReX_PODVector.H:840
const_reverse_iterator crbegin() const noexcept
Definition AMReX_PODVector.H:694
PODVector(const PODVector< T, Allocator > &a_vector)
Definition AMReX_PODVector.H:385
std::reverse_iterator< iterator > reverse_iterator
Definition AMReX_PODVector.H:325
T * pointer
Definition AMReX_PODVector.H:323
void shrink_to_fit()
Definition AMReX_PODVector.H:818
void assign(const T &a_value)
Definition AMReX_PODVector.H:622
void pop_back() noexcept
Definition AMReX_PODVector.H:644
iterator insert(const_iterator a_pos, std::initializer_list< T > a_initializer_list)
Definition AMReX_PODVector.H:532
iterator insert(const_iterator a_pos, InputIt a_first, InputIt a_last)
Definition AMReX_PODVector.H:557
T * iterator
Definition AMReX_PODVector.H:324
reverse_iterator rend() noexcept
Definition AMReX_PODVector.H:686
PODVector(size_type a_size)
Definition AMReX_PODVector.H:343
allocator_type get_allocator() const noexcept
Definition AMReX_PODVector.H:627
iterator begin() noexcept
Definition AMReX_PODVector.H:674
void resize(size_type a_new_size, GrowthStrategy strategy=GrowthStrategy::Poisson)
Definition AMReX_PODVector.H:728
void free_async() noexcept
Definition AMReX_PODVector.H:853
void assign(std::initializer_list< T > a_initializer_list)
Definition AMReX_PODVector.H:594
iterator end() noexcept
Definition AMReX_PODVector.H:678
T value_type
Definition AMReX_PODVector.H:317
const T * dataPtr() const noexcept
Definition AMReX_PODVector.H:672
constexpr PODVector() noexcept=default
void assign(size_type a_count, const T &a_value)
Definition AMReX_PODVector.H:583
const_reverse_iterator rbegin() const noexcept
Definition AMReX_PODVector.H:684
std::size_t size_type
Definition AMReX_PODVector.H:319
const_reverse_iterator rend() const noexcept
Definition AMReX_PODVector.H:688
void assign(InputIt a_first, InputIt a_last)
Definition AMReX_PODVector.H:606
T & front() noexcept
Definition AMReX_PODVector.H:658
const_reverse_iterator crend() const noexcept
Definition AMReX_PODVector.H:696
reverse_iterator rbegin() noexcept
Definition AMReX_PODVector.H:682
iterator erase(const_iterator a_first, const_iterator a_last)
Definition AMReX_PODVector.H:487
void clear() noexcept
Definition AMReX_PODVector.H:646
size_type capacity() const noexcept
Definition AMReX_PODVector.H:650
const_iterator cend() const noexcept
Definition AMReX_PODVector.H:692
T * dataPtr() noexcept
Definition AMReX_PODVector.H:670
PODVector(PODVector< T, Allocator > &&a_vector) noexcept
Definition AMReX_PODVector.H:400
T & reference
Definition AMReX_PODVector.H:322
const T * data() const noexcept
Definition AMReX_PODVector.H:668
const T * const_iterator
Definition AMReX_PODVector.H:329
~PODVector()
Definition AMReX_PODVector.H:411
const_iterator end() const noexcept
Definition AMReX_PODVector.H:680
void resize(size_type a_new_size, const T &a_val, GrowthStrategy strategy=GrowthStrategy::Poisson)
Definition AMReX_PODVector.H:769
const T & const_reference
Definition AMReX_PODVector.H:327
std::reverse_iterator< const_iterator > const_reverse_iterator
Definition AMReX_PODVector.H:330
T & back() noexcept
Definition AMReX_PODVector.H:662
std::ptrdiff_t difference_type
Definition AMReX_PODVector.H:320
const T & back() const noexcept
Definition AMReX_PODVector.H:664
T & operator[](size_type a_index) noexcept
Definition AMReX_PODVector.H:654
PODVector(size_type a_size, const value_type &a_value, const allocator_type &a_allocator=Allocator())
Definition AMReX_PODVector.H:355
T * data() noexcept
Definition AMReX_PODVector.H:666
const T & front() const noexcept
Definition AMReX_PODVector.H:660
bool empty() const noexcept
Definition AMReX_PODVector.H:652
void push_back(const T &a_value)
Definition AMReX_PODVector.H:629
Allocator allocator_type
Definition AMReX_PODVector.H:318
amrex_real Real
Floating Point Type for Fields.
Definition AMReX_REAL.H:79
Arena * The_Arena()
Definition AMReX_Arena.cpp:783
void dtod_memcpy_async(void *p_d_dst, const void *p_d_src, const std::size_t sz) noexcept
Definition AMReX_GpuDevice.H:329
void freeAsync(Arena *arena, void *mem) noexcept
Definition AMReX_GpuDevice.H:284
void streamSynchronize() noexcept
Definition AMReX_GpuDevice.H:263
void dtoh_memcpy_async(void *p_h, const void *p_d, const std::size_t sz) noexcept
Definition AMReX_GpuDevice.H:315
void htod_memcpy_async(void *p_d, const void *p_h, const std::size_t sz) noexcept
Definition AMReX_GpuDevice.H:301
void Initialize()
Definition AMReX_PODVector.cpp:36
Real growth_factor
Definition AMReX_PODVector.cpp:7
Real GetGrowthFactor()
Definition AMReX_PODVector.H:255
void SetGrowthFactor(Real a_factor)
Definition AMReX_PODVector.cpp:43
Definition AMReX_Amr.cpp:49
__host__ __device__ void ignore_unused(const Ts &...)
This shuts up the compiler about unused variables.
Definition AMReX.H:138
std::enable_if_t< std::is_integral_v< T > > ParallelFor(TypeList< CTOs... > ctos, std::array< int, sizeof...(CTOs)> const &runtime_options, T N, F &&f)
Definition AMReX_CTOParallelForImpl.H:193
bool InitSNaN() noexcept
Definition AMReX.cpp:173
GrowthStrategy
Definition AMReX_PODVector.H:250
__host__ __device__ std::enable_if_t< std::is_floating_point_v< T >, bool > almostEqual(T x, T y, int ulp=2)
Definition AMReX_Algorithm.H:116
std::size_t grow_podvector_capacity(GrowthStrategy strategy, std::size_t new_size, std::size_t old_capacity, std::size_t sizeof_T)
Definition AMReX_PODVector.H:268
Definition AMReX_GpuAllocators.H:167