Block-Structured AMR Software Framework
AMReX_PODVector.H
Go to the documentation of this file.
1 #ifndef AMREX_PODVECTOR_H_
2 #define AMREX_PODVECTOR_H_
3 #include <AMReX_Config.H>
4 
5 #include <AMReX.H>
6 #include <AMReX_Arena.H>
7 #include <AMReX_GpuLaunch.H>
8 #include <AMReX_GpuAllocators.H>
9 #include <AMReX_GpuDevice.H>
10 #include <AMReX_MemPool.H>
11 #include <AMReX_TypeTraits.H>
12 
13 #include <iterator>
14 #include <type_traits>
15 #include <utility>
16 #include <memory>
17 #include <cstring>
18 
19 namespace amrex
20 {
21  namespace detail
22  {
23  template <typename T, typename Size, template<class> class Allocator>
24  FatPtr<T> allocate_in_place ([[maybe_unused]] T* p, [[maybe_unused]] Size nmin, Size nmax,
25  Allocator<T>& allocator)
26  {
27  if constexpr (IsArenaAllocator<Allocator<T>>::value) {
28  return allocator.allocate_in_place(p, nmin, nmax);
29  } else {
30  T* pnew = allocator.allocate(nmax);
31  return {pnew, nmax};
32  }
33  }
34 
35  template <typename T, typename Size, template<class> class Allocator>
36  T* shrink_in_place ([[maybe_unused]] T* p, Size n, Allocator<T>& allocator)
37  {
38  if constexpr (IsArenaAllocator<Allocator<T>>::value) {
39  return allocator.shrink_in_place(p, n);
40  } else {
41  return allocator.allocate(n);
42  }
43  }
44 
45  template <typename T, typename Size, template<class> class Allocator>
46  void uninitializedFillNImpl (T* data, Size count, const T& value,
47  [[maybe_unused]] Allocator<T> const& allocator)
48  {
49 #ifdef AMREX_USE_GPU
50  if constexpr (RunOnGpu<Allocator<T>>::value)
51  {
52  amrex::ParallelFor(count, [=] AMREX_GPU_DEVICE (Size i) noexcept {
53  data[i] = value;
54  });
56  return;
57  }
58  else if constexpr (IsPolymorphicArenaAllocator<Allocator<T>>::value)
59  {
60  if (allocator.arena()->isManaged() ||
61  allocator.arena()->isDevice())
62  {
63  amrex::ParallelFor(count, [=] AMREX_GPU_DEVICE (Size i) noexcept
64  {
65  data[i] = value;
66  });
68  return;
69  }
70  }
71 #endif
72  std::uninitialized_fill_n(data, count, value);
73  }
74 
75  template <typename T, template<class> class Allocator>
76  void initFromListImpl (T* data, std::initializer_list<T> const& list,
77  [[maybe_unused]] Allocator<T> const & allocator)
78  {
79  auto count = list.size() * sizeof(T);
80 #ifdef AMREX_USE_GPU
81  if constexpr (RunOnGpu<Allocator<T>>::value)
82  {
83  Gpu::htod_memcpy_async(data, std::data(list), count);
85  return;
86  }
87  else if constexpr (IsPolymorphicArenaAllocator<Allocator<T>>::value)
88  {
89  if (allocator.arena()->isManaged() ||
90  allocator.arena()->isDevice())
91  {
92  Gpu::htod_memcpy_async(data, std::data(list), count);
94  return;
95  }
96  }
97 #endif
98  std::memcpy(data, std::data(list), count);
99  }
100 
101  template <typename T, typename Size, template<class> class Allocator>
102  void fillValuesImpl (T* dst, T const* src, Size count,
103  [[maybe_unused]] Allocator<T> const& allocator)
104  {
105 #ifdef AMREX_USE_GPU
106  if constexpr (RunOnGpu<Allocator<T>>::value)
107  {
108  amrex::ParallelFor(count, [=] AMREX_GPU_DEVICE (Size i) noexcept {
109  dst[i] = src[i];
110  });
112  return;
113  }
114  else if constexpr (IsPolymorphicArenaAllocator<Allocator<T>>::value)
115  {
116  if (allocator.arena()->isManaged() ||
117  allocator.arena()->isDevice())
118  {
119  amrex::ParallelFor(count, [=] AMREX_GPU_DEVICE (Size i) noexcept
120  {
121  dst[i] = src[i];
122  });
124  return;
125  }
126  }
127 #else
128  static_assert(RunOnGpu<Allocator<T>>::value == false);
129 #endif
130  if constexpr (! RunOnGpu<Allocator<T>>::value) {
131  for (Size i = 0; i < count; ++i) { dst[i] = src[i]; }
132  }
133  }
134 
135  template <typename Allocator>
136  void memCopyImpl (void* dst, const void* src, std::size_t count,
137  [[maybe_unused]] Allocator const& dst_allocator,
138  [[maybe_unused]] Allocator const& src_allocator,
139  [[maybe_unused]] bool sync = true)
140  {
141 #ifdef AMREX_USE_GPU
142  if constexpr (RunOnGpu<Allocator>::value)
143  {
144  Gpu::dtod_memcpy_async(dst, src, count);
145  if (sync) { Gpu::streamSynchronize(); }
146  return;
147  }
149  {
150  bool dst_on_device = dst_allocator.arena()->isManaged() ||
151  dst_allocator.arena()->isDevice();
152  bool src_on_device = src_allocator.arena()->isManaged() ||
153  src_allocator.arena()->isDevice();
154  if (dst_on_device || src_on_device)
155  {
156  if (dst_on_device && src_on_device) {
157  Gpu::dtod_memcpy_async(dst, src, count);
158  } else if (dst_on_device) {
159  Gpu::htod_memcpy_async(dst, src, count);
160  } else {
161  Gpu::dtoh_memcpy_async(dst, src, count);
162  }
163  if (sync) { Gpu::streamSynchronize(); }
164  return;
165  }
166  }
167 #endif
168  std::memcpy(dst, src, count);
169  }
170 
171  template <typename Allocator>
172  void memMoveImpl (void* dst, const void* src, std::size_t count,
173  [[maybe_unused]] Allocator const& allocator)
174  {
175 #ifdef AMREX_USE_GPU
176  if constexpr (RunOnGpu<Allocator>::value)
177  {
178  auto* tmp = The_Arena()->alloc(count);
179  Gpu::dtod_memcpy_async(tmp, src, count);
180  Gpu::dtod_memcpy_async(dst, tmp, count);
182  The_Arena()->free(tmp);
183  return;
184  }
186  {
187  if (allocator.arena()->isManaged() ||
188  allocator.arena()->isDevice())
189  {
190  auto* tmp = The_Arena()->alloc(count);
191  Gpu::dtod_memcpy_async(tmp, src, count);
192  Gpu::dtod_memcpy_async(dst, tmp, count);
194  The_Arena()->free(tmp);
195  return;
196  }
197  }
198 #endif
199  std::memmove(dst, src, count);
200  }
201 
202  template <typename T, typename Size, template<class> class Allocator>
203  void maybe_init_snan (T* data, Size count, Allocator<T> const& allocator)
204  {
205  amrex::ignore_unused(data, count, allocator);
206  if constexpr (std::is_same_v<float, std::remove_cv_t<T>> ||
207  std::is_same_v<double, std::remove_cv_t<T>>) {
208  if (amrex::InitSNaN()) {
209 #ifdef AMREX_USE_GPU
210  if constexpr (RunOnGpu<Allocator<T>>::value) {
211  amrex::fill_snan<RunOn::Device>(data, count);
213  return;
214  } else if constexpr (IsPolymorphicArenaAllocator<Allocator<T>>::value) {
215  if (allocator.arena()->isManaged() ||
216  allocator.arena()->isDevice())
217  {
218  amrex::fill_snan<RunOn::Device>(data, count);
220  return;
221  }
222  }
223 #endif
224  amrex::fill_snan<RunOn::Host>(data, count);
225  }
226  }
227  }
228  }
229 
    //! Runtime-configurable growth policy shared by all PODVector instances.
    namespace VectorGrowthStrategy
    {
        //! Capacity multiplier applied when a PODVector must reallocate
        //! (defined in AMReX_PODVector.cpp).
        extern AMREX_EXPORT Real growth_factor;
        inline Real GetGrowthFactor () { return growth_factor; }
        //! Set the growth factor (defined in AMReX_PODVector.cpp).
        inline void SetGrowthFactor (Real a_factor);

        namespace detail
        {
            //! Sanity-check the user-supplied growth factor
            //! (defined in AMReX_PODVector.cpp).
            void ValidateUserInput ();
        }

        //! One-time setup of the growth strategy at AMReX initialization
        //! (defined in AMReX_PODVector.cpp).
        void Initialize ();
    }
243 
244  template <class T, class Allocator = std::allocator<T> >
245  class PODVector : public Allocator
246  {
247  // static_assert(std::is_standard_layout<T>(), "PODVector can only hold standard layout types");
248  static_assert(std::is_trivially_copyable<T>(), "PODVector can only hold trivially copyable types");
249  // static_assert(std::is_trivially_default_constructible<T>(), "PODVector can only hold trivial dc types");
250 
251  using Allocator::allocate;
252  using Allocator::deallocate;
253 
254  public:
255  using value_type = T;
256  using allocator_type = Allocator;
257  using size_type = std::size_t;
258  using difference_type = std::ptrdiff_t;
259 
260  using reference = T&;
261  using pointer = T*;
262  using iterator = T*;
263  using reverse_iterator = std::reverse_iterator<iterator>;
264 
265  using const_reference = const T&;
266  using const_pointer = const T*;
267  using const_iterator = const T*;
268  using const_reverse_iterator = std::reverse_iterator<const_iterator>;
269 
270  private:
271  pointer m_data = nullptr;
273 
274  public:
275  constexpr PODVector () noexcept = default;
276 
277  constexpr explicit PODVector (const allocator_type& a_allocator) noexcept
278  : Allocator(a_allocator)
279  {}
280 
281  explicit PODVector (size_type a_size)
282  : m_size(a_size), m_capacity(a_size)
283  {
284  if (a_size != 0) {
285  m_data = allocate(m_size);
286  detail::maybe_init_snan(m_data, m_size, (Allocator const&)(*this));
287  }
288  }
289 
290  PODVector (size_type a_size, const value_type& a_value,
291  const allocator_type& a_allocator = Allocator())
292  : Allocator(a_allocator), m_size(a_size), m_capacity(a_size)
293  {
294  if (a_size != 0) {
295  m_data = allocate(m_size);
296  detail::uninitializedFillNImpl(m_data, a_size, a_value,
297  (Allocator const&)(*this));
298  }
299  }
300 
301  PODVector (std::initializer_list<T> a_initializer_list,
302  const allocator_type& a_allocator = Allocator())
303  : Allocator(a_allocator),
304  m_size (a_initializer_list.size()),
305  m_capacity(a_initializer_list.size())
306  {
307  if (a_initializer_list.size() != 0) {
308  m_data = allocate(m_size);
309  detail::initFromListImpl(m_data, a_initializer_list,
310  (Allocator const&)(*this));
311  }
312  }
313 
315  : Allocator(a_vector),
316  m_size (a_vector.size()),
317  m_capacity(a_vector.size())
318  {
319  if (a_vector.size() != 0) {
320  m_data = allocate(m_size);
321  detail::memCopyImpl(m_data, a_vector.m_data, a_vector.nBytes(),
322  (Allocator const&)(*this),
323  (Allocator const&)a_vector);
324  }
325  }
326 
327  PODVector (PODVector<T, Allocator>&& a_vector) noexcept
328  : Allocator(static_cast<Allocator&&>(a_vector)),
329  m_data(a_vector.m_data),
330  m_size(a_vector.m_size),
331  m_capacity(a_vector.m_capacity)
332  {
333  a_vector.m_data = nullptr;
334  a_vector.m_size = 0;
335  a_vector.m_capacity = 0;
336  }
337 
339  {
340  // let's not worry about other allocators
341  static_assert(std::is_same<Allocator,std::allocator<T>>::value ||
343  if (m_data != nullptr) {
344  deallocate(m_data, capacity());
345  }
346  }
347 
349  {
350  if (this == &a_vector) { return *this; }
351 
352  if ((Allocator const&)(*this) != (Allocator const&)a_vector) {
353  if (m_data != nullptr) {
354  deallocate(m_data, m_capacity);
355  m_data = nullptr;
356  m_size = 0;
357  m_capacity = 0;
358  }
359  (Allocator&)(*this) = (Allocator const&)a_vector;
360  }
361 
362  const auto other_size = a_vector.size();
363  if ( other_size > m_capacity ) {
364  clear();
365  reserve(other_size);
366  }
367 
368  m_size = other_size;
369  if (m_size > 0) {
371  (Allocator const&)(*this),
372  (Allocator const&)a_vector);
373  }
374  return *this;
375  }
376 
378  {
379  if (this == &a_vector) { return *this; }
380 
381  if (static_cast<Allocator const&>(a_vector) ==
382  static_cast<Allocator const&>(*this))
383  {
384  if (m_data != nullptr) {
385  deallocate(m_data, m_capacity);
386  }
387 
388  m_data = a_vector.m_data;
389  m_size = a_vector.m_size;
390  m_capacity = a_vector.m_capacity;
391 
392  a_vector.m_data = nullptr;
393  a_vector.m_size = 0;
394  a_vector.m_capacity = 0;
395  }
396  else
397  {
398  // if the allocators are not the same we give up and copy
399  *this = a_vector; // must copy instead of move
400  }
401 
402  return *this;
403  }
404 
406  {
407  auto* pos = const_cast<iterator>(a_pos);
408  --m_size;
409  detail::memMoveImpl(pos, a_pos+1, (end() - pos)*sizeof(T),
410  (Allocator const&)(*this));
411  return pos;
412  }
413 
415  {
416  size_type num_to_erase = a_last - a_first;
417  auto* first = const_cast<iterator>(a_first);
418  if (num_to_erase > 0) {
419  m_size -= num_to_erase;
420  detail::memMoveImpl(first, a_last, (end() - first)*sizeof(T),
421  (Allocator const&)(*this));
422  }
423  return first;
424  }
425 
426  iterator insert (const_iterator a_pos, const T& a_item)
427  {
428  return insert(a_pos, 1, a_item);
429  }
430 
431  iterator insert (const_iterator a_pos, size_type a_count, const T& a_value)
432  {
433  auto* pos = const_cast<iterator>(a_pos);
434  if (a_count > 0) {
435  if (m_capacity < m_size + a_count)
436  {
437  std::size_t insert_index = std::distance(m_data, pos);
438  AllocateBufferForInsert(insert_index, a_count);
439  pos = m_data + insert_index;
440  }
441  else
442  {
443  detail::memMoveImpl(pos+a_count, a_pos, (end() - pos) * sizeof(T),
444  (Allocator const&)(*this));
445  m_size += a_count;
446  }
447  detail::uninitializedFillNImpl(pos, a_count, a_value,
448  (Allocator const&)(*this));
449  }
450  return pos;
451  }
452 
453  iterator insert (const_iterator a_pos, T&& a_item)
454  {
455  // This is *POD* vector after all
456  return insert(a_pos, 1, std::move(a_item));
457  }
458 
460  std::initializer_list<T> a_initializer_list)
461  {
462  auto* pos = const_cast<iterator>(a_pos);
463  size_type count = a_initializer_list.size();
464  if (count > 0) {
465  if (m_capacity < m_size + count)
466  {
467  std::size_t insert_index = std::distance(m_data, pos);
468  AllocateBufferForInsert(insert_index, count);
469  pos = m_data + insert_index;
470  }
471  else
472  {
473  detail::memMoveImpl(pos+count, a_pos, (end() - pos) * sizeof(T),
474  (Allocator const&)(*this));
475  m_size += count;
476  }
477  detail::initFromListImpl(pos, a_initializer_list,
478  (Allocator const&)(*this));
479  }
480  return pos;
481  }
482 
483  template <class InputIt, class bar = typename std::iterator_traits<InputIt>::difference_type>
484  iterator insert (const_iterator a_pos, InputIt a_first, InputIt a_last)
485  {
486  auto* pos = const_cast<iterator>(a_pos);
487  size_type count = std::distance(a_first, a_last);
488  if (count > 0) {
489  if (m_capacity < m_size + count)
490  {
491  std::size_t insert_index = std::distance(m_data, pos);
492  AllocateBufferForInsert(insert_index, count);
493  pos = m_data + insert_index;
494  }
495  else
496  {
497  detail::memMoveImpl(pos+count, a_pos, (end() - pos) * sizeof(T),
498  (Allocator const&)(*this));
499  m_size += count;
500  }
501  // Unfortunately we don't know whether InputIt points
502  // GPU or CPU memory. We will assume it's the same as
503  // the vector.
504  detail::fillValuesImpl(pos, a_first, count,
505  (Allocator const&)(*this));
506  }
507  return pos;
508  }
509 
510  void assign (size_type a_count, const T& a_value)
511  {
512  if ( a_count > m_capacity ) {
513  clear();
514  reserve(a_count);
515  }
516  m_size = a_count;
517  detail::uninitializedFillNImpl(m_data, a_count, a_value,
518  (Allocator const&)(*this));
519  }
520 
521  void assign (std::initializer_list<T> a_initializer_list)
522  {
523  if (a_initializer_list.size() > m_capacity) {
524  clear();
525  reserve(a_initializer_list.size());
526  }
527  m_size = a_initializer_list.size();
528  detail::initFromListImpl(m_data, a_initializer_list,
529  (Allocator const&)(*this));
530  }
531 
532  template <class InputIt, class bar = typename std::iterator_traits<InputIt>::difference_type>
533  void assign (InputIt a_first, InputIt a_last)
534  {
535  std::size_t count = std::distance(a_first, a_last);
536  if (count > m_capacity) {
537  clear();
538  reserve(count);
539  }
540  m_size = count;
541  detail::fillValuesImpl(m_data, a_first, count,
542  (Allocator const&)(*this));
543  }
544 
549  void assign (const T& a_value)
550  {
551  assign(m_size, a_value);
552  }
553 
554  [[nodiscard]] allocator_type get_allocator () const noexcept { return *this; }
555 
556  void push_back (const T& a_value)
557  {
558  if (m_size == m_capacity) {
559  auto new_capacity = GetNewCapacityForPush();
560  AllocateBufferForPush(new_capacity);
561  }
563  (Allocator const&)(*this));
564  ++m_size;
565  }
566 
567  // Because T is trivial, there is no need for push_back(T&&)
568 
569  // Don't have the emplace methods, but not sure how often we use those.
570 
571  void pop_back () noexcept { --m_size; }
572 
573  void clear () noexcept { m_size = 0; }
574 
575  [[nodiscard]] size_type size () const noexcept { return m_size; }
576 
577  [[nodiscard]] size_type capacity () const noexcept { return m_capacity; }
578 
579  [[nodiscard]] bool empty () const noexcept { return m_size == 0; }
580 
581  [[nodiscard]] T& operator[] (size_type a_index) noexcept { return m_data[a_index]; }
582 
583  [[nodiscard]] const T& operator[] (size_type a_index) const noexcept { return m_data[a_index]; }
584 
585  [[nodiscard]] T& front () noexcept { return *m_data; }
586 
587  [[nodiscard]] const T& front () const noexcept { return *m_data; }
588 
589  [[nodiscard]] T& back () noexcept { return *(m_data + m_size - 1); }
590 
591  [[nodiscard]] const T& back () const noexcept { return *(m_data + m_size - 1); }
592 
593  [[nodiscard]] T* data () noexcept { return m_data; }
594 
595  [[nodiscard]] const T* data () const noexcept { return m_data; }
596 
597  [[nodiscard]] T* dataPtr () noexcept { return m_data; }
598 
599  [[nodiscard]] const T* dataPtr () const noexcept { return m_data; }
600 
601  [[nodiscard]] iterator begin () noexcept { return m_data; }
602 
603  [[nodiscard]] const_iterator begin () const noexcept { return m_data; }
604 
605  [[nodiscard]] iterator end () noexcept { return m_data + m_size; }
606 
607  [[nodiscard]] const_iterator end () const noexcept { return m_data + m_size; }
608 
609  [[nodiscard]] reverse_iterator rbegin () noexcept { return reverse_iterator(end()); }
610 
611  [[nodiscard]] const_reverse_iterator rbegin () const noexcept { return const_reverse_iterator(end()); }
612 
613  [[nodiscard]] reverse_iterator rend () noexcept { return reverse_iterator(begin()); }
614 
615  [[nodiscard]] const_reverse_iterator rend () const noexcept { return const_reverse_iterator(begin()); }
616 
617  [[nodiscard]] const_iterator cbegin () const noexcept { return m_data; }
618 
619  [[nodiscard]] const_iterator cend () const noexcept { return m_data + m_size; }
620 
621  [[nodiscard]] const_reverse_iterator crbegin () const noexcept { return const_reverse_iterator(end()); }
622 
623  [[nodiscard]] const_reverse_iterator crend () const noexcept { return const_reverse_iterator(begin()); }
624 
625  void resize (size_type a_new_size)
626  {
627  auto old_size = m_size;
628  resize_without_init_snan(a_new_size);
629  if (old_size < a_new_size) {
630  detail::maybe_init_snan(m_data + old_size,
631  m_size - old_size, (Allocator const&)(*this));
632  }
633  }
634 
635  void resize (size_type a_new_size, const T& a_val)
636  {
637  size_type old_size = m_size;
638  resize_without_init_snan(a_new_size);
639  if (old_size < a_new_size)
640  {
642  m_size - old_size, a_val,
643  (Allocator const&)(*this));
644  }
645  }
646 
647  void reserve (size_type a_capacity)
648  {
649  if (m_capacity < a_capacity) {
650  auto fp = detail::allocate_in_place(m_data, a_capacity, a_capacity,
651  (Allocator&)(*this));
652  UpdateDataPtr(fp);
653  }
654  }
655 
657  {
658  if (m_data != nullptr) {
659  if (m_size == 0) {
660  deallocate(m_data, m_capacity);
661  m_data = nullptr;
662  m_capacity = 0;
663  } else if (m_size < m_capacity) {
664  auto* new_data = detail::shrink_in_place(m_data, m_size,
665  (Allocator&)(*this));
666  if (new_data != m_data) {
667  detail::memCopyImpl(new_data, m_data, nBytes(),
668  (Allocator const&)(*this),
669  (Allocator const&)(*this));
670  deallocate(m_data, m_capacity);
671  }
672  m_capacity = m_size;
673  }
674  }
675  }
676 
677  void swap (PODVector<T, Allocator>& a_vector) noexcept
678  {
679  std::swap(m_data, a_vector.m_data);
680  std::swap(m_size, a_vector.m_size);
681  std::swap(m_capacity, a_vector.m_capacity);
682  std::swap(static_cast<Allocator&>(a_vector), static_cast<Allocator&>(*this));
683  }
684 
685  private:
686 
687  [[nodiscard]] size_type nBytes () const noexcept
688  {
689  return m_size*sizeof(T);
690  }
691 
692  // this is where we would change the growth strategy for push_back
693  [[nodiscard]] size_type GetNewCapacityForPush () const noexcept
694  {
695  if (m_capacity == 0) {
696  return std::max(64/sizeof(T), size_type(1));
697  } else {
698  Real const gf = VectorGrowthStrategy::GetGrowthFactor();
699  if (amrex::almostEqual(gf, Real(1.5))) {
700  return (m_capacity*3+1)/2;
701  } else {
702  return size_type(gf*Real(m_capacity+1));
703  }
704  }
705  }
706 
707  void UpdateDataPtr (FatPtr<T> const& fp)
708  {
709  auto* new_data = fp.ptr();
710  auto new_capacity = fp.size();
711  if (m_data != nullptr && m_data != new_data) {
712  if (m_size > 0) {
713  detail::memCopyImpl(new_data, m_data, nBytes(),
714  (Allocator const&)(*this),
715  (Allocator const&)(*this));
716  }
717  deallocate(m_data, capacity());
718  }
719  m_data = new_data;
720  m_capacity = new_capacity;
721  }
722 
723  // This is where we play games with the allocator. This function
724  // updates m_data and m_capacity, but not m_size.
725  void AllocateBufferForPush (size_type target_capacity)
726  {
727  auto fp = detail::allocate_in_place(m_data, m_size+1, target_capacity,
728  (Allocator&)(*this));
729  UpdateDataPtr(fp);
730  }
731 
732  // This is where we play games with the allocator and the growth
733  // strategy for insert. This function updates m_data, m_size and
734  // m_capacity.
736  {
737  size_type new_size = m_size + a_count;
738  size_type new_capacity = std::max(new_size, GetNewCapacityForPush());
739  auto fp = detail::allocate_in_place(m_data, new_size, new_capacity,
740  (Allocator&)(*this));
741  auto* new_data = fp.ptr();
742  new_capacity = fp.size();
743 
744  if (m_data != nullptr) {
745  if (m_data == new_data) {
746  if (m_size > a_index) {
747  detail::memMoveImpl(m_data+a_index+a_count, m_data+a_index,
748  (m_size-a_index)*sizeof(T),
749  (Allocator const&)(*this));
750  }
751  } else {
752  if (m_size > 0) {
753  if (a_index > 0) {
754  detail::memCopyImpl(new_data, m_data, a_index*sizeof(T),
755  (Allocator const&)(*this),
756  (Allocator const&)(*this), false);
757  }
758  if (m_size > a_index) {
759  detail::memCopyImpl(new_data+a_index+a_count, m_data+a_index,
760  (m_size-a_index)*sizeof(T),
761  (Allocator const&)(*this),
762  (Allocator const&)(*this), false);
763  }
765  }
766  deallocate(m_data, m_capacity);
767  }
768  }
769  m_data = new_data;
770  m_size = new_size;
771  m_capacity = new_capacity;
772  }
773 
775  {
776  if (m_capacity < a_new_size) {
777  reserve(a_new_size);
778  }
779  m_size = a_new_size;
780  }
781  };
782 }
783 
784 #endif
#define AMREX_EXPORT
Definition: AMReX_Extension.H:191
#define AMREX_GPU_DEVICE
Definition: AMReX_GpuQualifiers.H:18
virtual void free(void *pt)=0
A pure virtual function for deleting the arena pointed to by pt.
virtual void * alloc(std::size_t sz)=0
static void streamSynchronize() noexcept
Definition: AMReX_GpuDevice.cpp:641
Definition: AMReX_PODVector.H:246
PODVector(std::initializer_list< T > a_initializer_list, const allocator_type &a_allocator=Allocator())
Definition: AMReX_PODVector.H:301
iterator insert(const_iterator a_pos, T &&a_item)
Definition: AMReX_PODVector.H:453
const_iterator begin() const noexcept
Definition: AMReX_PODVector.H:603
void resize_without_init_snan(size_type a_new_size)
Definition: AMReX_PODVector.H:774
const_iterator cbegin() const noexcept
Definition: AMReX_PODVector.H:617
iterator insert(const_iterator a_pos, const T &a_item)
Definition: AMReX_PODVector.H:426
iterator erase(const_iterator a_pos)
Definition: AMReX_PODVector.H:405
void reserve(size_type a_capacity)
Definition: AMReX_PODVector.H:647
iterator insert(const_iterator a_pos, size_type a_count, const T &a_value)
Definition: AMReX_PODVector.H:431
size_type size() const noexcept
Definition: AMReX_PODVector.H:575
const T * const_pointer
Definition: AMReX_PODVector.H:266
void swap(PODVector< T, Allocator > &a_vector) noexcept
Definition: AMReX_PODVector.H:677
const T & front() const noexcept
Definition: AMReX_PODVector.H:587
void UpdateDataPtr(FatPtr< T > const &fp)
Definition: AMReX_PODVector.H:707
const_reverse_iterator crbegin() const noexcept
Definition: AMReX_PODVector.H:621
T & back() noexcept
Definition: AMReX_PODVector.H:589
PODVector(const PODVector< T, Allocator > &a_vector)
Definition: AMReX_PODVector.H:314
std::reverse_iterator< iterator > reverse_iterator
Definition: AMReX_PODVector.H:263
T * pointer
Definition: AMReX_PODVector.H:261
void shrink_to_fit()
Definition: AMReX_PODVector.H:656
void assign(const T &a_value)
Definition: AMReX_PODVector.H:549
void pop_back() noexcept
Definition: AMReX_PODVector.H:571
size_type nBytes() const noexcept
Definition: AMReX_PODVector.H:687
iterator insert(const_iterator a_pos, std::initializer_list< T > a_initializer_list)
Definition: AMReX_PODVector.H:459
iterator insert(const_iterator a_pos, InputIt a_first, InputIt a_last)
Definition: AMReX_PODVector.H:484
T * data() noexcept
Definition: AMReX_PODVector.H:593
T & operator[](size_type a_index) noexcept
Definition: AMReX_PODVector.H:581
T * iterator
Definition: AMReX_PODVector.H:262
void AllocateBufferForPush(size_type target_capacity)
Definition: AMReX_PODVector.H:725
size_type GetNewCapacityForPush() const noexcept
Definition: AMReX_PODVector.H:693
reverse_iterator rend() noexcept
Definition: AMReX_PODVector.H:613
PODVector(size_type a_size)
Definition: AMReX_PODVector.H:281
allocator_type get_allocator() const noexcept
Definition: AMReX_PODVector.H:554
iterator begin() noexcept
Definition: AMReX_PODVector.H:601
const T * data() const noexcept
Definition: AMReX_PODVector.H:595
PODVector & operator=(const PODVector< T, Allocator > &a_vector)
Definition: AMReX_PODVector.H:348
void assign(std::initializer_list< T > a_initializer_list)
Definition: AMReX_PODVector.H:521
iterator end() noexcept
Definition: AMReX_PODVector.H:605
T value_type
Definition: AMReX_PODVector.H:255
constexpr PODVector() noexcept=default
void assign(size_type a_count, const T &a_value)
Definition: AMReX_PODVector.H:510
const_reverse_iterator rbegin() const noexcept
Definition: AMReX_PODVector.H:611
std::size_t size_type
Definition: AMReX_PODVector.H:257
const T & back() const noexcept
Definition: AMReX_PODVector.H:591
const_reverse_iterator rend() const noexcept
Definition: AMReX_PODVector.H:615
size_type m_size
Definition: AMReX_PODVector.H:272
void assign(InputIt a_first, InputIt a_last)
Definition: AMReX_PODVector.H:533
pointer m_data
Definition: AMReX_PODVector.H:271
const_reverse_iterator crend() const noexcept
Definition: AMReX_PODVector.H:623
reverse_iterator rbegin() noexcept
Definition: AMReX_PODVector.H:609
iterator erase(const_iterator a_first, const_iterator a_last)
Definition: AMReX_PODVector.H:414
void clear() noexcept
Definition: AMReX_PODVector.H:573
size_type capacity() const noexcept
Definition: AMReX_PODVector.H:577
const_iterator cend() const noexcept
Definition: AMReX_PODVector.H:619
size_type m_capacity
Definition: AMReX_PODVector.H:272
PODVector(PODVector< T, Allocator > &&a_vector) noexcept
Definition: AMReX_PODVector.H:327
T & reference
Definition: AMReX_PODVector.H:260
const T * const_iterator
Definition: AMReX_PODVector.H:267
void resize(size_type a_new_size)
Definition: AMReX_PODVector.H:625
~PODVector()
Definition: AMReX_PODVector.H:338
const_iterator end() const noexcept
Definition: AMReX_PODVector.H:607
const T & const_reference
Definition: AMReX_PODVector.H:265
std::reverse_iterator< const_iterator > const_reverse_iterator
Definition: AMReX_PODVector.H:268
const T * dataPtr() const noexcept
Definition: AMReX_PODVector.H:599
T & front() noexcept
Definition: AMReX_PODVector.H:585
void resize(size_type a_new_size, const T &a_val)
Definition: AMReX_PODVector.H:635
std::ptrdiff_t difference_type
Definition: AMReX_PODVector.H:258
T * dataPtr() noexcept
Definition: AMReX_PODVector.H:597
PODVector(size_type a_size, const value_type &a_value, const allocator_type &a_allocator=Allocator())
Definition: AMReX_PODVector.H:290
bool empty() const noexcept
Definition: AMReX_PODVector.H:579
void AllocateBufferForInsert(size_type a_index, size_type a_count)
Definition: AMReX_PODVector.H:735
void push_back(const T &a_value)
Definition: AMReX_PODVector.H:556
Allocator allocator_type
Definition: AMReX_PODVector.H:256
void dtod_memcpy_async(void *p_d_dst, const void *p_d_src, const std::size_t sz) noexcept
Definition: AMReX_GpuDevice.H:279
void streamSynchronize() noexcept
Definition: AMReX_GpuDevice.H:237
void dtoh_memcpy_async(void *p_h, const void *p_d, const std::size_t sz) noexcept
Definition: AMReX_GpuDevice.H:265
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void * memcpy(void *dest, const void *src, std::size_t count)
Definition: AMReX_GpuUtility.H:214
void htod_memcpy_async(void *p_d, const void *p_h, const std::size_t sz) noexcept
Definition: AMReX_GpuDevice.H:251
void ValidateUserInput()
Definition: AMReX_PODVector.cpp:15
Real growth_factor
Definition: AMReX_PODVector.cpp:7
void Initialize()
Definition: AMReX_PODVector.cpp:34
Real GetGrowthFactor()
Definition: AMReX_PODVector.H:233
void SetGrowthFactor(Real a_factor)
Definition: AMReX_PODVector.cpp:41
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void swap(T &a, T &b) noexcept
Definition: AMReX_algoim_K.H:113
@ max
Definition: AMReX_ParallelReduce.H:17
FatPtr< T > allocate_in_place([[maybe_unused]] T *p, [[maybe_unused]] Size nmin, Size nmax, Allocator< T > &allocator)
Definition: AMReX_PODVector.H:24
void memCopyImpl(void *dst, const void *src, std::size_t count, [[maybe_unused]] Allocator const &dst_allocator, [[maybe_unused]] Allocator const &src_allocator, [[maybe_unused]] bool sync=true)
Definition: AMReX_PODVector.H:136
void uninitializedFillNImpl(T *data, Size count, const T &value, [[maybe_unused]] Allocator< T > const &allocator)
Definition: AMReX_PODVector.H:46
void initFromListImpl(T *data, std::initializer_list< T > const &list, [[maybe_unused]] Allocator< T > const &allocator)
Definition: AMReX_PODVector.H:76
T * shrink_in_place([[maybe_unused]] T *p, Size n, Allocator< T > &allocator)
Definition: AMReX_PODVector.H:36
void maybe_init_snan(T *data, Size count, Allocator< T > const &allocator)
Definition: AMReX_PODVector.H:203
void fillValuesImpl(T *dst, T const *src, Size count, [[maybe_unused]] Allocator< T > const &allocator)
Definition: AMReX_PODVector.H:102
void memMoveImpl(void *dst, const void *src, std::size_t count, [[maybe_unused]] Allocator const &allocator)
Definition: AMReX_PODVector.H:172
Definition: AMReX_Amr.cpp:49
std::enable_if_t< std::is_integral_v< T > > ParallelFor(TypeList< CTOs... > ctos, std::array< int, sizeof...(CTOs)> const &runtime_options, T N, F &&f)
Definition: AMReX_CTOParallelForImpl.H:200
bool InitSNaN() noexcept
Definition: AMReX.cpp:168
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void ignore_unused(const Ts &...)
This shuts up the compiler about unused variables.
Definition: AMReX.H:111
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE std::enable_if_t< std::is_floating_point_v< T >, bool > almostEqual(T x, T y, int ulp=2)
Definition: AMReX_Algorithm.H:93
Arena * The_Arena()
Definition: AMReX_Arena.cpp:609
Definition: AMReX_FabArrayCommI.H:841
Definition: AMReX_GpuAllocators.H:24
constexpr T * ptr() const noexcept
Definition: AMReX_GpuAllocators.H:27
constexpr std::size_t size() const noexcept
Definition: AMReX_GpuAllocators.H:28
Definition: AMReX_GpuAllocators.H:161
Definition: AMReX_GpuAllocators.H:172
Definition: AMReX_GpuAllocators.H:158