AMReX_Array4.H
1#ifndef AMREX_ARRAY4_H_
2#define AMREX_ARRAY4_H_
3
4#include <AMReX_Config.H>
5
6#include <AMReX.H>
7#include <AMReX_IntVect.H>
8#include <AMReX_GpuPrint.H>
10
11#include <iostream>
12#include <sstream>
13
15#define AMREX_ARRAY4_INDEX_ASSERT(i,j,k,n) \
16 if ((i)<begin.vect[0] || (i)>=end.vect[0] || \
17 (j)<begin.vect[1] || (j)>=end.vect[1] || \
18 (k)<begin.vect[2] || (k)>=end.vect[2] || \
19 (n)<0 || (n)>=end.vect[3]) \
20 { \
21 index_assert_print_error_message(i,j,k,n); \
22 }
24
25namespace amrex {
26
27 template<int dim>
28 class BoxND;
29
30 template <typename T>
31 struct CellData // Data in a single cell
32 {
33 T* AMREX_RESTRICT p = nullptr;
34 Long stride = 0;
35 int ncomp = 0;
36
38 constexpr CellData (T* a_p, Long a_stride, int a_ncomp)
39 : p(a_p), stride(a_stride), ncomp(a_ncomp)
40 {}
41
42 template <class U=T,
43 std::enable_if_t<std::is_const_v<U>,int> = 0>
45 constexpr CellData (CellData<std::remove_const_t<T>> const& rhs) noexcept
46 : p(rhs.p), stride(rhs.stride), ncomp(rhs.ncomp)
47 {}
48
50 explicit operator bool() const noexcept { return p != nullptr; }
51
52 [[nodiscard]] AMREX_GPU_HOST_DEVICE
53 int nComp() const noexcept { return ncomp; }
54
55 template <class U=T,
56 std::enable_if_t<!std::is_void_v<U>,int> = 0>
58 U& operator[] (int n) const noexcept {
59#if defined(AMREX_DEBUG) || defined(AMREX_BOUND_CHECK)
60 if (n < 0 || n >= ncomp) {
61 AMREX_IF_ON_DEVICE((
62 AMREX_DEVICE_PRINTF(" %d is out of bound (0:%d)", n, ncomp-1);
63 ))
64 AMREX_IF_ON_HOST((
65 std::stringstream ss;
66 ss << " " << n << " is out of bound: (0:" << ncomp-1 << ")";
67 amrex::Abort(ss.str());
68 ))
69 }
70#endif
71 return p[n*stride];
72 }
73 };
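    // Illustrative sketch (not part of the upstream header): CellData bundles the
    // pointer, component stride, and component count of one cell, so operator[]
    // walks that cell's components. Assuming `a` is an Array4<amrex::Real> and
    // (i,j,k) is a cell inside its box:
    //
    //     amrex::CellData<amrex::Real> cd = a.cellData(i, j, k);
    //     for (int n = 0; n < cd.nComp(); ++n) {
    //         cd[n] += 1.0;   // same effect as a(i,j,k,n) += 1.0
    //     }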
74
75 namespace detail {
76
77 template <int N> struct Stride { Long a[N] = {}; };
78 template <> struct Stride<0> {};
79
85 template <typename T>
86 struct IsValidIndexType {
87 static constexpr bool value = std::is_integral_v<T> && IsNonNarrowingConversion_v<T, int>;
88 };
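    // For reference, a sketch of what IsValidIndexType admits: integral types that
    // convert to int without narrowing; floating-point types are rejected.
    //
    //     static_assert( amrex::detail::IsValidIndexType<int>::value);
    //     static_assert( amrex::detail::IsValidIndexType<short>::value);
    //     static_assert(!amrex::detail::IsValidIndexType<double>::value);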
89
96 template <int N, bool last_dim_component, typename... idx>
97 struct ArrayNDIndexCheck_impl {
98 private:
99 static constexpr int num_indices = sizeof...(idx);
100
101 template <typename... Ts>
102 struct AllExceptLastEnum;
103
104 template <typename first, typename... Ts>
105 struct AllExceptLastEnum<first, Ts...> {
106 static constexpr bool value =
107 IsValidIndexType<std::decay_t<first>>::value
108 && AllExceptLastEnum<Ts...>::value;
109 };
110
111 // Check if the last index type is an enum or valid index type.
112 // An enum is only allowed if last_dim_component is true.
113 template <typename last>
114 struct AllExceptLastEnum<last> {
115 static constexpr bool value = IsValidIndexType<std::decay_t<last>>::value
116 || (std::is_enum_v<std::decay_t<last>>);
117 };
118
119 static constexpr bool index_with_maybe_enum = AllExceptLastEnum<idx...>::value;
120
121 static constexpr bool index_all_values = Conjunction<
122 IsValidIndexType<std::decay_t<idx>>...>::value;
123
124 public:
125 static constexpr bool value = ((num_indices == N) && index_all_values) // N indices are integer types
126 || ((num_indices == N - 1) && index_all_values && last_dim_component) // N-1 indices are integer types, last is component
127 || ((num_indices == N) && index_with_maybe_enum && last_dim_component); // N indices, last dim is component and can be enum
128 };
129
130 template <int N, bool last_dim_component, class... idx>
131 inline constexpr bool ArrayNDIndexCheck_impl_v = ArrayNDIndexCheck_impl<N, last_dim_component, idx...>::value;
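    // Accepted index combinations, sketched for ArrayND<Real, 3, true> (two spatial
    // dimensions plus a trailing component dimension):
    //   (int, int, int)      -> N integral indices                    -> accepted
    //   (int, int)           -> N-1 indices, component defaults to 0  -> accepted
    //   (int, int, SomeEnum) -> last index may be an enum component   -> accepted
    //   (double, int, int)   -> non-integral spatial index            -> rejected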
132
133 template<std::size_t... idx>
134 constexpr auto make_oob_message_impl (std::index_sequence<idx...>) {
135 constexpr std::size_t N = sizeof...(idx);
136 constexpr char prefix[] = " (";
137 constexpr char middle[] = ") is out of bound (";
138 constexpr char suffix[] = ")\n";
139
140 constexpr std::size_t size =
141 sizeof(prefix) - 1 +
142 (2*N + (N-1)) +
143 sizeof(middle) - 1 +
144 (5*N + (N-1)) +
145 sizeof(suffix); // include null terminator
146
147 std::array<char, size> buf{};
148
149 std::size_t pos = 0;
150
151 // prefix
152 for (char c : prefix) {
153 if (c != '\0') {
154 buf[pos++] = c;
155 }
156 }
157 // %d,%d,...
158 ((buf[pos++] = '%', buf[pos++] = 'd', idx + 1 < N ? buf[pos++] = ',' : 0), ...);
159
160 // ") is out of bound ("
161 for (char c : middle) {
162 if (c != '\0') {
163 buf[pos++] = c;
164 }
165 }
166
167 // %d:%d,%d:%d,...
168 ((buf[pos++] = '%', buf[pos++] = 'd', buf[pos++] = ':', buf[pos++] = '%', buf[pos++] = 'd',
169 idx + 1 < N ? buf[pos++] = ',' : 0), ...);
170
171 // suffix
172 for (char c : suffix) {
173 buf[pos++] = c;
174 }
175 return buf;
176 }
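    // Worked example (sketch): for N = 3 the compile-time buffer holds the format
    // string
    //     " (%d,%d,%d) is out of bound (%d:%d,%d:%d,%d:%d)\n"
    // (plus the terminating NUL), which device_print_impl2 below fills with the
    // failing index followed by the begin/end pair of each dimension.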
177
178 template <std::size_t... idx, std::size_t... idx2x>
180 void device_print_impl2 (std::index_sequence<idx...>,
181 std::index_sequence<idx2x...>,
182 IntVectND<sizeof...(idx)> const& iv,
183 IntVectND<sizeof...(idx)> const& begin,
184 IntVectND<sizeof...(idx)> const& end)
185 {
186 constexpr auto msg = make_oob_message_impl(std::index_sequence<idx...>{});
187
188 AMREX_DEVICE_PRINTF(msg.data(),
189 iv.vect[idx]...,
190 ((idx2x % 2 == 0) ? begin.vect[idx2x / 2] : end.vect[idx2x / 2])...
191 );
192 }
193
194 template <int N, std::enable_if_t<(N>=1),int> = 0>
196 void device_printf_impl (const IntVectND<N>& iv,
197 const IntVectND<N>& begin,
198 const IntVectND<N>& end)
199 {
200 device_print_impl2(
201 std::make_index_sequence<N>{},
202 std::make_index_sequence<N*2>{},
203 iv, begin, end
204 );
205 }
206 }
207
222 template<typename T, int N, bool last_dim_component = false>
223 struct ArrayND
224 {
225 static_assert(N >= 1, "ArrayND must have at least one dimension");
226 static_assert(N > 1 || !last_dim_component, "ArrayND with N=1 cannot have last_dim_component=true");
227
228 static constexpr bool IsLastDimComponent_v = last_dim_component;
229 static constexpr bool IsArray4_v = (N==4 && last_dim_component);
230
231 T* AMREX_RESTRICT p = nullptr;
232 AMREX_NO_UNIQUE_ADDRESS detail::Stride<N-1> stride{};
233 IntVectND<N> begin{0};
234 IntVectND<N> end{0}; // end is hi+1
235
237 constexpr ArrayND () noexcept : p(nullptr) {}
238
239 template <class U=T, std::enable_if_t<std::is_const_v<U>,int> = 0>
241 constexpr ArrayND (ArrayND<std::remove_const_t<T>, N, last_dim_component> const& rhs) noexcept
242 : p(rhs.p), stride(rhs.stride), begin(rhs.begin), end(rhs.end) {}
243
251 template <bool C = last_dim_component, std::enable_if_t<!C,int> = 0>
253 ArrayND (T* a_p, BoxND<N> const& box) noexcept
254 : ArrayND(a_p, box.smallEnd(), box.bigEnd() + 1)
255 {}
256
265 template <int M, std::enable_if_t<((M+1==N) || (N == 4 && M == AMREX_SPACEDIM))
266 && last_dim_component, int> = 0>
268 ArrayND (T* a_p, BoxND<M> const& box, int ncomp) noexcept
269 : ArrayND(a_p, box.smallEnd(), box.bigEnd() + 1, ncomp)
270 {}
271
272
281 template <bool C = last_dim_component, std::enable_if_t<!C,int> = 0>
283 constexpr ArrayND (T* a_p, IntVectND<N> const& a_begin, IntVectND<N> const& a_end) noexcept
284 : p(a_p), begin(a_begin), end(a_end)
285 {
286 set_stride();
287 }
288
298 template <bool B = IsArray4_v, std::enable_if_t<B,int> = 0>
300 constexpr ArrayND (T* a_p, Dim3 const& a_begin, Dim3 const& a_end, int a_ncomp) noexcept
301 : p(a_p), begin(a_begin.x, a_begin.y, a_begin.z, 0), end(a_end.x, a_end.y, a_end.z, a_ncomp)
302 {
303 set_stride();
304 }
305
315 template <int M, std::enable_if_t<((M+1 == N) || (N == 4 && M == AMREX_SPACEDIM))
316 && last_dim_component, int> = 0>
318 constexpr ArrayND (T* a_p, IntVectND<M> const& a_begin, IntVectND<M> const& a_end, int ncomp) noexcept
319 : p(a_p)
320 {
321 constexpr_for<0, M>([&](int d) {
322 begin.vect[d] = a_begin.vect[d];
323 end.vect[d] = a_end.vect[d];
324 });
325 constexpr_for<M, N>([&](int d) {
326 begin.vect[d] = 0;
327 end.vect[d] = 1;
328 });
329 end.vect[N-1] = ncomp;
330
331 set_stride();
332 }
333
341 template <class U,
342 std::enable_if_t
343 <std::is_same_v<std::remove_const_t<T>, std::remove_const_t<U>>
344 && (N >= 2) && last_dim_component,int> = 0>
346 constexpr ArrayND (ArrayND<U, N, last_dim_component> const& rhs, int start_comp) noexcept
347 : p((T*)(rhs.p + start_comp*rhs.stride.a[N-2])),
348 stride(rhs.stride),
349 begin(rhs.begin),
350 end(rhs.end)
351 {
352 begin.vect[N-1] = 0;
353 end.vect[N-1] = rhs.end.vect[N-1] - start_comp;
354 }
355
364 template <class U,
365 std::enable_if_t
366 <std::is_same_v<std::remove_const_t<T>, std::remove_const_t<U>>
367 && (N >= 2) && last_dim_component,int> = 0>
369 constexpr ArrayND (ArrayND<U, N, last_dim_component> const& rhs, int start_comp, int num_comp) noexcept
370 : p((T*)(rhs.p + start_comp*rhs.stride.a[N-2])),
371 stride(rhs.stride),
372 begin(rhs.begin),
373 end(rhs.end)
374 {
375 begin.vect[N-1] = 0;
376 end.vect[N-1] = num_comp;
377 }
378
384 constexpr explicit operator bool() const noexcept { return p != nullptr; }
385
391 [[nodiscard]] AMREX_GPU_HOST_DEVICE
392 constexpr bool ok () const noexcept { return p != nullptr && end.allGT(begin); }
393
407 template <typename... idx, class U=T,
408 std::enable_if_t<!std::is_void_v<U> && !IsArray4_v
409 && detail::ArrayNDIndexCheck_impl_v<N, last_dim_component, idx...>,int> = 0>
411 U& operator() (idx... i) const noexcept {
412 constexpr auto nidx = sizeof...(i);
413#if defined(AMREX_DEBUG) || defined(AMREX_BOUND_CHECK)
414 index_assert(IntVectND<nidx>{i...});
415#endif
416 return p[get_offset(IntVectND<nidx>{i...})];
417 }
418
430 template <int M, class U=T, std::enable_if_t<
431 !std::is_void_v<U> &&
432 ((M == N) || (!IsArray4_v && last_dim_component && (M + 1 == N))), int> = 0>
434 U& operator() (IntVectND<M> const& iv) const noexcept {
435#if defined(AMREX_DEBUG) || defined(AMREX_BOUND_CHECK)
436 index_assert(iv);
437#endif
438 return p[get_offset(iv)];
439 }
440
451 template <int M, class U=T, std::enable_if_t<
452 !std::is_void_v<U> && last_dim_component && !IsArray4_v
453 && (M + 1 == N), int> = 0>
455 U& operator() (IntVectND<M> const& iv, int n) const noexcept {
456#if defined(AMREX_DEBUG) || defined(AMREX_BOUND_CHECK)
457 index_assert(iv, n);
458#endif
459 return p[get_offset(iv, n)];
460 }
461
468 template <typename... idx, class U=T,
469 std::enable_if_t<!std::is_void_v<U> && !IsArray4_v
470 && detail::ArrayNDIndexCheck_impl_v<N, last_dim_component, idx...>,int> = 0>
472 T* ptr (idx... i) const noexcept {
473 constexpr auto nidx = sizeof...(i);
474#if defined(AMREX_DEBUG) || defined(AMREX_BOUND_CHECK)
475 index_assert(IntVectND<nidx>{i...});
476#endif
477 return p + get_offset(IntVectND<nidx>{i...});
478 }
479
491 template <int M, std::enable_if_t<(M == N)
492 || (!IsArray4_v && last_dim_component && (M + 1 == N)), int> = 0>
494 T* ptr (IntVectND<M> const& iv) const noexcept {
495#if defined(AMREX_DEBUG) || defined(AMREX_BOUND_CHECK)
496 index_assert(iv);
497#endif
498 return p + get_offset(iv);
499 }
500
511 template <int M, std::enable_if_t<last_dim_component && !IsArray4_v
512 && (M + 1 == N), int> = 0>
514 T* ptr (IntVectND<M> const& iv, int n) const noexcept {
515#if defined(AMREX_DEBUG) || defined(AMREX_BOUND_CHECK)
516 index_assert(iv, n);
517#endif
518 return p + get_offset(iv, n);
519 }
520
526 constexpr T* dataPtr () const noexcept {
527 return this->p;
528 }
529
535 constexpr int nComp () const noexcept {
536 if constexpr (last_dim_component) {
537 return end.vect[N-1];
538 } else {
539 return 1;
540 }
541 }
542
544 constexpr std::size_t size () const noexcept {
545 if (ok()) {
546 std::size_t s = 1;
547 constexpr_for<0, N>([&](int d) {
548 s *= (end.vect[d] - begin.vect[d]);
549 });
550 return s;
551 } else {
552 return 0;
553 }
554 }
555
556 template <int d, std::enable_if_t<(d < N) && (d >= 0), int> = 0>
558 constexpr Long get_stride () const noexcept {
559 if constexpr (N > 1 && d > 0) {
560 return stride.a[d-1];
561 } else {
562 return 1;
563 }
564 }
565
566 template <typename... idx, std::enable_if_t<!IsArray4_v &&
567 detail::ArrayNDIndexCheck_impl_v<N, last_dim_component, idx...>,int> = 0>
569 constexpr bool contains (idx... i) const noexcept {
570 constexpr auto nidx = sizeof...(i);
571 return this->contains(IntVectND<nidx>{i...});
572 }
573
574 template <int M, std::enable_if_t<(M == N)
575 || (!IsArray4_v && last_dim_component && (M + 1 == N)), int> = 0>
577 constexpr bool contains (IntVectND<M> const& iv) const noexcept {
578 bool inside = true;
579 constexpr_for<0, M>([&](int d) {
580 inside = inside && (iv.vect[d] >= begin.vect[d]) && (iv.vect[d] < end.vect[d]);
581 });
582 return inside;
583 }
584
585 template <int M, std::enable_if_t<last_dim_component && !IsArray4_v
586 && (M + 1 == N), int> = 0>
588 constexpr bool contains (IntVectND<M> const& iv, int n) const noexcept {
589 bool inside = true;
590 constexpr_for<0, M>([&](int d) {
591 inside = inside && (iv.vect[d] >= begin.vect[d]) && (iv.vect[d] < end.vect[d]);
592 });
593 inside = inside && (n >= 0) && (n < end.vect[N-1]);
594 return inside;
595 }
596
597 template <int M=N, bool B = IsArray4_v, std::enable_if_t<B,int> = 0>
599 CellData<T> cellData (int i, int j, int k) const noexcept {
600 int ncomp = end.vect[M-1];
601 return CellData<T>(this->ptr(i,j,k), stride.a[M-2], ncomp);
602 }
603
604 template <int M, std::enable_if_t<(M == N)
605 || (last_dim_component && (M + 1 == N || M == AMREX_SPACEDIM)), int> = 0>
607 constexpr Long get_offset (IntVectND<M> const& iv) const noexcept
608 {
609 Long offset = iv.vect[0] - begin.vect[0];
610 // If M == N and we have a component at the end, we only loop up to N-1 for spatial.
611 // Otherwise, we loop up to M.
612 constexpr int idx = (last_dim_component && M == N) ? N - 1 : M;
613 if constexpr (N > 1) {
614 constexpr_for<1, idx>([&](int d) {
615 offset += (iv.vect[d] - begin.vect[d]) * stride.a[d-1];
616 });
617 }
618
619 // Handle the component offset if the input is the full N dimensions
620 // and the last dimension is a component.
621 if constexpr (last_dim_component && M == N) {
622 offset += iv.vect[N-1] * stride.a[N-2];
623 }
624 return offset;
625 }
626
627 template <int M, std::enable_if_t<last_dim_component
628 && ((M + 1 == N) || (N == 4 && M == AMREX_SPACEDIM)), int> = 0>
630 constexpr Long get_offset (IntVectND<M> const& iv, int n) const noexcept
631 {
632 Long offset = iv.vect[0] - begin.vect[0];
633 constexpr_for<1, M>([&](int d) {
634 offset += (iv.vect[d] - begin.vect[d]) * stride.a[d-1];
635 });
636 offset += n * stride.a[N-2];
637 return offset;
638 }
639
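    // Worked example (sketch): for the Array4-style layout (N = 4, last dim is the
    // component) with begin = (bx,by,bz,0) and strides sx = stride.a[0],
    // sy = stride.a[1], sz = stride.a[2], the linear offset of (i,j,k,n) is
    //     (i-bx) + (j-by)*sx + (k-bz)*sy + n*sz
    // where sx, sy, sz are the running products of the box lengths set by set_stride().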
640#if defined(AMREX_DEBUG) || defined(AMREX_BOUND_CHECK)
641#if defined(AMREX_USE_HIP)
642 AMREX_GPU_HOST_DEVICE AMREX_NO_INLINE
643#else
644 AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
645#endif
646 void index_assert (IntVectND<N> const& iv) const
647 {
648 bool out_of_bounds = false;
649 for (int d = 0; d < N; ++d) {
650 if (iv.vect[d] < begin.vect[d] || iv.vect[d] >= end.vect[d]) {
651 out_of_bounds = true;
652 }
653 }
654 if (out_of_bounds) {
655 AMREX_IF_ON_DEVICE((
656 detail::device_printf_impl(iv, begin, end);
657 amrex::Abort();
658 ))
659 AMREX_IF_ON_HOST((
660 std::stringstream ss;
661 ss << " (";
662 for (int d = 0; d < N; ++d) {
663 ss << iv.vect[d];
664 if (d + 1 < N) ss << ",";
665 }
666 ss << ") is out of bound (";
667 for (int d = 0; d < N; ++d) {
668 ss << begin.vect[d] << ":" << end.vect[d]-1;
669 if (d + 1 < N) ss << ",";
670 };
671 ss << ")";
672 amrex::Abort(ss.str());
673 ))
674 }
675 }
676
677 // index_assert overload for M == N-1 last index assumed 0
678 // Only valid when last_dim_component == true
679 template <int M, std::enable_if_t<((M+1 == N) || (M == AMREX_SPACEDIM))
680 && last_dim_component,int> = 0>
681#if defined(AMREX_USE_HIP)
682 AMREX_GPU_HOST_DEVICE AMREX_NO_INLINE
683#else
684 AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
685#endif
686 void index_assert (IntVectND<M> const& iv) const
687 {
688 IntVectND<N> iv_full = iv.template expand<N>(0);
689 for (int d = M; d < N; ++d) {
690 iv_full.vect[d] = begin.vect[d];
691 }
692 index_assert(iv_full);
693 }
694
695 // index_assert overload for M == N-1 and last index n
696 // Only valid when last_dim_component == true
697 template <int M, std::enable_if_t<((M+1 == N) || (M == AMREX_SPACEDIM))
698 && last_dim_component,int> = 0>
699#if defined(AMREX_USE_HIP)
700 AMREX_GPU_HOST_DEVICE AMREX_NO_INLINE
701#else
702 AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
703#endif
704 void index_assert (IntVectND<M> const& iv, int n) const
705 {
706 IntVectND<N> iv_full = iv.template expand<N>(0);
707 for (int d = M; d < N-1; ++d) {
708 iv_full.vect[d] = begin.vect[d];
709 }
710 iv_full.vect[N-1] = n;
711 index_assert(iv_full);
712 }
713#endif
714
715 //
716 // Specialization for Array4
717 //
718
719 template <class U=T, std::enable_if_t<!std::is_void_v<U> && IsArray4_v, int> = 0>
721 U& operator() (int i, int j, int k) const noexcept {
722#if defined(AMREX_DEBUG) || defined(AMREX_BOUND_CHECK)
723 AMREX_ARRAY4_INDEX_ASSERT(i,j,k,0);
724#endif
725#if defined(AMREX_USE_GPU) || defined(AMREX_DEBUG)
726 return p[(i-begin.vect[0]) +
727 (j-begin.vect[1]) * stride.a[0] +
728 (k-begin.vect[2]) * stride.a[1]];
729#else
730 Long idx1 = i + j*stride.a[0] + k*stride.a[1];
731 Long idx0 = begin.vect[0]
732 + begin.vect[1] * stride.a[0]
733 + begin.vect[2] * stride.a[1];
734 return p[idx1-idx0];
735#endif
736 }
737
738 template <class U=T, std::enable_if_t<!std::is_void_v<U> && IsArray4_v, int> = 0>
740 U& operator() (int i, int j, int k, int n) const noexcept {
741#if defined(AMREX_DEBUG) || defined(AMREX_BOUND_CHECK)
742 AMREX_ARRAY4_INDEX_ASSERT(i,j,k,n);
743#endif
744#if defined(AMREX_USE_GPU) || defined(AMREX_DEBUG)
745 return p[(i-begin.vect[0]) +
746 (j-begin.vect[1]) * stride.a[0] +
747 (k-begin.vect[2]) * stride.a[1] +
748 n * stride.a[2]];
749#else
750 Long idx1 = i + j*stride.a[0] + k*stride.a[1] + n*stride.a[2];
751 Long idx0 = begin.vect[0]
752 + begin.vect[1] * stride.a[0]
753 + begin.vect[2] * stride.a[1];
754 return p[idx1-idx0];
755#endif
756 }
757
758 template <int M, class U=T,
759 std::enable_if_t<!std::is_void_v<U> && IsArray4_v && (M == 3 || M == AMREX_SPACEDIM), int> = 0>
761 U& operator() (IntVectND<M> const& iv) const noexcept {
762#if (AMREX_SPACEDIM == 1)
763 if constexpr (M == 1) {
764 return this->operator()(iv.vect[0],0,0);
765 } else
766#elif (AMREX_SPACEDIM == 2)
767 if constexpr (M == 2) {
768 return this->operator()(iv.vect[0],iv.vect[1],0);
769 } else
770#endif
771 {
772 return this->operator()(iv.vect[0],iv.vect[1],iv.vect[2]);
773 }
774 }
775
776 template <int M, class U=T,
777 std::enable_if_t<!std::is_void_v<U> && IsArray4_v && (M == 3 || M == AMREX_SPACEDIM), int> = 0>
779 U& operator() (IntVectND<M> const& iv, int n) const noexcept {
780#if (AMREX_SPACEDIM == 1)
781 if constexpr (M == 1) {
782 return this->operator()(iv.vect[0],0,0,n);
783 } else
784#elif (AMREX_SPACEDIM == 2)
785 if constexpr (M == 2) {
786 return this->operator()(iv.vect[0],iv.vect[1],0,n);
787 } else
788#endif
789 {
790 return this->operator()(iv.vect[0],iv.vect[1],iv.vect[2],n);
791 }
792 }
793
794 template <class U=T, std::enable_if_t<!std::is_void_v<U> && IsArray4_v, int> = 0>
796 U& operator() (Dim3 const& cell) const noexcept {
797 return this->operator()(cell.x,cell.y,cell.z);
798 }
799
800 template <class U=T, std::enable_if_t<!std::is_void_v<U> && IsArray4_v, int> = 0>
802 U& operator() (Dim3 const& cell, int n) const noexcept {
803 return this->operator()(cell.x,cell.y,cell.z,n);
804 }
805
806 template <class U=T, std::enable_if_t<!std::is_void_v<U> && IsArray4_v, int> = 0>
808 U* ptr (int i, int j, int k) const noexcept {
809#if defined(AMREX_DEBUG) || defined(AMREX_BOUND_CHECK)
810 AMREX_ARRAY4_INDEX_ASSERT(i,j,k,0);
811#endif
812#if defined(AMREX_USE_GPU) || defined(AMREX_DEBUG)
813 return p + ((i-begin.vect[0]) +
814 (j-begin.vect[1]) * stride.a[0] +
815 (k-begin.vect[2]) * stride.a[1]);
816#else
817 Long idx1 = i + j*stride.a[0] + k*stride.a[1];
818 Long idx0 = begin.vect[0]
819 + begin.vect[1] * stride.a[0]
820 + begin.vect[2] * stride.a[1];
821 return p + (idx1-idx0);
822#endif
823 }
824
825 template <class U=T, std::enable_if_t<!std::is_void_v<U> && IsArray4_v, int> = 0>
827 U* ptr (int i, int j, int k, int n) const noexcept {
828#if defined(AMREX_DEBUG) || defined(AMREX_BOUND_CHECK)
829 AMREX_ARRAY4_INDEX_ASSERT(i,j,k,n);
830#endif
831#if defined(AMREX_USE_GPU) || defined(AMREX_DEBUG)
832 return p + ((i-begin.vect[0]) +
833 (j-begin.vect[1]) * stride.a[0] +
834 (k-begin.vect[2]) * stride.a[1] +
835 n * stride.a[2]);
836#else
837 Long idx1 = i + j*stride.a[0] + k*stride.a[1] + n*stride.a[2];
838 Long idx0 = begin.vect[0]
839 + begin.vect[1] * stride.a[0]
840 + begin.vect[2] * stride.a[1];
841 return p + (idx1-idx0);
842#endif
843 }
844
845 template <int M, class U=T,
846 std::enable_if_t<!std::is_void_v<U> && IsArray4_v && (M == 3 || M == AMREX_SPACEDIM), int> = 0>
848 U* ptr (IntVectND<M> const& iv) const noexcept {
849#if (AMREX_SPACEDIM == 1)
850 if constexpr (M == 1) {
851 return this->ptr(iv.vect[0],0,0);
852 } else
853#elif (AMREX_SPACEDIM == 2)
854 if constexpr (M == 2) {
855 return this->ptr(iv.vect[0],iv.vect[1],0);
856 } else
857#endif
858 {
859 return this->ptr(iv.vect[0],iv.vect[1],iv.vect[2]);
860 }
861 }
862
863 template <int M, class U=T,
864 std::enable_if_t<!std::is_void_v<U> && IsArray4_v && (M == 3 || M == AMREX_SPACEDIM), int> = 0>
866 U* ptr (IntVectND<M> const& iv, int n) const noexcept {
867#if (AMREX_SPACEDIM == 1)
868 if constexpr (M == 1) {
869 return this->ptr(iv.vect[0],0,0,n);
870 } else
871#elif (AMREX_SPACEDIM == 2)
872 if constexpr (M == 2) {
873 return this->ptr(iv.vect[0],iv.vect[1],0,n);
874 } else
875#endif
876 {
877 return this->ptr(iv.vect[0],iv.vect[1],iv.vect[2],n);
878 }
879 }
880
881 template <class U=T, std::enable_if_t<!std::is_void_v<U> && IsArray4_v, int> = 0>
883 U* ptr (Dim3 const& cell) const noexcept {
884 return this->ptr(cell.x,cell.y,cell.z);
885 }
886
887 template <class U=T, std::enable_if_t<!std::is_void_v<U> && IsArray4_v, int> = 0>
889 U* ptr (Dim3 const& cell, int n) const noexcept {
890 return this->ptr(cell.x,cell.y,cell.z,n);
891 }
892
893 template <bool B = IsArray4_v, std::enable_if_t<B,int> = 0>
895 bool contains (int i, int j, int k) const noexcept {
896 return (i>=begin.vect[0] && i<end.vect[0] &&
897 j>=begin.vect[1] && j<end.vect[1] &&
898 k>=begin.vect[2] && k<end.vect[2]);
899 }
900
901 template <int M, std::enable_if_t<IsArray4_v && (M == 3 || M == AMREX_SPACEDIM), int> = 0>
903 bool contains (IntVectND<M> const& iv) const noexcept {
904#if (AMREX_SPACEDIM < 3)
905 if constexpr (M == AMREX_SPACEDIM) {
906 return AMREX_D_TERM((iv.vect[0]>=begin.vect[0]) && (iv.vect[0]<end.vect[0]),
907 && (iv.vect[1]>=begin.vect[1]) && (iv.vect[1]<end.vect[1]),
908 && (iv.vect[2]>=begin.vect[2]) && (iv.vect[2]<end.vect[2]));
909 } else
910#endif
911 {
912 return (iv.vect[0]>=begin.vect[0]) && (iv.vect[0]<end.vect[0])
913 && (iv.vect[1]>=begin.vect[1]) && (iv.vect[1]<end.vect[1])
914 && (iv.vect[2]>=begin.vect[2]) && (iv.vect[2]<end.vect[2]);
915 }
916 }
917
918 template <bool B = IsArray4_v, std::enable_if_t<B,int> = 0>
920 bool contains (Dim3 const& cell) const noexcept {
921 return (cell.x>=begin.vect[0] && cell.x<end.vect[0] &&
922 cell.y>=begin.vect[1] && cell.y<end.vect[1] &&
923 cell.z>=begin.vect[2] && cell.z<end.vect[2]);
924 }
925
926#if defined(AMREX_DEBUG) || defined(AMREX_BOUND_CHECK)
927 template <bool B = IsArray4_v, std::enable_if_t<B,int> = 0>
928#if defined(AMREX_USE_HIP)
929 AMREX_GPU_HOST_DEVICE AMREX_NO_INLINE
930#else
931 AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
932#endif
933 void index_assert_print_error_message (int i, int j, int k, int n) const
934 {
935 AMREX_IF_ON_DEVICE((
936 AMREX_DEVICE_PRINTF(" (%d,%d,%d,%d) is out of bound (%d:%d,%d:%d,%d:%d,0:%d)\n",
937 i, j, k, n,
938 begin.vect[0], end.vect[0]-1,
939 begin.vect[1], end.vect[1]-1,
940 begin.vect[2], end.vect[2]-1,
941 end.vect[3]-1);
942 amrex::Abort();
943 ))
944 AMREX_IF_ON_HOST((
945 std::stringstream ss;
946 ss << " (" << i << "," << j << "," << k << "," << n
947 << ") is out of bound ("
948 << begin.vect[0] << ":" << end.vect[0]-1 << ","
949 << begin.vect[1] << ":" << end.vect[1]-1 << ","
950 << begin.vect[2] << ":" << end.vect[2]-1 << ","
951 << "0:" << end.vect[3]-1 << ")";
952 amrex::Abort(ss.str());
953 ))
954 }
955#endif
956
957 private:
959 constexpr void set_stride () noexcept {
960 if constexpr (N > 1) {
961 Long current_stride = 1;
962 constexpr_for<0, N-1>([&](int d) {
963 Long len = end.vect[d] - begin.vect[d];
964 current_stride *= len;
965 stride.a[d] = current_stride;
966 });
967 }
968 }
969 };
970
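 // Indexing sketch (illustrative, assuming `data` points to enough amrex::Real
 // elements for the boxes below):
 //
 //     amrex::IntVectND<2> lo{0,0}, up{8,8};                      // `up` is exclusive, i.e. hi+1
 //     amrex::ArrayND<amrex::Real, 2> a(data, lo, up);            // purely spatial
 //     a(3,4) = 1.0;
 //     a(amrex::IntVectND<2>{3,4}) = 1.0;
 //
 //     amrex::ArrayND<amrex::Real, 3, true> b(data, lo, up, 2);   // last dim = component
 //     b(3,4,1) = 2.0;
 //     b(amrex::IntVectND<2>{3,4}, 1) = 2.0;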
971 // Deduction guides for ArrayND
972 // 1: Matches ArrayND (T*, BoxND<N>) -> ArrayND<T,N, last_dim_component=false>
973 template <typename T, int N>
974 ArrayND (T*, BoxND<N> const&) -> ArrayND<T, N, false>;
975
976 // 2: Matches ArrayND (T*, BoxND<N>, ncomp) -> ArrayND<T,N+1, last_dim_component=true>
977 // This supports the "N+1" logic (Spatial + Component)
978 template <typename T, int N>
979 ArrayND (T*, BoxND<N> const&, int) -> ArrayND<T, N+1, true>;
980
981 // 3: Matches ArrayND (T*, IntVectND<N>, IntVectND<N>) -> ArrayND<T,N, last_dim_component=false>
982 template <typename T, int N>
983 ArrayND (T*, IntVectND<N> const&, IntVectND<N> const&) -> ArrayND<T, N, false>;
984
985 // 4: Matches ArrayND (T*, IntVectND<N>, IntVectND<N>, int) -> ArrayND<T,N+1, last_dim_component=true>
986 template <typename T, int N>
987 ArrayND (T*, IntVectND<N> const&, IntVectND<N> const&, int) -> ArrayND<T, N+1, true>;
988
989 // 5: Matches ArrayND (T*, Dim3, Dim3, int) -> ArrayND<T,4, last_dim_component=true>
990 template <typename T>
991 ArrayND (T*, Dim3 const&, Dim3 const&, int) -> ArrayND<T, 4, true>;
992
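 // Deduction sketch: with Real* ptr, a Box bx (i.e. BoxND<AMREX_SPACEDIM>), and int nc,
 //     amrex::ArrayND a(ptr, bx);        // -> ArrayND<Real, AMREX_SPACEDIM, false>
 //     amrex::ArrayND b(ptr, bx, nc);    // -> ArrayND<Real, AMREX_SPACEDIM+1, true>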
993 template<typename T>
994 using Array4 = ArrayND<T, 4, true>;
995
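 // Typical use (sketch, assuming a MultiFab `mf`): Array4 is the per-box accessor
 // handed to GPU kernels; a(i,j,k,n) addresses cell (i,j,k), component n.
 //
 //     for (amrex::MFIter mfi(mf); mfi.isValid(); ++mfi) {
 //         amrex::Box const& bx = mfi.validbox();
 //         amrex::Array4<amrex::Real> const& a = mf.array(mfi);
 //         amrex::ParallelFor(bx, mf.nComp(),
 //             [=] AMREX_GPU_DEVICE (int i, int j, int k, int n)
 //             {
 //                 a(i,j,k,n) = 0.0;
 //             });
 //     }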
996 template <class T>
998 Dim3 lbound (Array4<T> const& a) noexcept
999 {
1000 return Dim3{a.begin.vect[0],a.begin.vect[1],a.begin.vect[2]};
1001 }
1002
1003 template <class T>
1005 Dim3 ubound (Array4<T> const& a) noexcept
1006 {
1007 return Dim3{a.end.vect[0]-1,a.end.vect[1]-1,a.end.vect[2]-1};
1008 }
1009
1010 template <class T>
1012 Dim3 length (Array4<T> const& a) noexcept
1013 {
1014 return Dim3{a.end.vect[0]-a.begin.vect[0],a.end.vect[1]-a.begin.vect[1],a.end.vect[2]-a.begin.vect[2]};
1015 }
1016
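 // Loop sketch using the bound helpers (assuming Array4<amrex::Real const> a):
 // lbound/ubound return the inclusive lower/upper corners, so the loops below
 // cover exactly the cells the Array4 was built over.
 //
 //     amrex::Dim3 lo = amrex::lbound(a);
 //     amrex::Dim3 hi = amrex::ubound(a);
 //     for (int k = lo.z; k <= hi.z; ++k) {
 //     for (int j = lo.y; j <= hi.y; ++j) {
 //     for (int i = lo.x; i <= hi.x; ++i) {
 //         amrex::Real v = a(i,j,k);   // read one cell
 //     }}}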
1017 template <typename T, int N, bool C>
1018 std::ostream& operator<< (std::ostream& os, const ArrayND<T,N,C>& a) {
1019 os << "(" << a.begin << ',' << a.end-1 << ")";
1020 return os;
1021 }
1022
1023 //
1024 // Type traits for detecting if a class has a size() constexpr function.
1025 //
1026 template <class A, class Enable = void> struct HasMultiComp : std::false_type {};
1027 //
1028 template <class B>
1029 struct HasMultiComp<B, std::enable_if_t<(B().size() >= 1)>>
1030 : std::true_type {};
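 // Sketch of the intent: types exposing a constexpr size() (e.g. GpuArray) count
 // as multi-component, plain arithmetic types do not. Illustrative only; the trait
 // also requires the type to be constexpr default-constructible.
 //
 //     static_assert( amrex::HasMultiComp<amrex::GpuArray<amrex::Real,3>>::value);
 //     static_assert(!amrex::HasMultiComp<amrex::Real>::value);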
1031
1032 //
1033 // PolymorphicArray4 can be used to access both AoS and SoA with
1034 // (i,j,k,n). Here SoA refers to a multi-component BaseFab, and AoS refers
1035 // to a single-component BaseFab of multi-component GpuArray.
1036 //
1037 template <typename T>
1038 struct PolymorphicArray4
1039 : public Array4<T>
1040 {
1042 PolymorphicArray4 (Array4<T> const& a)
1043 : Array4<T>{a} {}
1044
1046 T& operator() (int i, int j, int k) const noexcept {
1047 return this->Array4<T>::operator()(i,j,k);
1048 }
1049
1050 template <class U=T, std::enable_if_t< amrex::HasMultiComp<U>::value,int> = 0>
1052 typename U::reference_type
1053 operator() (int i, int j, int k, int n) const noexcept {
1054 return this->Array4<T>::operator()(i,j,k,0)[n];
1055 }
1056
1057 template <class U=T, std::enable_if_t<!amrex::HasMultiComp<U>::value,int> = 0>
1059 U& operator() (int i, int j, int k, int n) const noexcept {
1060 return this->Array4<T>::operator()(i,j,k,n);
1061 }
1062 };
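 // Usage sketch: the same (i,j,k,n) syntax then works for both layouts, using the
 // helper makePolymorphic defined just below. Assuming `soa` is an Array4<Real>
 // with several components and `aos` is an Array4<GpuArray<Real,NC>> with one
 // component (names and NC are illustrative):
 //
 //     auto a = amrex::makePolymorphic(soa);
 //     auto b = amrex::makePolymorphic(aos);
 //     a(i,j,k,n) = 1.0;   // forwards to Array4<Real>::operator()(i,j,k,n)
 //     b(i,j,k,n) = 1.0;   // resolves to aos(i,j,k,0)[n]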
1063
1064 template <typename T>
1065 [[nodiscard]] PolymorphicArray4<T>
1066 makePolymorphic (Array4<T> const& a)
1067 {
1068 return PolymorphicArray4<T>(a);
1069 }
1070}
1071
1072#endif