1 #ifndef AMREX_FABARRAY_UTILITY_H_
2 #define AMREX_FABARRAY_UTILITY_H_
3 #include <AMReX_Config.H>
13 template <
class FAB,
class F,
14 class bar = std::enable_if_t<IsBaseFab<FAB>::value> >
15 typename FAB::value_type
21 template <
class FAB,
class F,
22 class bar = std::enable_if_t<IsBaseFab<FAB>::value> >
23 typename FAB::value_type
26 using value_type =
typename FAB::value_type;
30 #pragma omp parallel if (!system::regtest_reduction) reduction(+:sm)
34 const Box& bx = mfi.growntilebox(nghost);
45 template <
class OP,
class FAB,
class F>
46 std::enable_if_t<IsBaseFab<FAB>::value,
47 std::conditional_t<std::is_same<OP,ReduceOpLogicalAnd>::value ||
48 std::is_same<OP,ReduceOpLogicalOr>::value,
49 int,
typename FAB::value_type> >
52 using T = std::conditional_t<std::is_same<OP,ReduceOpLogicalAnd>::value ||
53 std::is_same<OP,ReduceOpLogicalOr>::value,
54 int,
typename FAB::value_type>;
64 template <
class OP,
class FAB1,
class FAB2,
class F>
66 std::conditional_t<std::is_same<OP,ReduceOpLogicalAnd>::value ||
67 std::is_same<OP,ReduceOpLogicalOr>::value,
68 int,
typename FAB1::value_type> >
71 using T = std::conditional_t<std::is_same<OP,ReduceOpLogicalAnd>::value ||
72 std::is_same<OP,ReduceOpLogicalOr>::value,
73 int,
typename FAB1::value_type>;
81 ma1[box_no], ma2[box_no])) };
85 template <
class OP,
class FAB1,
class FAB2,
class FAB3,
class F>
87 std::conditional_t<std::is_same<OP,ReduceOpLogicalAnd>::value ||
88 std::is_same<OP,ReduceOpLogicalOr>::value,
89 int,
typename FAB1::value_type> >
93 using T = std::conditional_t<std::is_same<OP,ReduceOpLogicalAnd>::value ||
94 std::is_same<OP,ReduceOpLogicalOr>::value,
95 int,
typename FAB1::value_type>;
104 ma1[box_no], ma2[box_no], ma3[box_no])) };
108 template <
class FAB,
class F>
109 std::enable_if_t<!amrex::DefinitelyNotHostRunnable<F>::value,
typename FAB::value_type>
115 template <
class FAB,
class F>
116 std::enable_if_t<amrex::DefinitelyNotHostRunnable<F>::value,
typename FAB::value_type>
120 amrex::Abort(
"ReduceSum: Launch Region is off. Device lambda cannot be called by host.");
125 template <
class FAB,
class F,
126 class bar = std::enable_if_t<IsBaseFab<FAB>::value> >
127 typename FAB::value_type
131 return fudetail::ReduceMF<ReduceOpSum>(fa, nghost, std::forward<F>(
f));
137 template <
class FAB,
class F,
138 class bar = std::enable_if_t<IsBaseFab<FAB>::value> >
139 typename FAB::value_type
146 template <
class FAB1,
class FAB2,
class F,
147 class bar = std::enable_if_t<IsBaseFab<FAB1>::value> >
148 typename FAB1::value_type
155 template <
class FAB1,
class FAB2,
class F,
156 class bar = std::enable_if_t<IsBaseFab<FAB1>::value> >
157 typename FAB1::value_type
161 using value_type =
typename FAB1::value_type;
165 #pragma omp parallel if (!system::regtest_reduction) reduction(+:sm)
169 const Box& bx = mfi.growntilebox(nghost);
172 sm +=
f(bx, arr1, arr2);
181 template <
class FAB1,
class FAB2,
class F>
182 std::enable_if_t<!amrex::DefinitelyNotHostRunnable<F>::value,
typename FAB1::value_type>
189 template <
class FAB1,
class FAB2,
class F>
190 std::enable_if_t<amrex::DefinitelyNotHostRunnable<F>::value,
typename FAB1::value_type>
195 amrex::Abort(
"ReduceSum: Launch Region is off. Device lambda cannot be called by host.");
200 template <
class FAB1,
class FAB2,
class F,
201 class bar = std::enable_if_t<IsBaseFab<FAB1>::value> >
202 typename FAB1::value_type
207 return fudetail::ReduceMF<ReduceOpSum>(fa1,fa2,nghost,std::forward<F>(
f));
213 template <
class FAB1,
class FAB2,
class F,
214 class bar = std::enable_if_t<IsBaseFab<FAB1>::value> >
215 typename FAB1::value_type
216 ReduceSum (FabArray<FAB1>
const& fa1, FabArray<FAB2>
const& fa2,
223 template <
class FAB1,
class FAB2,
class FAB3,
class F,
224 class bar = std::enable_if_t<IsBaseFab<FAB1>::value> >
225 typename FAB1::value_type
233 template <
class FAB1,
class FAB2,
class FAB3,
class F,
234 class bar = std::enable_if_t<IsBaseFab<FAB1>::value> >
235 typename FAB1::value_type
239 using value_type =
typename FAB1::value_type;
243 #pragma omp parallel if (!system::regtest_reduction) reduction(+:sm)
247 const Box& bx = mfi.growntilebox(nghost);
251 sm +=
f(bx, arr1, arr2, arr3);
260 template <
class FAB1,
class FAB2,
class FAB3,
class F>
261 std::enable_if_t<!amrex::DefinitelyNotHostRunnable<F>::value,
typename FAB1::value_type>
268 template <
class FAB1,
class FAB2,
class FAB3,
class F>
269 std::enable_if_t<amrex::DefinitelyNotHostRunnable<F>::value,
typename FAB1::value_type>
274 amrex::Abort(
"ReduceSum: Launch Region is off. Device lambda cannot be called by host.");
279 template <
class FAB1,
class FAB2,
class FAB3,
class F,
280 class bar = std::enable_if_t<IsBaseFab<FAB1>::value> >
281 typename FAB1::value_type
286 return fudetail::ReduceMF<ReduceOpSum>(fa1,fa2,fa3,nghost,std::forward<F>(
f));
292 template <
class FAB1,
class FAB2,
class FAB3,
class F,
293 class bar = std::enable_if_t<IsBaseFab<FAB1>::value> >
294 typename FAB1::value_type
295 ReduceSum (FabArray<FAB1>
const& fa1, FabArray<FAB2>
const& fa2,
296 FabArray<FAB3>
const& fa3,
IntVect const& nghost, F&&
f)
302 template <
class FAB,
class F,
303 class bar = std::enable_if_t<IsBaseFab<FAB>::value> >
304 typename FAB::value_type
311 template <
class FAB,
class F,
312 class bar = std::enable_if_t<IsBaseFab<FAB>::value> >
313 typename FAB::value_type
316 using value_type =
typename FAB::value_type;
320 #pragma omp parallel reduction(min:r)
324 const Box& bx = mfi.growntilebox(nghost);
334 template <
class FAB,
class F>
335 std::enable_if_t<!amrex::DefinitelyNotHostRunnable<F>::value,
typename FAB::value_type>
341 template <
class FAB,
class F>
342 std::enable_if_t<amrex::DefinitelyNotHostRunnable<F>::value,
typename FAB::value_type>
346 amrex::Abort(
"ReduceMin: Launch Region is off. Device lambda cannot be called by host.");
351 template <
class FAB,
class F,
352 class bar = std::enable_if_t<IsBaseFab<FAB>::value> >
353 typename FAB::value_type
357 return fudetail::ReduceMF<ReduceOpMin>(fa, nghost, std::forward<F>(
f));
363 template <
class FAB,
class F,
364 class bar = std::enable_if_t<IsBaseFab<FAB>::value> >
365 typename FAB::value_type
372 template <
class FAB1,
class FAB2,
class F,
373 class bar = std::enable_if_t<IsBaseFab<FAB1>::value> >
374 typename FAB1::value_type
381 template <
class FAB1,
class FAB2,
class F,
382 class bar = std::enable_if_t<IsBaseFab<FAB1>::value> >
383 typename FAB1::value_type
387 using value_type =
typename FAB1::value_type;
391 #pragma omp parallel reduction(min:r)
395 const Box& bx = mfi.growntilebox(nghost);
407 template <
class FAB1,
class FAB2,
class F>
408 std::enable_if_t<!amrex::DefinitelyNotHostRunnable<F>::value,
typename FAB1::value_type>
415 template <
class FAB1,
class FAB2,
class F>
416 std::enable_if_t<amrex::DefinitelyNotHostRunnable<F>::value,
typename FAB1::value_type>
421 amrex::Abort(
"ReduceMin: Launch Region is off. Device lambda cannot be called by host.");
426 template <
class FAB1,
class FAB2,
class F,
427 class bar = std::enable_if_t<IsBaseFab<FAB1>::value> >
428 typename FAB1::value_type
433 return fudetail::ReduceMF<ReduceOpMin>(fa1,fa2,nghost,std::forward<F>(
f));
439 template <
class FAB1,
class FAB2,
class F,
440 class bar = std::enable_if_t<IsBaseFab<FAB1>::value> >
441 typename FAB1::value_type
442 ReduceMin (FabArray<FAB1>
const& fa1, FabArray<FAB2>
const& fa2,
449 template <
class FAB1,
class FAB2,
class FAB3,
class F,
450 class bar = std::enable_if_t<IsBaseFab<FAB1>::value> >
451 typename FAB1::value_type
459 template <
class FAB1,
class FAB2,
class FAB3,
class F,
460 class bar = std::enable_if_t<IsBaseFab<FAB1>::value> >
461 typename FAB1::value_type
465 using value_type =
typename FAB1::value_type;
469 #pragma omp parallel reduction(min:r)
473 const Box& bx = mfi.growntilebox(nghost);
486 template <
class FAB1,
class FAB2,
class FAB3,
class F>
487 std::enable_if_t<!amrex::DefinitelyNotHostRunnable<F>::value,
typename FAB1::value_type>
494 template <
class FAB1,
class FAB2,
class FAB3,
class F>
495 std::enable_if_t<amrex::DefinitelyNotHostRunnable<F>::value,
typename FAB1::value_type>
500 amrex::Abort(
"ReduceMin: Launch Region is off. Device lambda cannot be called by host.");
505 template <
class FAB1,
class FAB2,
class FAB3,
class F,
506 class bar = std::enable_if_t<IsBaseFab<FAB1>::value> >
507 typename FAB1::value_type
512 return fudetail::ReduceMF<ReduceOpMin>(fa1,fa2,fa3,nghost,std::forward<F>(
f));
518 template <
class FAB1,
class FAB2,
class FAB3,
class F,
519 class bar = std::enable_if_t<IsBaseFab<FAB1>::value> >
520 typename FAB1::value_type
521 ReduceMin (FabArray<FAB1>
const& fa1, FabArray<FAB2>
const& fa2,
522 FabArray<FAB3>
const& fa3,
IntVect const& nghost, F&&
f)
528 template <
class FAB,
class F,
529 class bar = std::enable_if_t<IsBaseFab<FAB>::value> >
530 typename FAB::value_type
537 template <
class FAB,
class F,
538 class bar = std::enable_if_t<IsBaseFab<FAB>::value> >
539 typename FAB::value_type
542 using value_type =
typename FAB::value_type;
543 value_type
r = std::numeric_limits<value_type>::lowest();
546 #pragma omp parallel reduction(max:r)
550 const Box& bx = mfi.growntilebox(nghost);
561 template <
class FAB,
class F>
562 std::enable_if_t<!amrex::DefinitelyNotHostRunnable<F>::value,
typename FAB::value_type>
568 template <
class FAB,
class F>
569 std::enable_if_t<amrex::DefinitelyNotHostRunnable<F>::value,
typename FAB::value_type>
573 amrex::Abort(
"ReduceMax: Launch Region is off. Device lambda cannot be called by host.");
578 template <
class FAB,
class F,
579 class bar = std::enable_if_t<IsBaseFab<FAB>::value> >
580 typename FAB::value_type
584 return fudetail::ReduceMF<ReduceOpMax>(fa,nghost,std::forward<F>(
f));
590 template <
class FAB,
class F,
591 class bar = std::enable_if_t<IsBaseFab<FAB>::value> >
592 typename FAB::value_type
599 template <
class FAB1,
class FAB2,
class F,
600 class bar = std::enable_if_t<IsBaseFab<FAB1>::value> >
601 typename FAB1::value_type
608 template <
class FAB1,
class FAB2,
class F,
609 class bar = std::enable_if_t<IsBaseFab<FAB1>::value> >
610 typename FAB1::value_type
614 using value_type =
typename FAB1::value_type;
615 value_type
r = std::numeric_limits<value_type>::lowest();
618 #pragma omp parallel reduction(max:r)
622 const Box& bx = mfi.growntilebox(nghost);
634 template <
class FAB1,
class FAB2,
class F>
635 std::enable_if_t<!amrex::DefinitelyNotHostRunnable<F>::value,
typename FAB1::value_type>
642 template <
class FAB1,
class FAB2,
class F>
643 std::enable_if_t<amrex::DefinitelyNotHostRunnable<F>::value,
typename FAB1::value_type>
648 amrex::Abort(
"ReduceMax: Launch Region is off. Device lambda cannot be called by host.");
653 template <
class FAB1,
class FAB2,
class F,
654 class bar = std::enable_if_t<IsBaseFab<FAB1>::value> >
655 typename FAB1::value_type
660 return fudetail::ReduceMF<ReduceOpMax>(fa1,fa2,nghost,std::forward<F>(
f));
666 template <
class FAB1,
class FAB2,
class F,
667 class bar = std::enable_if_t<IsBaseFab<FAB1>::value> >
668 typename FAB1::value_type
669 ReduceMax (FabArray<FAB1>
const& fa1, FabArray<FAB2>
const& fa2,
676 template <
class FAB1,
class FAB2,
class FAB3,
class F,
677 class bar = std::enable_if_t<IsBaseFab<FAB1>::value> >
678 typename FAB1::value_type
686 template <
class FAB1,
class FAB2,
class FAB3,
class F,
687 class bar = std::enable_if_t<IsBaseFab<FAB1>::value> >
688 typename FAB1::value_type
692 using value_type =
typename FAB1::value_type;
693 value_type
r = std::numeric_limits<value_type>::lowest();
696 #pragma omp parallel reduction(max:r)
700 const Box& bx = mfi.growntilebox(nghost);
713 template <
class FAB1,
class FAB2,
class FAB3,
class F>
714 std::enable_if_t<!amrex::DefinitelyNotHostRunnable<F>::value,
typename FAB1::value_type>
721 template <
class FAB1,
class FAB2,
class FAB3,
class F>
722 std::enable_if_t<amrex::DefinitelyNotHostRunnable<F>::value,
typename FAB1::value_type>
727 amrex::Abort(
"ReduceMax: Launch Region is off. Device lambda cannot be called by host.");
732 template <
class FAB1,
class FAB2,
class FAB3,
class F,
733 class bar = std::enable_if_t<IsBaseFab<FAB1>::value> >
734 typename FAB1::value_type
739 return fudetail::ReduceMF<ReduceOpMax>(fa1,fa2,fa3,nghost,std::forward<F>(
f));
745 template <
class FAB1,
class FAB2,
class FAB3,
class F,
746 class bar = std::enable_if_t<IsBaseFab<FAB1>::value> >
747 typename FAB1::value_type
748 ReduceMax (FabArray<FAB1>
const& fa1, FabArray<FAB2>
const& fa2,
749 FabArray<FAB3>
const& fa3,
IntVect const& nghost, F&&
f)
755 template <
class FAB,
class F,
756 class bar = std::enable_if_t<IsBaseFab<FAB>::value> >
764 template <
class FAB,
class F,
765 class bar = std::enable_if_t<IsBaseFab<FAB>::value> >
772 #pragma omp parallel reduction(&&:r)
776 const Box& bx = mfi.growntilebox(nghost);
787 template <
class FAB,
class F>
788 std::enable_if_t<!amrex::DefinitelyNotHostRunnable<F>::value,
bool>
794 template <
class FAB,
class F>
795 std::enable_if_t<amrex::DefinitelyNotHostRunnable<F>::value,
bool>
799 amrex::Abort(
"ReduceLogicalAnd: Launch Region is off. Device lambda cannot be called by host.");
804 template <
class FAB,
class F,
805 class bar = std::enable_if_t<IsBaseFab<FAB>::value> >
810 return fudetail::ReduceMF<ReduceOpLogicalAnd>(fa,nghost,std::forward<F>(
f));
816 template <
class FAB,
class F,
817 class bar = std::enable_if_t<IsBaseFab<FAB>::value> >
825 template <
class FAB1,
class FAB2,
class F,
826 class bar = std::enable_if_t<IsBaseFab<FAB1>::value> >
835 template <
class FAB1,
class FAB2,
class F,
836 class bar = std::enable_if_t<IsBaseFab<FAB1>::value> >
844 #pragma omp parallel reduction(&&:r)
848 const Box& bx = mfi.growntilebox(nghost);
851 r =
r &&
f(bx, arr1, arr2);
860 template <
class FAB1,
class FAB2,
class F>
861 std::enable_if_t<!amrex::DefinitelyNotHostRunnable<F>::value,
bool>
868 template <
class FAB1,
class FAB2,
class F>
869 std::enable_if_t<amrex::DefinitelyNotHostRunnable<F>::value,
bool>
874 amrex::Abort(
"ReduceLogicalAnd: Launch Region is off. Device lambda cannot be called by host.");
879 template <
class FAB1,
class FAB2,
class F,
880 class bar = std::enable_if_t<IsBaseFab<FAB1>::value> >
886 return fudetail::ReduceMF<ReduceOpLogicalAnd>(fa1,fa2,nghost,std::forward<F>(
f));
892 template <
class FAB1,
class FAB2,
class F,
893 class bar = std::enable_if_t<IsBaseFab<FAB1>::value> >
902 template <
class FAB,
class F,
903 class bar = std::enable_if_t<IsBaseFab<FAB>::value> >
911 template <
class FAB,
class F,
912 class bar = std::enable_if_t<IsBaseFab<FAB>::value> >
919 #pragma omp parallel reduction(||:r)
923 const Box& bx = mfi.growntilebox(nghost);
934 template <
class FAB,
class F>
935 std::enable_if_t<!amrex::DefinitelyNotHostRunnable<F>::value,
bool>
941 template <
class FAB,
class F>
942 std::enable_if_t<amrex::DefinitelyNotHostRunnable<F>::value,
bool>
946 amrex::Abort(
"ReduceLogicalOr: Launch Region is off. Device lambda cannot be called by host.");
951 template <
class FAB,
class F,
952 class bar = std::enable_if_t<IsBaseFab<FAB>::value> >
957 return fudetail::ReduceMF<ReduceOpLogicalOr>(fa,nghost,std::forward<F>(
f));
963 template <
class FAB,
class F,
964 class bar = std::enable_if_t<IsBaseFab<FAB>::value> >
972 template <
class FAB1,
class FAB2,
class F,
973 class bar = std::enable_if_t<IsBaseFab<FAB1>::value> >
982 template <
class FAB1,
class FAB2,
class F,
983 class bar = std::enable_if_t<IsBaseFab<FAB1>::value> >
991 #pragma omp parallel reduction(||:r)
995 const Box& bx = mfi.growntilebox(nghost);
998 r =
r ||
f(bx, arr1, arr2);
1005 #ifdef AMREX_USE_GPU
1006 namespace fudetail {
1007 template <
class FAB1,
class FAB2,
class F>
1008 std::enable_if_t<!amrex::DefinitelyNotHostRunnable<F>::value,
bool>
1015 template <
class FAB1,
class FAB2,
class F>
1016 std::enable_if_t<amrex::DefinitelyNotHostRunnable<F>::value,
bool>
1021 amrex::Abort(
"ReduceLogicalOr: Launch Region is off. Device lambda cannot be called by host.");
1026 template <
class FAB1,
class FAB2,
class F,
1027 class bar = std::enable_if_t<IsBaseFab<FAB1>::value> >
1033 return fudetail::ReduceMF<ReduceOpLogicalOr>(fa1,fa2,nghost,std::forward<F>(
f));
1039 template <
class FAB1,
class FAB2,
class F,
1040 class bar = std::enable_if_t<IsBaseFab<FAB1>::value> >
1042 ReduceLogicalOr (FabArray<FAB1>
const& fa1, FabArray<FAB2>
const& fa2,
1049 template <class FAB, class bar = std::enable_if_t<IsBaseFab<FAB>::value> >
1058 int n = (comp >= 0) ? 1 : mf.
nComp();
1065 *
dp = fab(cell, comp);
1067 for (
int i = 0; i < n; ++i) {
1068 dp[i] = fab(cell,i);
1073 #ifdef AMREX_USE_GPU
1085 <<
": " << *
dp <<
'\n';
1087 std::ostringstream ss;
1089 for (
int i = 0; i < n-1; ++i)
1091 ss <<
dp[i] <<
", ";
1095 <<
": " << ss.str() <<
'\n';
1101 template <
class FAB,
1102 class bar = std::enable_if_t<IsBaseFab<FAB>::value> >
1109 template <
class FAB,
1110 class bar = std::enable_if_t<IsBaseFab<FAB>::value> >
1114 #ifdef AMREX_USE_GPU
1116 auto const& dstfa = dst.
arrays();
1121 dstfa[box_no](i,j,k,n+dstcomp) -= srcfa[box_no](i,j,k,n+srccomp);
1129 #ifdef AMREX_USE_OMP
1130 #pragma omp parallel if (Gpu::notInLaunchRegion())
1134 const Box& bx = mfi.growntilebox(nghost);
1137 auto const srcFab = src.
array(mfi);
1138 auto dstFab = dst.
array(mfi);
1141 dstFab(i,j,k,n+dstcomp) -= srcFab(i,j,k,n+srccomp);
1149 template <
class FAB,
1150 class bar = std::enable_if_t<IsBaseFab<FAB>::value> >
1157 template <
class FAB,
1158 class bar = std::enable_if_t<IsBaseFab<FAB>::value> >
1162 #ifdef AMREX_USE_GPU
1164 auto const& dstfa = dst.
arrays();
1169 dstfa[box_no](i,j,k,n+dstcomp) *= srcfa[box_no](i,j,k,n+srccomp);
1177 #ifdef AMREX_USE_OMP
1178 #pragma omp parallel if (Gpu::notInLaunchRegion())
1182 const Box& bx = mfi.growntilebox(nghost);
1185 auto const srcFab = src.
array(mfi);
1186 auto dstFab = dst.
array(mfi);
1189 dstFab(i,j,k,n+dstcomp) *= srcFab(i,j,k,n+srccomp);
1197 template <
class FAB,
1198 class bar = std::enable_if_t<IsBaseFab<FAB>::value> >
1205 template <
class FAB,
1206 class bar = std::enable_if_t<IsBaseFab<FAB>::value> >
1210 #ifdef AMREX_USE_GPU
1212 auto const& dstfa = dst.
arrays();
1217 dstfa[box_no](i,j,k,n+dstcomp) /= srcfa[box_no](i,j,k,n+srccomp);
1225 #ifdef AMREX_USE_OMP
1226 #pragma omp parallel if (Gpu::notInLaunchRegion())
1230 const Box& bx = mfi.growntilebox(nghost);
1233 auto const srcFab = src.
array(mfi);
1234 auto dstFab = dst.
array(mfi);
1237 dstFab(i,j,k,n+dstcomp) /= srcFab(i,j,k,n+srccomp);
1244 template <
class FAB,
1245 class bar = std::enable_if_t<IsBaseFab<FAB>::value> >
1252 template <
class FAB,
1253 class bar = std::enable_if_t<IsBaseFab<FAB>::value> >
1257 #ifdef AMREX_USE_GPU
1259 auto const& fabarr = fa.
arrays();
1263 fabarr[box_no](i,j,k,n+icomp) =
std::abs(fabarr[box_no](i,j,k,n+icomp));
1271 #ifdef AMREX_USE_OMP
1272 #pragma omp parallel if (Gpu::notInLaunchRegion())
1276 const Box& bx = mfi.growntilebox(nghost);
1279 auto const& fab = fa.
array(mfi);
1282 fab(i,j,k,n+icomp) =
std::abs(fab(i,j,k,n+icomp));
1289 template <class FAB, class bar = std::enable_if_t<IsBaseFab<FAB>::value> >
1293 #ifdef AMREX_USE_GPU
1304 template <class FAB, class bar = std::enable_if_t<IsBaseFab<FAB>::value> >
1308 #ifdef AMREX_USE_GPU
1320 template <class FAB, class IFAB, class bar = std::enable_if_t<IsBaseFab<FAB>::value
1321 && IsBaseFab<IFAB>::value> >
1332 template <class FAB, class IFAB, class bar = std::enable_if_t<IsBaseFab<FAB>::value
1333 && IsBaseFab<IFAB>::value> >
1342 const int ncomp = fa.
nComp();
1344 #ifdef AMREX_USE_GPU
1346 auto const& fabarr = fa.
arrays();
1351 if (!ifabarr[box_no](i,j,k)) { fabarr[box_no](i,j,k,n) = 0; }
1359 #ifdef AMREX_USE_OMP
1360 #pragma omp parallel if (Gpu::notInLaunchRegion())
1364 const Box& bx = mfi.tilebox();
1365 auto fab = fa.
array(mfi);
1366 auto const ifab = msk.
array(mfi);
1369 if (!ifab(i,j,k)) { fab(i,j,k,n) = 0; }
1380 template <class FAB, class bar = std::enable_if_t<IsBaseFab<FAB>::value> >
1388 fa.
os_temp->ParallelCopy_finish();
1394 template <class FAB, class foo = std::enable_if_t<IsBaseFab<FAB>::value> >
1397 int scomp,
int dcomp,
int ncomp)
1401 #ifdef AMREX_USE_GPU
1403 void*
pdst = dst[mfi].dataPtr(dcomp);
1404 void const* psrc = src[mfi].dataPtr(scomp);
1412 template <class FAB, class foo = std::enable_if_t<IsBaseFab<FAB>::value> >
1419 template <class FAB, class foo = std::enable_if_t<IsBaseFab<FAB>::value> >
1422 int scomp,
int dcomp,
int ncomp)
1426 #ifdef AMREX_USE_GPU
1428 void*
pdst = dst[mfi].dataPtr(dcomp);
1429 void const* psrc = src[mfi].dataPtr(scomp);
1437 template <class FAB, class foo = std::enable_if_t<IsBaseFab<FAB>::value> >
1444 template <class FAB, class foo = std::enable_if_t<IsBaseFab<FAB>::value> >
1447 typename FAB::value_type value)
1451 #ifdef AMREX_USE_GPU
1464 if (ma[box_no](i,j,k,comp) == value) {
1481 if (arr(i,j,k,comp) == value) {
1492 int const* tmp = aa.copyToHost();
1501 #ifdef AMREX_USE_OMP
1502 #pragma omp parallel
1508 const Box& bx = mfi.growntilebox(nghost);
1512 if (fab(i,j,k,comp) == value) {
1523 #if defined(AMREX_USE_OMP) && defined(_OPENMP) && (_OPENMP < 201307 || (defined(__NVCOMPILER) && __NVCOMPILER_MAJOR__ < 23))
1524 #pragma omp critical (amrex_indexfromvalue)
1525 #elif defined(AMREX_USE_OMP)
1526 #pragma omp atomic capture
1533 if (old ==
false) { loc = priv_loc; }
1552 template <typename FAB, std::enable_if_t<IsBaseFab<FAB>::value,
int> FOO = 0>
1553 typename FAB::value_type
1555 IntVect const& nghost,
bool local =
false)
1563 using T =
typename FAB::value_type;
1565 #ifdef AMREX_USE_GPU
1567 auto const& xma =
x.const_arrays();
1573 auto const& xfab = xma[box_no];
1574 auto const& yfab = yma[box_no];
1575 for (
int n = 0; n < ncomp; ++n) {
1576 t += xfab(i,j,k,xcomp+n) * yfab(i,j,k,ycomp+n);
1583 #ifdef AMREX_USE_OMP
1584 #pragma omp parallel if (!system::regtest_reduction) reduction(+:sm)
1588 Box const& bx = mfi.growntilebox(nghost);
1589 auto const& xfab =
x.const_array(mfi);
1593 sm += xfab(i,j,k,xcomp+n) * yfab(i,j,k,ycomp+n);
1606 template <
class MF, std::enable_if_t<IsMultiFabLike_v<MF>,
int> = 0>
1607 void setVal (MF& dst,
typename MF::value_type val)
1613 template <
class MF, std::enable_if_t<IsMultiFabLike_v<MF>,
int> = 0>
1614 void setBndry (MF& dst,
typename MF::value_type val,
int scomp,
int ncomp)
1616 dst.setBndry(val, scomp, ncomp);
1620 template <
class MF, std::enable_if_t<IsMultiFabLike_v<MF>,
int> = 0>
1621 void Scale (MF& dst,
typename MF::value_type val,
int scomp,
int ncomp,
int nghost)
1623 dst.mult(val, scomp, ncomp, nghost);
1627 template <
class DMF,
class SMF,
1628 std::enable_if_t<IsMultiFabLike_v<DMF> &&
1629 IsMultiFabLike_v<SMF>,
int> = 0>
1630 void LocalCopy (DMF& dst, SMF
const& src,
int scomp,
int dcomp,
1631 int ncomp,
IntVect const& nghost)
1633 amrex::Copy(dst, src, scomp, dcomp, ncomp, nghost);
1637 template <
class MF, std::enable_if_t<IsMultiFabLike_v<MF>,
int> = 0>
1638 void LocalAdd (MF& dst, MF
const& src,
int scomp,
int dcomp,
1639 int ncomp,
IntVect const& nghost)
1641 amrex::Add(dst, src, scomp, dcomp, ncomp, nghost);
1645 template <
class MF, std::enable_if_t<IsMultiFabLike_v<MF>,
int> = 0>
1646 void Saxpy (MF& dst,
typename MF::value_type a, MF
const& src,
int scomp,
int dcomp,
1647 int ncomp,
IntVect const& nghost)
1649 MF::Saxpy(dst, a, src, scomp, dcomp, ncomp, nghost);
1653 template <
class MF, std::enable_if_t<IsMultiFabLike_v<MF>,
int> = 0>
1654 void Xpay (MF& dst,
typename MF::value_type a, MF
const& src,
int scomp,
int dcomp,
1655 int ncomp,
IntVect const& nghost)
1657 MF::Xpay(dst, a, src, scomp, dcomp, ncomp, nghost);
1661 template <
class MF, std::enable_if_t<IsMultiFabLike_v<MF>,
int> = 0>
1663 typename MF::value_type a, MF
const& src_a,
int acomp,
1664 typename MF::value_type b, MF
const& src_b,
int bcomp,
1665 int dcomp,
int ncomp,
IntVect const& nghost)
1667 MF::LinComb(dst, a, src_a, acomp,
b, src_b, bcomp, dcomp, ncomp, nghost);
1671 template <
class MF, std::enable_if_t<IsMultiFabLike_v<MF>,
int> = 0>
1672 void ParallelCopy (MF& dst, MF
const& src,
int scomp,
int dcomp,
int ncomp,
1677 dst.ParallelCopy(src, scomp, dcomp, ncomp, ng_src, ng_dst, period);
1680 template <
class MF, std::enable_if_t<IsMultiFabLike_v<MF>,
int> = 0>
1681 [[nodiscard]]
typename MF::value_type
1685 return mf.norminf(scomp, ncomp, nghost, local);
1689 template <
class MF, std::
size_t N, std::enable_if_t<IsMultiFabLike_v<MF>,
int> = 0>
1692 for (
auto& mf: dst) {
1698 template <
class MF, std::
size_t N, std::enable_if_t<IsMultiFabLike_v<MF>,
int> = 0>
1701 for (
auto& mf : dst) {
1707 template <
class MF, std::
size_t N, std::enable_if_t<IsMultiFabLike_v<MF>,
int> = 0>
1711 for (
auto& mf : dst) {
1712 mf.
mult(val, scomp, ncomp, nghost);
1717 template <
class DMF,
class SMF, std::size_t N,
1718 std::enable_if_t<IsMultiFabLike_v<DMF> &&
1719 IsMultiFabLike_v<SMF>,
int> = 0>
1721 int ncomp,
IntVect const& nghost)
1723 for (std::size_t i = 0; i < N; ++i) {
1724 amrex::Copy(dst[i], src[i], scomp, dcomp, ncomp, nghost);
1729 template <
class MF, std::
size_t N, std::enable_if_t<IsMultiFabLike_v<MF>,
int> = 0>
1731 int ncomp,
IntVect const& nghost)
1733 for (std::size_t i = 0; i < N; ++i) {
1734 amrex::Add(dst[i], src[i], scomp, dcomp, ncomp, nghost);
1739 template <
class MF, std::
size_t N, std::enable_if_t<IsMultiFabLike_v<MF>,
int> = 0>
1741 Array<MF,N> const& src,
int scomp,
int dcomp,
int ncomp,
1744 for (std::size_t i = 0; i < N; ++i) {
1745 MF::Saxpy(dst[i], a, src[i], scomp, dcomp, ncomp, nghost);
1750 template <
class MF, std::
size_t N, std::enable_if_t<IsMultiFabLike_v<MF>,
int> = 0>
1752 Array<MF,N> const& src,
int scomp,
int dcomp,
int ncomp,
1755 for (std::size_t i = 0; i < N; ++i) {
1756 MF::Xpay(dst[i], a, src[i], scomp, dcomp, ncomp, nghost);
1761 template <
class MF, std::
size_t N, std::enable_if_t<IsMultiFabLike_v<MF>,
int> = 0>
1763 typename MF::value_type a,
Array<MF,N> const& src_a,
int acomp,
1764 typename MF::value_type b,
Array<MF,N> const& src_b,
int bcomp,
1765 int dcomp,
int ncomp,
IntVect const& nghost)
1767 for (std::size_t i = 0; i < N; ++i) {
1768 MF::LinComb(dst[i], a, src_a[i], acomp,
b, src_b[i], bcomp, dcomp, ncomp, nghost);
1773 template <
class MF, std::
size_t N, std::enable_if_t<IsMultiFabLike_v<MF>,
int> = 0>
1775 int scomp,
int dcomp,
int ncomp,
1780 for (std::size_t i = 0; i < N; ++i) {
1781 dst[i].ParallelCopy(src[i], scomp, dcomp, ncomp, ng_src, ng_dst, period);
1785 template <
class MF, std::
size_t N, std::enable_if_t<IsMultiFabLike_v<MF>,
int> = 0>
1786 [[nodiscard]]
typename MF::value_type
1790 auto r =
typename MF::value_type(0);
1791 for (std::size_t i = 0; i < N; ++i) {
1792 auto tmp = mf[i].norminf(scomp, ncomp, nghost,
true);
1801 template <
class MF, std::
size_t N, std::enable_if_t<IsMultiFabLike_v<MF> && (N > 0),
1805 return mf[0].nComp();
1808 template <
class MF, std::
size_t N, std::enable_if_t<IsMultiFabLike_v<MF> && (N > 0),
1812 return mf[0].nGrowVect();
1815 template <
class MF, std::
size_t N, std::enable_if_t<IsMultiFabLike_v<MF> && (N > 0),
1817 [[nodiscard]] BoxArray
const&
1820 return mf[0].boxArray();
1823 template <
class MF, std::
size_t N, std::enable_if_t<IsMultiFabLike_v<MF> && (N > 0),
1825 [[nodiscard]] DistributionMapping
const&
1828 return mf[0].DistributionMap();
#define BL_PROFILE(a)
Definition: AMReX_BLProfiler.H:551
#define BL_ASSERT(EX)
Definition: AMReX_BLassert.H:39
#define AMREX_ASSERT_WITH_MESSAGE(EX, MSG)
Definition: AMReX_BLassert.H:37
#define AMREX_ASSERT(EX)
Definition: AMReX_BLassert.H:38
#define AMREX_HOST_DEVICE_PARALLEL_FOR_4D(...)
Definition: AMReX_GpuLaunch.nolint.H:55
#define AMREX_GPU_DEVICE
Definition: AMReX_GpuQualifiers.H:18
#define AMREX_GPU_HOST_DEVICE
Definition: AMReX_GpuQualifiers.H:20
Real * pdst
Definition: AMReX_HypreMLABecLap.cpp:1090
#define AMREX_LOOP_3D(bx, i, j, k, block)
Definition: AMReX_Loop.nolint.H:4
#define AMREX_LOOP_4D(bx, ncomp, i, j, k, n, block)
Definition: AMReX_Loop.nolint.H:16
#define AMREX_D_TERM(a, b, c)
Definition: AMReX_SPACE.H:129
#define AMREX_D_DECL(a, b, c)
Definition: AMReX_SPACE.H:104
Print on all processors of the default communicator.
Definition: AMReX_Print.H:117
virtual bool isManaged() const
Definition: AMReX_Arena.cpp:79
virtual bool isDevice() const
Definition: AMReX_Arena.cpp:91
AMREX_GPU_HOST_DEVICE bool ok() const noexcept
Checks if it is a proper BoxND (including a valid type).
Definition: AMReX_Box.H:200
AMREX_GPU_HOST_DEVICE bool contains(const IntVectND< dim > &p) const noexcept
Returns true if argument is contained within BoxND.
Definition: AMReX_Box.H:204
IntVect nGrowVect() const noexcept
Definition: AMReX_FabArrayBase.H:79
const BoxArray & boxArray() const noexcept
Return a constant reference to the BoxArray that defines the valid region associated with this FabArr...
Definition: AMReX_FabArrayBase.H:94
bool isFusingCandidate() const noexcept
Is this a good candidate for kernel fusing?
IndexType ixType() const noexcept
Return index type.
Definition: AMReX_FabArrayBase.H:85
const DistributionMapping & DistributionMap() const noexcept
Return constant reference to associated DistributionMapping.
Definition: AMReX_FabArrayBase.H:130
@ ADD
Definition: AMReX_FabArrayBase.H:393
int nComp() const noexcept
Return number of variables (aka components) associated with each point.
Definition: AMReX_FabArrayBase.H:82
An Array of FortranArrayBox(FAB)-like Objects.
Definition: AMReX_FabArray.H:344
Array4< typename FabArray< FAB >::value_type const > const_array(const MFIter &mfi) const noexcept
Definition: AMReX_FabArray.H:1593
std::unique_ptr< FabArray< FAB > > os_temp
Definition: AMReX_FabArray.H:1412
void prefetchToDevice(const MFIter &mfi) const noexcept
Definition: AMReX_FabArray.H:1549
const FabFactory< FAB > & Factory() const noexcept
Definition: AMReX_FabArray.H:442
Array4< typename FabArray< FAB >::value_type const > array(const MFIter &mfi) const noexcept
Definition: AMReX_FabArray.H:1561
void setBndry(value_type val)
Set all values in the boundary region to val.
Definition: AMReX_FabArray.H:2298
void setVal(value_type val)
Set all components in the entire region of each FAB to val.
Definition: AMReX_FabArray.H:2497
MultiArray4< typename FabArray< FAB >::value_type > arrays() noexcept
Definition: AMReX_FabArray.H:1657
void mult(value_type val, int comp, int num_comp, int nghost=0)
Definition: AMReX_FabArray.H:2750
Arena * arena() const noexcept
Definition: AMReX_FabArray.H:445
MultiArray4< typename FabArray< FAB >::value_type const > const_arrays() const noexcept
Definition: AMReX_FabArray.H:1675
void prefetchToHost(const MFIter &mfi) const noexcept
Definition: AMReX_FabArray.H:1537
Definition: AMReX_Tuple.H:93
Definition: AMReX_GpuBuffer.H:17
T const * data() const noexcept
Definition: AMReX_GpuBuffer.H:65
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE bool cellCentered() const noexcept
True if the IndexTypeND is CELL based in all directions.
Definition: AMReX_IndexType.H:101
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE bool allGE(const IntVectND< dim > &rhs) const noexcept
Returns true if this is greater than or equal to argument for all components. NOTE: This is NOT a str...
Definition: AMReX_IntVect.H:443
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE bool allGT(const IntVectND< dim > &rhs) const noexcept
Returns true if this is greater than argument for all components. NOTE: This is NOT a strict weak ord...
Definition: AMReX_IntVect.H:418
AMREX_GPU_HOST_DEVICE static constexpr AMREX_FORCE_INLINE IntVectND< dim > TheMinVector() noexcept
Definition: AMReX_IntVect.H:720
AMREX_GPU_HOST_DEVICE static constexpr AMREX_FORCE_INLINE IntVectND< dim > TheZeroVector() noexcept
This static member function returns a reference to a constant IntVectND object, all of whose dim argu...
Definition: AMReX_IntVect.H:672
Definition: AMReX_MFIter.H:57
bool isValid() const noexcept
Is the iterator valid i.e. is it associated with a FAB?
Definition: AMReX_MFIter.H:141
Definition: AMReX_PODVector.H:246
T * data() noexcept
Definition: AMReX_PODVector.H:593
This provides length of period for periodic domains. 0 means it is not periodic in that direction....
Definition: AMReX_Periodicity.H:17
static const Periodicity & NonPeriodic() noexcept
Definition: AMReX_Periodicity.cpp:52
Print & SetPrecision(int p)
Definition: AMReX_Print.H:89
@ FAB
Definition: AMReX_AmrvisConstants.H:86
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE T Exch(T *address, T val) noexcept
Definition: AMReX_GpuAtomic.H:485
void streamSynchronize() noexcept
Definition: AMReX_GpuDevice.H:237
void dtoh_memcpy_async(void *p_h, const void *p_d, const std::size_t sz) noexcept
Definition: AMReX_GpuDevice.H:265
bool inLaunchRegion() noexcept
Definition: AMReX_GpuControl.H:86
bool inNoSyncRegion() noexcept
Definition: AMReX_GpuControl.H:146
void htod_memcpy_async(void *p_d, const void *p_h, const std::size_t sz) noexcept
Definition: AMReX_GpuDevice.H:251
void Sum(T &v, MPI_Comm comm)
Definition: AMReX_ParallelReduce.H:204
void Max(KeyValuePair< K, V > &vi, MPI_Comm comm)
Definition: AMReX_ParallelReduce.H:126
MPI_Comm CommunicatorSub() noexcept
sub-communicator for current frame
Definition: AMReX_ParallelContext.H:70
static int f(amrex::Real t, N_Vector y_data, N_Vector y_rhs, void *user_data)
Definition: AMReX_SundialsIntegrator.H:44
@ min
Definition: AMReX_ParallelReduce.H:18
@ max
Definition: AMReX_ParallelReduce.H:17
std::enable_if_t<!amrex::DefinitelyNotHostRunnable< F >::value, typename FAB::value_type > ReduceSum_host_wrapper(FabArray< FAB > const &fa, IntVect const &nghost, F &&f)
Definition: AMReX_FabArrayUtility.H:110
FAB::value_type ReduceMax_host(FabArray< FAB > const &fa, IntVect const &nghost, F const &f)
Definition: AMReX_FabArrayUtility.H:540
std::enable_if_t< IsBaseFab< FAB >::value, std::conditional_t< std::is_same< OP, ReduceOpLogicalAnd >::value||std::is_same< OP, ReduceOpLogicalOr >::value, int, typename FAB::value_type > > ReduceMF(FabArray< FAB > const &fa, IntVect const &nghost, F const &f)
Definition: AMReX_FabArrayUtility.H:50
std::enable_if_t<!amrex::DefinitelyNotHostRunnable< F >::value, typename FAB::value_type > ReduceMax_host_wrapper(FabArray< FAB > const &fa, IntVect const &nghost, F &&f)
Definition: AMReX_FabArrayUtility.H:563
std::enable_if_t<!amrex::DefinitelyNotHostRunnable< F >::value, typename FAB::value_type > ReduceMin_host_wrapper(FabArray< FAB > const &fa, IntVect const &nghost, F &&f)
Definition: AMReX_FabArrayUtility.H:336
bool ReduceLogicalAnd_host(FabArray< FAB > const &fa, IntVect const &nghost, F const &f)
Definition: AMReX_FabArrayUtility.H:767
FAB::value_type ReduceMin_host(FabArray< FAB > const &fa, IntVect const &nghost, F const &f)
Definition: AMReX_FabArrayUtility.H:314
std::enable_if_t<!amrex::DefinitelyNotHostRunnable< F >::value, bool > ReduceLogicalOr_host_wrapper(FabArray< FAB > const &fa, IntVect const &nghost, F &&f)
Definition: AMReX_FabArrayUtility.H:936
bool ReduceLogicalOr_host(FabArray< FAB > const &fa, IntVect const &nghost, F const &f)
Definition: AMReX_FabArrayUtility.H:914
std::enable_if_t<!amrex::DefinitelyNotHostRunnable< F >::value, bool > ReduceLogicalAnd_host_wrapper(FabArray< FAB > const &fa, IntVect const &nghost, F &&f)
Definition: AMReX_FabArrayUtility.H:789
FAB::value_type ReduceSum_host(FabArray< FAB > const &fa, IntVect const &nghost, F const &f)
Definition: AMReX_FabArrayUtility.H:24
Definition: AMReX_Amr.cpp:49
MF::value_type norminf(MF const &mf, int scomp, int ncomp, IntVect const &nghost, bool local=false)
Definition: AMReX_FabArrayUtility.H:1682
std::enable_if_t< std::is_integral_v< T > > ParallelFor(TypeList< CTOs... > ctos, std::array< int, sizeof...(CTOs)> const &runtime_options, T N, F &&f)
Definition: AMReX_CTOParallelForImpl.H:200
DistributionMapping const & DistributionMap(FabArrayBase const &fa)
int nComp(FabArrayBase const &fa)
FAB::value_type ReduceMax(FabArray< FAB > const &fa, int nghost, F &&f)
Definition: AMReX_FabArrayUtility.H:531
void Saxpy(MF &dst, typename MF::value_type a, MF const &src, int scomp, int dcomp, int ncomp, IntVect const &nghost)
dst += a * src
Definition: AMReX_FabArrayUtility.H:1646
void Xpay(Array< MF, N > &dst, typename MF::value_type a, Array< MF, N > const &src, int scomp, int dcomp, int ncomp, IntVect const &nghost)
dst = src + a * dst
Definition: AMReX_FabArrayUtility.H:1751
IntVect nGrowVect(FabArrayBase const &fa)
void LocalCopy(DMF &dst, SMF const &src, int scomp, int dcomp, int ncomp, IntVect const &nghost)
dst = src
Definition: AMReX_FabArrayUtility.H:1630
void printCell(FabArray< FAB > const &mf, const IntVect &cell, int comp=-1, const IntVect &ng=IntVect::TheZeroVector())
Definition: AMReX_FabArrayUtility.H:1051
bool ReduceLogicalAnd(FabArray< FAB > const &fa, int nghost, F &&f)
Definition: AMReX_FabArrayUtility.H:758
void Abs(FabArray< FAB > &fa, int icomp, int numcomp, int nghost)
Definition: AMReX_FabArrayUtility.H:1247
void LocalAdd(MF &dst, MF const &src, int scomp, int dcomp, int ncomp, IntVect const &nghost)
dst += src
Definition: AMReX_FabArrayUtility.H:1638
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > grow(const BoxND< dim > &b, int i) noexcept
Grow BoxND in all directions by given amount.
Definition: AMReX_Box.H:1211
void Copy(FabArray< DFAB > &dst, FabArray< SFAB > const &src, int srccomp, int dstcomp, int numcomp, int nghost)
Definition: AMReX_FabArray.H:179
void prefetchToDevice(FabArray< FAB > const &fa, const bool synchronous=true)
Definition: AMReX_FabArrayUtility.H:1306
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE T abs(const GpuComplex< T > &a_z) noexcept
Return the absolute value of a complex number.
Definition: AMReX_GpuComplex.H:356
void Subtract(FabArray< FAB > &dst, FabArray< FAB > const &src, int srccomp, int dstcomp, int numcomp, int nghost)
Definition: AMReX_FabArrayUtility.H:1104
void OverrideSync(FabArray< FAB > &fa, FabArray< IFAB > const &msk, const Periodicity &period)
Definition: AMReX_FabArrayUtility.H:1323
bool isMFIterSafe(const FabArrayBase &x, const FabArrayBase &y)
Definition: AMReX_MFIter.H:219
BoxArray const & boxArray(FabArrayBase const &fa)
ReduceData< Ts... >::Type ParReduce(TypeList< Ops... > operation_list, TypeList< Ts... > type_list, FabArray< FAB > const &fa, IntVect const &nghost, F &&f)
Parallel reduce for MultiFab/FabArray.
Definition: AMReX_ParReduce.H:47
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > makeSingleCellBox(int i, int j, int k, IndexTypeND< dim > typ=IndexTypeND< dim >::TheCellType())
Definition: AMReX_Box.H:2009
bool ReduceLogicalOr(FabArray< FAB > const &fa, int nghost, F &&f)
Definition: AMReX_FabArrayUtility.H:905
IntVectND< AMREX_SPACEDIM > IntVect
Definition: AMReX_BaseFwd.H:30
void LinComb(MF &dst, typename MF::value_type a, MF const &src_a, int acomp, typename MF::value_type b, MF const &src_b, int bcomp, int dcomp, int ncomp, IntVect const &nghost)
dst = a*src_a + b*src_b
Definition: AMReX_FabArrayUtility.H:1662
FAB::value_type ReduceMin(FabArray< FAB > const &fa, int nghost, F &&f)
Definition: AMReX_FabArrayUtility.H:305
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void ignore_unused(const Ts &...)
This shuts up the compiler about unused variables.
Definition: AMReX.H:111
void dtoh_memcpy(FabArray< FAB > &dst, FabArray< FAB > const &src, int scomp, int dcomp, int ncomp)
Definition: AMReX_FabArrayUtility.H:1396
void setBndry(MF &dst, typename MF::value_type val, int scomp, int ncomp)
dst = val in ghost cells.
Definition: AMReX_FabArrayUtility.H:1614
void single_task(L &&f) noexcept
Definition: AMReX_GpuLaunchFunctsC.H:1307
void Divide(FabArray< FAB > &dst, FabArray< FAB > const &src, int srccomp, int dstcomp, int numcomp, int nghost)
Definition: AMReX_FabArrayUtility.H:1200
void Xpay(MF &dst, typename MF::value_type a, MF const &src, int scomp, int dcomp, int ncomp, IntVect const &nghost)
dst = src + a * dst
Definition: AMReX_FabArrayUtility.H:1654
void Scale(MF &dst, typename MF::value_type val, int scomp, int ncomp, int nghost)
dst *= val
Definition: AMReX_FabArrayUtility.H:1621
void prefetchToHost(FabArray< FAB > const &fa, const bool synchronous=true)
Definition: AMReX_FabArrayUtility.H:1291
bool TilingIfNotGPU() noexcept
Definition: AMReX_MFIter.H:12
void Saxpy(Array< MF, N > &dst, typename MF::value_type a, Array< MF, N > const &src, int scomp, int dcomp, int ncomp, IntVect const &nghost)
dst += a * src
Definition: AMReX_FabArrayUtility.H:1740
void Add(FabArray< FAB > &dst, FabArray< FAB > const &src, int srccomp, int dstcomp, int numcomp, int nghost)
Definition: AMReX_FabArray.H:240
void Abort(const std::string &msg)
Print out message to cerr and exit via abort().
Definition: AMReX.cpp:225
const int[]
Definition: AMReX_BLProfiler.cpp:1664
void OverrideSync_finish(FabArray< FAB > &fa)
Definition: AMReX_FabArrayUtility.H:1382
FAB::value_type Dot(FabArray< FAB > const &x, int xcomp, FabArray< FAB > const &y, int ycomp, int ncomp, IntVect const &nghost, bool local=false)
Compute dot products of two FabArrays.
Definition: AMReX_FabArrayUtility.H:1554
void ParallelCopy(MF &dst, MF const &src, int scomp, int dcomp, int ncomp, IntVect const &ng_src=IntVect(0), IntVect const &ng_dst=IntVect(0), Periodicity const &period=Periodicity::NonPeriodic())
dst = src w/ MPI communication
Definition: AMReX_FabArrayUtility.H:1672
void htod_memcpy(FabArray< FAB > &dst, FabArray< FAB > const &src, int scomp, int dcomp, int ncomp)
Definition: AMReX_FabArrayUtility.H:1421
void setVal(MF &dst, typename MF::value_type val)
dst = val
Definition: AMReX_FabArrayUtility.H:1607
void Multiply(FabArray< FAB > &dst, FabArray< FAB > const &src, int srccomp, int dstcomp, int numcomp, int nghost)
Definition: AMReX_FabArrayUtility.H:1152
FAB::value_type ReduceSum(FabArray< FAB > const &fa, int nghost, F &&f)
Definition: AMReX_FabArrayUtility.H:16
IntVect indexFromValue(FabArray< FAB > const &mf, int comp, IntVect const &nghost, typename FAB::value_type value)
Definition: AMReX_FabArrayUtility.H:1446
void LinComb(Array< MF, N > &dst, typename MF::value_type a, Array< MF, N > const &src_a, int acomp, typename MF::value_type b, Array< MF, N > const &src_b, int bcomp, int dcomp, int ncomp, IntVect const &nghost)
dst = a*src_a + b*src_b
Definition: AMReX_FabArrayUtility.H:1762
void OverrideSync_nowait(FabArray< FAB > &fa, FabArray< IFAB > const &msk, const Periodicity &period)
Definition: AMReX_FabArrayUtility.H:1335
std::array< T, N > Array
Definition: AMReX_Array.H:24
integer, parameter dp
Definition: AMReX_SDCquadrature.F90:8
Definition: AMReX_TypeTraits.H:18
FabArray memory allocation information.
Definition: AMReX_FabArray.H:66
Definition: AMReX_MFIter.H:20
Struct for holding types.
Definition: AMReX_TypeList.H:12