Block-Structured AMR Software Framework

AMReX_MultiFabUtil.H
#ifndef AMREX_MultiFabUtil_H_
#define AMREX_MultiFabUtil_H_
#include <AMReX_Config.H>

#include <AMReX_MultiFab.H>
#include <AMReX_iMultiFab.H>
#include <AMReX_LayoutData.H>
#include <AMReX_MFIter.H>
#include <AMReX_Array.H>
#include <AMReX_Vector.H>

#include <AMReX_MultiFabUtilI.H>

namespace amrex
{
    //! Average nodal-based MultiFab onto cell-centered MultiFab.
    void average_node_to_cellcenter (MultiFab& cc, int dcomp,
                                     const MultiFab& nd, int scomp,
                                     int ncomp, int ngrow = 0);

    //! Average edge-based MultiFab onto cell-centered MultiFab.
    void average_edge_to_cellcenter (MultiFab& cc, int dcomp,
                                     const Vector<const MultiFab*>& edge,
                                     int ngrow = 0);

    //! Average face-based MultiFab onto cell-centered MultiFab.
    void average_face_to_cellcenter (MultiFab& cc, int dcomp,
                                     const Vector<const MultiFab*>& fc,
                                     int ngrow = 0);

    template <typename CMF, typename FMF,
              std::enable_if_t<IsFabArray_v<CMF> && IsFabArray_v<FMF>, int> = 0>
    void average_face_to_cellcenter (CMF& cc, int dcomp,
                                     const Array<const FMF*,AMREX_SPACEDIM>& fc,
                                     int ngrow = 0);

    void average_face_to_cellcenter (MultiFab& cc,
                                     const Vector<const MultiFab*>& fc,
                                     const Geometry& geom);

    void average_face_to_cellcenter (MultiFab& cc,
                                     const Array<const MultiFab*,AMREX_SPACEDIM>& fc,
                                     const Geometry& geom);

    //! Average cell-centered MultiFab onto face-based MultiFab with geometric weighting.
    void average_cellcenter_to_face (const Vector<MultiFab*>& fc,
                                     const MultiFab& cc,
                                     const Geometry& geom,
                                     int ncomp = 1,
                                     bool use_harmonic_averaging = false);

    void average_cellcenter_to_face (const Array<MultiFab*,AMREX_SPACEDIM>& fc,
                                     const MultiFab& cc,
                                     const Geometry& geom,
                                     int ncomp = 1,
                                     bool use_harmonic_averaging = false);

    //! Average fine face-based FabArray onto crse face-based FabArray.
    template <typename MF, std::enable_if_t<IsFabArray<MF>::value,int> = 0>
    void average_down_faces (const Vector<const MF*>& fine,
                             const Vector<MF*>& crse,
                             const IntVect& ratio,
                             int ngcrse = 0);

    template <typename MF, std::enable_if_t<IsFabArray<MF>::value,int> = 0>
    void average_down_faces (const Vector<const MF*>& fine,
                             const Vector<MF*>& crse,
                             int ratio,
                             int ngcrse = 0);

    template <typename MF, std::enable_if_t<IsFabArray<MF>::value,int> = 0>
    void average_down_faces (const Array<const MF*,AMREX_SPACEDIM>& fine,
                             const Array<MF*,AMREX_SPACEDIM>& crse,
                             const IntVect& ratio,
                             int ngcrse = 0);

    template <typename MF, std::enable_if_t<IsFabArray<MF>::value,int> = 0>
    void average_down_faces (const Array<const MF*,AMREX_SPACEDIM>& fine,
                             const Array<MF*,AMREX_SPACEDIM>& crse,
                             int ratio,
                             int ngcrse = 0);

    template <typename FAB>
    void average_down_faces (const FabArray<FAB>& fine, FabArray<FAB>& crse,
                             const IntVect& ratio, int ngcrse=0);

    // This version takes periodicity into account.
    template <typename MF, std::enable_if_t<IsFabArray<MF>::value,int> = 0>
    void average_down_faces (const Array<const MF*,AMREX_SPACEDIM>& fine,
                             const Array<MF*,AMREX_SPACEDIM>& crse,
                             const IntVect& ratio, const Geometry& crse_geom);

    // This version takes periodicity into account.
    template <typename FAB>
    void average_down_faces (const FabArray<FAB>& fine, FabArray<FAB>& crse,
                             const IntVect& ratio, const Geometry& crse_geom);

    //! Average fine edge-based MultiFab onto crse edge-based MultiFab.
    void average_down_edges (const Vector<const MultiFab*>& fine,
                             const Vector<MultiFab*>& crse,
                             const IntVect& ratio,
                             int ngcrse = 0);

    void average_down_edges (const Array<const MultiFab*,AMREX_SPACEDIM>& fine,
                             const Array<MultiFab*,AMREX_SPACEDIM>& crse,
                             const IntVect& ratio,
                             int ngcrse = 0);

    void average_down_edges (const MultiFab& fine, MultiFab& crse,
                             const IntVect& ratio, int ngcrse=0);

    //! Average fine node-based MultiFab onto crse node-centered MultiFab.
    template <typename FAB>
    void average_down_nodal (const FabArray<FAB>& S_fine,
                             FabArray<FAB>& S_crse,
                             const IntVect& ratio,
                             int ngcrse = 0,
                             bool mfiter_is_definitely_safe=false);

    //! Average fine cell-based MultiFab onto crse cell-centered MultiFab.
    void average_down (const MultiFab& S_fine, MultiFab& S_crse,
                       const Geometry& fgeom, const Geometry& cgeom,
                       int scomp, int ncomp, const IntVect& ratio);
    void average_down (const MultiFab& S_fine, MultiFab& S_crse,
                       const Geometry& fgeom, const Geometry& cgeom,
                       int scomp, int ncomp, int rr);

    //! Average fine cell-based FabArray onto crse cell-centered FabArray without volume weighting.
    template<typename FAB>
    void average_down (const FabArray<FAB>& S_fine, FabArray<FAB>& S_crse,
                       int scomp, int ncomp, const IntVect& ratio);
    template<typename FAB>
    void average_down (const FabArray<FAB>& S_fine, FabArray<FAB>& S_crse,
                       int scomp, int ncomp, int rr);

    void sum_fine_to_coarse (const MultiFab& S_fine, MultiFab& S_crse,
                             int scomp, int ncomp,
                             const IntVect& ratio,
                             const Geometry& cgeom, const Geometry& fgeom);

    //! Output state data for a single zone.
    void print_state (const MultiFab& mf, const IntVect& cell, int n=-1,
                      const IntVect& ng = IntVect::TheZeroVector());

    //! Write each fab individually.
    void writeFabs (const MultiFab& mf, const std::string& name);
    void writeFabs (const MultiFab& mf, int comp, int ncomp, const std::string& name);

    std::unique_ptr<MultiFab> get_slice_data(int dir, Real coord,
                                             const MultiFab& cc,
                                             const Geometry& geom, int start_comp, int ncomp,
                                             bool interpolate=false,
                                             RealBox const& bnd_rbx = RealBox());

    //! Get data in a cell of MultiFab/FabArray.
    template <typename MF, std::enable_if_t<IsFabArray<MF>::value,int> FOO = 0>
    Vector<typename MF::value_type> get_cell_data (MF const& mf, IntVect const& cell);

    //! Get data in a line of MultiFab/FabArray.
    template <typename MF, std::enable_if_t<IsFabArray<MF>::value,int> FOO = 0>
    MF get_line_data (MF const& mf, int dir, IntVect const& cell, Box const& bnd_bx = Box());

    template <typename FAB>
    iMultiFab makeFineMask (const FabArray<FAB>& cmf, const BoxArray& fba, const IntVect& ratio,
                            int crse_value = 0, int fine_value = 1);
    iMultiFab makeFineMask (const BoxArray& cba, const DistributionMapping& cdm,
                            const BoxArray& fba, const IntVect& ratio,
                            int crse_value = 0, int fine_value = 1);
    template <typename FAB>
    iMultiFab makeFineMask (const FabArray<FAB>& cmf, const BoxArray& fba, const IntVect& ratio,
                            Periodicity const& period, int crse_value, int fine_value);
    iMultiFab makeFineMask (const BoxArray& cba, const DistributionMapping& cdm,
                            const IntVect& cnghost, const BoxArray& fba, const IntVect& ratio,
                            Periodicity const& period, int crse_value, int fine_value);
    template <typename FAB>
    iMultiFab makeFineMask (const FabArray<FAB>& cmf, const FabArray<FAB>& fmf,
                            const IntVect& cnghost, const IntVect& ratio,
                            Periodicity const& period, int crse_value, int fine_value);
    template <typename FAB>
    iMultiFab makeFineMask (const FabArray<FAB>& cmf, const FabArray<FAB>& fmf,
                            const IntVect& cnghost, const IntVect& ratio,
                            Periodicity const& period, int crse_value, int fine_value,
                            LayoutData<int>& has_cf);

    MultiFab makeFineMask (const BoxArray& cba, const DistributionMapping& cdm,
                           const BoxArray& fba, const IntVect& ratio,
                           Real crse_value, Real fine_value);

    //! Computes divergence of face-data stored in the umac MultiFab.
    void computeDivergence (MultiFab& divu, const Array<MultiFab const*,AMREX_SPACEDIM>& umac,
                            const Geometry& geom);

    //! Computes gradient of face-data stored in the umac MultiFab.
    void computeGradient (MultiFab& grad, const Array<MultiFab const*,AMREX_SPACEDIM>& umac,
                          const Geometry& geom);

    //! Convert iMultiFab to MultiFab.
    MultiFab ToMultiFab (const iMultiFab& imf);
    //! Convert iMultiFab to FabArray<BaseFab<Long>>.
    FabArray<BaseFab<Long> > ToLongMultiFab (const iMultiFab& imf);

    //! Periodic shift MultiFab.
    MultiFab periodicShift (MultiFab const& mf, IntVect const& offset,
                            Periodicity const& period);

    //! Cast a FabArray of one value type to a FabArray of another.
    template <typename T, typename U>
    T cast (U const& mf_in)
    {
        T mf_out(mf_in.boxArray(), mf_in.DistributionMap(), mf_in.nComp(), mf_in.nGrowVect());

#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
        for (MFIter mfi(mf_in); mfi.isValid(); ++mfi)
        {
            const Long n = mfi.fabbox().numPts() * mf_in.nComp();
            auto * pdst = mf_out[mfi].dataPtr();
            auto const* psrc = mf_in [mfi].dataPtr();
            AMREX_HOST_DEVICE_PARALLEL_FOR_1D (n, i,
            {
                pdst[i] = static_cast<typename T::value_type>(psrc[i]); // NOLINT(bugprone-signed-char-misuse)
            });
        }
        return mf_out;
    }
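
    // Example (a sketch; "imf" stands for an existing iMultiFab):
    //     MultiFab mf = amrex::cast<MultiFab>(imf);
    // This creates a MultiFab with the same BoxArray, DistributionMapping,
    // component count, and ghost cells as imf, converting each value
    // element by element.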

    //! Reduce FabArray/MultiFab data to a plane Fab.
    template <typename Op, typename T, typename FAB, typename F,
              std::enable_if_t<IsBaseFab<FAB>::value
#ifndef AMREX_USE_CUDA
                               && IsCallableR<T,F,int,int,int,int>::value
#endif
                               , int> FOO = 0>
    BaseFab<T>
    ReduceToPlane (int direction, Box const& domain, FabArray<FAB> const& mf, F const& f);

    //! Reduce FabArray/MultiFab data to a plane FabArray.
    template <typename Op, typename FA, typename F,
              std::enable_if_t<IsMultiFabLike_v<FA>
#ifndef AMREX_USE_CUDA
                               && IsCallableR<typename FA::value_type,
                                              F,int,int,int,int>::value
#endif
                               , int> FOO = 0>
    FA ReduceToPlaneMF (int direction, Box const& domain, FA const& mf, F const& f);

    //! Reduce FabArray/MultiFab data to a plane FabArray.
    template <typename Op, typename FA, typename F,
              std::enable_if_t<IsMultiFabLike_v<FA>
#ifndef AMREX_USE_CUDA
                               && IsCallableR<typename FA::value_type,
                                              F,int,int,int,int>::value
#endif
                               , int> FOO = 0>
    std::pair<FA,FA>
    ReduceToPlaneMF2 (int direction, Box const& domain, FA const& mf, F const& f);

    //! Sum MultiFab data to line.
    Gpu::HostVector<Real> sumToLine (MultiFab const& mf, int icomp, int ncomp,
                                     Box const& domain, int direction, bool local = false);

    //! Volume weighted sum for a vector of MultiFabs.
    Real volumeWeightedSum (Vector<MultiFab const*> const& mf, int icomp,
                            Vector<Geometry> const& geom,
                            Vector<IntVect> const& ratio,
                            bool local = false);

    //! Fourth-order interpolation from fine to coarse level.
    void FourthOrderInterpFromFineToCoarse (MultiFab& cmf, int scomp, int ncomp,
                                            MultiFab const& fmf,
                                            IntVect const& ratio);

    //! Fill MultiFab with random numbers from uniform distribution.
    void FillRandom (MultiFab& mf, int scomp, int ncomp);

    //! Fill MultiFab with random numbers from normal distribution.
    void FillRandomNormal (MultiFab& mf, int scomp, int ncomp, Real mean, Real stddev);

    //! Convexify AMR data.
    [[nodiscard]] Vector<MultiFab> convexify (Vector<MultiFab const*> const& mf,
                                              Vector<IntVect> const& refinement_ratio);
}

namespace amrex {

template <typename FAB>
iMultiFab
makeFineMask (const FabArray<FAB>& cmf, const BoxArray& fba, const IntVect& ratio,
              int crse_value, int fine_value)
{
    return makeFineMask(cmf.boxArray(), cmf.DistributionMap(), cmf.nGrowVect(),
                        fba, ratio, Periodicity::NonPeriodic(), crse_value, fine_value);
}

template <typename FAB>
iMultiFab
makeFineMask (const FabArray<FAB>& cmf, const BoxArray& fba, const IntVect& ratio,
              Periodicity const& period, int crse_value, int fine_value)
{
    return makeFineMask(cmf.boxArray(), cmf.DistributionMap(), cmf.nGrowVect(),
                        fba, ratio, period, crse_value, fine_value);
}

template <typename FAB>
iMultiFab
makeFineMask (const FabArray<FAB>& cmf, const FabArray<FAB>& fmf,
              const IntVect& cnghost, const IntVect& ratio,
              Periodicity const& period, int crse_value, int fine_value)
{
    iMultiFab mask(cmf.boxArray(), cmf.DistributionMap(), 1, cnghost);
    mask.setVal(crse_value);

    iMultiFab foo(amrex::coarsen(fmf.boxArray(),ratio), fmf.DistributionMap(),
                  1, 0, MFInfo().SetAlloc(false));
    const FabArrayBase::CPC& cpc = mask.getCPC(cnghost,foo,IntVect::TheZeroVector(),period);
    mask.setVal(fine_value, cpc, 0, 1);

    return mask;
}
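
// Example (a sketch): given coarse-level data "S_crse" and the fine-level
// BoxArray "fba" at refinement ratio 2, the call
//     iMultiFab m = amrex::makeFineMask(S_crse, fba, amrex::IntVect(2));
// returns a mask on S_crse's layout holding 1 (fine_value) in cells covered
// by the coarsened fine grids and 0 (crse_value) elsewhere.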

template <typename FAB>
iMultiFab
makeFineMask (const FabArray<FAB>& cmf, const FabArray<FAB>& fmf,
              const IntVect& cnghost, const IntVect& ratio,
              Periodicity const& period, int crse_value, int fine_value,
              LayoutData<int>& has_cf)
{
    iMultiFab mask(cmf.boxArray(), cmf.DistributionMap(), 1, cnghost);
    mask.setVal(crse_value);

    iMultiFab foo(amrex::coarsen(fmf.boxArray(),ratio), fmf.DistributionMap(),
                  1, 0, MFInfo().SetAlloc(false));
    const FabArrayBase::CPC& cpc = mask.getCPC(cnghost,foo,IntVect::TheZeroVector(),period);
    mask.setVal(fine_value, cpc, 0, 1);

    has_cf = mask.RecvLayoutMask(cpc);

    return mask;
}

//! Average fine node-based MultiFab onto crse node-centered MultiFab.
template <typename FAB>
void average_down_nodal (const FabArray<FAB>& fine, FabArray<FAB>& crse,
                         const IntVect& ratio, int ngcrse, bool mfiter_is_definitely_safe)
{
    AMREX_ASSERT(fine.is_nodal());
    AMREX_ASSERT(crse.is_nodal());
    AMREX_ASSERT(crse.nComp() == fine.nComp());

    int ncomp = crse.nComp();
    using value_type = typename FAB::value_type;

    if (mfiter_is_definitely_safe || isMFIterSafe(fine, crse))
    {
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
        for (MFIter mfi(crse,TilingIfNotGPU()); mfi.isValid(); ++mfi)
        {
            const Box& bx = mfi.growntilebox(ngcrse);
            Array4<value_type> const& crsearr = crse.array(mfi);
            Array4<value_type const> const& finearr = fine.const_array(mfi);

            AMREX_LAUNCH_HOST_DEVICE_LAMBDA (bx, tbx,
            {
                amrex_avgdown_nodes(tbx,crsearr,finearr,0,0,ncomp,ratio);
            });
        }
    }
    else
    {
        FabArray<FAB> ctmp(amrex::coarsen(fine.boxArray(),ratio), fine.DistributionMap(),
                           ncomp, ngcrse);
        average_down_nodal(fine, ctmp, ratio, ngcrse);
        crse.ParallelCopy(ctmp,0,0,ncomp,ngcrse,ngcrse);
    }
}

// *************************************************************************************************************

// Average fine cell-based MultiFab onto crse cell-centered MultiFab.
// We do NOT assume that the coarse layout is a coarsened version of the fine layout.
// This version does NOT use volume weighting.
template<typename FAB>
void average_down (const FabArray<FAB>& S_fine, FabArray<FAB>& S_crse, int scomp, int ncomp, int rr)
{
    average_down(S_fine,S_crse,scomp,ncomp,rr*IntVect::TheUnitVector());
}

template<typename FAB>
void average_down (const FabArray<FAB>& S_fine, FabArray<FAB>& S_crse,
                   int scomp, int ncomp, const IntVect& ratio)
{
    BL_PROFILE("amrex::average_down");
    AMREX_ASSERT(S_crse.nComp() == S_fine.nComp());
    AMREX_ASSERT((S_crse.is_cell_centered() && S_fine.is_cell_centered()) ||
                 (S_crse.is_nodal() && S_fine.is_nodal()));

    using value_type = typename FAB::value_type;

    bool is_cell_centered = S_crse.is_cell_centered();

    //
    // Coarsen() the fine stuff on processors owning the fine data.
    //
    BoxArray crse_S_fine_BA = S_fine.boxArray(); crse_S_fine_BA.coarsen(ratio);

    if (crse_S_fine_BA == S_crse.boxArray() && S_fine.DistributionMap() == S_crse.DistributionMap())
    {
#ifdef AMREX_USE_GPU
        if (Gpu::inLaunchRegion() && S_crse.isFusingCandidate()) {
            auto const& crsema = S_crse.arrays();
            auto const& finema = S_fine.const_arrays();
            if (is_cell_centered) {
                ParallelFor(S_crse, IntVect(0), ncomp,
                [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k, int n) noexcept
                {
                    amrex_avgdown(i,j,k,n,crsema[box_no],finema[box_no],scomp,scomp,ratio);
                });
            } else {
                ParallelFor(S_crse, IntVect(0), ncomp,
                [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k, int n) noexcept
                {
                    amrex_avgdown_nodes(i,j,k,n,crsema[box_no],finema[box_no],scomp,scomp,ratio);
                });
            }
            if (!Gpu::inNoSyncRegion()) {
                Gpu::streamSynchronize();
            }
        } else
#endif
        {
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
            for (MFIter mfi(S_crse,TilingIfNotGPU()); mfi.isValid(); ++mfi)
            {
                // NOTE: The tilebox is defined at the coarse level.
                const Box& bx = mfi.tilebox();
                Array4<value_type> const& crsearr = S_crse.array(mfi);
                Array4<value_type const> const& finearr = S_fine.const_array(mfi);

                if (is_cell_centered) {
                    AMREX_HOST_DEVICE_PARALLEL_FOR_4D(bx, ncomp, i, j, k, n,
                    {
                        amrex_avgdown(i,j,k,n,crsearr,finearr,scomp,scomp,ratio);
                    });
                } else {
                    AMREX_HOST_DEVICE_PARALLEL_FOR_4D(bx, ncomp, i, j, k, n,
                    {
                        amrex_avgdown_nodes(i,j,k,n,crsearr,finearr,scomp,scomp,ratio);
                    });
                }
            }
        }
    }
    else
    {
        FabArray<FAB> crse_S_fine(crse_S_fine_BA, S_fine.DistributionMap(), ncomp, 0, MFInfo(), DefaultFabFactory<FAB>());

#ifdef AMREX_USE_GPU
        if (Gpu::inLaunchRegion() && crse_S_fine.isFusingCandidate()) {
            auto const& crsema = crse_S_fine.arrays();
            auto const& finema = S_fine.const_arrays();
            if (is_cell_centered) {
                ParallelFor(crse_S_fine, IntVect(0), ncomp,
                [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k, int n) noexcept
                {
                    amrex_avgdown(i,j,k,n,crsema[box_no],finema[box_no],0,scomp,ratio);
                });
            } else {
                ParallelFor(crse_S_fine, IntVect(0), ncomp,
                [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k, int n) noexcept
                {
                    amrex_avgdown_nodes(i,j,k,n,crsema[box_no],finema[box_no],0,scomp,ratio);
                });
            }
            if (!Gpu::inNoSyncRegion()) {
                Gpu::streamSynchronize();
            }
        } else
#endif
        {
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
            for (MFIter mfi(crse_S_fine,TilingIfNotGPU()); mfi.isValid(); ++mfi)
            {
                // NOTE: The tilebox is defined at the coarse level.
                const Box& bx = mfi.tilebox();
                Array4<value_type> const& crsearr = crse_S_fine.array(mfi);
                Array4<value_type const> const& finearr = S_fine.const_array(mfi);

                // NOTE: We copy from component scomp of the fine fab into component 0
                // of the crse fab because the crse fab is a temporary that was made
                // starting at comp 0; it is not part of the actual crse multifab that
                // came in.

                if (is_cell_centered) {
                    AMREX_HOST_DEVICE_PARALLEL_FOR_4D(bx, ncomp, i, j, k, n,
                    {
                        amrex_avgdown(i,j,k,n,crsearr,finearr,0,scomp,ratio);
                    });
                } else {
                    AMREX_HOST_DEVICE_PARALLEL_FOR_4D(bx, ncomp, i, j, k, n,
                    {
                        amrex_avgdown_nodes(i,j,k,n,crsearr,finearr,0,scomp,ratio);
                    });
                }
            }
        }

        S_crse.ParallelCopy(crse_S_fine,0,scomp,ncomp);
    }
}
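
// Example (a sketch): average all components of a fine-level MultiFab onto the
// coarse level at a refinement ratio of 2; "S_fine" and "S_crse" are
// hypothetical level data on their own layouts:
//     amrex::average_down(S_fine, S_crse, 0, S_crse.nComp(), 2);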


//! Returns part of a norm based on two MultiFabs.
template <typename F>
Real
NormHelper (const MultiFab& x, int xcomp,
            const MultiFab& y, int ycomp,
            F const& f,
            int numcomp, IntVect nghost, bool local)
{
    BL_ASSERT(x.boxArray() == y.boxArray());
    BL_ASSERT(x.DistributionMap() == y.DistributionMap());
    BL_ASSERT(x.nGrowVect().allGE(nghost) && y.nGrowVect().allGE(nghost));

    Real sm = Real(0.0);
#ifdef AMREX_USE_GPU
    if (Gpu::inLaunchRegion()) {
        auto const& xma = x.const_arrays();
        auto const& yma = y.const_arrays();
        sm = ParReduce(TypeList<ReduceOpSum>{}, TypeList<Real>{}, x, nghost,
        [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k) noexcept -> GpuTuple<Real>
        {
            Real t = Real(0.0);
            auto const& xfab = xma[box_no];
            auto const& yfab = yma[box_no];
            for (int n = 0; n < numcomp; ++n) {
                t += f(xfab(i,j,k,xcomp+n) , yfab(i,j,k,ycomp+n));
            }
            return t;
        });
    } else
#endif
    {
#ifdef AMREX_USE_OMP
#pragma omp parallel if (!system::regtest_reduction) reduction(+:sm)
#endif
        for (MFIter mfi(x,true); mfi.isValid(); ++mfi)
        {
            Box const& bx = mfi.growntilebox(nghost);
            Array4<Real const> const& xfab = x.const_array(mfi);
            Array4<Real const> const& yfab = y.const_array(mfi);
            AMREX_LOOP_4D(bx, numcomp, i, j, k, n,
            {
                sm += f(xfab(i,j,k,xcomp+n) , yfab(i,j,k,ycomp+n));
            });
        }
    }

    if (!local) {
        ParallelAllReduce::Sum(sm, ParallelContext::CommunicatorSub());
    }

    return sm;
}
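
// Example (a sketch): the dot product of one component of x and y over valid
// cells, with "x" and "y" standing for compatible MultiFabs:
//     Real d = amrex::NormHelper(x, 0, y, 0,
//                  [=] AMREX_GPU_HOST_DEVICE (Real a, Real b) { return a*b; },
//                  1, amrex::IntVect(0), false);
// With local=false the result is already summed across MPI ranks.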

//! Returns part of a norm based on two MultiFabs, restricted by a mask.
template <typename MMF, typename Pred, typename F>
Real
NormHelper (const MMF& mask,
            const MultiFab& x, int xcomp,
            const MultiFab& y, int ycomp,
            Pred const& pf,
            F const& f,
            int numcomp, IntVect nghost, bool local)
{
    BL_ASSERT(x.boxArray() == y.boxArray());
    BL_ASSERT(x.boxArray() == mask.boxArray());
    BL_ASSERT(x.DistributionMap() == y.DistributionMap());
    BL_ASSERT(x.DistributionMap() == mask.DistributionMap());
    BL_ASSERT(x.nGrowVect().allGE(nghost) && y.nGrowVect().allGE(nghost));
    BL_ASSERT(mask.nGrowVect().allGE(nghost));

    Real sm = Real(0.0);
#ifdef AMREX_USE_GPU
    if (Gpu::inLaunchRegion()) {
        auto const& xma = x.const_arrays();
        auto const& yma = y.const_arrays();
        auto const& mma = mask.const_arrays();
        sm = ParReduce(TypeList<ReduceOpSum>{}, TypeList<Real>{}, x, nghost,
        [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k) noexcept -> GpuTuple<Real>
        {
            Real t = Real(0.0);
            if (pf(mma[box_no](i,j,k))) {
                auto const& xfab = xma[box_no];
                auto const& yfab = yma[box_no];
                for (int n = 0; n < numcomp; ++n) {
                    t += f(xfab(i,j,k,xcomp+n) , yfab(i,j,k,ycomp+n));
                }
            }
            return t;
        });
    } else
#endif
    {
#ifdef AMREX_USE_OMP
#pragma omp parallel if (!system::regtest_reduction) reduction(+:sm)
#endif
        for (MFIter mfi(x,true); mfi.isValid(); ++mfi)
        {
            Box const& bx = mfi.growntilebox(nghost);
            Array4<Real const> const& xfab = x.const_array(mfi);
            Array4<Real const> const& yfab = y.const_array(mfi);
            auto const& mfab = mask.const_array(mfi);
            AMREX_LOOP_4D(bx, numcomp, i, j, k, n,
            {
                if (pf(mfab(i,j,k))) {
                    sm += f(xfab(i,j,k,xcomp+n) , yfab(i,j,k,ycomp+n));
                }
            });
        }
    }

    if (!local) {
        ParallelAllReduce::Sum(sm, ParallelContext::CommunicatorSub());
    }

    return sm;
}
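
// Example (a sketch): as above, but only counting cells where a hypothetical
// iMultiFab "msk" equals 1:
//     Real d = amrex::NormHelper(msk, x, 0, y, 0,
//                  [=] AMREX_GPU_HOST_DEVICE (int m) { return m == 1; },
//                  [=] AMREX_GPU_HOST_DEVICE (Real a, Real b) { return a*b; },
//                  1, amrex::IntVect(0), false);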

template <typename CMF, typename FMF,
          std::enable_if_t<IsFabArray_v<CMF> && IsFabArray_v<FMF>, int> FOO>
void average_face_to_cellcenter (CMF& cc, int dcomp,
                                 const Array<const FMF*,AMREX_SPACEDIM>& fc,
                                 int ngrow)
{
    AMREX_ASSERT(cc.nComp() >= dcomp + AMREX_SPACEDIM);
    AMREX_ASSERT(fc[0]->nComp() == 1);

#ifdef AMREX_USE_GPU
    if (Gpu::inLaunchRegion() && cc.isFusingCandidate()) {
        auto const& ccma = cc.arrays();
        AMREX_D_TERM(auto const& fxma = fc[0]->const_arrays();,
                     auto const& fyma = fc[1]->const_arrays();,
                     auto const& fzma = fc[2]->const_arrays(););
        ParallelFor(cc, IntVect(ngrow),
        [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k) noexcept
        {
#if (AMREX_SPACEDIM == 1)
            GeometryData gd{};
            gd.coord = 0;
#endif
            amrex_avg_fc_to_cc(i,j,k, ccma[box_no], AMREX_D_DECL(fxma[box_no],
                                                                 fyma[box_no],
                                                                 fzma[box_no]),
                               dcomp
#if (AMREX_SPACEDIM == 1)
                               , gd
#endif
                               );
        });
        if (!Gpu::inNoSyncRegion()) {
            Gpu::streamSynchronize();
        }
    } else
#endif
    {
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
        for (MFIter mfi(cc,TilingIfNotGPU()); mfi.isValid(); ++mfi)
        {
            const Box bx = mfi.growntilebox(ngrow);
            auto const& ccarr = cc.array(mfi);
            AMREX_D_TERM(auto const& fxarr = fc[0]->const_array(mfi);,
                         auto const& fyarr = fc[1]->const_array(mfi);,
                         auto const& fzarr = fc[2]->const_array(mfi););

#if (AMREX_SPACEDIM == 1)
            AMREX_HOST_DEVICE_PARALLEL_FOR_3D(bx, i, j, k,
            {
                GeometryData gd;
                gd.coord = 0;
                amrex_avg_fc_to_cc(i,j,k, ccarr, fxarr, dcomp, gd);
            });
#else
            AMREX_HOST_DEVICE_PARALLEL_FOR_3D(bx, i, j, k,
            {
                amrex_avg_fc_to_cc(i,j,k, ccarr, AMREX_D_DECL(fxarr,fyarr,fzarr), dcomp);
            });
#endif
        }
    }
}
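
// Example (a sketch): average face-centered MAC velocities "umac" (one
// component per direction) into components 0..AMREX_SPACEDIM-1 of a
// hypothetical cell-centered MultiFab "vel_cc":
//     amrex::average_face_to_cellcenter(vel_cc, 0,
//         amrex::Array<const amrex::MultiFab*,AMREX_SPACEDIM>
//             {{AMREX_D_DECL(&umac[0], &umac[1], &umac[2])}});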

template <typename MF, std::enable_if_t<IsFabArray<MF>::value,int>>
void average_down_faces (const Vector<const MF*>& fine,
                         const Vector<MF*>& crse,
                         const IntVect& ratio, int ngcrse)
{
    AMREX_ASSERT(fine.size() == AMREX_SPACEDIM && crse.size() == AMREX_SPACEDIM);
    average_down_faces(Array<const MF*,AMREX_SPACEDIM>
                           {{AMREX_D_DECL(fine[0],fine[1],fine[2])}},
                       Array<MF*,AMREX_SPACEDIM>
                           {{AMREX_D_DECL(crse[0],crse[1],crse[2])}},
                       ratio, ngcrse);
}

template <typename MF, std::enable_if_t<IsFabArray<MF>::value,int>>
void average_down_faces (const Vector<const MF*>& fine,
                         const Vector<MF*>& crse, int ratio, int ngcrse)
{
    average_down_faces(fine,crse,IntVect{ratio},ngcrse);
}

template <typename MF, std::enable_if_t<IsFabArray<MF>::value,int>>
void average_down_faces (const Array<const MF*,AMREX_SPACEDIM>& fine,
                         const Array<MF*,AMREX_SPACEDIM>& crse,
                         int ratio, int ngcrse)
{
    average_down_faces(fine,crse,IntVect{ratio},ngcrse);
}

template <typename MF, std::enable_if_t<IsFabArray<MF>::value,int>>
void average_down_faces (const Array<const MF*,AMREX_SPACEDIM>& fine,
                         const Array<MF*,AMREX_SPACEDIM>& crse,
                         const IntVect& ratio, int ngcrse)
{
    for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
        average_down_faces(*fine[idim], *crse[idim], ratio, ngcrse);
    }
}

template <typename FAB>
void average_down_faces (const FabArray<FAB>& fine, FabArray<FAB>& crse,
                         const IntVect& ratio, int ngcrse)
{
    BL_PROFILE("average_down_faces");

    AMREX_ASSERT(crse.nComp() == fine.nComp());
    AMREX_ASSERT(fine.ixType() == crse.ixType());
    const auto type = fine.ixType();
    int dir;
    for (dir = 0; dir < AMREX_SPACEDIM; ++dir) {
        if (type.nodeCentered(dir)) { break; }
    }
    auto tmptype = type;
    tmptype.unset(dir);
    if (dir >= AMREX_SPACEDIM || !tmptype.cellCentered()) {
        amrex::Abort("average_down_faces: not face index type");
    }
    const int ncomp = crse.nComp();
    if (isMFIterSafe(fine, crse))
    {
#ifdef AMREX_USE_GPU
        if (Gpu::inLaunchRegion() && crse.isFusingCandidate()) {
            auto const& crsema = crse.arrays();
            auto const& finema = fine.const_arrays();
            ParallelFor(crse, IntVect(ngcrse), ncomp,
            [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k, int n) noexcept
            {
                amrex_avgdown_faces(i,j,k,n, crsema[box_no], finema[box_no], 0, 0, ratio, dir);
            });
            if (!Gpu::inNoSyncRegion()) {
                Gpu::streamSynchronize();
            }
        } else
#endif
        {
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
            for (MFIter mfi(crse,TilingIfNotGPU()); mfi.isValid(); ++mfi)
            {
                const Box& bx = mfi.growntilebox(ngcrse);
                auto const& crsearr = crse.array(mfi);
                auto const& finearr = fine.const_array(mfi);
                AMREX_HOST_DEVICE_PARALLEL_FOR_4D(bx, ncomp, i, j, k, n,
                {
                    amrex_avgdown_faces(i,j,k,n, crsearr, finearr, 0, 0, ratio, dir);
                });
            }
        }
    }
    else
    {
        FabArray<FAB> ctmp(amrex::coarsen(fine.boxArray(),ratio), fine.DistributionMap(),
                           ncomp, ngcrse, MFInfo(), DefaultFabFactory<FAB>());
        average_down_faces(fine, ctmp, ratio, ngcrse);
        crse.ParallelCopy(ctmp,0,0,ncomp,ngcrse,ngcrse);
    }
}

template <typename MF, std::enable_if_t<IsFabArray<MF>::value,int>>
void average_down_faces (const Array<const MF*,AMREX_SPACEDIM>& fine,
                         const Array<MF*,AMREX_SPACEDIM>& crse,
                         const IntVect& ratio, const Geometry& crse_geom)
{
    for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
        average_down_faces(*fine[idim], *crse[idim], ratio, crse_geom);
    }
}

template <typename FAB>
void average_down_faces (const FabArray<FAB>& fine, FabArray<FAB>& crse,
                         const IntVect& ratio, const Geometry& crse_geom)
{
    FabArray<FAB> ctmp(amrex::coarsen(fine.boxArray(),ratio), fine.DistributionMap(),
                       crse.nComp(), 0);
    average_down_faces(fine, ctmp, ratio, 0);
    crse.ParallelCopy(ctmp,0,0,crse.nComp(),0,0,crse_geom.periodicity());
}

template <typename MF, std::enable_if_t<IsFabArray<MF>::value,int> FOO>
Vector<typename MF::value_type> get_cell_data (MF const& mf, IntVect const& cell)
{
    using T = typename MF::value_type;
    const int ncomp = mf.nComp();
    Gpu::DeviceVector<T> dv(ncomp);
    auto* dp = dv.data();
    bool found = false;
    auto loc = cell.dim3();
    for (MFIter mfi(mf); mfi.isValid() && !found; ++mfi)
    {
        Box const& box = mfi.validbox();
        if (box.contains(cell)) {
            found = true;
            auto const& fab = mf.const_array(mfi);
            amrex::ParallelFor(1, [=] AMREX_GPU_DEVICE (int) noexcept
            {
                for (int n = 0; n < ncomp; ++n) {
                    dp[n] = fab(loc.x,loc.y,loc.z,n);
                }
            });
        }
    }
    Vector<T> hv;
    if (found) {
        hv.resize(ncomp);
        Gpu::copy(Gpu::deviceToHost, dv.begin(), dv.end(), hv.begin());
    }
    return hv;
}
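
// Example (a sketch): read all components at one cell; the returned vector is
// non-empty only on the MPI rank whose valid region contains that cell:
//     auto vals = amrex::get_cell_data(mf, amrex::IntVect(AMREX_D_DECL(8,16,24)));
//     if (!vals.empty()) { /* this rank owns the cell */ }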

template <typename MF, std::enable_if_t<IsFabArray<MF>::value,int> FOO>
MF get_line_data (MF const& mf, int dir, IntVect const& cell, Box const& bnd_bx)
{
    bool do_bnd = (!bnd_bx.isEmpty());

    BoxArray const& ba = mf.boxArray();
    DistributionMapping const& dm = mf.DistributionMap();
    const auto nboxes = static_cast<int>(ba.size());

    BoxList bl(ba.ixType());
    Vector<int> procmap;
    Vector<int> index_map;
    if (!do_bnd) {
        for (int i = 0; i < nboxes; ++i) {
            Box const& b = ba[i];
            IntVect lo = cell;
            lo[dir] = b.smallEnd(dir);
            if (b.contains(lo)) {
                IntVect hi = lo;
                hi[dir] = b.bigEnd(dir);
                Box b1d(lo,hi,b.ixType());
                bl.push_back(b1d);
                procmap.push_back(dm[i]);
                index_map.push_back(i);
            }
        }
    } else {
        for (int i = 0; i < nboxes; ++i) {
            Box const& b = ba[i];
            Box const& b1d = bnd_bx & b;
            if (b1d.ok()) {
                bl.push_back(b1d);
                procmap.push_back(dm[i]);
                index_map.push_back(i);
            }
        }
    }

    if (bl.isEmpty()) {
        return MF();
    } else {
        BoxArray rba(std::move(bl));
        DistributionMapping rdm(std::move(procmap));
        MF rmf(rba, rdm, mf.nComp(), IntVect(0),
               MFInfo().SetArena(mf.arena()));
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
        for (MFIter mfi(rmf); mfi.isValid(); ++mfi) {
            Box const& b = mfi.validbox();
            auto const& dfab = rmf.array(mfi);
            auto const& sfab = mf.const_array(index_map[mfi.index()]);
            amrex::ParallelFor(b, mf.nComp(),
            [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept
            {
                dfab(i,j,k,n) = sfab(i,j,k,n);
            });
        }
        return rmf;
    }
}
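
// Example (a sketch): extract the line of data along x (dir=0) that passes
// through "cell"; boxes not intersecting that line contribute nothing:
//     auto line = amrex::get_line_data(mf, 0, cell);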

namespace detail {
template <typename Op, typename T, typename F>
void reduce_to_plane (Array4<T> const& ar, int direction, Box const& bx, int box_no,
                      F const& f)
{
#if defined(AMREX_USE_GPU)
    Box b2d = bx;
    b2d.setRange(direction,0);
    const auto blo = amrex::lbound(bx);
    const auto len = amrex::length(bx);
    constexpr int nthreads = 128;
    auto nblocks = static_cast<int>(b2d.numPts());
#ifdef AMREX_USE_SYCL
    constexpr std::size_t shared_mem_bytes = sizeof(T)*Gpu::Device::warp_size;
    amrex::launch<nthreads>(nblocks, shared_mem_bytes, Gpu::gpuStream(),
    [=] AMREX_GPU_DEVICE (Gpu::Handler const& h)
    {
        int bid = h.blockIdx();
        int tid = h.threadIdx();
#else
    amrex::launch<nthreads>(nblocks, Gpu::gpuStream(),
    [=] AMREX_GPU_DEVICE ()
    {
        int bid = blockIdx.x;
        int tid = threadIdx.x;
#endif
        T tmp;
        Op().init(tmp);
        T* p;
        if (direction == 0) {
            int k = bid / len.y;
            int j = bid - k*len.y;
            k += blo.z;
            j += blo.y;
            for (int i = blo.x + tid; i < blo.x+len.x; i += nthreads) {
                Op().local_update(tmp, f(box_no,i,j,k));
            }
            p = ar.ptr(0,j,k);
        } else if (direction == 1) {
            int k = bid / len.x;
            int i = bid - k*len.x;
            k += blo.z;
            i += blo.x;
            for (int j = blo.y + tid; j < blo.y+len.y; j += nthreads) {
                Op().local_update(tmp, f(box_no,i,j,k));
            }
            p = ar.ptr(i,0,k);
        } else {
            int j = bid / len.x;
            int i = bid - j*len.x;
            j += blo.y;
            i += blo.x;
            for (int k = blo.z + tid; k < blo.z+len.z; k += nthreads) {
                Op().local_update(tmp, f(box_no,i,j,k));
            }
            p = ar.ptr(i,j,0);
        }
#ifdef AMREX_USE_SYCL
        Op().template parallel_update<T>(*p, tmp, h);
#else
        Op().template parallel_update<T,nthreads>(*p, tmp);
#endif
    });
#else
    // CPU
    if (direction == 0) {
        AMREX_LOOP_3D(bx, i, j, k,
        {
            Op().local_update(ar(0,j,k), f(box_no,i,j,k));
        });
    } else if (direction == 1) {
        AMREX_LOOP_3D(bx, i, j, k,
        {
            Op().local_update(ar(i,0,k), f(box_no,i,j,k));
        });
    } else {
        AMREX_LOOP_3D(bx, i, j, k,
        {
            Op().local_update(ar(i,j,0), f(box_no,i,j,k));
        });
    }
#endif
}
}

template <typename Op, typename T, typename FAB, typename F,
          std::enable_if_t<IsBaseFab<FAB>::value
#ifndef AMREX_USE_CUDA
                           && IsCallableR<T,F,int,int,int,int>::value
#endif
                           , int> FOO>
BaseFab<T>
ReduceToPlane (int direction, Box const& a_domain, FabArray<FAB> const& mf, F const& f)
{
    Box const domain = amrex::convert(a_domain, mf.ixType());

    Box domain2d = domain;
    domain2d.setRange(direction, 0);

    T initval;
    Op().init(initval);

    BaseFab<T> r(domain2d);
    r.template setVal<RunOn::Device>(initval);
    auto const& ar = r.array();

    for (MFIter mfi(mf,MFItInfo().UseDefaultStream().DisableDeviceSync());
         mfi.isValid(); ++mfi)
    {
        Box bx = mfi.validbox() & domain;
        if (bx.ok()) {
            int box_no = mfi.LocalIndex();
            detail::reduce_to_plane<Op, T>(ar, direction, bx, box_no, f);
        }
    }
    Gpu::streamSynchronize();

    return r;
}
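
// Example (a sketch): reduce along z (direction=2), producing a plane of
// maxima of component 0 indexed by (i,j); each rank reduces only the boxes
// it owns, so the resulting Fab holds rank-local values:
//     auto const& ma = mf.const_arrays();
//     amrex::BaseFab<amrex::Real> plane =
//         amrex::ReduceToPlane<amrex::ReduceOpMax, amrex::Real>(2, domain, mf,
//             [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k) -> amrex::Real
//             { return ma[box_no](i,j,k,0); });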

namespace detail {
template <typename Op, typename FA, typename F>
FA reduce_to_plane (int direction, Box const& domain, FA const& mf, F const& f)
{
    using T = typename FA::value_type;

    Box const ndomain = amrex::convert(domain, mf.ixType());

    auto npts = amrex::convert(mf.boxArray(),IntVect(0)).numPts();
    if (npts != ndomain.numPts()) {
        amrex::Abort("ReduceToPlaneMF: mf's BoxArray must have a rectangular domain.");
    }

    T initval;
    Op().init(initval);

    BoxList bl = mf.boxArray().boxList();
    for (auto& b : bl) {
        b.setRange(direction, 0);
    }
    BoxArray ba(std::move(bl));
    FA tmpfa(ba, mf.DistributionMap(), 1, 0);
    tmpfa.setVal(initval);

    for (MFIter mfi(mf); mfi.isValid(); ++mfi)
    {
        Box bx = mfi.validbox() & ndomain;
        if (bx.ok()) {
            int box_no = mfi.LocalIndex();
            detail::reduce_to_plane<Op, T>(tmpfa.array(mfi), direction, bx, box_no, f);
        }
    }

    return tmpfa;
}
}

template <typename Op, typename FA, typename F,
          std::enable_if_t<IsMultiFabLike_v<FA>
#ifndef AMREX_USE_CUDA
                           && IsCallableR<typename FA::value_type,
                                          F,int,int,int,int>::value
#endif
                           , int> FOO>
FA ReduceToPlaneMF (int direction, Box const& domain, FA const& mf, F const& f)
{
    auto [fa3, fa2] = ReduceToPlaneMF2<Op>(direction, domain, mf, f);
    fa3.ParallelCopy(fa2);
    return std::move(fa3);
}

template <typename Op, typename FA, typename F,
          std::enable_if_t<IsMultiFabLike_v<FA>
#ifndef AMREX_USE_CUDA
                           && IsCallableR<typename FA::value_type,
                                          F,int,int,int,int>::value
#endif
                           , int> FOO>
std::pair<FA,FA>
ReduceToPlaneMF2 (int direction, Box const& domain, FA const& mf, F const& f)
{
    using T = typename FA::value_type;

    T initval;
    Op().init(initval);

    auto tmpmf = detail::reduce_to_plane<Op>(direction, domain, mf, f);

    Box domain2d = amrex::convert(domain, mf.ixType());
    domain2d.setRange(direction, 0);

    BoxArray ba2d(domain2d);
    ba2d.maxSize(mf.boxArray()[0].length());
    DistributionMapping dm2d(ba2d);

    FA mf2d(ba2d, dm2d, 1, 0);
    mf2d.setVal(initval);

    static_assert(std::is_same_v<Op, ReduceOpSum>, "Currently only ReduceOpSum is supported.");
    mf2d.ParallelAdd(tmpmf);

    return std::make_pair(std::move(tmpmf), std::move(mf2d));
}
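
// Note: per the static_assert above, the plane-FabArray variants currently
// support only ReduceOpSum; ReduceToPlaneMF assembles the final plane by a
// ParallelAdd of the rank-local partial planes.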

}

#endif