Block-Structured AMR Software Framework

AMReX_MultiFabUtil.H
#ifndef AMREX_MultiFabUtil_H_
#define AMREX_MultiFabUtil_H_
#include <AMReX_Config.H>

#include <AMReX_MultiFab.H>
#include <AMReX_iMultiFab.H>
#include <AMReX_LayoutData.H>
#include <AMReX_MFIter.H>
#include <AMReX_Array.H>
#include <AMReX_Vector.H>
#include <AMReX_MultiFabUtil_C.H>

#include <AMReX_MultiFabUtilI.H>

namespace amrex
{
    void average_node_to_cellcenter (MultiFab& cc, int dcomp,
                                     const MultiFab& nd, int scomp,
                                     int ncomp, int ngrow = 0);
    void average_node_to_cellcenter (MultiFab& cc, int dcomp,
                                     const MultiFab& nd, int scomp,
                                     int ncomp, IntVect const& ng_vect);

    void average_edge_to_cellcenter (MultiFab& cc, int dcomp,
                                     const Vector<const MultiFab*>& edge,
                                     int ngrow = 0);
    void average_edge_to_cellcenter (MultiFab& cc, int dcomp,
                                     const Vector<const MultiFab*>& edge,
                                     IntVect const& ng_vect);

    void average_face_to_cellcenter (MultiFab& cc, int dcomp,
                                     const Vector<const MultiFab*>& fc,
                                     int ngrow = 0);
    void average_face_to_cellcenter (MultiFab& cc, int dcomp,
                                     const Vector<const MultiFab*>& fc,
                                     IntVect const& ng_vect);

    template <typename CMF, typename FMF,
              std::enable_if_t<IsFabArray_v<CMF> && IsFabArray_v<FMF>, int> = 0>
    void average_face_to_cellcenter (CMF& cc, int dcomp,
                                     const Array<const FMF*,AMREX_SPACEDIM>& fc,
                                     int ngrow = 0);

    template <typename CMF, typename FMF,
              std::enable_if_t<IsFabArray_v<CMF> && IsFabArray_v<FMF>, int> = 0>
    void average_face_to_cellcenter (CMF& cc, int dcomp,
                                     const Array<const FMF*,AMREX_SPACEDIM>& fc,
                                     IntVect const& ng_vect);

    void average_face_to_cellcenter (MultiFab& cc,
                                     const Vector<const MultiFab*>& fc,
                                     const Geometry& geom);
    void average_face_to_cellcenter (MultiFab& cc,
                                     const Array<const MultiFab*,AMREX_SPACEDIM>& fc,
                                     const Geometry& geom);
    void average_cellcenter_to_face (const Vector<MultiFab*>& fc,
                                     const MultiFab& cc,
                                     const Geometry& geom,
                                     int ncomp = 1,
                                     bool use_harmonic_averaging = false);
    void average_cellcenter_to_face (const Array<MultiFab*,AMREX_SPACEDIM>& fc,
                                     const MultiFab& cc,
                                     const Geometry& geom,
                                     int ncomp = 1,
                                     bool use_harmonic_averaging = false);

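A minimal caller-side usage sketch of the face-to-cell-center averaging above. The names `vel_cc` (cell-centered, at least AMREX_SPACEDIM components), `umac` (one single-component face-centered MultiFab per direction), and `geom` are hypothetical, caller-provided objects:

#include <AMReX_MultiFabUtil.H>

void example_face_to_cc (amrex::MultiFab& vel_cc,
                         amrex::Array<const amrex::MultiFab*,AMREX_SPACEDIM> const& umac,
                         amrex::Geometry const& geom)
{
    // Fill components 0..AMREX_SPACEDIM-1 of vel_cc with averages of the
    // surrounding face values, with geometric weighting where applicable.
    amrex::average_face_to_cellcenter(vel_cc, umac, geom);
}
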
    template <typename MF, std::enable_if_t<IsFabArray<MF>::value,int> = 0>
    void average_down_faces (const Vector<const MF*>& fine,
                             const Vector<MF*>& crse,
                             const IntVect& ratio,
                             int ngcrse = 0);
    template <typename MF, std::enable_if_t<IsFabArray<MF>::value,int> = 0>
    void average_down_faces (const Vector<const MF*>& fine,
                             const Vector<MF*>& crse,
                             int ratio,
                             int ngcrse = 0);
    template <typename MF, std::enable_if_t<IsFabArray<MF>::value,int> = 0>
    void average_down_faces (const Array<const MF*,AMREX_SPACEDIM>& fine,
                             const Array<MF*,AMREX_SPACEDIM>& crse,
                             const IntVect& ratio,
                             int ngcrse = 0);
    template <typename MF, std::enable_if_t<IsFabArray<MF>::value,int> = 0>
    void average_down_faces (const Array<const MF*,AMREX_SPACEDIM>& fine,
                             const Array<MF*,AMREX_SPACEDIM>& crse,
                             int ratio,
                             int ngcrse = 0);
    template <typename FAB>
    void average_down_faces (const FabArray<FAB>& fine, FabArray<FAB>& crse,
                             const IntVect& ratio, int ngcrse=0);

    // This version takes periodicity into account.
    template <typename MF, std::enable_if_t<IsFabArray<MF>::value,int> = 0>
    void average_down_faces (const Array<const MF*,AMREX_SPACEDIM>& fine,
                             const Array<MF*,AMREX_SPACEDIM>& crse,
                             const IntVect& ratio, const Geometry& crse_geom);
    // This version takes periodicity into account.
    template <typename FAB>
    void average_down_faces (const FabArray<FAB>& fine, FabArray<FAB>& crse,
                             const IntVect& ratio, const Geometry& crse_geom);

    void average_down_edges (const Vector<const MultiFab*>& fine,
                             const Vector<MultiFab*>& crse,
                             const IntVect& ratio,
                             int ngcrse = 0);
    void average_down_edges (const Array<const MultiFab*,AMREX_SPACEDIM>& fine,
                             const Array<MultiFab*,AMREX_SPACEDIM>& crse,
                             const IntVect& ratio,
                             int ngcrse = 0);
    void average_down_edges (const MultiFab& fine, MultiFab& crse,
                             const IntVect& ratio, int ngcrse=0);

    template <typename FAB>
    void average_down_nodal (const FabArray<FAB>& S_fine,
                             FabArray<FAB>& S_crse,
                             const IntVect& ratio,
                             int ngcrse = 0,
                             bool mfiter_is_definitely_safe=false);

    void average_down (const MultiFab& S_fine, MultiFab& S_crse,
                       const Geometry& fgeom, const Geometry& cgeom,
                       int scomp, int ncomp, const IntVect& ratio);
    void average_down (const MultiFab& S_fine, MultiFab& S_crse,
                       const Geometry& fgeom, const Geometry& cgeom,
                       int scomp, int ncomp, int rr);

    template<typename FAB>
    void average_down (const FabArray<FAB>& S_fine, FabArray<FAB>& S_crse,
                       int scomp, int ncomp, const IntVect& ratio);
    template<typename FAB>
    void average_down (const FabArray<FAB>& S_fine, FabArray<FAB>& S_crse,
                       int scomp, int ncomp, int rr);

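A minimal usage sketch for average_down, assuming two caller-provided MultiFabs that cover the same physical region at a factor-of-2 refinement (`S_fine`, `S_crse`, `fgeom`, `cgeom` are hypothetical):

#include <AMReX_MultiFabUtil.H>

void example_average_down (amrex::MultiFab const& S_fine, amrex::MultiFab& S_crse,
                           amrex::Geometry const& fgeom, amrex::Geometry const& cgeom)
{
    // Restrict all components of S_fine onto S_crse with refinement ratio 2.
    amrex::average_down(S_fine, S_crse, fgeom, cgeom, 0, S_fine.nComp(),
                        amrex::IntVect(2));
}
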
    void sum_fine_to_coarse (const MultiFab& S_Fine, MultiFab& S_crse,
                             int scomp, int ncomp,
                             const IntVect& ratio,
                             const Geometry& cgeom, const Geometry& fgeom);

    void print_state (const MultiFab& mf, const IntVect& cell, int n=-1,
                      const IntVect& ng = IntVect::TheZeroVector());

    void writeFabs (const MultiFab& mf, const std::string& name);
    void writeFabs (const MultiFab& mf, int comp, int ncomp, const std::string& name);

    std::unique_ptr<MultiFab> get_slice_data(int dir, Real coord,
                                             const MultiFab& cc,
                                             const Geometry& geom, int start_comp, int ncomp,
                                             bool interpolate=false,
                                             RealBox const& bnd_rbx = RealBox());

    template <typename MF, std::enable_if_t<IsFabArray<MF>::value,int> FOO = 0>
    Vector<typename MF::value_type> get_cell_data (MF const& mf, IntVect const& cell);

    template <typename MF, std::enable_if_t<IsFabArray<MF>::value,int> FOO = 0>
    MF get_line_data (MF const& mf, int dir, IntVect const& cell, Box const& bnd_bx = Box());

    template <typename FAB>
    iMultiFab makeFineMask (const FabArray<FAB>& cmf, const BoxArray& fba, const IntVect& ratio,
                            int crse_value = 0, int fine_value = 1);
    iMultiFab makeFineMask (const BoxArray& cba, const DistributionMapping& cdm,
                            const BoxArray& fba, const IntVect& ratio,
                            int crse_value = 0, int fine_value = 1);
    template <typename FAB>
    iMultiFab makeFineMask (const FabArray<FAB>& cmf, const BoxArray& fba, const IntVect& ratio,
                            Periodicity const& period, int crse_value, int fine_value);
    iMultiFab makeFineMask (const BoxArray& cba, const DistributionMapping& cdm,
                            const IntVect& cnghost, const BoxArray& fba, const IntVect& ratio,
                            Periodicity const& period, int crse_value, int fine_value);
    template <typename FAB>
    iMultiFab makeFineMask (const FabArray<FAB>& cmf, const FabArray<FAB>& fmf,
                            const IntVect& cnghost, const IntVect& ratio,
                            Periodicity const& period, int crse_value, int fine_value);
    template <typename FAB>
    iMultiFab makeFineMask (const FabArray<FAB>& cmf, const FabArray<FAB>& fmf,
                            const IntVect& cnghost, const IntVect& ratio,
                            Periodicity const& period, int crse_value, int fine_value,
                            LayoutData<int>& has_cf);

    MultiFab makeFineMask (const BoxArray& cba, const DistributionMapping& cdm,
                           const BoxArray& fba, const IntVect& ratio,
                           Real crse_value, Real fine_value);

    void computeDivergence (MultiFab& divu, const Array<MultiFab const*,AMREX_SPACEDIM>& umac,
                            const Geometry& geom);

    void computeGradient (MultiFab& grad, const Array<MultiFab const*,AMREX_SPACEDIM>& umac,
                          const Geometry& geom);

    MultiFab ToMultiFab (const iMultiFab& imf);
    FabArray<BaseFab<Long> > ToLongMultiFab (const iMultiFab& imf);

    MultiFab periodicShift (MultiFab const& mf, IntVect const& offset,
                            Periodicity const& period);

    template <typename T, typename U>
    T cast (U const& mf_in)
    {
        T mf_out(mf_in.boxArray(), mf_in.DistributionMap(), mf_in.nComp(), mf_in.nGrowVect());

#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
        for (MFIter mfi(mf_in); mfi.isValid(); ++mfi)
        {
            const Long n = mfi.fabbox().numPts() * mf_in.nComp();
            auto * pdst = mf_out[mfi].dataPtr();
            auto const* psrc = mf_in [mfi].dataPtr();
            AMREX_HOST_DEVICE_PARALLEL_FOR_1D ( n, i,
            {
                pdst[i] = static_cast<typename T::value_type>(psrc[i]); // NOLINT(bugprone-signed-char-misuse)
            });
        }
        return mf_out;
    }

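As a usage sketch, cast produces a differently typed copy of a FabArray with the same layout; here a hypothetical single-precision copy of a (double-precision) MultiFab:

#include <AMReX_MultiFabUtil.H>

using FMultiFab = amrex::FabArray<amrex::BaseFab<float>>;

FMultiFab example_to_float (amrex::MultiFab const& mf)
{
    // The copy shares BoxArray, DistributionMapping, component count,
    // and ghost cells with mf; only the value type changes.
    return amrex::cast<FMultiFab>(mf);
}
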
    template <typename Op, typename T, typename FAB, typename F,
              std::enable_if_t<IsBaseFab<FAB>::value
#ifndef AMREX_USE_CUDA
                               && IsCallableR<T,F,int,int,int,int>::value
#endif
                               , int> FOO = 0>
    BaseFab<T>
    ReduceToPlane (int direction, Box const& domain, FabArray<FAB> const& mf, F const& f);

    template <typename Op, typename FA, typename F,
              std::enable_if_t<IsMultiFabLike_v<FA>
#ifndef AMREX_USE_CUDA
                               && IsCallableR<typename FA::value_type,
                                              F,int,int,int,int>::value
#endif
                               , int> FOO = 0>
    FA ReduceToPlaneMF (int direction, Box const& domain, FA const& mf, F const& f);

    template <typename Op, typename FA, typename F,
              std::enable_if_t<IsMultiFabLike_v<FA>
#ifndef AMREX_USE_CUDA
                               && IsCallableR<typename FA::value_type,
                                              F,int,int,int,int>::value
#endif
                               , int> FOO = 0>
    std::pair<FA,FA>
    ReduceToPlaneMF2 (int direction, Box const& domain, FA const& mf, F const& f);

    Gpu::HostVector<Real> sumToLine (MultiFab const& mf, int icomp, int ncomp,
                                     Box const& domain, int direction, bool local = false);

    Real volumeWeightedSum (Vector<MultiFab const*> const& mf, int icomp,
                            Vector<Geometry> const& geom,
                            Vector<IntVect> const& ratio,
                            bool local = false);

    void FourthOrderInterpFromFineToCoarse (MultiFab& cmf, int scomp, int ncomp,
                                            MultiFab const& fmf,
                                            IntVect const& ratio);

    void FillRandom (MultiFab& mf, int scomp, int ncomp);

    void FillRandomNormal (MultiFab& mf, int scomp, int ncomp, Real mean, Real stddev);

    [[nodiscard]] Vector<MultiFab> convexify (Vector<MultiFab const*> const& mf,
                                              Vector<IntVect> const& refinement_ratio);
}

namespace amrex {

template <typename FAB>
iMultiFab
makeFineMask (const FabArray<FAB>& cmf, const BoxArray& fba, const IntVect& ratio,
              int crse_value, int fine_value)
{
    return makeFineMask(cmf.boxArray(), cmf.DistributionMap(), cmf.nGrowVect(),
                        fba, ratio, Periodicity::NonPeriodic(), crse_value, fine_value);
}

template <typename FAB>
iMultiFab
makeFineMask (const FabArray<FAB>& cmf, const BoxArray& fba, const IntVect& ratio,
              Periodicity const& period, int crse_value, int fine_value)
{
    return makeFineMask(cmf.boxArray(), cmf.DistributionMap(), cmf.nGrowVect(),
                        fba, ratio, period, crse_value, fine_value);
}

template <typename FAB>
iMultiFab
makeFineMask (const FabArray<FAB>& cmf, const FabArray<FAB>& fmf,
              const IntVect& cnghost, const IntVect& ratio,
              Periodicity const& period, int crse_value, int fine_value)
{
    iMultiFab mask(cmf.boxArray(), cmf.DistributionMap(), 1, cnghost);
    mask.setVal(crse_value);

    iMultiFab foo(amrex::coarsen(fmf.boxArray(),ratio), fmf.DistributionMap(),
                  1, 0, MFInfo().SetAlloc(false));
    const FabArrayBase::CPC& cpc = mask.getCPC(cnghost,foo,IntVect::TheZeroVector(),period);
    mask.setVal(fine_value, cpc, 0, 1);

    return mask;
}

template <typename FAB>
iMultiFab
makeFineMask (const FabArray<FAB>& cmf, const FabArray<FAB>& fmf,
              const IntVect& cnghost, const IntVect& ratio,
              Periodicity const& period, int crse_value, int fine_value,
              LayoutData<int>& has_cf)
{
    iMultiFab mask(cmf.boxArray(), cmf.DistributionMap(), 1, cnghost);
    mask.setVal(crse_value);

    iMultiFab foo(amrex::coarsen(fmf.boxArray(),ratio), fmf.DistributionMap(),
                  1, 0, MFInfo().SetAlloc(false));
    const FabArrayBase::CPC& cpc = mask.getCPC(cnghost,foo,IntVect::TheZeroVector(),period);
    mask.setVal(fine_value, cpc, 0, 1);

    has_cf = mask.RecvLayoutMask(cpc);

    return mask;
}

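A usage sketch for the mask builders above; `cmf` is a coarse-level MultiFab and `fba` a hypothetical fine BoxArray refined by `ratio` relative to the coarse index space:

#include <AMReX_MultiFabUtil.H>

amrex::iMultiFab example_mask (amrex::MultiFab const& cmf,
                               amrex::BoxArray const& fba,
                               amrex::IntVect const& ratio)
{
    // Coarse cells covered by the coarsened fba get 1; all others get 0.
    return amrex::makeFineMask(cmf, fba, ratio, 0, 1);
}
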
template <typename FAB>
void average_down_nodal (const FabArray<FAB>& fine, FabArray<FAB>& crse,
                         const IntVect& ratio, int ngcrse, bool mfiter_is_definitely_safe)
{
    AMREX_ASSERT(fine.is_nodal());
    AMREX_ASSERT(crse.is_nodal());
    AMREX_ASSERT(crse.nComp() == fine.nComp());

    int ncomp = crse.nComp();
    using value_type = typename FAB::value_type;

    if (mfiter_is_definitely_safe || isMFIterSafe(fine, crse))
    {
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
        for (MFIter mfi(crse,TilingIfNotGPU()); mfi.isValid(); ++mfi)
        {
            const Box& bx = mfi.growntilebox(ngcrse);
            Array4<value_type> const& crsearr = crse.array(mfi);
            Array4<value_type const> const& finearr = fine.const_array(mfi);

            AMREX_LAUNCH_HOST_DEVICE_LAMBDA ( bx, tbx,
            {
                amrex_avgdown_nodes(tbx,crsearr,finearr,0,0,ncomp,ratio);
            });
        }
    }
    else
    {
        FabArray<FAB> ctmp(amrex::coarsen(fine.boxArray(),ratio), fine.DistributionMap(),
                           ncomp, ngcrse);
        average_down_nodal(fine, ctmp, ratio, ngcrse);
        crse.ParallelCopy(ctmp,0,0,ncomp,ngcrse,ngcrse);
    }
}

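A usage sketch, assuming two fully nodal, caller-provided MultiFabs related by a factor-of-2 refinement:

#include <AMReX_MultiFabUtil.H>

void example_avgdown_nodal (amrex::MultiFab const& fine_nd, amrex::MultiFab& crse_nd)
{
    // Restrict the fine nodal data onto the coarse nodes.
    amrex::average_down_nodal(fine_nd, crse_nd, amrex::IntVect(2));
}
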
// *************************************************************************************************************

// Average fine cell-based MultiFab onto crse cell-centered MultiFab.
// We do NOT assume that the coarse layout is a coarsened version of the fine layout.
// This version does NOT use volume-weighting.
template<typename FAB>
void average_down (const FabArray<FAB>& S_fine, FabArray<FAB>& S_crse, int scomp, int ncomp, int rr)
{
    average_down(S_fine,S_crse,scomp,ncomp,rr*IntVect::TheUnitVector());
}

template<typename FAB>
void average_down (const FabArray<FAB>& S_fine, FabArray<FAB>& S_crse,
                   int scomp, int ncomp, const IntVect& ratio)
{
    BL_PROFILE("amrex::average_down");
    AMREX_ASSERT(S_crse.nComp() == S_fine.nComp());
    AMREX_ASSERT((S_crse.is_cell_centered() && S_fine.is_cell_centered()) ||
                 (S_crse.is_nodal() && S_fine.is_nodal()));

    using value_type = typename FAB::value_type;

    bool is_cell_centered = S_crse.is_cell_centered();

    //
    // Coarsen() the fine stuff on processors owning the fine data.
    //
    BoxArray crse_S_fine_BA = S_fine.boxArray(); crse_S_fine_BA.coarsen(ratio);

    if (crse_S_fine_BA == S_crse.boxArray() && S_fine.DistributionMap() == S_crse.DistributionMap())
    {
#ifdef AMREX_USE_GPU
        if (Gpu::inLaunchRegion() && S_crse.isFusingCandidate()) {
            auto const& crsema = S_crse.arrays();
            auto const& finema = S_fine.const_arrays();
            if (is_cell_centered) {
                ParallelFor(S_crse, IntVect(0), ncomp,
                [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k, int n) noexcept
                {
                    amrex_avgdown(i,j,k,n,crsema[box_no],finema[box_no],scomp,scomp,ratio);
                });
            } else {
                ParallelFor(S_crse, IntVect(0), ncomp,
                [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k, int n) noexcept
                {
                    amrex_avgdown_nodes(i,j,k,n,crsema[box_no],finema[box_no],scomp,scomp,ratio);
                });
            }
            if (!Gpu::inNoSyncRegion()) {
                Gpu::streamSynchronize();
            }
        } else
#endif
        {
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
            for (MFIter mfi(S_crse,TilingIfNotGPU()); mfi.isValid(); ++mfi)
            {
                //  NOTE: The tilebox is defined at the coarse level.
                const Box& bx = mfi.tilebox();
                Array4<value_type> const& crsearr = S_crse.array(mfi);
                Array4<value_type const> const& finearr = S_fine.const_array(mfi);

                if (is_cell_centered) {
                    AMREX_HOST_DEVICE_PARALLEL_FOR_4D(bx, ncomp, i, j, k, n,
                    {
                        amrex_avgdown(i,j,k,n,crsearr,finearr,scomp,scomp,ratio);
                    });
                } else {
                    AMREX_HOST_DEVICE_PARALLEL_FOR_4D(bx, ncomp, i, j, k, n,
                    {
                        amrex_avgdown_nodes(i,j,k,n,crsearr,finearr,scomp,scomp,ratio);
                    });
                }
            }
        }
    }
    else
    {
        FabArray<FAB> crse_S_fine(crse_S_fine_BA, S_fine.DistributionMap(), ncomp, 0, MFInfo(), DefaultFabFactory<FAB>());

#ifdef AMREX_USE_GPU
        if (Gpu::inLaunchRegion() && crse_S_fine.isFusingCandidate()) {
            auto const& crsema = crse_S_fine.arrays();
            auto const& finema = S_fine.const_arrays();
            if (is_cell_centered) {
                ParallelFor(crse_S_fine, IntVect(0), ncomp,
                [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k, int n) noexcept
                {
                    amrex_avgdown(i,j,k,n,crsema[box_no],finema[box_no],0,scomp,ratio);
                });
            } else {
                ParallelFor(crse_S_fine, IntVect(0), ncomp,
                [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k, int n) noexcept
                {
                    amrex_avgdown_nodes(i,j,k,n,crsema[box_no],finema[box_no],0,scomp,ratio);
                });
            }
            if (!Gpu::inNoSyncRegion()) {
                Gpu::streamSynchronize();
            }
        } else
#endif
        {
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
            for (MFIter mfi(crse_S_fine,TilingIfNotGPU()); mfi.isValid(); ++mfi)
            {
                //  NOTE: The tilebox is defined at the coarse level.
                const Box& bx = mfi.tilebox();
                Array4<value_type> const& crsearr = crse_S_fine.array(mfi);
                Array4<value_type const> const& finearr = S_fine.const_array(mfi);

                //  NOTE: We copy from component scomp of the fine fab into component 0 of the crse fab
                //        because the crse fab is a temporary which was made starting at comp 0, it is
                //        not part of the actual crse multifab which came in.

                if (is_cell_centered) {
                    AMREX_HOST_DEVICE_PARALLEL_FOR_4D(bx, ncomp, i, j, k, n,
                    {
                        amrex_avgdown(i,j,k,n,crsearr,finearr,0,scomp,ratio);
                    });
                } else {
                    AMREX_HOST_DEVICE_PARALLEL_FOR_4D(bx, ncomp, i, j, k, n,
                    {
                        amrex_avgdown_nodes(i,j,k,n,crsearr,finearr,0,scomp,ratio);
                    });
                }
            }
        }

        S_crse.ParallelCopy(crse_S_fine,0,scomp,ncomp);
    }
}

template <typename F>
Real
NormHelper (const MultiFab& x, int xcomp,
            const MultiFab& y, int ycomp,
            F const& f,
            int numcomp, IntVect nghost, bool local)
{
    BL_ASSERT(x.boxArray() == y.boxArray());
    BL_ASSERT(x.DistributionMap() == y.DistributionMap());
    BL_ASSERT(x.nGrowVect().allGE(nghost) && y.nGrowVect().allGE(nghost));

    Real sm = Real(0.0);
#ifdef AMREX_USE_GPU
    if (Gpu::inLaunchRegion()) {
        auto const& xma = x.const_arrays();
        auto const& yma = y.const_arrays();
        sm = ParReduce(TypeList<ReduceOpSum>{}, TypeList<Real>{}, x, nghost,
        [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k) noexcept -> GpuTuple<Real>
        {
            Real t = Real(0.0);
            auto const& xfab = xma[box_no];
            auto const& yfab = yma[box_no];
            for (int n = 0; n < numcomp; ++n) {
                t += f(xfab(i,j,k,xcomp+n) , yfab(i,j,k,ycomp+n));
            }
            return t;
        });
    } else
#endif
    {
#ifdef AMREX_USE_OMP
#pragma omp parallel if (!system::regtest_reduction) reduction(+:sm)
#endif
        for (MFIter mfi(x,true); mfi.isValid(); ++mfi)
        {
            Box const& bx = mfi.growntilebox(nghost);
            Array4<Real const> const& xfab = x.const_array(mfi);
            Array4<Real const> const& yfab = y.const_array(mfi);
            AMREX_LOOP_4D(bx, numcomp, i, j, k, n,
            {
                sm += f(xfab(i,j,k,xcomp+n) , yfab(i,j,k,ycomp+n));
            });
        }
    }

    if (!local) {
        ParallelAllReduce::Sum(sm, ParallelContext::CommunicatorSub());
    }

    return sm;
}

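A usage sketch of NormHelper: a global dot product of two MultiFabs over their valid regions. The binary operation is passed as a host/device-callable lambda; the function name here is hypothetical:

#include <AMReX_MultiFabUtil.H>

amrex::Real example_dot (amrex::MultiFab const& x, amrex::MultiFab const& y)
{
    return amrex::NormHelper(x, 0, y, 0,
                             [] AMREX_GPU_HOST_DEVICE (amrex::Real a, amrex::Real b) -> amrex::Real
                             { return a*b; },
                             x.nComp(), amrex::IntVect(0), false); // local=false: reduce over ranks
}
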
template <typename MMF, typename Pred, typename F>
Real
NormHelper (const MMF& mask,
            const MultiFab& x, int xcomp,
            const MultiFab& y, int ycomp,
            Pred const& pf,
            F const& f,
            int numcomp, IntVect nghost, bool local)
{
    BL_ASSERT(x.boxArray() == y.boxArray());
    BL_ASSERT(x.boxArray() == mask.boxArray());
    BL_ASSERT(x.DistributionMap() == y.DistributionMap());
    BL_ASSERT(x.DistributionMap() == mask.DistributionMap());
    BL_ASSERT(x.nGrowVect().allGE(nghost) && y.nGrowVect().allGE(nghost));
    BL_ASSERT(mask.nGrowVect().allGE(nghost));

    Real sm = Real(0.0);
#ifdef AMREX_USE_GPU
    if (Gpu::inLaunchRegion()) {
        auto const& xma = x.const_arrays();
        auto const& yma = y.const_arrays();
        auto const& mma = mask.const_arrays();
        sm = ParReduce(TypeList<ReduceOpSum>{}, TypeList<Real>{}, x, nghost,
        [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k) noexcept -> GpuTuple<Real>
        {
            Real t = Real(0.0);
            if (pf(mma[box_no](i,j,k))) {
                auto const& xfab = xma[box_no];
                auto const& yfab = yma[box_no];
                for (int n = 0; n < numcomp; ++n) {
                    t += f(xfab(i,j,k,xcomp+n) , yfab(i,j,k,ycomp+n));
                }
            }
            return t;
        });
    } else
#endif
    {
#ifdef AMREX_USE_OMP
#pragma omp parallel if (!system::regtest_reduction) reduction(+:sm)
#endif
        for (MFIter mfi(x,true); mfi.isValid(); ++mfi)
        {
            Box const& bx = mfi.growntilebox(nghost);
            Array4<Real const> const& xfab = x.const_array(mfi);
            Array4<Real const> const& yfab = y.const_array(mfi);
            auto const& mfab = mask.const_array(mfi);
            AMREX_LOOP_4D(bx, numcomp, i, j, k, n,
            {
                if (pf(mfab(i,j,k))) {
                    sm += f(xfab(i,j,k,xcomp+n) , yfab(i,j,k,ycomp+n));
                }
            });
        }
    }

    if (!local) {
        ParallelAllReduce::Sum(sm, ParallelContext::CommunicatorSub());
    }

    return sm;
}

template <typename CMF, typename FMF,
          std::enable_if_t<IsFabArray_v<CMF> && IsFabArray_v<FMF>, int> FOO>
void average_face_to_cellcenter (CMF& cc, int dcomp,
                                 const Array<const FMF*,AMREX_SPACEDIM>& fc,
                                 int ngrow)
{
    IntVect ng_vect(ngrow);
    average_face_to_cellcenter(cc, dcomp, fc, ng_vect);
}

template <typename CMF, typename FMF,
          std::enable_if_t<IsFabArray_v<CMF> && IsFabArray_v<FMF>, int> FOO>
void average_face_to_cellcenter (CMF& cc, int dcomp,
                                 const Array<const FMF*,AMREX_SPACEDIM>& fc,
                                 IntVect const& ng_vect)
{
    AMREX_ASSERT(cc.nComp() >= dcomp + AMREX_SPACEDIM);
    AMREX_ASSERT(fc[0]->nComp() == 1);

#ifdef AMREX_USE_GPU
    if (Gpu::inLaunchRegion() && cc.isFusingCandidate()) {
        auto const& ccma = cc.arrays();
        AMREX_D_TERM(auto const& fxma = fc[0]->const_arrays();,
                     auto const& fyma = fc[1]->const_arrays();,
                     auto const& fzma = fc[2]->const_arrays(););
        ParallelFor(cc, ng_vect,
        [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k) noexcept
        {
#if (AMREX_SPACEDIM == 1)
            GeometryData gd{};
            gd.coord = 0;
#endif
            amrex_avg_fc_to_cc(i,j,k, ccma[box_no], AMREX_D_DECL(fxma[box_no],
                                                                 fyma[box_no],
                                                                 fzma[box_no]),
                               dcomp
#if (AMREX_SPACEDIM == 1)
                               , gd
#endif
                               );
        });
        if (!Gpu::inNoSyncRegion()) {
            Gpu::streamSynchronize();
        }
    } else
#endif
    {
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
        for (MFIter mfi(cc,TilingIfNotGPU()); mfi.isValid(); ++mfi)
        {
            const Box bx = mfi.growntilebox(ng_vect);
            auto const& ccarr = cc.array(mfi);
            AMREX_D_TERM(auto const& fxarr = fc[0]->const_array(mfi);,
                         auto const& fyarr = fc[1]->const_array(mfi);,
                         auto const& fzarr = fc[2]->const_array(mfi););

#if (AMREX_SPACEDIM == 1)
            AMREX_HOST_DEVICE_PARALLEL_FOR_3D(bx, i, j, k,
            {
                GeometryData gd;
                gd.coord = 0;
                amrex_avg_fc_to_cc(i,j,k, ccarr, fxarr, dcomp, gd);
            });
#else
            AMREX_HOST_DEVICE_PARALLEL_FOR_3D(bx, i, j, k,
            {
                amrex_avg_fc_to_cc(i,j,k, ccarr, AMREX_D_DECL(fxarr,fyarr,fzarr), dcomp);
            });
#endif
        }
    }
}

template <typename MF, std::enable_if_t<IsFabArray<MF>::value,int>>
void average_down_faces (const Vector<const MF*>& fine,
                         const Vector<MF*>& crse,
                         const IntVect& ratio, int ngcrse)
{
    AMREX_ASSERT(fine.size() == AMREX_SPACEDIM && crse.size() == AMREX_SPACEDIM);
    average_down_faces(Array<const MF*,AMREX_SPACEDIM>
                               {{AMREX_D_DECL(fine[0],fine[1],fine[2])}},
                       Array<MF*,AMREX_SPACEDIM>
                               {{AMREX_D_DECL(crse[0],crse[1],crse[2])}},
                       ratio, ngcrse);
}

template <typename MF, std::enable_if_t<IsFabArray<MF>::value,int>>
void average_down_faces (const Vector<const MF*>& fine,
                         const Vector<MF*>& crse, int ratio, int ngcrse)
{
    average_down_faces(fine,crse,IntVect{ratio},ngcrse);
}

template <typename MF, std::enable_if_t<IsFabArray<MF>::value,int>>
void average_down_faces (const Array<const MF*,AMREX_SPACEDIM>& fine,
                         const Array<MF*,AMREX_SPACEDIM>& crse,
                         int ratio, int ngcrse)
{
    average_down_faces(fine,crse,IntVect{ratio},ngcrse);
}

template <typename MF, std::enable_if_t<IsFabArray<MF>::value,int>>
void average_down_faces (const Array<const MF*,AMREX_SPACEDIM>& fine,
                         const Array<MF*,AMREX_SPACEDIM>& crse,
                         const IntVect& ratio, int ngcrse)
{
    for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
        average_down_faces(*fine[idim], *crse[idim], ratio, ngcrse);
    }
}

template <typename FAB>
void average_down_faces (const FabArray<FAB>& fine, FabArray<FAB>& crse,
                         const IntVect& ratio, int ngcrse)
{
    BL_PROFILE("average_down_faces");

    AMREX_ASSERT(crse.nComp() == fine.nComp());
    AMREX_ASSERT(fine.ixType() == crse.ixType());
    const auto type = fine.ixType();
    int dir;
    for (dir = 0; dir < AMREX_SPACEDIM; ++dir) {
        if (type.nodeCentered(dir)) { break; }
    }
    auto tmptype = type;
    tmptype.unset(dir);
    if (dir >= AMREX_SPACEDIM || !tmptype.cellCentered()) {
        amrex::Abort("average_down_faces: not face index type");
    }
    const int ncomp = crse.nComp();
    if (isMFIterSafe(fine, crse))
    {
#ifdef AMREX_USE_GPU
        if (Gpu::inLaunchRegion() && crse.isFusingCandidate()) {
            auto const& crsema = crse.arrays();
            auto const& finema = fine.const_arrays();
            ParallelFor(crse, IntVect(ngcrse), ncomp,
            [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k, int n) noexcept
            {
                amrex_avgdown_faces(i,j,k,n, crsema[box_no], finema[box_no], 0, 0, ratio, dir);
            });
            if (!Gpu::inNoSyncRegion()) {
                Gpu::streamSynchronize();
            }
        } else
#endif
        {
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
            for (MFIter mfi(crse,TilingIfNotGPU()); mfi.isValid(); ++mfi)
            {
                const Box& bx = mfi.growntilebox(ngcrse);
                auto const& crsearr = crse.array(mfi);
                auto const& finearr = fine.const_array(mfi);
                AMREX_HOST_DEVICE_PARALLEL_FOR_4D(bx, ncomp, i, j, k, n,
                {
                    amrex_avgdown_faces(i,j,k,n, crsearr, finearr, 0, 0, ratio, dir);
                });
            }
        }
    }
    else
    {
        FabArray<FAB> ctmp(amrex::coarsen(fine.boxArray(),ratio), fine.DistributionMap(),
                           ncomp, ngcrse, MFInfo(), DefaultFabFactory<FAB>());
        average_down_faces(fine, ctmp, ratio, ngcrse);
        crse.ParallelCopy(ctmp,0,0,ncomp,ngcrse,ngcrse);
    }
}

template <typename MF, std::enable_if_t<IsFabArray<MF>::value,int>>
void average_down_faces (const Array<const MF*,AMREX_SPACEDIM>& fine,
                         const Array<MF*,AMREX_SPACEDIM>& crse,
                         const IntVect& ratio, const Geometry& crse_geom)
{
    for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
        average_down_faces(*fine[idim], *crse[idim], ratio, crse_geom);
    }
}

template <typename FAB>
void average_down_faces (const FabArray<FAB>& fine, FabArray<FAB>& crse,
                         const IntVect& ratio, const Geometry& crse_geom)
{
    FabArray<FAB> ctmp(amrex::coarsen(fine.boxArray(),ratio), fine.DistributionMap(),
                       crse.nComp(), 0);
    average_down_faces(fine, ctmp, ratio, 0);
    crse.ParallelCopy(ctmp,0,0,crse.nComp(),0,0,crse_geom.periodicity());
}

template <typename MF, std::enable_if_t<IsFabArray<MF>::value,int> FOO>
Vector<typename MF::value_type> get_cell_data (MF const& mf, IntVect const& cell)
{
    using T = typename MF::value_type;
    const int ncomp = mf.nComp();
    Gpu::DeviceVector<T> dv(ncomp);
    auto* dp = dv.data();
    auto loc = cell.dim3();
    bool found = false;
    for (MFIter mfi(mf); mfi.isValid() && !found; ++mfi)
    {
        Box const& box = mfi.validbox();
        if (box.contains(cell)) {
            found = true;
            auto const& fab = mf.const_array(mfi);
            amrex::ParallelFor(1, [=] AMREX_GPU_DEVICE (int) noexcept
            {
                for (int n = 0; n < ncomp; ++n) {
                    dp[n] = fab(loc.x,loc.y,loc.z,n);
                }
            });
        }
    }
    Vector<T> hv;
    if (found) {
        hv.resize(ncomp);
        Gpu::copy(Gpu::deviceToHost, dv.begin(), dv.end(), hv.begin());
    }
    return hv;
}

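A usage sketch: probe one cell and print its components. Only the rank owning `cell` gets a non-empty vector (the function name is hypothetical):

#include <AMReX_MultiFabUtil.H>
#include <AMReX_Print.H>

void example_probe (amrex::MultiFab const& mf, amrex::IntVect const& cell)
{
    auto vals = amrex::get_cell_data(mf, cell);
    if (!vals.empty()) { // true only on the owning rank
        amrex::AllPrint() << "value at " << cell << " comp 0 = " << vals[0] << "\n";
    }
}
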
template <typename MF, std::enable_if_t<IsFabArray<MF>::value,int> FOO>
MF get_line_data (MF const& mf, int dir, IntVect const& cell, Box const& bnd_bx)
{
    bool do_bnd = (!bnd_bx.isEmpty());

    BoxArray const& ba = mf.boxArray();
    DistributionMapping const& dm = mf.DistributionMap();
    const auto nboxes = static_cast<int>(ba.size());

    BoxList bl(ba.ixType());
    Vector<int> procmap;
    Vector<int> index_map;
    if (!do_bnd) {
        for (int i = 0; i < nboxes; ++i) {
            Box const& b = ba[i];
            IntVect lo = cell;
            lo[dir] = b.smallEnd(dir);
            if (b.contains(lo)) {
                IntVect hi = lo;
                hi[dir] = b.bigEnd(dir);
                Box b1d(lo,hi,b.ixType());
                bl.push_back(b1d);
                procmap.push_back(dm[i]);
                index_map.push_back(i);
            }
        }
    } else {
        for (int i = 0; i < nboxes; ++i) {
            Box const& b = ba[i];
            Box const& b1d = bnd_bx & b;
            if (b1d.ok()) {
                bl.push_back(b1d);
                procmap.push_back(dm[i]);
                index_map.push_back(i);
            }
        }
    }

    if (bl.isEmpty()) {
        return MF();
    } else {
        BoxArray rba(std::move(bl));
        DistributionMapping rdm(std::move(procmap));
        MF rmf(rba, rdm, mf.nComp(), IntVect(0),
               MFInfo().SetArena(mf.arena()));
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
        for (MFIter mfi(rmf); mfi.isValid(); ++mfi) {
            Box const& b = mfi.validbox();
            auto const& dfab = rmf.array(mfi);
            auto const& sfab = mf.const_array(index_map[mfi.index()]);
            amrex::ParallelFor(b, mf.nComp(),
            [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept
            {
                dfab(i,j,k,n) = sfab(i,j,k,n);
            });
        }
        return rmf;
    }
}

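A usage sketch: extract the x-direction line of cells passing through `cell`. The returned MultiFab is distributed like the source boxes that intersect the line (names are hypothetical):

#include <AMReX_MultiFabUtil.H>

amrex::MultiFab example_line (amrex::MultiFab const& mf, amrex::IntVect const& cell)
{
    return amrex::get_line_data(mf, /*dir=*/0, cell);
}
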
namespace detail {
template <typename Op, typename T, typename F>
void reduce_to_plane (Array4<T> const& ar, int direction, Box const& bx, int box_no,
                      F const& f)
{
#if defined(AMREX_USE_GPU)
    Box b2d = bx;
    b2d.setRange(direction,0);
    const auto blo = amrex::lbound(bx);
    const auto len = amrex::length(bx);
    constexpr int nthreads = 128;
    auto nblocks = static_cast<int>(b2d.numPts());
#ifdef AMREX_USE_SYCL
    constexpr std::size_t shared_mem_bytes = sizeof(T)*Gpu::Device::warp_size;
    amrex::launch<nthreads>(nblocks, shared_mem_bytes, Gpu::gpuStream(),
    [=] AMREX_GPU_DEVICE (Gpu::Handler const& h)
    {
        int bid = h.blockIdx();
        int tid = h.threadIdx();
#else
    amrex::launch<nthreads>(nblocks, Gpu::gpuStream(),
    [=] AMREX_GPU_DEVICE ()
    {
        int bid = blockIdx.x;
        int tid = threadIdx.x;
#endif
        T tmp;
        Op().init(tmp);
        T* p;
        if (direction == 0) {
            int k = bid / len.y;
            int j = bid - k*len.y;
            k += blo.z;
            j += blo.y;
            for (int i = blo.x + tid; i < blo.x+len.x; i += nthreads) {
                Op().local_update(tmp, f(box_no,i,j,k));
            }
            p = ar.ptr(0,j,k);
        } else if (direction == 1) {
            int k = bid / len.x;
            int i = bid - k*len.x;
            k += blo.z;
            i += blo.x;
            for (int j = blo.y + tid; j < blo.y+len.y; j += nthreads) {
                Op().local_update(tmp, f(box_no,i,j,k));
            }
            p = ar.ptr(i,0,k);
        } else {
            int j = bid / len.x;
            int i = bid - j*len.x;
            j += blo.y;
            i += blo.x;
            for (int k = blo.z + tid; k < blo.z+len.z; k += nthreads) {
                Op().local_update(tmp, f(box_no,i,j,k));
            }
            p = ar.ptr(i,j,0);
        }
#ifdef AMREX_USE_SYCL
        Op().template parallel_update<T>(*p, tmp, h);
#else
        Op().template parallel_update<T,nthreads>(*p, tmp);
#endif
    });
#else
    // CPU
    if (direction == 0) {
        AMREX_LOOP_3D(bx, i, j, k,
        {
            Op().local_update(ar(0,j,k), f(box_no,i,j,k));
        });
    } else if (direction == 1) {
        AMREX_LOOP_3D(bx, i, j, k,
        {
            Op().local_update(ar(i,0,k), f(box_no,i,j,k));
        });
    } else {
        AMREX_LOOP_3D(bx, i, j, k,
        {
            Op().local_update(ar(i,j,0), f(box_no,i,j,k));
        });
    }
#endif
}
}

template <typename Op, typename T, typename FAB, typename F,
          std::enable_if_t<IsBaseFab<FAB>::value
#ifndef AMREX_USE_CUDA
                           && IsCallableR<T,F,int,int,int,int>::value
#endif
                           , int> FOO>
BaseFab<T>
ReduceToPlane (int direction, Box const& a_domain, FabArray<FAB> const& mf, F const& f)
{
    Box const domain = amrex::convert(a_domain, mf.ixType());

    Box domain2d = domain;
    domain2d.setRange(direction, 0);

    T initval;
    Op().init(initval);

    BaseFab<T> r(domain2d);
    r.template setVal<RunOn::Device>(initval);
    auto const& ar = r.array();

    for (MFIter mfi(mf,MFItInfo().UseDefaultStream().DisableDeviceSync());
         mfi.isValid(); ++mfi)
    {
        Box bx = mfi.validbox() & domain;
        if (bx.ok()) {
            int box_no = mfi.LocalIndex();
            detail::reduce_to_plane<Op, T>(ar, direction, bx, box_no, f);
        }
    }
    Gpu::streamSynchronize();

    return r;
}

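A usage sketch of ReduceToPlane: the per-plane maximum of component 0 along the z direction (3D; names are hypothetical). The result is rank-local, so a cross-rank reduction on its data may still be needed:

#include <AMReX_MultiFabUtil.H>
#include <AMReX_Reduce.H>

amrex::BaseFab<amrex::Real>
example_plane_max (amrex::MultiFab const& mf, amrex::Box const& domain)
{
    auto const& ma = mf.const_arrays();
    return amrex::ReduceToPlane<amrex::ReduceOpMax, amrex::Real>(
        2, domain, mf,
        [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k) -> amrex::Real
        {
            return ma[box_no](i,j,k,0);
        });
}
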
namespace detail {
template <typename Op, typename FA, typename F>
FA reduce_to_plane (int direction, Box const& domain, FA const& mf, F const& f)
{
    using T = typename FA::value_type;

    Box const ndomain = amrex::convert(domain, mf.ixType());

    auto npts = amrex::convert(mf.boxArray(),IntVect(0)).numPts();
    if (npts != amrex::convert(domain, amrex::IntVect(0)).numPts()) {
        amrex::Abort("ReduceToPlaneMF: mf's BoxArray must have a rectangular domain.");
    }

    T initval;
    Op().init(initval);

    BoxList bl = mf.boxArray().boxList();
    for (auto& b : bl) {
        b.setRange(direction, 0);
    }
    BoxArray ba(std::move(bl));
    FA tmpfa(ba, mf.DistributionMap(), 1, 0);
    tmpfa.setVal(initval);

    for (MFIter mfi(mf); mfi.isValid(); ++mfi)
    {
        Box bx = mfi.validbox() & ndomain;
        if (bx.ok()) {
            int box_no = mfi.LocalIndex();
            detail::reduce_to_plane<Op, T>(tmpfa.array(mfi), direction, bx, box_no, f);
        }
    }

    return tmpfa;
}
}

template <typename Op, typename FA, typename F,
          std::enable_if_t<IsMultiFabLike_v<FA>
#ifndef AMREX_USE_CUDA
                           && IsCallableR<typename FA::value_type,
                                          F,int,int,int,int>::value
#endif
                           , int> FOO>
FA ReduceToPlaneMF (int direction, Box const& domain, FA const& mf, F const& f)
{
    auto [fa3, fa2] = ReduceToPlaneMF2<Op>(direction, domain, mf, f);
    fa3.ParallelCopy(fa2);
    return std::move(fa3);
}

template <typename Op, typename FA, typename F,
          std::enable_if_t<IsMultiFabLike_v<FA>
#ifndef AMREX_USE_CUDA
                           && IsCallableR<typename FA::value_type,
                                          F,int,int,int,int>::value
#endif
                           , int> FOO>
std::pair<FA,FA>
ReduceToPlaneMF2 (int direction, Box const& domain, FA const& mf, F const& f)
{
    using T = typename FA::value_type;

    T initval;
    Op().init(initval);

    auto tmpmf = detail::reduce_to_plane<Op>(direction, domain, mf, f);

    BoxList bl2d(mf.ixType());
    Vector<int> procmap2d;
    auto const& ba3d = mf.boxArray();
    auto const& dm3d = mf.DistributionMap();
    int dlo = domain.smallEnd(direction);
    for (int i = 0, N = mf.size(); i < N; ++i) {
        Box b = ba3d[i];
        if (b.smallEnd(direction) <= dlo && dlo <= b.bigEnd(direction)) {
            b.setRange(direction, 0);
            bl2d.push_back(b);
            procmap2d.push_back(dm3d[i]);
        }
    }

    BoxArray ba2d(std::move(bl2d));
    DistributionMapping dm2d(std::move(procmap2d));

    FA mf2d(ba2d, dm2d, 1, 0);
    mf2d.setVal(initval);

    static_assert(std::is_same_v<Op, ReduceOpSum>, "Currently only ReduceOpSum is supported.");
    mf2d.ParallelAdd(tmpmf);

    return std::make_pair(std::move(tmpmf), std::move(mf2d));
}

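Similarly, a usage sketch of ReduceToPlaneMF, which returns the reduction as a plane-decomposed FabArray; per the static_assert above, only ReduceOpSum is currently supported (names are hypothetical):

#include <AMReX_MultiFabUtil.H>
#include <AMReX_Reduce.H>

amrex::MultiFab example_plane_sum (amrex::MultiFab const& mf, amrex::Box const& domain)
{
    auto const& ma = mf.const_arrays();
    // Sum component 0 over the x direction into a plane of cells.
    return amrex::ReduceToPlaneMF<amrex::ReduceOpSum>(
        0, domain, mf,
        [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k) -> amrex::Real
        {
            return ma[box_no](i,j,k,0);
        });
}
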
}

#endif