AMReX_MultiFabUtil.H

#ifndef AMREX_MultiFabUtil_H_
#define AMREX_MultiFabUtil_H_
#include <AMReX_Config.H>

#include <AMReX_MultiFab.H>
#include <AMReX_iMultiFab.H>
#include <AMReX_LayoutData.H>
#include <AMReX_MFIter.H>
#include <AMReX_Array.H>
#include <AMReX_Vector.H>
#include <AMReX_MultiFabUtil_C.H>

#include <AMReX_MultiFabUtilI.H>

namespace amrex
{
    //! Average nodal-based MultiFab onto cell-centered MultiFab.
    void average_node_to_cellcenter (MultiFab& cc, int dcomp,
                                     const MultiFab& nd, int scomp,
                                     int ncomp, int ngrow = 0);

    //! Average edge-based MultiFab onto cell-centered MultiFab.
    void average_edge_to_cellcenter (MultiFab& cc, int dcomp,
                                     const Vector<const MultiFab*>& edge,
                                     int ngrow = 0);

    //! Average face-based MultiFab onto cell-centered MultiFab.
    void average_face_to_cellcenter (MultiFab& cc, int dcomp,
                                     const Vector<const MultiFab*>& fc,
                                     int ngrow = 0);

    template <typename CMF, typename FMF,
              std::enable_if_t<IsFabArray_v<CMF> && IsFabArray_v<FMF>, int> = 0>
    void average_face_to_cellcenter (CMF& cc, int dcomp,
                                     const Array<const FMF*,AMREX_SPACEDIM>& fc,
                                     int ngrow = 0);

    void average_face_to_cellcenter (MultiFab& cc,
                                     const Vector<const MultiFab*>& fc,
                                     const Geometry& geom);

    void average_face_to_cellcenter (MultiFab& cc,
                                     const Array<const MultiFab*,AMREX_SPACEDIM>& fc,
                                     const Geometry& geom);

    //! Average cell-centered MultiFab onto face-based MultiFab with geometric weighting.
    void average_cellcenter_to_face (const Vector<MultiFab*>& fc,
                                     const MultiFab& cc,
                                     const Geometry& geom,
                                     int ncomp = 1,
                                     bool use_harmonic_averaging = false);

    void average_cellcenter_to_face (const Array<MultiFab*,AMREX_SPACEDIM>& fc,
                                     const MultiFab& cc,
                                     const Geometry& geom,
                                     int ncomp = 1,
                                     bool use_harmonic_averaging = false);
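
    // Usage sketch (editor's illustration): average AMREX_SPACEDIM face-centered
    // MultiFabs (e.g. a MAC velocity umac/vmac/wmac; all names here are
    // hypothetical) onto the first AMREX_SPACEDIM components of a
    // cell-centered MultiFab.
    //
    //     MultiFab vel_cc(ba, dm, AMREX_SPACEDIM, 0);
    //     average_face_to_cellcenter(vel_cc, 0,
    //         Array<const MultiFab*,AMREX_SPACEDIM>{{AMREX_D_DECL(&umac,&vmac,&wmac)}});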

    //! Average fine face-based FabArray onto crse face-based FabArray.
    template <typename MF, std::enable_if_t<IsFabArray<MF>::value,int> = 0>
    void average_down_faces (const Vector<const MF*>& fine,
                             const Vector<MF*>& crse,
                             const IntVect& ratio,
                             int ngcrse = 0);

    template <typename MF, std::enable_if_t<IsFabArray<MF>::value,int> = 0>
    void average_down_faces (const Vector<const MF*>& fine,
                             const Vector<MF*>& crse,
                             int ratio,
                             int ngcrse = 0);

    template <typename MF, std::enable_if_t<IsFabArray<MF>::value,int> = 0>
    void average_down_faces (const Array<const MF*,AMREX_SPACEDIM>& fine,
                             const Array<MF*,AMREX_SPACEDIM>& crse,
                             const IntVect& ratio,
                             int ngcrse = 0);

    template <typename MF, std::enable_if_t<IsFabArray<MF>::value,int> = 0>
    void average_down_faces (const Array<const MF*,AMREX_SPACEDIM>& fine,
                             const Array<MF*,AMREX_SPACEDIM>& crse,
                             int ratio,
                             int ngcrse = 0);

    template <typename FAB>
    void average_down_faces (const FabArray<FAB>& fine, FabArray<FAB>& crse,
                             const IntVect& ratio, int ngcrse=0);

    // This version takes periodicity into account.
    template <typename MF, std::enable_if_t<IsFabArray<MF>::value,int> = 0>
    void average_down_faces (const Array<const MF*,AMREX_SPACEDIM>& fine,
                             const Array<MF*,AMREX_SPACEDIM>& crse,
                             const IntVect& ratio, const Geometry& crse_geom);

    // This version takes periodicity into account.
    template <typename FAB>
    void average_down_faces (const FabArray<FAB>& fine, FabArray<FAB>& crse,
                             const IntVect& ratio, const Geometry& crse_geom);

    //! Average fine edge-based MultiFab onto crse edge-based MultiFab.
    void average_down_edges (const Vector<const MultiFab*>& fine,
                             const Vector<MultiFab*>& crse,
                             const IntVect& ratio,
                             int ngcrse = 0);

    void average_down_edges (const Array<const MultiFab*,AMREX_SPACEDIM>& fine,
                             const Array<MultiFab*,AMREX_SPACEDIM>& crse,
                             const IntVect& ratio,
                             int ngcrse = 0);

    void average_down_edges (const MultiFab& fine, MultiFab& crse,
                             const IntVect& ratio, int ngcrse=0);

    //! Average fine node-based MultiFab onto crse node-centered MultiFab.
    template <typename FAB>
    void average_down_nodal (const FabArray<FAB>& S_fine,
                             FabArray<FAB>& S_crse,
                             const IntVect& ratio,
                             int ngcrse = 0,
                             bool mfiter_is_definitely_safe=false);

    //! Average fine cell-based MultiFab onto crse cell-centered MultiFab.
    void average_down (const MultiFab& S_fine, MultiFab& S_crse,
                       const Geometry& fgeom, const Geometry& cgeom,
                       int scomp, int ncomp, const IntVect& ratio);
    void average_down (const MultiFab& S_fine, MultiFab& S_crse,
                       const Geometry& fgeom, const Geometry& cgeom,
                       int scomp, int ncomp, int rr);

    template<typename FAB>
    void average_down (const FabArray<FAB>& S_fine, FabArray<FAB>& S_crse,
                       int scomp, int ncomp, const IntVect& ratio);
    template<typename FAB>
    void average_down (const FabArray<FAB>& S_fine, FabArray<FAB>& S_crse,
                       int scomp, int ncomp, int rr);
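
    // Usage sketch (editor's illustration): restrict fine-level data onto the
    // underlying region of the coarse level for a refinement ratio of 2. The
    // level data S_fine/S_crse and geometries fgeom/cgeom are hypothetical.
    //
    //     average_down(S_fine, S_crse, fgeom, cgeom, 0, S_fine.nComp(), IntVect(2));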

    void sum_fine_to_coarse (const MultiFab& S_fine, MultiFab& S_crse,
                             int scomp, int ncomp,
                             const IntVect& ratio,
                             const Geometry& cgeom, const Geometry& fgeom);

    //! Output state data for a single zone.
    void print_state (const MultiFab& mf, const IntVect& cell, int n=-1,
                      const IntVect& ng = IntVect::TheZeroVector());

    //! Write each fab individually.
    void writeFabs (const MultiFab& mf, const std::string& name);
    void writeFabs (const MultiFab& mf, int comp, int ncomp, const std::string& name);

    std::unique_ptr<MultiFab> get_slice_data (int dir, Real coord,
                                              const MultiFab& cc,
                                              const Geometry& geom, int start_comp, int ncomp,
                                              bool interpolate=false,
                                              RealBox const& bnd_rbx = RealBox());

    //! Get data in a cell of MultiFab/FabArray.
    template <typename MF, std::enable_if_t<IsFabArray<MF>::value,int> FOO = 0>
    Vector<typename MF::value_type> get_cell_data (MF const& mf, IntVect const& cell);

    //! Get data in a line of MultiFab/FabArray.
    template <typename MF, std::enable_if_t<IsFabArray<MF>::value,int> FOO = 0>
    MF get_line_data (MF const& mf, int dir, IntVect const& cell, Box const& bnd_bx = Box());

    //! Return a mask that is fine_value in coarse cells covered by the fine
    //! BoxArray and crse_value elsewhere.
    template <typename FAB>
    iMultiFab makeFineMask (const FabArray<FAB>& cmf, const BoxArray& fba, const IntVect& ratio,
                            int crse_value = 0, int fine_value = 1);
    iMultiFab makeFineMask (const BoxArray& cba, const DistributionMapping& cdm,
                            const BoxArray& fba, const IntVect& ratio,
                            int crse_value = 0, int fine_value = 1);
    template <typename FAB>
    iMultiFab makeFineMask (const FabArray<FAB>& cmf, const BoxArray& fba, const IntVect& ratio,
                            Periodicity const& period, int crse_value, int fine_value);
    iMultiFab makeFineMask (const BoxArray& cba, const DistributionMapping& cdm,
                            const IntVect& cnghost, const BoxArray& fba, const IntVect& ratio,
                            Periodicity const& period, int crse_value, int fine_value);
    template <typename FAB>
    iMultiFab makeFineMask (const FabArray<FAB>& cmf, const FabArray<FAB>& fmf,
                            const IntVect& cnghost, const IntVect& ratio,
                            Periodicity const& period, int crse_value, int fine_value);
    template <typename FAB>
    iMultiFab makeFineMask (const FabArray<FAB>& cmf, const FabArray<FAB>& fmf,
                            const IntVect& cnghost, const IntVect& ratio,
                            Periodicity const& period, int crse_value, int fine_value,
                            LayoutData<int>& has_cf);

    MultiFab makeFineMask (const BoxArray& cba, const DistributionMapping& cdm,
                           const BoxArray& fba, const IntVect& ratio,
                           Real crse_value, Real fine_value);

    //! Computes divergence of face-data stored in the umac MultiFab.
    void computeDivergence (MultiFab& divu, const Array<MultiFab const*,AMREX_SPACEDIM>& umac,
                            const Geometry& geom);
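
    // Usage sketch (editor's illustration): divu, umac/vmac/wmac, grids, dmap,
    // and geom are hypothetical; each face MultiFab holds one normal component.
    //
    //     MultiFab divu(grids, dmap, 1, 0);
    //     computeDivergence(divu, {{AMREX_D_DECL(&umac,&vmac,&wmac)}}, geom);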

    //! Computes gradient of face-data stored in the umac MultiFab.
    void computeGradient (MultiFab& grad, const Array<MultiFab const*,AMREX_SPACEDIM>& umac,
                          const Geometry& geom);

    //! Convert iMultiFab to MultiFab.
    MultiFab ToMultiFab (const iMultiFab& imf);
    //! Convert iMultiFab to FabArray<BaseFab<Long>>.
    FabArray<BaseFab<Long> > ToLongMultiFab (const iMultiFab& imf);

    //! Periodically shift a MultiFab.
    MultiFab periodicShift (MultiFab const& mf, IntVect const& offset,
                            Periodicity const& period);

    //! Cast a FabArray to a FabArray with a different value type.
    template <typename T, typename U>
    T cast (U const& mf_in)
    {
        T mf_out(mf_in.boxArray(), mf_in.DistributionMap(), mf_in.nComp(), mf_in.nGrowVect());

#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
        for (MFIter mfi(mf_in); mfi.isValid(); ++mfi)
        {
            const Long n = mfi.fabbox().numPts() * mf_in.nComp();
            auto * pdst = mf_out[mfi].dataPtr();
            auto const* psrc = mf_in[mfi].dataPtr();
            AMREX_HOST_DEVICE_PARALLEL_FOR_1D (n, i,
            {
                pdst[i] = static_cast<typename U::value_type>(psrc[i]); // NOLINT(bugprone-signed-char-misuse)
            });
        }
        return mf_out;
    }
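
    // Usage sketch (editor's illustration): convert a hypothetical integer
    // mask imask (an iMultiFab) into a MultiFab of Reals.
    //
    //     MultiFab rmask = cast<MultiFab>(imask);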

    //! Reduce FabArray/MultiFab data to a plane.
    template <typename Op, typename T, typename FAB, typename F,
              std::enable_if_t<IsBaseFab<FAB>::value
#ifndef AMREX_USE_CUDA
                               && IsCallableR<T,F,int,int,int,int>::value
#endif
                               , int> FOO = 0>
    BaseFab<T>
    ReduceToPlane (int direction, Box const& domain, FabArray<FAB> const& mf, F const& f);

    //! Sum MultiFab data to a line.
    Gpu::HostVector<Real> sumToLine (MultiFab const& mf, int icomp, int ncomp,
                                     Box const& domain, int direction, bool local = false);

    //! Volume weighted sum for a vector of MultiFabs.
    Real volumeWeightedSum (Vector<MultiFab const*> const& mf, int icomp,
                            Vector<Geometry> const& geom,
                            Vector<IntVect> const& ratio,
                            bool local = false);

    //! Fourth-order interpolation from fine to coarse level.
    void FourthOrderInterpFromFineToCoarse (MultiFab& cmf, int scomp, int ncomp,
                                            MultiFab const& fmf,
                                            IntVect const& ratio);

    //! Fill MultiFab with random numbers from uniform distribution.
    void FillRandom (MultiFab& mf, int scomp, int ncomp);

    //! Fill MultiFab with random numbers from normal distribution.
    void FillRandomNormal (MultiFab& mf, int scomp, int ncomp, Real mean, Real stddev);

    //! Convexify AMR data.
    [[nodiscard]] Vector<MultiFab> convexify (Vector<MultiFab const*> const& mf,
                                              Vector<IntVect> const& refinement_ratio);
}

namespace amrex {

template <typename FAB>
iMultiFab
makeFineMask (const FabArray<FAB>& cmf, const BoxArray& fba, const IntVect& ratio,
              int crse_value, int fine_value)
{
    return makeFineMask(cmf.boxArray(), cmf.DistributionMap(), cmf.nGrowVect(),
                        fba, ratio, Periodicity::NonPeriodic(), crse_value, fine_value);
}

template <typename FAB>
iMultiFab
makeFineMask (const FabArray<FAB>& cmf, const BoxArray& fba, const IntVect& ratio,
              Periodicity const& period, int crse_value, int fine_value)
{
    return makeFineMask(cmf.boxArray(), cmf.DistributionMap(), cmf.nGrowVect(),
                        fba, ratio, period, crse_value, fine_value);
}

template <typename FAB>
iMultiFab
makeFineMask (const FabArray<FAB>& cmf, const FabArray<FAB>& fmf,
              const IntVect& cnghost, const IntVect& ratio,
              Periodicity const& period, int crse_value, int fine_value)
{
    iMultiFab mask(cmf.boxArray(), cmf.DistributionMap(), 1, cnghost);
    mask.setVal(crse_value);

    iMultiFab foo(amrex::coarsen(fmf.boxArray(),ratio), fmf.DistributionMap(),
                  1, 0, MFInfo().SetAlloc(false));
    const FabArrayBase::CPC& cpc = mask.getCPC(cnghost,foo,IntVect::TheZeroVector(),period);
    mask.setVal(fine_value, cpc, 0, 1);

    return mask;
}

template <typename FAB>
iMultiFab
makeFineMask (const FabArray<FAB>& cmf, const FabArray<FAB>& fmf,
              const IntVect& cnghost, const IntVect& ratio,
              Periodicity const& period, int crse_value, int fine_value,
              LayoutData<int>& has_cf)
{
    iMultiFab mask(cmf.boxArray(), cmf.DistributionMap(), 1, cnghost);
    mask.setVal(crse_value);

    iMultiFab foo(amrex::coarsen(fmf.boxArray(),ratio), fmf.DistributionMap(),
                  1, 0, MFInfo().SetAlloc(false));
    const FabArrayBase::CPC& cpc = mask.getCPC(cnghost,foo,IntVect::TheZeroVector(),period);
    mask.setVal(fine_value, cpc, 0, 1);

    has_cf = mask.RecvLayoutMask(cpc);

    return mask;
}
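
// Usage sketch (editor's illustration): mark which cells of a hypothetical
// coarse-level FabArray cmf are covered by the fine BoxArray fba. With the
// default values, the result is 1 where covered and 0 elsewhere.
//
//     iMultiFab mask = makeFineMask(cmf, fba, IntVect(2));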

template <typename FAB>
void average_down_nodal (const FabArray<FAB>& fine, FabArray<FAB>& crse,
                         const IntVect& ratio, int ngcrse, bool mfiter_is_definitely_safe)
{
    AMREX_ASSERT(fine.is_nodal());
    AMREX_ASSERT(crse.is_nodal());
    AMREX_ASSERT(crse.nComp() == fine.nComp());

    int ncomp = crse.nComp();
    using value_type = typename FAB::value_type;

    if (mfiter_is_definitely_safe || isMFIterSafe(fine, crse))
    {
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
        for (MFIter mfi(crse,TilingIfNotGPU()); mfi.isValid(); ++mfi)
        {
            const Box& bx = mfi.growntilebox(ngcrse);
            Array4<value_type> const& crsearr = crse.array(mfi);
            Array4<value_type const> const& finearr = fine.const_array(mfi);

            AMREX_LAUNCH_HOST_DEVICE_LAMBDA (bx, tbx,
            {
                amrex_avgdown_nodes(tbx,crsearr,finearr,0,0,ncomp,ratio);
            });
        }
    }
    else
    {
        FabArray<FAB> ctmp(amrex::coarsen(fine.boxArray(),ratio), fine.DistributionMap(),
                           ncomp, ngcrse);
        average_down_nodal(fine, ctmp, ratio, ngcrse);
        crse.ParallelCopy(ctmp,0,0,ncomp,ngcrse,ngcrse);
    }
}
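
// Usage sketch (editor's illustration): average a nodal fine-level FabArray
// down to a nodal coarse-level FabArray by a factor of 2. The MultiFabs
// fine_nd and crse_nd are hypothetical.
//
//     average_down_nodal(fine_nd, crse_nd, IntVect(2));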

// *************************************************************************************************************

// Average fine cell-based MultiFab onto crse cell-centered MultiFab.
// We do NOT assume that the coarse layout is a coarsened version of the fine layout.
// This version does NOT use volume-weighting.
template<typename FAB>
void average_down (const FabArray<FAB>& S_fine, FabArray<FAB>& S_crse, int scomp, int ncomp, int rr)
{
    average_down(S_fine,S_crse,scomp,ncomp,rr*IntVect::TheUnitVector());
}

template<typename FAB>
void average_down (const FabArray<FAB>& S_fine, FabArray<FAB>& S_crse,
                   int scomp, int ncomp, const IntVect& ratio)
{
    BL_PROFILE("amrex::average_down");
    AMREX_ASSERT(S_crse.nComp() == S_fine.nComp());
    AMREX_ASSERT((S_crse.is_cell_centered() && S_fine.is_cell_centered()) ||
                 (S_crse.is_nodal()         && S_fine.is_nodal()));

    using value_type = typename FAB::value_type;

    bool is_cell_centered = S_crse.is_cell_centered();

    //
    // Coarsen() the fine stuff on processors owning the fine data.
    //
    BoxArray crse_S_fine_BA = S_fine.boxArray(); crse_S_fine_BA.coarsen(ratio);

    if (crse_S_fine_BA == S_crse.boxArray() && S_fine.DistributionMap() == S_crse.DistributionMap())
    {
#ifdef AMREX_USE_GPU
        if (Gpu::inLaunchRegion() && S_crse.isFusingCandidate()) {
            auto const& crsema = S_crse.arrays();
            auto const& finema = S_fine.const_arrays();
            if (is_cell_centered) {
                ParallelFor(S_crse, IntVect(0), ncomp,
                [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k, int n) noexcept
                {
                    amrex_avgdown(i,j,k,n,crsema[box_no],finema[box_no],scomp,scomp,ratio);
                });
            } else {
                ParallelFor(S_crse, IntVect(0), ncomp,
                [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k, int n) noexcept
                {
                    amrex_avgdown_nodes(i,j,k,n,crsema[box_no],finema[box_no],scomp,scomp,ratio);
                });
            }
            if (!Gpu::inNoSyncRegion()) {
                Gpu::streamSynchronize();
            }
        } else
#endif
        {
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
            for (MFIter mfi(S_crse,TilingIfNotGPU()); mfi.isValid(); ++mfi)
            {
                // NOTE: The tilebox is defined at the coarse level.
                const Box& bx = mfi.tilebox();
                Array4<value_type> const& crsearr = S_crse.array(mfi);
                Array4<value_type const> const& finearr = S_fine.const_array(mfi);

                if (is_cell_centered) {
                    AMREX_HOST_DEVICE_PARALLEL_FOR_4D(bx, ncomp, i, j, k, n,
                    {
                        amrex_avgdown(i,j,k,n,crsearr,finearr,scomp,scomp,ratio);
                    });
                } else {
                    AMREX_HOST_DEVICE_PARALLEL_FOR_4D(bx, ncomp, i, j, k, n,
                    {
                        amrex_avgdown_nodes(i,j,k,n,crsearr,finearr,scomp,scomp,ratio);
                    });
                }
            }
        }
    }
    else
    {
        FabArray<FAB> crse_S_fine(crse_S_fine_BA, S_fine.DistributionMap(), ncomp, 0, MFInfo(), DefaultFabFactory<FAB>());

#ifdef AMREX_USE_GPU
        if (Gpu::inLaunchRegion() && crse_S_fine.isFusingCandidate()) {
            auto const& crsema = crse_S_fine.arrays();
            auto const& finema = S_fine.const_arrays();
            if (is_cell_centered) {
                ParallelFor(crse_S_fine, IntVect(0), ncomp,
                [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k, int n) noexcept
                {
                    amrex_avgdown(i,j,k,n,crsema[box_no],finema[box_no],0,scomp,ratio);
                });
            } else {
                ParallelFor(crse_S_fine, IntVect(0), ncomp,
                [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k, int n) noexcept
                {
                    amrex_avgdown_nodes(i,j,k,n,crsema[box_no],finema[box_no],0,scomp,ratio);
                });
            }
            if (!Gpu::inNoSyncRegion()) {
                Gpu::streamSynchronize();
            }
        } else
#endif
        {
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
            for (MFIter mfi(crse_S_fine,TilingIfNotGPU()); mfi.isValid(); ++mfi)
            {
                // NOTE: The tilebox is defined at the coarse level.
                const Box& bx = mfi.tilebox();
                Array4<value_type> const& crsearr = crse_S_fine.array(mfi);
                Array4<value_type const> const& finearr = S_fine.const_array(mfi);

                // NOTE: We copy from component scomp of the fine fab into component 0
                //       of the crse fab because the crse fab is a temporary which was
                //       made starting at comp 0; it is not part of the actual crse
                //       multifab which came in.

                if (is_cell_centered) {
                    AMREX_HOST_DEVICE_PARALLEL_FOR_4D(bx, ncomp, i, j, k, n,
                    {
                        amrex_avgdown(i,j,k,n,crsearr,finearr,0,scomp,ratio);
                    });
                } else {
                    AMREX_HOST_DEVICE_PARALLEL_FOR_4D(bx, ncomp, i, j, k, n,
                    {
                        amrex_avgdown_nodes(i,j,k,n,crsearr,finearr,0,scomp,ratio);
                    });
                }
            }
        }

        S_crse.ParallelCopy(crse_S_fine,0,scomp,ncomp);
    }
}

//! Returns part of a norm based on two MultiFabs.
template <typename F>
Real
NormHelper (const MultiFab& x, int xcomp,
            const MultiFab& y, int ycomp,
            F const& f,
            int numcomp, IntVect nghost, bool local)
{
    BL_ASSERT(x.boxArray() == y.boxArray());
    BL_ASSERT(x.DistributionMap() == y.DistributionMap());
    BL_ASSERT(x.nGrowVect().allGE(nghost) && y.nGrowVect().allGE(nghost));

    Real sm = Real(0.0);
#ifdef AMREX_USE_GPU
    if (Gpu::inLaunchRegion()) {
        auto const& xma = x.const_arrays();
        auto const& yma = y.const_arrays();
        sm = ParReduce(TypeList<ReduceOpSum>{}, TypeList<Real>{}, x, nghost,
        [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k) noexcept -> GpuTuple<Real>
        {
            Real t = Real(0.0);
            auto const& xfab = xma[box_no];
            auto const& yfab = yma[box_no];
            for (int n = 0; n < numcomp; ++n) {
                t += f(xfab(i,j,k,xcomp+n), yfab(i,j,k,ycomp+n));
            }
            return t;
        });
    } else
#endif
    {
#ifdef AMREX_USE_OMP
#pragma omp parallel if (!system::regtest_reduction) reduction(+:sm)
#endif
        for (MFIter mfi(x,true); mfi.isValid(); ++mfi)
        {
            Box const& bx = mfi.growntilebox(nghost);
            Array4<Real const> const& xfab = x.const_array(mfi);
            Array4<Real const> const& yfab = y.const_array(mfi);
            AMREX_LOOP_4D(bx, numcomp, i, j, k, n,
            {
                sm += f(xfab(i,j,k,xcomp+n), yfab(i,j,k,ycomp+n));
            });
        }
    }

    if (!local) {
        ParallelAllReduce::Sum(sm, ParallelContext::CommunicatorSub());
    }

    return sm;
}
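
// Usage sketch (editor's illustration): a global dot product of two
// hypothetical MultiFabs x and y over their valid regions. The binary
// functor must be callable on the device when GPU support is enabled.
//
//     Real dot = NormHelper(x, 0, y, 0,
//                           [=] AMREX_GPU_HOST_DEVICE (Real a, Real b) { return a*b; },
//                           1, IntVect(0), false);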

//! Returns part of a norm based on two MultiFabs, restricted to cells where
//! the mask predicate pf is true.
template <typename MMF, typename Pred, typename F>
Real
NormHelper (const MMF& mask,
            const MultiFab& x, int xcomp,
            const MultiFab& y, int ycomp,
            Pred const& pf,
            F const& f,
            int numcomp, IntVect nghost, bool local)
{
    BL_ASSERT(x.boxArray() == y.boxArray());
    BL_ASSERT(x.boxArray() == mask.boxArray());
    BL_ASSERT(x.DistributionMap() == y.DistributionMap());
    BL_ASSERT(x.DistributionMap() == mask.DistributionMap());
    BL_ASSERT(x.nGrowVect().allGE(nghost) && y.nGrowVect().allGE(nghost));
    BL_ASSERT(mask.nGrowVect().allGE(nghost));

    Real sm = Real(0.0);
#ifdef AMREX_USE_GPU
    if (Gpu::inLaunchRegion()) {
        auto const& xma = x.const_arrays();
        auto const& yma = y.const_arrays();
        auto const& mma = mask.const_arrays();
        sm = ParReduce(TypeList<ReduceOpSum>{}, TypeList<Real>{}, x, nghost,
        [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k) noexcept -> GpuTuple<Real>
        {
            Real t = Real(0.0);
            if (pf(mma[box_no](i,j,k))) {
                auto const& xfab = xma[box_no];
                auto const& yfab = yma[box_no];
                for (int n = 0; n < numcomp; ++n) {
                    t += f(xfab(i,j,k,xcomp+n), yfab(i,j,k,ycomp+n));
                }
            }
            return t;
        });
    } else
#endif
    {
#ifdef AMREX_USE_OMP
#pragma omp parallel if (!system::regtest_reduction) reduction(+:sm)
#endif
        for (MFIter mfi(x,true); mfi.isValid(); ++mfi)
        {
            Box const& bx = mfi.growntilebox(nghost);
            Array4<Real const> const& xfab = x.const_array(mfi);
            Array4<Real const> const& yfab = y.const_array(mfi);
            auto const& mfab = mask.const_array(mfi);
            AMREX_LOOP_4D(bx, numcomp, i, j, k, n,
            {
                if (pf(mfab(i,j,k))) {
                    sm += f(xfab(i,j,k,xcomp+n), yfab(i,j,k,ycomp+n));
                }
            });
        }
    }

    if (!local) {
        ParallelAllReduce::Sum(sm, ParallelContext::CommunicatorSub());
    }

    return sm;
}

template <typename CMF, typename FMF,
          std::enable_if_t<IsFabArray_v<CMF> && IsFabArray_v<FMF>, int> FOO>
void average_face_to_cellcenter (CMF& cc, int dcomp,
                                 const Array<const FMF*,AMREX_SPACEDIM>& fc,
                                 int ngrow)
{
    AMREX_ASSERT(cc.nComp() >= dcomp + AMREX_SPACEDIM);
    AMREX_ASSERT(fc[0]->nComp() == 1);

#ifdef AMREX_USE_GPU
    if (Gpu::inLaunchRegion() && cc.isFusingCandidate()) {
        auto const& ccma = cc.arrays();
        AMREX_D_TERM(auto const& fxma = fc[0]->const_arrays();,
                     auto const& fyma = fc[1]->const_arrays();,
                     auto const& fzma = fc[2]->const_arrays(););
        ParallelFor(cc, IntVect(ngrow),
        [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k) noexcept
        {
#if (AMREX_SPACEDIM == 1)
            GeometryData gd{};
            gd.coord = 0;
#endif
            amrex_avg_fc_to_cc(i,j,k, ccma[box_no], AMREX_D_DECL(fxma[box_no],
                                                                 fyma[box_no],
                                                                 fzma[box_no]),
                               dcomp
#if (AMREX_SPACEDIM == 1)
                               , gd
#endif
                               );
        });
        if (!Gpu::inNoSyncRegion()) {
            Gpu::streamSynchronize();
        }
    } else
#endif
    {
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
        for (MFIter mfi(cc,TilingIfNotGPU()); mfi.isValid(); ++mfi)
        {
            const Box bx = mfi.growntilebox(ngrow);
            auto const& ccarr = cc.array(mfi);
            AMREX_D_TERM(auto const& fxarr = fc[0]->const_array(mfi);,
                         auto const& fyarr = fc[1]->const_array(mfi);,
                         auto const& fzarr = fc[2]->const_array(mfi););

#if (AMREX_SPACEDIM == 1)
            AMREX_HOST_DEVICE_PARALLEL_FOR_3D(bx, i, j, k,
            {
                GeometryData gd;
                gd.coord = 0;
                amrex_avg_fc_to_cc(i,j,k, ccarr, fxarr, dcomp, gd);
            });
#else
            AMREX_HOST_DEVICE_PARALLEL_FOR_3D(bx, i, j, k,
            {
                amrex_avg_fc_to_cc(i,j,k, ccarr, AMREX_D_DECL(fxarr,fyarr,fzarr), dcomp);
            });
#endif
        }
    }
}

template <typename MF, std::enable_if_t<IsFabArray<MF>::value,int>>
void average_down_faces (const Vector<const MF*>& fine,
                         const Vector<MF*>& crse,
                         const IntVect& ratio, int ngcrse)
{
    AMREX_ASSERT(fine.size() == AMREX_SPACEDIM && crse.size() == AMREX_SPACEDIM);
    average_down_faces(Array<const MF*,AMREX_SPACEDIM>
                               {{AMREX_D_DECL(fine[0],fine[1],fine[2])}},
                       Array<MF*,AMREX_SPACEDIM>
                               {{AMREX_D_DECL(crse[0],crse[1],crse[2])}},
                       ratio, ngcrse);
}

template <typename MF, std::enable_if_t<IsFabArray<MF>::value,int>>
void average_down_faces (const Vector<const MF*>& fine,
                         const Vector<MF*>& crse, int ratio, int ngcrse)
{
    average_down_faces(fine,crse,IntVect{ratio},ngcrse);
}

template <typename MF, std::enable_if_t<IsFabArray<MF>::value,int>>
void average_down_faces (const Array<const MF*,AMREX_SPACEDIM>& fine,
                         const Array<MF*,AMREX_SPACEDIM>& crse,
                         int ratio, int ngcrse)
{
    average_down_faces(fine,crse,IntVect{ratio},ngcrse);
}

template <typename MF, std::enable_if_t<IsFabArray<MF>::value,int>>
void average_down_faces (const Array<const MF*,AMREX_SPACEDIM>& fine,
                         const Array<MF*,AMREX_SPACEDIM>& crse,
                         const IntVect& ratio, int ngcrse)
{
    for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
        average_down_faces(*fine[idim], *crse[idim], ratio, ngcrse);
    }
}

template <typename FAB>
void average_down_faces (const FabArray<FAB>& fine, FabArray<FAB>& crse,
                         const IntVect& ratio, int ngcrse)
{
    BL_PROFILE("average_down_faces");

    AMREX_ASSERT(crse.nComp() == fine.nComp());
    AMREX_ASSERT(fine.ixType() == crse.ixType());
    const auto type = fine.ixType();
    int dir;
    for (dir = 0; dir < AMREX_SPACEDIM; ++dir) {
        if (type.nodeCentered(dir)) { break; }
    }
    auto tmptype = type;
    tmptype.unset(dir);
    if (dir >= AMREX_SPACEDIM || !tmptype.cellCentered()) {
        amrex::Abort("average_down_faces: not face index type");
    }
    const int ncomp = crse.nComp();
    if (isMFIterSafe(fine, crse))
    {
#ifdef AMREX_USE_GPU
        if (Gpu::inLaunchRegion() && crse.isFusingCandidate()) {
            auto const& crsema = crse.arrays();
            auto const& finema = fine.const_arrays();
            ParallelFor(crse, IntVect(ngcrse), ncomp,
            [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k, int n) noexcept
            {
                amrex_avgdown_faces(i,j,k,n, crsema[box_no], finema[box_no], 0, 0, ratio, dir);
            });
            if (!Gpu::inNoSyncRegion()) {
                Gpu::streamSynchronize();
            }
        } else
#endif
        {
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
            for (MFIter mfi(crse,TilingIfNotGPU()); mfi.isValid(); ++mfi)
            {
                const Box& bx = mfi.growntilebox(ngcrse);
                auto const& crsearr = crse.array(mfi);
                auto const& finearr = fine.const_array(mfi);
                AMREX_HOST_DEVICE_PARALLEL_FOR_4D(bx, ncomp, i, j, k, n,
                {
                    amrex_avgdown_faces(i,j,k,n, crsearr, finearr, 0, 0, ratio, dir);
                });
            }
        }
    }
    else
    {
        FabArray<FAB> ctmp(amrex::coarsen(fine.boxArray(),ratio), fine.DistributionMap(),
                           ncomp, ngcrse, MFInfo(), DefaultFabFactory<FAB>());
        average_down_faces(fine, ctmp, ratio, ngcrse);
        crse.ParallelCopy(ctmp,0,0,ncomp,ngcrse,ngcrse);
    }
}
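
// Usage sketch (editor's illustration): average a single face-centered flux
// FabArray down by a ratio of 2; fine_flux and crse_flux are hypothetical
// MultiFabs with the same face index type.
//
//     average_down_faces(fine_flux, crse_flux, IntVect(2));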

template <typename MF, std::enable_if_t<IsFabArray<MF>::value,int>>
void average_down_faces (const Array<const MF*,AMREX_SPACEDIM>& fine,
                         const Array<MF*,AMREX_SPACEDIM>& crse,
                         const IntVect& ratio, const Geometry& crse_geom)
{
    for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
        average_down_faces(*fine[idim], *crse[idim], ratio, crse_geom);
    }
}

template <typename FAB>
void average_down_faces (const FabArray<FAB>& fine, FabArray<FAB>& crse,
                         const IntVect& ratio, const Geometry& crse_geom)
{
    FabArray<FAB> ctmp(amrex::coarsen(fine.boxArray(),ratio), fine.DistributionMap(),
                       crse.nComp(), 0);
    average_down_faces(fine, ctmp, ratio, 0);
    crse.ParallelCopy(ctmp,0,0,crse.nComp(),0,0,crse_geom.periodicity());
}

template <typename MF, std::enable_if_t<IsFabArray<MF>::value,int> FOO>
Vector<typename MF::value_type> get_cell_data (MF const& mf, IntVect const& cell)
{
    using T = typename MF::value_type;
    const int ncomp = mf.nComp();
    Gpu::DeviceVector<T> dv(ncomp);
    auto* dp = dv.data();
    bool found = false;
    auto loc = cell.dim3();
    for (MFIter mfi(mf); mfi.isValid() && !found; ++mfi)
    {
        Box const& box = mfi.validbox();
        if (box.contains(cell)) {
            found = true;
            auto const& fab = mf.const_array(mfi);
            amrex::ParallelFor(1, [=] AMREX_GPU_DEVICE (int) noexcept
            {
                for (int n = 0; n < ncomp; ++n) {
                    dp[n] = fab(loc.x,loc.y,loc.z,n);
                }
            });
        }
    }
    Vector<T> hv;
    if (found) {
        hv.resize(ncomp);
        Gpu::copy(Gpu::deviceToHost, dv.begin(), dv.end(), hv.begin());
    }
    return hv;
}
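
// Usage sketch (editor's illustration): read all components of a hypothetical
// MultiFab mf at one cell. Only ranks owning that cell get a non-empty vector.
//
//     auto vals = get_cell_data(mf, IntVect(AMREX_D_DECL(8,8,8)));
//     if (!vals.empty()) { amrex::Print() << vals[0] << "\n"; }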

template <typename MF, std::enable_if_t<IsFabArray<MF>::value,int> FOO>
MF get_line_data (MF const& mf, int dir, IntVect const& cell, Box const& bnd_bx)
{
    bool do_bnd = (!bnd_bx.isEmpty());

    BoxArray const& ba = mf.boxArray();
    DistributionMapping const& dm = mf.DistributionMap();
    const auto nboxes = static_cast<int>(ba.size());

    BoxList bl(ba.ixType());
    Vector<int> procmap;
    Vector<int> index_map;
    if (!do_bnd) {
        for (int i = 0; i < nboxes; ++i) {
            Box const& b = ba[i];
            IntVect lo = cell;
            lo[dir] = b.smallEnd(dir);
            if (b.contains(lo)) {
                IntVect hi = lo;
                hi[dir] = b.bigEnd(dir);
                Box b1d(lo,hi,b.ixType());
                bl.push_back(b1d);
                procmap.push_back(dm[i]);
                index_map.push_back(i);
            }
        }
    } else {
        for (int i = 0; i < nboxes; ++i) {
            Box const& b = ba[i];
            Box const& b1d = bnd_bx & b;
            if (b1d.ok()) {
                bl.push_back(b1d);
                procmap.push_back(dm[i]);
                index_map.push_back(i);
            }
        }
    }

    if (bl.isEmpty()) {
        return MF();
    } else {
        BoxArray rba(std::move(bl));
        DistributionMapping rdm(std::move(procmap));
        MF rmf(rba, rdm, mf.nComp(), IntVect(0),
               MFInfo().SetArena(mf.arena()));
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
        for (MFIter mfi(rmf); mfi.isValid(); ++mfi) {
            Box const& b = mfi.validbox();
            auto const& dfab = rmf.array(mfi);
            auto const& sfab = mf.const_array(index_map[mfi.index()]);
            amrex::ParallelFor(b, mf.nComp(),
            [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept
            {
                dfab(i,j,k,n) = sfab(i,j,k,n);
            });
        }
        return rmf;
    }
}
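
// Usage sketch (editor's illustration): extract the x-direction (dir = 0) line
// of data passing through a given cell of a hypothetical MultiFab mf. The
// result is distributed over the ranks that own the intersected boxes.
//
//     auto line = get_line_data(mf, 0, IntVect(AMREX_D_DECL(0,8,8)));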

template <typename Op, typename T, typename FAB, typename F,
          std::enable_if_t<IsBaseFab<FAB>::value
#ifndef AMREX_USE_CUDA
                           && IsCallableR<T,F,int,int,int,int>::value
#endif
                           , int> FOO>
BaseFab<T>
ReduceToPlane (int direction, Box const& domain, FabArray<FAB> const& mf, F const& f)
{
    Box domain2d = domain;
    domain2d.setRange(direction, 0);

    T initval;
    Op().init(initval);

    BaseFab<T> r(domain2d);
    r.template setVal<RunOn::Device>(initval);
    auto const& ar = r.array();

    for (MFIter mfi(mf,MFItInfo().UseDefaultStream().DisableDeviceSync());
         mfi.isValid(); ++mfi)
    {
        Box bx = mfi.validbox() & domain;
        if (bx.ok()) {
            int box_no = mfi.LocalIndex();
#if defined(AMREX_USE_GPU)
            Box b2d = bx;
            b2d.setRange(direction,0);
            const auto blo = amrex::lbound(bx);
            const auto len = amrex::length(bx);
            constexpr int nthreads = 128;
            auto nblocks = static_cast<int>(b2d.numPts());
#ifdef AMREX_USE_SYCL
            constexpr std::size_t shared_mem_bytes = sizeof(T)*Gpu::Device::warp_size;
            amrex::launch<nthreads>(nblocks, shared_mem_bytes, Gpu::gpuStream(),
            [=] AMREX_GPU_DEVICE (Gpu::Handler const& h)
            {
                int bid = h.blockIdx();
                int tid = h.threadIdx();
#else
            amrex::launch<nthreads>(nblocks, Gpu::gpuStream(),
            [=] AMREX_GPU_DEVICE ()
            {
                int bid = blockIdx.x;
                int tid = threadIdx.x;
#endif
                T tmp;
                Op().init(tmp);
                T* p;
                if (direction == 0) {
                    int k = bid / len.y;
                    int j = bid - k*len.y;
                    k += blo.z;
                    j += blo.y;
                    for (int i = blo.x + tid; i < blo.x+len.x; i += nthreads) {
                        Op().local_update(tmp, f(box_no,i,j,k));
                    }
                    p = ar.ptr(0,j,k);
                } else if (direction == 1) {
                    int k = bid / len.x;
                    int i = bid - k*len.x;
                    k += blo.z;
                    i += blo.x;
                    for (int j = blo.y + tid; j < blo.y+len.y; j += nthreads) {
                        Op().local_update(tmp, f(box_no,i,j,k));
                    }
                    p = ar.ptr(i,0,k);
                } else {
                    int j = bid / len.x;
                    int i = bid - j*len.x;
                    j += blo.y;
                    i += blo.x;
                    for (int k = blo.z + tid; k < blo.z+len.z; k += nthreads) {
                        Op().local_update(tmp, f(box_no,i,j,k));
                    }
                    p = ar.ptr(i,j,0);
                }
#ifdef AMREX_USE_SYCL
                Op().template parallel_update<T>(*p, tmp, h);
#else
                Op().template parallel_update<T,nthreads>(*p, tmp);
#endif
            });
#else
            // CPU
            if (direction == 0) {
                AMREX_LOOP_3D(bx, i, j, k,
                {
                    Op().local_update(ar(0,j,k), f(box_no,i,j,k));
                });
            } else if (direction == 1) {
                AMREX_LOOP_3D(bx, i, j, k,
                {
                    Op().local_update(ar(i,0,k), f(box_no,i,j,k));
                });
            } else {
                AMREX_LOOP_3D(bx, i, j, k,
                {
                    Op().local_update(ar(i,j,0), f(box_no,i,j,k));
                });
            }
#endif
        }
    }
    Gpu::streamSynchronize();

    return r;
}
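
// Usage sketch (editor's illustration): reduce component 0 to a plane by
// taking the maximum over direction 2 (z). The names mf and domain are
// hypothetical; ma must stay alive while the reduction runs.
//
//     auto const& ma = mf.const_arrays();
//     BaseFab<Real> plane = ReduceToPlane<ReduceOpMax,Real>(2, domain, mf,
//         [=] AMREX_GPU_DEVICE (int box_no, int i, int j, int k) -> Real
//         {
//             return ma[box_no](i,j,k,0);
//         });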

}

#endif