AMReX_FillPatchUtil_I.H
1 #ifndef AMREX_FillPatchUtil_I_H_
2 #define AMREX_FillPatchUtil_I_H_
3 #include <AMReX_Config.H>
4 
5 namespace amrex {
6 
7 namespace detail {
8 
9 template <typename F, typename MF>
10 auto call_interp_hook (F const& f, MF& mf, int icomp, int ncomp)
11  -> decltype(f(mf[0],Box(),icomp,ncomp))
12 {
13 #ifdef AMREX_USE_OMP
14 #pragma omp parallel if (Gpu::notInLaunchRegion())
15 #endif
16  for (MFIter mfi(mf); mfi.isValid(); ++mfi) {
17  auto& dfab = mf[mfi];
18  const Box& dbx = dfab.box();
19  f(dfab, dbx, icomp, ncomp);
20  }
21 }
22 
23 template <typename F, typename MF>
24 auto call_interp_hook (F const& f, MF& mf, int icomp, int ncomp)
25  -> decltype(f(mf,icomp,ncomp))
26 {
27  f(mf, icomp, ncomp);
28 }
29 
30 }
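
The two call_interp_hook overloads above let a pre-/post-interpolation hook be supplied either per FAB, f(fab, box, icomp, ncomp), or per container, f(mf, icomp, ncomp); overload selection happens through the decltype trailing return type. A minimal sketch of a per-FAB hook follows (not part of this header; the function name and the scaling operation are placeholders):

#include <AMReX_FArrayBox.H>
#include <AMReX_GpuLaunch.H>

// Hypothetical per-FAB hook matching the f(fab, box, icomp, ncomp) overload.
void scale_before_interp (amrex::FArrayBox& fab, amrex::Box const& bx,
                          int icomp, int ncomp)
{
    auto const& a = fab.array();
    amrex::ParallelFor(bx, ncomp,
    [=] AMREX_GPU_DEVICE (int i, int j, int k, int n)
    {
        a(i,j,k,icomp+n) *= amrex::Real(2.0); // placeholder operation
    });
}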
31 
32 template <typename Interp>
33 bool ProperlyNested (const IntVect& ratio, const IntVect& blocking_factor, int ngrow,
34  const IndexType& boxType, Interp* mapper)
35 {
36  int ratio_max = ratio[0];
37 #if (AMREX_SPACEDIM > 1)
38  ratio_max = std::max(ratio_max, ratio[1]);
39 #endif
40 #if (AMREX_SPACEDIM == 3)
41  ratio_max = std::max(ratio_max, ratio[2]);
42 #endif
43  // There are at least this many coarse cells outside fine grids
44  // (except at physical boundaries).
45  const IntVect& nbuf = blocking_factor / ratio_max;
46 
47  Box crse_box(IntVect(AMREX_D_DECL(0,0,0)), IntVect(AMREX_D_DECL(4*nbuf[0]-1,
48  4*nbuf[1]-1,
49  4*nbuf[2]-1)));
50  crse_box.convert(boxType);
51  Box fine_box(nbuf, IntVect(AMREX_D_DECL(3*nbuf[0]-1,3*nbuf[1]-1,3*nbuf[2]-1)));
52  fine_box.convert(boxType);
53  fine_box.refine(ratio_max);
54  fine_box.grow(ngrow);
55  const Box& fine_box_coarsened = mapper->CoarseBox(fine_box, ratio_max);
56  return crse_box.contains(fine_box_coarsened);
57 }
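
ProperlyNested answers whether ngrow ghost cells of a fine level can be filled from that level plus one coarser level only, given the refinement ratio, the blocking factor, and the coarse-box footprint required by the interpolater. A hedged usage sketch (the wrapper name is hypothetical; amrex::cell_cons_interp is one of the interpolaters shipped with AMReX):

#include <AMReX_FillPatchUtil.H>
#include <AMReX_Interpolater.H>

// Hypothetical helper: returns true if `ngrow` ghost cells on the fine level
// can be interpolated using data from only one coarser level.
bool can_fill_with_two_levels (amrex::IntVect const& ref_ratio,
                               amrex::IntVect const& blocking_factor,
                               int ngrow)
{
    return amrex::ProperlyNested(ref_ratio, blocking_factor, ngrow,
                                 amrex::IndexType::TheCellType(),
                                 &amrex::cell_cons_interp);
}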
58 
59 template <typename MF, typename BC>
60 std::enable_if_t<IsFabArray<MF>::value>
61 FillPatchSingleLevel (MF& mf, Real time,
62  const Vector<MF*>& smf, const Vector<Real>& stime,
63  int scomp, int dcomp, int ncomp,
64  const Geometry& geom,
65  BC& physbcf, int bcfcomp)
66 {
67  FillPatchSingleLevel(mf, mf.nGrowVect(), time, smf, stime, scomp, dcomp, ncomp,
68  geom, physbcf, bcfcomp);
69 }
70 
71 template <typename MF, typename BC>
72 std::enable_if_t<IsFabArray<MF>::value>
73 FillPatchSingleLevel (MF& mf, IntVect const& nghost, Real time,
74  const Vector<MF*>& smf, const Vector<Real>& stime,
75  int scomp, int dcomp, int ncomp,
76  const Geometry& geom,
77  BC& physbcf, int bcfcomp)
78 {
79  BL_PROFILE("FillPatchSingleLevel");
80 
81  AMREX_ASSERT(scomp+ncomp <= smf[0]->nComp());
82  AMREX_ASSERT(dcomp+ncomp <= mf.nComp());
83  AMREX_ASSERT(smf.size() == stime.size());
84  AMREX_ASSERT(!smf.empty());
85  AMREX_ASSERT(nghost.allLE(mf.nGrowVect()));
86 
87  IntVect src_ghost(0);
88  if constexpr (std::is_same_v<BC,PhysBCFunctUseCoarseGhost>) {
89  src_ghost = physbcf.fp1_src_ghost;
90  }
91 
92  if (smf.size() == 1)
93  {
94  if (&mf == smf[0] && scomp == dcomp) {
95  mf.FillBoundary(dcomp, ncomp, nghost, geom.periodicity());
96  } else {
97  mf.ParallelCopy(*smf[0], scomp, dcomp, ncomp, src_ghost, nghost, geom.periodicity());
98  }
99  }
100  else if (smf.size() == 2)
101  {
102  BL_ASSERT(smf[0]->boxArray() == smf[1]->boxArray());
103  MF raii;
104  MF * dmf;
105  int destcomp;
106  bool sameba;
107  if (mf.boxArray() == smf[0]->boxArray() &&
108  mf.DistributionMap() == smf[0]->DistributionMap())
109  {
110  dmf = &mf;
111  destcomp = dcomp;
112  sameba = true;
113  } else {
114  raii.define(smf[0]->boxArray(), smf[0]->DistributionMap(), ncomp, src_ghost,
115  MFInfo(), smf[0]->Factory());
116 
117  dmf = &raii;
118  destcomp = 0;
119  sameba = false;
120  }
121 
122  if ((dmf != smf[0] && dmf != smf[1]) || scomp != dcomp)
123  {
124  IntVect interp_ghost(0);
125  if constexpr (std::is_same_v<BC,PhysBCFunctUseCoarseGhost>) {
126  interp_ghost = physbcf.fp1_src_ghost;
127  if (sameba) {
128  interp_ghost.min(nghost);
129  }
130  }
131 #ifdef AMREX_USE_OMP
132 #pragma omp parallel if (Gpu::notInLaunchRegion())
133 #endif
134  for (MFIter mfi(*dmf,TilingIfNotGPU()); mfi.isValid(); ++mfi)
135  {
136  const Box& bx = mfi.growntilebox(interp_ghost);
137  const Real t0 = stime[0];
138  const Real t1 = stime[1];
139  auto const sfab0 = smf[0]->array(mfi);
140  auto const sfab1 = smf[1]->array(mfi);
141  auto dfab = dmf->array(mfi);
142 
143  if (time == t0)
144  {
145  AMREX_HOST_DEVICE_PARALLEL_FOR_4D ( bx, ncomp, i, j, k, n,
146  {
147  dfab(i,j,k,n+destcomp) = sfab0(i,j,k,n+scomp);
148  });
149  }
150  else if (time == t1)
151  {
152  AMREX_HOST_DEVICE_PARALLEL_FOR_4D ( bx, ncomp, i, j, k, n,
153  {
154  dfab(i,j,k,n+destcomp) = sfab1(i,j,k,n+scomp);
155  });
156  }
157  else if (! amrex::almostEqual(t0,t1))
158  {
159  Real alpha = (t1-time)/(t1-t0);
160  Real beta = (time-t0)/(t1-t0);
161  AMREX_HOST_DEVICE_PARALLEL_FOR_4D ( bx, ncomp, i, j, k, n,
162  {
163  dfab(i,j,k,n+destcomp) = alpha*sfab0(i,j,k,n+scomp)
164  + beta*sfab1(i,j,k,n+scomp);
165  });
166  }
167  else
168  {
169  AMREX_HOST_DEVICE_PARALLEL_FOR_4D ( bx, ncomp, i, j, k, n,
170  {
171  dfab(i,j,k,n+destcomp) = sfab0(i,j,k,n+scomp);
172  });
173  }
174  }
175  }
176 
177  if (sameba)
178  {
179  // Note that when sameba is true, mf's BoxArray is non-overlapping,
180  // so FillBoundary is safe.
181  mf.FillBoundary(dcomp, ncomp, nghost, geom.periodicity());
182  }
183  else
184  {
185  mf.ParallelCopy(*dmf, 0, dcomp, ncomp, src_ghost, nghost, geom.periodicity());
186  }
187  }
188  else {
189  amrex::Abort("FillPatchSingleLevel: high-order interpolation in time not implemented yet");
190  }
191 
192  physbcf(mf, dcomp, ncomp, nghost, time, bcfcomp);
193 }
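
For reference, a minimal (hypothetical) caller of the routine above: it fills dst, including its ghost cells, at time t by linear interpolation between two stored states. Boundary handling uses amrex::PhysBCFunctNoOp here, so no physical boundary conditions are applied in this sketch.

#include <AMReX_FillPatchUtil.H>
#include <AMReX_PhysBCFunct.H>

// Hypothetical wrapper around FillPatchSingleLevel.
void fill_single_level (amrex::MultiFab& dst, amrex::Real t,
                        amrex::MultiFab& state_old, amrex::Real t_old,
                        amrex::MultiFab& state_new, amrex::Real t_new,
                        amrex::Geometry const& geom)
{
    amrex::Vector<amrex::MultiFab*> smf{&state_old, &state_new};
    amrex::Vector<amrex::Real>      stime{t_old, t_new};
    amrex::PhysBCFunctNoOp noop_bc;
    amrex::FillPatchSingleLevel(dst, t, smf, stime,
                                0, 0, dst.nComp(), geom, noop_bc, 0);
}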
194 
195 void FillPatchInterp (MultiFab& mf_fine_patch, int fcomp, MultiFab const& mf_crse_patch, int ccomp,
196  int ncomp, IntVect const& ng, const Geometry& cgeom, const Geometry& fgeom,
197  Box const& dest_domain, const IntVect& ratio,
198  MFInterpolater* mapper, const Vector<BCRec>& bcs, int bcscomp);
199 
200 template <typename MF, typename Interp>
201 std::enable_if_t<IsFabArray<MF>::value && !std::is_same_v<Interp,MFInterpolater>>
202 FillPatchInterp (MF& mf_fine_patch, int fcomp, MF const& mf_crse_patch, int ccomp,
203  int ncomp, IntVect const& ng, const Geometry& cgeom, const Geometry& fgeom,
204  Box const& dest_domain, const IntVect& ratio,
205  Interp* mapper, const Vector<BCRec>& bcs, int bcscomp)
206 {
207  BL_PROFILE("FillPatchInterp(Fab)");
208 
209  Box const& cdomain = amrex::convert(cgeom.Domain(), mf_fine_patch.ixType());
210  int idummy=0;
211 #ifdef AMREX_USE_OMP
212 #pragma omp parallel if (Gpu::notInLaunchRegion())
213 #endif
214  {
215  Vector<BCRec> bcr(ncomp);
216  for (MFIter mfi(mf_fine_patch); mfi.isValid(); ++mfi)
217  {
218  auto& sfab = mf_crse_patch[mfi];
219  const Box& sbx = sfab.box();
220 
221  auto& dfab = mf_fine_patch[mfi];
222  Box const& dbx = amrex::grow(mfi.validbox(),ng) & dest_domain;
223 
224  amrex::setBC(sbx,cdomain,bcscomp,0,ncomp,bcs,bcr);
225  mapper->interp(sfab, ccomp, dfab, fcomp, ncomp, dbx, ratio,
226  cgeom, fgeom, bcr, idummy, idummy, RunOn::Gpu);
227  }
228  }
229 }
230 
231 template <typename MF>
232 std::enable_if_t<IsFabArray<MF>::value>
233 FillPatchInterp (MF& mf_fine_patch, int fcomp, MF const& mf_crse_patch, int ccomp,
234  int ncomp, IntVect const& ng, const Geometry& cgeom, const Geometry& fgeom,
235  Box const& dest_domain, const IntVect& ratio,
236  InterpBase* mapper, const Vector<BCRec>& bcs, int bcscomp)
237 {
238  if (dynamic_cast<MFInterpolater*>(mapper)) {
239  FillPatchInterp(mf_fine_patch, fcomp, mf_crse_patch, ccomp,
240  ncomp, ng, cgeom, fgeom, dest_domain, ratio,
241  static_cast<MFInterpolater*>(mapper), bcs, bcscomp);
242  } else if (dynamic_cast<Interpolater*>(mapper)) {
243  FillPatchInterp(mf_fine_patch, fcomp, mf_crse_patch, ccomp,
244  ncomp, ng, cgeom, fgeom, dest_domain, ratio,
245  static_cast<Interpolater*>(mapper), bcs, bcscomp);
246  } else {
247  amrex::Abort("FillPatchInterp: unknown InterpBase");
248  }
249 }
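
The InterpBase overload above dispatches at run time on the dynamic type of the mapper, so either interpolater family can be passed through the same entry point. A hypothetical illustration (the function name is made up; both globals are provided by AMReX):

#include <AMReX_Interpolater.H>
#include <AMReX_MFInterpolater.H>

// Either pointer can be handed to the InterpBase* overload above;
// the dynamic_cast dispatch selects the matching implementation.
amrex::InterpBase* pick_mapper (bool use_mf_version)
{
    if (use_mf_version) {
        return &amrex::mf_cell_cons_interp; // MFInterpolater family
    } else {
        return &amrex::cell_cons_interp;    // Interpolater family
    }
}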
250 
251 template <typename MF, typename iMF, typename Interp>
252 std::enable_if_t<IsFabArray<MF>::value && !std::is_same_v<Interp,MFInterpolater>>
253 InterpFace (Interp *interp,
254  MF const& mf_crse_patch, int crse_comp,
255  MF& mf_refined_patch, int fine_comp,
256  int ncomp, const IntVect& ratio,
257  const iMF& solve_mask, const Geometry& crse_geom, const Geometry& fine_geom,
258  int bcscomp, RunOn gpu_or_cpu,
259  const Vector<BCRec>& bcs)
260 {
261  Vector<BCRec> bcr(ncomp);
262  Box const& cdomain = amrex::convert(crse_geom.Domain(), mf_crse_patch.ixType());
263  for (MFIter mfi(mf_refined_patch);mfi.isValid(); ++mfi)
264  {
265  auto& sfab = mf_crse_patch[mfi];
266  const Box& sbx = sfab.box();
267  auto& dfab = mf_refined_patch[mfi];
268  Box const& dbx = dfab.box();
269  auto& ifab = solve_mask[mfi];
270  amrex::setBC(sbx,cdomain,bcscomp,0,ncomp,bcs,bcr);
271  interp->interp_face(sfab,crse_comp,dfab,fine_comp,ncomp,
272  dbx, ratio, ifab, crse_geom, fine_geom,
273  bcr, bcscomp, gpu_or_cpu);
274  }
275 }
276 
277 template <typename MF, typename iMF>
278 std::enable_if_t<IsFabArray<MF>::value>
279 InterpFace (InterpBase* interp,
280  MF const& mf_crse_patch, int crse_comp,
281  MF& mf_refined_patch, int fine_comp,
282  int ncomp, const IntVect& ratio,
283  const iMF& solve_mask, const Geometry& crse_geom, const Geometry& fine_geom,
284  int bccomp, RunOn gpu_or_cpu,
285  const Vector<BCRec>& bcs)
286 {
287  if (dynamic_cast<MFInterpolater*>(interp)){
288  InterpFace(static_cast<MFInterpolater*>(interp),
289  mf_crse_patch, crse_comp,mf_refined_patch, fine_comp,
290  ncomp, ratio, solve_mask, crse_geom, fine_geom, bccomp,
291  gpu_or_cpu, bcs);
292  }
293  else if (dynamic_cast<Interpolater*>(interp)){
294  InterpFace(static_cast<Interpolater*>(interp),
295  mf_crse_patch, crse_comp,mf_refined_patch, fine_comp,
296  ncomp, ratio, solve_mask, crse_geom, fine_geom, bccomp,
297  gpu_or_cpu, bcs);
298  }
299  else {
300  amrex::Abort("InterpFace: unknown InterpBase");
301  }
302 }
303 
304 
305 namespace detail {
306 
307 // ======== FArrayBox
308 
309  template <typename MF,
310  std::enable_if_t<std::is_same_v<typename MF::FABType::value_type,
311  FArrayBox>,
312  int> = 0>
313  MF make_mf_crse_patch (FabArrayBase::FPinfo const& fpc, int ncomp)
314  {
315  MF mf_crse_patch(fpc.ba_crse_patch, fpc.dm_patch, ncomp, 0, MFInfo(),
316  *fpc.fact_crse_patch);
317  return mf_crse_patch;
318  }
319 
320  template <typename MF,
321  std::enable_if_t<std::is_same_v<typename MF::FABType::value_type,
322  FArrayBox>,
323  int> = 0>
324  MF make_mf_crse_patch (FabArrayBase::FPinfo const& fpc, int ncomp, IndexType idx_type)
325  {
326  MF mf_crse_patch(amrex::convert(fpc.ba_crse_patch, idx_type), fpc.dm_patch,
327  ncomp, 0, MFInfo(), *fpc.fact_crse_patch);
328  return mf_crse_patch;
329  }
330 
331  template <typename MF,
332  std::enable_if_t<std::is_same_v<typename MF::FABType::value_type,
333  FArrayBox>,
334  int> = 0>
335  MF make_mf_fine_patch (FabArrayBase::FPinfo const& fpc, int ncomp)
336  {
337  MF mf_fine_patch(fpc.ba_fine_patch, fpc.dm_patch, ncomp, 0, MFInfo(),
338  *fpc.fact_fine_patch);
339  return mf_fine_patch;
340  }
341 
342  template <typename MF,
343  std::enable_if_t<std::is_same_v<typename MF::FABType::value_type,
344  FArrayBox>,
345  int> = 0>
346  MF make_mf_fine_patch (FabArrayBase::FPinfo const& fpc, int ncomp, IndexType idx_type)
347  {
348  MF mf_fine_patch(amrex::convert(fpc.ba_fine_patch, idx_type), fpc.dm_patch,
349  ncomp, 0, MFInfo(), *fpc.fact_fine_patch);
350  return mf_fine_patch;
351  }
352 
353  template <typename MF,
354  std::enable_if_t<std::is_same_v<typename MF::FABType::value_type,
355  FArrayBox>,
356  int> = 0>
357  MF make_mf_refined_patch (FabArrayBase::FPinfo const& fpc, int ncomp, IndexType idx_type, IntVect ratio)
358  {
359  MF mf_refined_patch(amrex::convert( amrex::refine( amrex::coarsen(fpc.ba_fine_patch, ratio), ratio), idx_type),
360  fpc.dm_patch, ncomp, 0, MFInfo(), *fpc.fact_fine_patch);
361  return mf_refined_patch;
362  }
363 
364  template <typename MF,
365  std::enable_if_t<std::is_same_v<typename MF::FABType::value_type,
366  FArrayBox>,
367  int> = 0>
368  MF make_mf_crse_mask (FabArrayBase::FPinfo const& fpc, int ncomp, IndexType idx_type, IntVect ratio)
369  {
370  MF mf_crse_mask(amrex::convert(amrex::coarsen(fpc.ba_fine_patch, ratio), idx_type),
371  fpc.dm_patch, ncomp, 0, MFInfo(), *fpc.fact_fine_patch);
372  return mf_crse_mask;
373  }
374 
375  template <typename MF,
376  std::enable_if_t<std::is_same_v<typename MF::FABType::value_type,
377  FArrayBox>,
378  int> = 0>
379  void mf_set_domain_bndry (MF &mf, Geometry const & geom)
380  {
381  mf.setDomainBndry(std::numeric_limits<Real>::quiet_NaN(), geom);
382  }
383 
384 
385 // ======== Not FArrayBox
386 
387  template <typename MF,
388  std::enable_if_t<!std::is_same_v<typename MF::FABType::value_type,
389  FArrayBox>,
390  int> = 0>
391  MF make_mf_crse_patch (FabArrayBase::FPinfo const& fpc, int ncomp)
392  {
393  return MF(fpc.ba_crse_patch, fpc.dm_patch, ncomp, 0);
394  }
395 
396  template <typename MF,
397  std::enable_if_t<!std::is_same_v<typename MF::FABType::value_type,
398  FArrayBox>,
399  int> = 0>
400  MF make_mf_crse_patch (FabArrayBase::FPinfo const& fpc, int ncomp, IndexType idx_type)
401  {
402  return MF(amrex::convert(fpc.ba_crse_patch, idx_type), fpc.dm_patch, ncomp, 0);
403  }
404 
405  template <typename MF,
406  std::enable_if_t<!std::is_same_v<typename MF::FABType::value_type,
407  FArrayBox>,
408  int> = 0>
409  MF make_mf_fine_patch (FabArrayBase::FPinfo const& fpc, int ncomp)
410  {
411  return MF(fpc.ba_fine_patch, fpc.dm_patch, ncomp, 0);
412  }
413 
414  template <typename MF,
415  std::enable_if_t<!std::is_same_v<typename MF::FABType::value_type,
416  FArrayBox>,
417  int> = 0>
418  MF make_mf_fine_patch (FabArrayBase::FPinfo const& fpc, int ncomp, IndexType idx_type)
419  {
420  return MF(amrex::convert(fpc.ba_fine_patch, idx_type), fpc.dm_patch, ncomp, 0);
421  }
422 
423  template <typename MF,
424  std::enable_if_t<!std::is_same_v<typename MF::FABType::value_type,
425  FArrayBox>,
426  int> = 0>
427  MF make_mf_refined_patch (FabArrayBase::FPinfo const& fpc, int ncomp, IndexType idx_type, IntVect ratio)
428  {
429  return MF(amrex::convert( amrex::refine( amrex::coarsen(fpc.ba_fine_patch, ratio), ratio), idx_type), fpc.dm_patch, ncomp, 0);
430  }
431 
432  template <typename MF,
433  std::enable_if_t<!std::is_same_v<typename MF::FABType::value_type,
434  FArrayBox>,
435  int> = 0>
436  MF make_mf_crse_mask (FabArrayBase::FPinfo const& fpc, int ncomp, IndexType idx_type, IntVect ratio)
437  {
438  return MF(amrex::convert(amrex::coarsen(fpc.ba_fine_patch, ratio), idx_type), fpc.dm_patch, ncomp, 0);
439  }
440 
441  template <typename MF,
442  std::enable_if_t<!std::is_same_v<typename MF::FABType::value_type,
443  FArrayBox>,
444  int> = 0>
445  void mf_set_domain_bndry (MF &/*mf*/, Geometry const & /*geom*/)
446  {
447  // nothing
448  }
449 
450  template <typename MF, typename BC, typename Interp, typename PreInterpHook, typename PostInterpHook>
451  std::enable_if_t<IsFabArray<MF>::value,int>
452  FillPatchTwoLevels_doit (MF& mf, IntVect const& nghost, Real time,
453  const Vector<MF*>& cmf, const Vector<Real>& ct,
454  const Vector<MF*>& fmf, const Vector<Real>& ft,
455  int scomp, int dcomp, int ncomp,
456  const Geometry& cgeom, const Geometry& fgeom,
457  BC& cbc, int cbccomp,
458  BC& fbc, int fbccomp,
459  const IntVect& ratio,
460  Interp* mapper,
461  const Vector<BCRec>& bcs, int bcscomp,
462  const PreInterpHook& pre_interp,
463  const PostInterpHook& post_interp,
464  EB2::IndexSpace const* index_space,
465  bool return_error_code = false)
466  {
467  BL_PROFILE("FillPatchTwoLevels");
468 
469  int success_code = return_error_code ? 0 : -1;
470  int failure_code = 1;
471 
472  if (nghost.max() > 0 || mf.getBDKey() != fmf[0]->getBDKey())
473  {
474  const InterpolaterBoxCoarsener& coarsener = mapper->BoxCoarsener(ratio);
475 
476  // Test for Face-centered data
477  if ( AMREX_D_TERM( mf.ixType().nodeCentered(0),
478  + mf.ixType().nodeCentered(1),
479  + mf.ixType().nodeCentered(2) ) == 1 )
480  {
481  if ( !dynamic_cast<Interpolater*>(mapper) ){
482  amrex::Abort("This interpolater has not yet implemented a version for face-based data");
483  }
484 
485  // Convert to cell-centered MF meta-data for FPInfo.
486  MF mf_cc_dummy( amrex::convert(mf.boxArray(), IntVect::TheZeroVector()),
487  mf.DistributionMap(), ncomp, nghost, MFInfo().SetAlloc(false) );
488  MF fmf_cc_dummy( amrex::convert(fmf[0]->boxArray(), IntVect::TheZeroVector()),
489  fmf[0]->DistributionMap(), ncomp, nghost, MFInfo().SetAlloc(false) );
490 
491  const FabArrayBase::FPinfo& fpc = FabArrayBase::TheFPinfo(fmf_cc_dummy, mf_cc_dummy,
492  nghost,
493  coarsener,
494  fgeom,
495  cgeom,
496  index_space);
497 
498  if ( ! fpc.ba_crse_patch.empty())
499  {
500  if (return_error_code) {
501  BoxArray const& cba = amrex::convert(cmf[0]->boxArray(), IntVect(0));
502  if (!cba.contains(fpc.ba_crse_patch,cgeom.periodicity())) {
503  return failure_code;
504  }
505  }
506 
507  MF mf_crse_patch = make_mf_crse_patch<MF> (fpc, ncomp, mf.boxArray().ixType());
508  // Must make sure the fine patch exists under the needed coarse faces.
509  // It stores values for the final (interior) interpolation,
510  // which is done from this fine MF after it has been partially filled
511  // (only the faces overlying coarse faces hold valid data).
512  MF mf_refined_patch = make_mf_refined_patch<MF> (fpc, ncomp, mf.boxArray().ixType(), ratio);
513  auto solve_mask = make_mf_crse_mask<iMultiFab>(fpc, ncomp, mf.boxArray().ixType(), ratio);
514 
515  mf_set_domain_bndry(mf_crse_patch, cgeom);
516  if constexpr (std::is_same_v<BC,PhysBCFunctUseCoarseGhost>) {
517  cbc.fp1_src_ghost = cbc.cghost;
518  }
519  FillPatchSingleLevel(mf_crse_patch, time, cmf, ct, scomp, 0, ncomp,
520  cgeom, cbc, cbccomp);
521 
522  mf_set_domain_bndry(mf_refined_patch, fgeom);
523  if constexpr (std::is_same_v<BC,PhysBCFunctUseCoarseGhost>) {
524  fbc.fp1_src_ghost = IntVect(0);
525  }
526  FillPatchSingleLevel(mf_refined_patch, time, fmf, ft, scomp, 0, ncomp,
527  fgeom, fbc, fbccomp);
528 
529  // Aliased MFs, used to allow CPC caching.
530  MF mf_known( amrex::coarsen(fmf[0]->boxArray(), ratio), fmf[0]->DistributionMap(),
531  ncomp, nghost, MFInfo().SetAlloc(false) );
532  MF mf_solution( amrex::coarsen(mf_refined_patch.boxArray(), ratio), mf_refined_patch.DistributionMap(),
533  ncomp, 0, MFInfo().SetAlloc(false) );
534 
535  const FabArrayBase::CPC mask_cpc( mf_solution, IntVect::TheZeroVector(),
536  mf_known, IntVect::TheZeroVector(),
537  cgeom.periodicity());
538 
539  solve_mask.setVal(1); // Values to solve.
540  solve_mask.setVal(0, mask_cpc, 0, 1); // Known values.
541 
542  detail::call_interp_hook(pre_interp, mf_crse_patch, 0, ncomp);
543 
544  InterpFace(mapper, mf_crse_patch, 0, mf_refined_patch, 0, ncomp,
545  ratio, solve_mask, cgeom, fgeom, bcscomp, RunOn::Gpu, bcs);
546 
547  detail::call_interp_hook(post_interp, mf_refined_patch, 0, ncomp);
548 
549  bool aliasing = false;
550  for (auto const& fmf_a : fmf) {
551  aliasing = aliasing || (&mf == fmf_a);
552  }
553  if (aliasing) {
554  mf.ParallelCopyToGhost(mf_refined_patch, 0, dcomp, ncomp,
555  IntVect{0}, nghost);
556  } else {
557  mf.ParallelCopy(mf_refined_patch, 0, dcomp, ncomp,
558  IntVect{0}, nghost);
559  }
560  }
561  }
562  else
563  {
564  const FabArrayBase::FPinfo& fpc = FabArrayBase::TheFPinfo(*fmf[0], mf,
565  nghost,
566  coarsener,
567  fgeom,
568  cgeom,
569  index_space);
570 
571  if ( ! fpc.ba_crse_patch.empty())
572  {
573  if (return_error_code) {
574  BoxArray const& cba = cmf[0]->boxArray();
575  if (!cba.contains(fpc.ba_crse_patch,cgeom.periodicity())) {
576  return failure_code;
577  }
578  }
579 
580  MF mf_crse_patch = make_mf_crse_patch<MF>(fpc, ncomp);
581  mf_set_domain_bndry (mf_crse_patch, cgeom);
582 
583  if constexpr (std::is_same_v<BC,PhysBCFunctUseCoarseGhost>) {
584  cbc.fp1_src_ghost = cbc.cghost;
585  }
586  FillPatchSingleLevel(mf_crse_patch, time, cmf, ct, scomp, 0, ncomp, cgeom, cbc, cbccomp);
587 
588  MF mf_fine_patch = make_mf_fine_patch<MF>(fpc, ncomp);
589 
590  detail::call_interp_hook(pre_interp, mf_crse_patch, 0, ncomp);
591 
592  Box fdomain_g( amrex::convert(fgeom.Domain(),mf.ixType()) );
593  for (int i = 0; i < AMREX_SPACEDIM; ++i) {
594  if (fgeom.isPeriodic(i)) {
595  fdomain_g.grow(i, nghost[i]);
596  } else {
597  if constexpr (std::is_same_v
598  <BC,PhysBCFunctUseCoarseGhost>) {
599  fdomain_g.grow(i, fbc.nghost_outside_domain[i]);
600  }
601  }
602  }
603  FillPatchInterp(mf_fine_patch, 0, mf_crse_patch, 0,
604  ncomp, IntVect(0), cgeom, fgeom,
605  fdomain_g, ratio, mapper, bcs, bcscomp);
606 
607  detail::call_interp_hook(post_interp, mf_fine_patch, 0, ncomp);
608 
609  mf.ParallelCopy(mf_fine_patch, 0, dcomp, ncomp, IntVect{0}, nghost);
610  }
611  }
612  }
613 
614  if constexpr(std::is_same_v<BC, PhysBCFunctUseCoarseGhost>) {
615  fbc.fp1_src_ghost = IntVect(0);
616  }
617  FillPatchSingleLevel(mf, nghost, time, fmf, ft, scomp, dcomp, ncomp,
618  fgeom, fbc, fbccomp);
619 
620  return success_code;
621  }
622 
623  template <typename MF, typename BC, typename Interp, typename PreInterpHook, typename PostInterpHook>
624  std::enable_if_t<IsFabArray<MF>::value>
625  FillPatchTwoLevels_doit (Array<MF*, AMREX_SPACEDIM> const& mf, IntVect const& nghost, Real time,
626  const Vector<Array<MF*, AMREX_SPACEDIM> >& cmf, const Vector<Real>& ct,
627  const Vector<Array<MF*, AMREX_SPACEDIM> >& fmf, const Vector<Real>& ft,
628  int scomp, int dcomp, int ncomp,
629  const Geometry& cgeom, const Geometry& fgeom,
630  Array<BC, AMREX_SPACEDIM>& cbc, const Array<int, AMREX_SPACEDIM>& cbccomp,
631  Array<BC, AMREX_SPACEDIM>& fbc, const Array<int, AMREX_SPACEDIM>& fbccomp,
632  const IntVect& ratio,
633  Interp* mapper,
634  const Array<Vector<BCRec>, AMREX_SPACEDIM>& bcs, const Array<int, AMREX_SPACEDIM>& bcscomp,
635  const PreInterpHook& pre_interp,
636  const PostInterpHook& post_interp,
637  EB2::IndexSpace const* index_space)
638  {
639  BL_PROFILE("FillPatchTwoLevels (Array<MF*>)");
640 
641  using FAB = typename MF::FABType::value_type;
642  using iFAB = typename iMultiFab::FABType::value_type;
643 
644  AMREX_ASSERT(AMREX_D_TERM(mf[0]->ixType().nodeCentered(0),
645  && mf[1]->ixType().nodeCentered(1),
646  && mf[2]->ixType().nodeCentered(2)));
647 
648  // These need to be true: (ba[0] == ba[1] == ba[2]) & (dm[0] == dm[1] == dm[2]).
649  // Debatable whether these are required, or will be enforced elsewhere prior to this func.
650  AMREX_ASSERT(AMREX_D_TERM(true,
651  && BoxArray::SameRefs(mf[0]->boxArray(), mf[1]->boxArray()),
652  && BoxArray::SameRefs(mf[0]->boxArray(), mf[2]->boxArray())));
653 /*
654  AMREX_ASSERT(AMREX_D_TERM(true,
655  && DistributionMapping::SameRefs(mf[0]->DistributionMap(), mf[1]->DistributionMap()),
656  && DistributionMapping::SameRefs(mf[0]->DistributionMap(), mf[2]->DistributionMap())));
657 */
658 
659 
660  // Test all of them?
661  if (nghost.max() > 0 || mf[0]->getBDKey() != fmf[0][0]->getBDKey())
662  {
663  const InterpolaterBoxCoarsener& coarsener = mapper->BoxCoarsener(ratio);
664 
665  // Convert to cell-centered MF meta-data for FPInfo.
666  MF mf_cc_dummy( amrex::convert(mf[0]->boxArray(), IntVect::TheZeroVector()),
667  mf[0]->DistributionMap(), ncomp, nghost, MFInfo().SetAlloc(false) );
668  MF fmf_cc_dummy( amrex::convert(fmf[0][0]->boxArray(), IntVect::TheZeroVector()),
669  fmf[0][0]->DistributionMap(), ncomp, nghost, MFInfo().SetAlloc(false) );
670 
671  const FabArrayBase::FPinfo& fpc = FabArrayBase::TheFPinfo(fmf_cc_dummy, mf_cc_dummy,
672  nghost,
673  coarsener,
674  fgeom,
675  cgeom,
676  index_space);
677 
678  if ( !fpc.ba_crse_patch.empty() )
679  {
680  Array<MF, AMREX_SPACEDIM> mf_crse_patch;
681  Array<MF, AMREX_SPACEDIM> mf_refined_patch;
682  Array<iMultiFab, AMREX_SPACEDIM> solve_mask;
683 
684  for (int d=0; d<AMREX_SPACEDIM; ++d)
685  {
686  mf_crse_patch[d] = make_mf_crse_patch<MF> (fpc, ncomp, mf[d]->boxArray().ixType());
687  mf_refined_patch[d] = make_mf_refined_patch<MF> (fpc, ncomp, mf[d]->boxArray().ixType(), ratio);
688  solve_mask[d] = make_mf_crse_mask<iMultiFab>(fpc, ncomp, mf[d]->boxArray().ixType(), ratio);
689 
690  mf_set_domain_bndry(mf_crse_patch[d], cgeom);
691  Vector<MF*> cmf_time;
692  for (const auto & mfab : cmf)
693  { cmf_time.push_back(mfab[d]); }
694 
695  FillPatchSingleLevel(mf_crse_patch[d], time, cmf_time, ct, scomp, 0, ncomp,
696  cgeom, cbc[d], cbccomp[d]);
697 
698  mf_set_domain_bndry(mf_refined_patch[d], fgeom);
699  Vector<MF*> fmf_time;
700  for (const auto & mfab : fmf)
701  { fmf_time.push_back(mfab[d]); }
702 
703  FillPatchSingleLevel(mf_refined_patch[d], time, fmf_time, ft, scomp, 0, ncomp,
704  fgeom, fbc[d], fbccomp[d]);
705 
706 
707  // Aliased MFs, used to allow CPC caching.
708  MF mf_known( amrex::coarsen(fmf[0][d]->boxArray(), ratio), fmf[0][d]->DistributionMap(),
709  ncomp, nghost, MFInfo().SetAlloc(false) );
710  MF mf_solution( amrex::coarsen(mf_refined_patch[d].boxArray(), ratio), mf_refined_patch[d].DistributionMap(),
711  ncomp, 0, MFInfo().SetAlloc(false) );
712 
713  const FabArrayBase::CPC mask_cpc( mf_solution, IntVect::TheZeroVector(),
714  mf_known, IntVect::TheZeroVector(),
715  cgeom.periodicity() );
716 
717  solve_mask[d].setVal(1); // Values to solve.
718  solve_mask[d].setVal(0, mask_cpc, 0, 1); // Known values.
719  }
720 
721  int idummy=0;
722 #ifdef AMREX_USE_OMP
723 // bool cc = fpc.ba_crse_patch.ixType().cellCentered();
724  bool cc = false; // can anything be done to allow threading, or can the OpenMP just be removed?
725 #pragma omp parallel if (cc && Gpu::notInLaunchRegion() )
726 #endif
727  {
728  Vector<Array<BCRec, AMREX_SPACEDIM> > bcr(ncomp);
729  for (MFIter mfi(mf_refined_patch[0]); mfi.isValid(); ++mfi)
730  {
731  Array<FAB*, AMREX_SPACEDIM> sfab{ AMREX_D_DECL( &(mf_crse_patch[0][mfi]),
732  &(mf_crse_patch[1][mfi]),
733  &(mf_crse_patch[2][mfi]) )};
734  Array<FAB*, AMREX_SPACEDIM> dfab{ AMREX_D_DECL( &(mf_refined_patch[0][mfi]),
735  &(mf_refined_patch[1][mfi]),
736  &(mf_refined_patch[2][mfi]) )};
737  Array<iFAB*, AMREX_SPACEDIM> mfab{ AMREX_D_DECL( &(solve_mask[0][mfi]),
738  &(solve_mask[1][mfi]),
739  &(solve_mask[2][mfi]) )};
740 
741  const Box& sbx_cc = amrex::convert(sfab[0]->box(), IntVect::TheZeroVector());
742  const Box& dbx_cc = amrex::convert(dfab[0]->box(), IntVect::TheZeroVector());
743 
744  for (int d=0; d<AMREX_SPACEDIM; ++d)
745  {
746  Vector<BCRec> bcr_d(ncomp);
747 
748  amrex::setBC(sfab[d]->box(), amrex::convert(cgeom.Domain(), mf[d]->ixType()),
749  bcscomp[d],0,ncomp,bcs[d],bcr_d);
750 
751  for (int n=0; n<ncomp; ++n)
752  { bcr[n][d] = bcr_d[n]; }
753  }
754 
755  pre_interp(sfab, sbx_cc, 0, ncomp);
756 
757  mapper->interp_arr(sfab, 0, dfab, 0, ncomp, dbx_cc, ratio, mfab,
758  cgeom, fgeom, bcr, idummy, idummy, RunOn::Gpu);
759 
760  post_interp(dfab, dbx_cc, 0, ncomp);
761  }
762  }
763 
764  for (int d=0; d<AMREX_SPACEDIM; ++d)
765  {
766  bool aliasing = false;
767  for (auto const& fmf_a : fmf) {
768  aliasing = aliasing || (mf[d] == fmf_a[d]);
769  }
770  if (aliasing) {
771  mf[d]->ParallelCopyToGhost(mf_refined_patch[d], 0, dcomp, ncomp,
772  IntVect{0}, nghost);
773  } else {
774  mf[d]->ParallelCopy(mf_refined_patch[d], 0, dcomp, ncomp,
775  IntVect{0}, nghost);
776  }
777  }
778  }
779  }
780 
781  for (int d=0; d<AMREX_SPACEDIM; ++d)
782  {
783  Vector<MF*> fmf_time;
784  for (auto const& ffab : fmf)
785  { fmf_time.push_back(ffab[d]); }
786 
787  FillPatchSingleLevel(*mf[d], nghost, time, fmf_time, ft, scomp, dcomp, ncomp,
788  fgeom, fbc[d], fbccomp[d]);
789  }
790  }
791 
792 } // namespace detail
793 
794 template <typename MF, typename BC, typename Interp, typename PreInterpHook, typename PostInterpHook>
795 std::enable_if_t<IsFabArray<MF>::value>
796 FillPatchTwoLevels (MF& mf, IntVect const& nghost, Real time,
797  const Vector<MF*>& cmf, const Vector<Real>& ct,
798  const Vector<MF*>& fmf, const Vector<Real>& ft,
799  int scomp, int dcomp, int ncomp,
800  const Geometry& cgeom, const Geometry& fgeom,
801  BC& cbc, int cbccomp,
802  BC& fbc, int fbccomp,
803  const IntVect& ratio,
804  Interp* mapper,
805  const Vector<BCRec>& bcs, int bcscomp,
806  const PreInterpHook& pre_interp,
807  const PostInterpHook& post_interp)
808 {
809 #ifdef AMREX_USE_EB
810  EB2::IndexSpace const* index_space = EB2::TopIndexSpaceIfPresent();
811 #else
812  EB2::IndexSpace const* index_space = nullptr;
813 #endif
814  detail::FillPatchTwoLevels_doit(mf,nghost,time,cmf,ct,fmf,ft,
815  scomp,dcomp,ncomp,cgeom,fgeom,
816  cbc,cbccomp,fbc,fbccomp,ratio,mapper,bcs,bcscomp,
817  pre_interp,post_interp,index_space);
818 }
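
A hedged usage sketch for the overload above (not part of the original header): it fills a fine-level MultiFab at time t, interpolating from coarse data wherever the fine data (including the requested ghost cells) does not cover the destination. The wrapper name and variable names are hypothetical; amrex::mf_cell_cons_interp and amrex::PhysBCFunctNoOp are stock AMReX objects.

#include <AMReX_FillPatchUtil.H>
#include <AMReX_PhysBCFunct.H>
#include <AMReX_MFInterpolater.H>

// Hypothetical wrapper around FillPatchTwoLevels with one coarse and one fine
// time snapshot and no physical boundary conditions.
void fill_two_levels (amrex::MultiFab& dst, amrex::Real t,
                      amrex::MultiFab& crse, amrex::Real t_crse,
                      amrex::MultiFab& fine, amrex::Real t_fine,
                      amrex::Geometry const& cgeom, amrex::Geometry const& fgeom,
                      amrex::IntVect const& ref_ratio,
                      amrex::Vector<amrex::BCRec> const& bcrec)
{
    amrex::Vector<amrex::MultiFab*> cmf{&crse};
    amrex::Vector<amrex::Real>      ct{t_crse};
    amrex::Vector<amrex::MultiFab*> fmf{&fine};
    amrex::Vector<amrex::Real>      ft{t_fine};
    amrex::PhysBCFunctNoOp noop_bc;
    amrex::FillPatchTwoLevels(dst, t, cmf, ct, fmf, ft,
                              0, 0, dst.nComp(), cgeom, fgeom,
                              noop_bc, 0, noop_bc, 0, ref_ratio,
                              &amrex::mf_cell_cons_interp, bcrec, 0);
}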
819 
820 template <typename MF, typename BC, typename Interp, typename PreInterpHook, typename PostInterpHook>
821 std::enable_if_t<IsFabArray<MF>::value>
822 FillPatchTwoLevels (MF& mf, Real time,
823  const Vector<MF*>& cmf, const Vector<Real>& ct,
824  const Vector<MF*>& fmf, const Vector<Real>& ft,
825  int scomp, int dcomp, int ncomp,
826  const Geometry& cgeom, const Geometry& fgeom,
827  BC& cbc, int cbccomp,
828  BC& fbc, int fbccomp,
829  const IntVect& ratio,
830  Interp* mapper,
831  const Vector<BCRec>& bcs, int bcscomp,
832  const PreInterpHook& pre_interp,
833  const PostInterpHook& post_interp)
834 {
835 #ifdef AMREX_USE_EB
836  EB2::IndexSpace const* index_space = EB2::TopIndexSpaceIfPresent();
837 #else
838  EB2::IndexSpace const* index_space = nullptr;
839 #endif
840 
841  detail::FillPatchTwoLevels_doit(mf,mf.nGrowVect(),time,cmf,ct,fmf,ft,
842  scomp,dcomp,ncomp,cgeom,fgeom,
843  cbc,cbccomp,fbc,fbccomp,ratio,mapper,bcs,bcscomp,
844  pre_interp,post_interp,index_space);
845 }
846 
847 template <typename MF, typename BC, typename Interp, typename PreInterpHook, typename PostInterpHook>
848 std::enable_if_t<IsFabArray<MF>::value>
849 FillPatchTwoLevels (Array<MF*, AMREX_SPACEDIM> const& mf, IntVect const& nghost, Real time,
850  const Vector<Array<MF*, AMREX_SPACEDIM> >& cmf, const Vector<Real>& ct,
851  const Vector<Array<MF*, AMREX_SPACEDIM> >& fmf, const Vector<Real>& ft,
852  int scomp, int dcomp, int ncomp,
853  const Geometry& cgeom, const Geometry& fgeom,
854  Array<BC, AMREX_SPACEDIM>& cbc, const Array<int, AMREX_SPACEDIM>& cbccomp,
855  Array<BC, AMREX_SPACEDIM>& fbc, const Array<int, AMREX_SPACEDIM>& fbccomp,
856  const IntVect& ratio,
857  Interp* mapper,
858  const Array<Vector<BCRec>, AMREX_SPACEDIM>& bcs, const Array<int, AMREX_SPACEDIM>& bcscomp,
859  const PreInterpHook& pre_interp,
860  const PostInterpHook& post_interp)
861 {
862 #ifdef AMREX_USE_EB
863  EB2::IndexSpace const* index_space = EB2::TopIndexSpaceIfPresent();
864 #else
865  EB2::IndexSpace const* index_space = nullptr;
866 #endif
867 
868  detail::FillPatchTwoLevels_doit(mf,nghost,time,cmf,ct,fmf,ft,
869  scomp,dcomp,ncomp,cgeom,fgeom,
870  cbc,cbccomp,fbc,fbccomp,ratio,mapper,bcs,bcscomp,
871  pre_interp,post_interp,index_space);
872 }
873 
874 template <typename MF, typename BC, typename Interp, typename PreInterpHook, typename PostInterpHook>
875 std::enable_if_t<IsFabArray<MF>::value>
876 FillPatchTwoLevels (Array<MF*, AMREX_SPACEDIM> const& mf, IntVect const& nghost, Real time,
877  const Vector<Array<MF*, AMREX_SPACEDIM> >& cmf, const Vector<Real>& ct,
878  const Vector<Array<MF*, AMREX_SPACEDIM> >& fmf, const Vector<Real>& ft,
879  int scomp, int dcomp, int ncomp,
880  const Geometry& cgeom, const Geometry& fgeom,
881  Array<BC, AMREX_SPACEDIM>& cbc, int cbccomp,
882  Array<BC, AMREX_SPACEDIM>& fbc, int fbccomp,
883  const IntVect& ratio,
884  Interp* mapper,
885  const Array<Vector<BCRec>, AMREX_SPACEDIM>& bcs, int bcscomp,
886  const PreInterpHook& pre_interp,
887  const PostInterpHook& post_interp)
888 {
889 #ifdef AMREX_USE_EB
890  EB2::IndexSpace const* index_space = EB2::TopIndexSpaceIfPresent();
891 #else
892  EB2::IndexSpace const* index_space = nullptr;
893 #endif
894 
895  Array<int, AMREX_SPACEDIM> cbccomp_arr = {AMREX_D_DECL(cbccomp,cbccomp,cbccomp)};
896  Array<int, AMREX_SPACEDIM> fbccomp_arr = {AMREX_D_DECL(fbccomp,fbccomp,fbccomp)};
897  Array<int, AMREX_SPACEDIM> bcscomp_arr = {AMREX_D_DECL(bcscomp,bcscomp,bcscomp)};
898 
899  detail::FillPatchTwoLevels_doit(mf,nghost,time,cmf,ct,fmf,ft,
900  scomp,dcomp,ncomp,cgeom,fgeom,
901  cbc,cbccomp_arr,fbc,fbccomp_arr,ratio,mapper,bcs,bcscomp_arr,
902  pre_interp,post_interp,index_space);
903 }
904 
905 template <typename MF, typename BC, typename Interp, typename PreInterpHook, typename PostInterpHook>
906 std::enable_if_t<IsFabArray<MF>::value>
907 FillPatchTwoLevels (Array<MF*, AMREX_SPACEDIM> const& mf, Real time,
908  const Vector<Array<MF*, AMREX_SPACEDIM> >& cmf, const Vector<Real>& ct,
909  const Vector<Array<MF*, AMREX_SPACEDIM> >& fmf, const Vector<Real>& ft,
910  int scomp, int dcomp, int ncomp,
911  const Geometry& cgeom, const Geometry& fgeom,
912  Array<BC, AMREX_SPACEDIM>& cbc, int cbccomp,
913  Array<BC, AMREX_SPACEDIM>& fbc, int fbccomp,
914  const IntVect& ratio,
915  Interp* mapper,
916  const Array<Vector<BCRec>, AMREX_SPACEDIM>& bcs, int bcscomp,
917  const PreInterpHook& pre_interp,
918  const PostInterpHook& post_interp)
919 {
920 #ifdef AMREX_USE_EB
921  EB2::IndexSpace const* index_space = EB2::TopIndexSpaceIfPresent();
922 #else
923  EB2::IndexSpace const* index_space = nullptr;
924 #endif
925 
926  Array<int, AMREX_SPACEDIM> cbccomp_arr = {AMREX_D_DECL(cbccomp,cbccomp,cbccomp)};
927  Array<int, AMREX_SPACEDIM> fbccomp_arr = {AMREX_D_DECL(fbccomp,fbccomp,fbccomp)};
928  Array<int, AMREX_SPACEDIM> bcscomp_arr = {AMREX_D_DECL(bcscomp,bcscomp,bcscomp)};
929 
930  detail::FillPatchTwoLevels_doit(mf,mf[0]->nGrowVect(),time,cmf,ct,fmf,ft,
931  scomp,dcomp,ncomp,cgeom,fgeom,
932  cbc,cbccomp_arr,fbc,fbccomp_arr,ratio,mapper,bcs,bcscomp_arr,
933  pre_interp,post_interp,index_space);
934 }
935 
936 #ifdef AMREX_USE_EB
937 template <typename MF, typename BC, typename Interp, typename PreInterpHook, typename PostInterpHook>
938 std::enable_if_t<IsFabArray<MF>::value>
939 FillPatchTwoLevels (MF& mf, IntVect const& nghost, Real time,
940  const EB2::IndexSpace& index_space,
941  const Vector<MF*>& cmf, const Vector<Real>& ct,
942  const Vector<MF*>& fmf, const Vector<Real>& ft,
943  int scomp, int dcomp, int ncomp,
944  const Geometry& cgeom, const Geometry& fgeom,
945  BC& cbc, int cbccomp,
946  BC& fbc, int fbccomp,
947  const IntVect& ratio,
948  Interp* mapper,
949  const Vector<BCRec>& bcs, int bcscomp,
950  const PreInterpHook& pre_interp,
951  const PostInterpHook& post_interp)
952 {
953  detail::FillPatchTwoLevels_doit(mf,nghost,time,cmf,ct,fmf,ft,
954  scomp,dcomp,ncomp,cgeom,fgeom,
955  cbc,cbccomp,fbc,fbccomp,ratio,mapper,bcs,bcscomp,
956  pre_interp,post_interp,&index_space);
957 }
958 
959 template <typename MF, typename BC, typename Interp, typename PreInterpHook, typename PostInterpHook>
960 std::enable_if_t<IsFabArray<MF>::value>
961 FillPatchTwoLevels (MF& mf, Real time,
962  const EB2::IndexSpace& index_space,
963  const Vector<MF*>& cmf, const Vector<Real>& ct,
964  const Vector<MF*>& fmf, const Vector<Real>& ft,
965  int scomp, int dcomp, int ncomp,
966  const Geometry& cgeom, const Geometry& fgeom,
967  BC& cbc, int cbccomp,
968  BC& fbc, int fbccomp,
969  const IntVect& ratio,
970  Interp* mapper,
971  const Vector<BCRec>& bcs, int bcscomp,
972  const PreInterpHook& pre_interp,
973  const PostInterpHook& post_interp)
974 {
975  detail::FillPatchTwoLevels_doit(mf,mf.nGrowVect(),time,cmf,ct,fmf,ft,
976  scomp,dcomp,ncomp,cgeom,fgeom,
977  cbc,cbccomp,fbc,fbccomp,ratio,mapper,bcs,bcscomp,
978  pre_interp,post_interp,&index_space);
979 }
980 #endif
981 
982 template <typename MF, typename BC, typename Interp, typename PreInterpHook, typename PostInterpHook>
983 std::enable_if_t<IsFabArray<MF>::value>
984 InterpFromCoarseLevel (MF& mf, Real time,
985  const MF& cmf, int scomp, int dcomp, int ncomp,
986  const Geometry& cgeom, const Geometry& fgeom,
987  BC& cbc, int cbccomp,
988  BC& fbc, int fbccomp,
989  const IntVect& ratio,
990  Interp* mapper,
991  const Vector<BCRec>& bcs, int bcscomp,
992  const PreInterpHook& pre_interp,
993  const PostInterpHook& post_interp)
994 {
995  InterpFromCoarseLevel(mf,mf.nGrowVect(),time,cmf,scomp,dcomp,ncomp,cgeom,fgeom,
996  cbc,cbccomp,fbc,fbccomp,ratio,mapper,bcs,bcscomp,
997  pre_interp,post_interp);
998 }
999 
1000 template <typename MF, typename BC, typename Interp, typename PreInterpHook, typename PostInterpHook>
1001 std::enable_if_t<IsFabArray<MF>::value>
1002 InterpFromCoarseLevel (Array<MF*, AMREX_SPACEDIM> const& mf, Real time,
1003  const Array<MF*, AMREX_SPACEDIM>& cmf, int scomp, int dcomp, int ncomp,
1004  const Geometry& cgeom, const Geometry& fgeom,
1005  Array<BC, AMREX_SPACEDIM>& cbc, int cbccomp,
1006  Array<BC, AMREX_SPACEDIM>& fbc, int fbccomp,
1007  const IntVect& ratio,
1008  Interp* mapper,
1009  const Array<Vector<BCRec>, AMREX_SPACEDIM>& bcs, int bcscomp,
1010  const PreInterpHook& pre_interp,
1011  const PostInterpHook& post_interp)
1012 {
1013  InterpFromCoarseLevel(mf,mf[0]->nGrowVect(),time,cmf,scomp,dcomp,ncomp,cgeom,fgeom,
1014  cbc,cbccomp,fbc,fbccomp,ratio,mapper,bcs,bcscomp,
1015  pre_interp,post_interp);
1016 }
1017 
1018 template <typename MF, typename BC, typename Interp, typename PreInterpHook, typename PostInterpHook>
1019 std::enable_if_t<IsFabArray<MF>::value>
1020 InterpFromCoarseLevel (MF& mf, IntVect const& nghost, Real time,
1021  const MF& cmf, int scomp, int dcomp, int ncomp,
1022  const Geometry& cgeom, const Geometry& fgeom,
1023  BC& cbc, int cbccomp,
1024  BC& fbc, int fbccomp,
1025  const IntVect& ratio,
1026  Interp* mapper,
1027  const Vector<BCRec>& bcs, int bcscomp,
1028  const PreInterpHook& pre_interp,
1029  const PostInterpHook& post_interp)
1030 {
1031  BL_PROFILE("InterpFromCoarseLevel");
1032 
1033  using FAB = typename MF::FABType::value_type;
1034 
1035  const InterpolaterBoxCoarsener& coarsener = mapper->BoxCoarsener(ratio);
1036 
1037  const BoxArray& ba = mf.boxArray();
1038  const DistributionMapping& dm = mf.DistributionMap();
1039 
1040  const IndexType& typ = ba.ixType();
1041 
1042  BL_ASSERT(typ == cmf.boxArray().ixType());
1043 
1044  Box fdomain_g( amrex::convert(fgeom.Domain(),mf.ixType()) );
1045  for (int i = 0; i < AMREX_SPACEDIM; ++i) {
1046  if (fgeom.isPeriodic(i)) {
1047  fdomain_g.grow(i, nghost[i]);
1048  } else {
1049  if constexpr (std::is_same_v<BC, PhysBCFunctUseCoarseGhost>) {
1050  fdomain_g.grow(i, fbc.nghost_outside_domain[i]);
1051  }
1052  }
1053  }
1054 
1055  MF mf_crse_patch;
1056  IntVect send_ghost(0), recv_ghost(0);
1057  if constexpr (std::is_same_v<BC, PhysBCFunctUseCoarseGhost>) {
1058  mf_crse_patch.define(amrex::coarsen(ba,ratio), dm, ncomp, fbc.src_ghost);
1059  send_ghost = fbc.cghost;
1060  recv_ghost = fbc.src_ghost;
1061  } else {
1062  BoxArray ba_crse_patch(ba.size());
1063  { // TODO: later we might want to cache this
1064  for (int i = 0, N = ba.size(); i < N; ++i)
1065  {
1066  Box bx = amrex::convert(amrex::grow(ba[i],nghost), typ);
1067  bx &= fdomain_g;
1068  ba_crse_patch.set(i, coarsener.doit(bx));
1069  }
1070  }
1071 
1072 #ifdef AMREX_USE_EB
1073  if (EB2::TopIndexSpaceIfPresent()) {
1074  auto factory = makeEBFabFactory(cgeom, ba_crse_patch, dm, {0,0,0}, EBSupport::basic);
1075  mf_crse_patch.define(ba_crse_patch, dm, ncomp, 0, MFInfo(), *factory);
1076  } else
1077 #endif
1078  {
1079  mf_crse_patch.define(ba_crse_patch, dm, ncomp, 0);
1080  }
1081  detail::mf_set_domain_bndry (mf_crse_patch, cgeom);
1082  }
1083 
1084  mf_crse_patch.ParallelCopy(cmf, scomp, 0, ncomp, send_ghost, recv_ghost,
1085  cgeom.periodicity());
1086 
1087  cbc(mf_crse_patch, 0, ncomp, mf_crse_patch.nGrowVect(), time, cbccomp);
1088 
1089  detail::call_interp_hook(pre_interp, mf_crse_patch, 0, ncomp);
1090 
1091  FillPatchInterp(mf, dcomp, mf_crse_patch, 0, ncomp, nghost, cgeom, fgeom, fdomain_g,
1092  ratio, mapper, bcs, bcscomp);
1093 
1094 #ifdef AMREX_USE_OMP
1095 #pragma omp parallel if (Gpu::notInLaunchRegion())
1096 #endif
1097  for (MFIter mfi(mf); mfi.isValid(); ++mfi)
1098  {
1099  FAB& dfab = mf[mfi];
1100  Box dfab_bx = dfab.box();
1101  dfab_bx.grow(nghost-mf.nGrowVect());
1102  const Box& dbx = dfab_bx & fdomain_g;
1103 
1104  post_interp(dfab, dbx, dcomp, ncomp);
1105  }
1106 
1107  fbc(mf, dcomp, ncomp, nghost, time, fbccomp);
1108 }
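
A hedged usage sketch for the overload above (not part of the original header): it fills a newly created fine-level MultiFab entirely from coarse data, for example after regridding. The wrapper and variable names are hypothetical; no physical boundary conditions are applied in this sketch.

#include <AMReX_FillPatchUtil.H>
#include <AMReX_PhysBCFunct.H>
#include <AMReX_MFInterpolater.H>

// Hypothetical wrapper around InterpFromCoarseLevel.
void init_from_coarse (amrex::MultiFab& fine_new, amrex::Real t,
                       amrex::MultiFab const& crse,
                       amrex::Geometry const& cgeom, amrex::Geometry const& fgeom,
                       amrex::IntVect const& ref_ratio,
                       amrex::Vector<amrex::BCRec> const& bcrec)
{
    amrex::PhysBCFunctNoOp noop_bc;
    amrex::InterpFromCoarseLevel(fine_new, t, crse, 0, 0, fine_new.nComp(),
                                 cgeom, fgeom, noop_bc, 0, noop_bc, 0,
                                 ref_ratio, &amrex::mf_cell_cons_interp, bcrec, 0);
}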
1109 
1110 template <typename MF, typename BC, typename Interp, typename PreInterpHook, typename PostInterpHook>
1111 std::enable_if_t<IsFabArray<MF>::value>
1112 InterpFromCoarseLevel (Array<MF*, AMREX_SPACEDIM> const& mf, IntVect const& nghost, Real time,
1113  const Array<MF*, AMREX_SPACEDIM>& cmf, int scomp, int dcomp, int ncomp,
1114  const Geometry& cgeom, const Geometry& fgeom,
1115  Array<BC, AMREX_SPACEDIM>& cbc, int cbccomp,
1116  Array<BC, AMREX_SPACEDIM>& fbc, int fbccomp,
1117  const IntVect& ratio,
1118  Interp* mapper,
1119  const Array<Vector<BCRec>, AMREX_SPACEDIM>& bcs, int bcscomp,
1120  const PreInterpHook& pre_interp,
1121  const PostInterpHook& post_interp)
1122 {
1123  BL_PROFILE("InterpFromCoarseLevel(array)");
1124 
1125  using FAB = typename MF::FABType::value_type;
1126  using iFAB = typename iMultiFab::FABType::value_type;
1127 
1128  const InterpolaterBoxCoarsener& coarsener = mapper->BoxCoarsener(ratio);
1129  const BoxArray& ba = mf[0]->boxArray();
1130  const DistributionMapping& dm = mf[0]->DistributionMap();
1131 
1132  AMREX_ASSERT(AMREX_D_TERM(mf[0]->ixType().nodeCentered(0),
1133  && mf[1]->ixType().nodeCentered(1),
1134  && mf[2]->ixType().nodeCentered(2)));
1135 
1136  // These need to be true: (ba[0] == ba[1] == ba[2]) & (dm[0] == dm[1] == dm[2]).
1137  // Debatable whether these are required, or will be enforced elsewhere prior to this func.
1138  AMREX_ASSERT(AMREX_D_TERM(true,
1139  && BoxArray::SameRefs(mf[0]->boxArray(), mf[1]->boxArray()),
1140  && BoxArray::SameRefs(mf[0]->boxArray(), mf[2]->boxArray())));
1141 /*
1142  AMREX_ASSERT(AMREX_D_TERM(true,
1143  && DistributionMapping::SameRefs(mf[0]->DistributionMap(), mf[1]->DistributionMap()),
1144  && DistributionMapping::SameRefs(mf[0]->DistributionMap(), mf[2]->DistributionMap())));
1145 */
1146 
1147  // If needed, adjust to fully overlap the coarse cells.
1148  IntVect nghost_adj = nghost;
1149  for (int d=0; d<AMREX_SPACEDIM; ++d) {
1150  if (nghost[d] % ratio[d] != 0) {
1151  nghost_adj[d] += ratio[d] - (nghost[d] % ratio[d]);
1152  }
1153  }
1154 
1155  Array<MF*, AMREX_SPACEDIM> mf_local = mf;
1156  int dcomp_adj = dcomp;
1157  Array<std::unique_ptr<MF>, AMREX_SPACEDIM> mf_temp;
1158  if (! nghost.allGE(nghost_adj)) {
1159  for (int d=0; d<AMREX_SPACEDIM; ++d) {
1160  mf_temp[d] = std::make_unique<MF>(mf[d]->boxArray(),
1161  mf[d]->DistributionMap(), ncomp, nghost_adj);
1162  mf_local[d] = mf_temp[d].get();
1163  }
1164  dcomp_adj = 0;
1165  }
1166 
1167  // Create a cell-centered boxArray of the region to interp.
1168  // Convert this boxArray and domain as needed.
1169  Box fdomain = amrex::convert(fgeom.Domain(), IntVect::TheZeroVector());
1170  Box fdomain_g(fdomain);
1171  for (int d = 0; d < AMREX_SPACEDIM; ++d) {
1172  if (fgeom.isPeriodic(d)) {
1173  fdomain_g.grow(d,nghost_adj[d]);
1174  }
1175  }
1176 
1177  // Build patches, using domain to account for periodic bcs.
1178  BoxArray ba_crse_patch(ba.size());
1179  { // TODO: later we might want to cache this
1180  for (int i = 0, N = ba.size(); i < N; ++i)
1181  {
1182  Box bx = amrex::convert(amrex::grow(ba[i], nghost_adj), IntVect::TheZeroVector());
1183  bx &= fdomain_g;
1184  ba_crse_patch.set(i, coarsener.doit(bx));
1185  }
1186  }
1187 
1188  Array<MF, AMREX_SPACEDIM> mf_crse_patch;
1189  for (int d = 0; d<AMREX_SPACEDIM; ++d)
1190  {
1191  IndexType typ = mf[d]->boxArray().ixType();
1192  BoxArray ba_crse_idxed = amrex::convert(ba_crse_patch, typ);
1193 
1194 #ifdef AMREX_USE_EB
1195  auto crse_factory = makeEBFabFactory(cgeom, ba_crse_idxed, dm, {0,0,0}, EBSupport::basic);
1196  mf_crse_patch[d].define(ba_crse_idxed, dm, ncomp, 0, MFInfo(), *crse_factory);
1197 #else
1198  mf_crse_patch[d].define(ba_crse_idxed, dm, ncomp, 0);
1199 #endif
1200  detail::mf_set_domain_bndry(mf_crse_patch[d], cgeom);
1201 
1202  mf_crse_patch[d].ParallelCopy(*(cmf[d]), scomp, 0, ncomp, cgeom.periodicity());
1203  cbc[d](mf_crse_patch[d], 0, ncomp, mf_crse_patch[d].nGrowVect(), time, cbccomp);
1204  }
1205 
1206  int idummy1=0, idummy2=0;
1207 #ifdef AMREX_USE_OMP
1208 #pragma omp parallel if (Gpu::notInLaunchRegion())
1209 #endif
1210  {
1211  Vector<Array<BCRec, AMREX_SPACEDIM> > bcr(ncomp);
1212 
1213  // Empty containers describing that all points must be solved (no mask).
1214  Array<iFAB*, AMREX_SPACEDIM> mfab{ AMREX_D_DECL( nullptr, nullptr, nullptr ) };
1215 
1216  for (MFIter mfi(mf_crse_patch[0]); mfi.isValid(); ++mfi)
1217  {
1218  Array<FAB*, AMREX_SPACEDIM> sfab{ AMREX_D_DECL( &(mf_crse_patch[0][mfi]),
1219  &(mf_crse_patch[1][mfi]),
1220  &(mf_crse_patch[2][mfi]) )};
1221  Array<FAB*, AMREX_SPACEDIM> dfab{ AMREX_D_DECL( &(*mf_local[0])[mfi],
1222  &(*mf_local[1])[mfi],
1223  &(*mf_local[2])[mfi] )};
1224 
1225  const Box& sbx_cc = amrex::convert(sfab[0]->box(), IntVect::TheZeroVector());
1226  Box dfab_cc = amrex::convert(dfab[0]->box(), IntVect::TheZeroVector());
1227  const Box& dbx_cc = dfab_cc & fdomain_g;
1228 
1229  for (int d=0; d<AMREX_SPACEDIM; ++d)
1230  {
1231  Vector<BCRec> bcr_d(ncomp);
1232 
1233  amrex::setBC(sfab[d]->box(),
1234  amrex::convert(cgeom.Domain(), sfab[d]->box().ixType()),
1235  bcscomp,0,ncomp,bcs[d],bcr_d);
1236 
1237  for (int n=0; n<ncomp; ++n)
1238  { bcr[n][d] = bcr_d[n]; }
1239  }
1240 
1241  pre_interp(sfab, sbx_cc, 0, ncomp);
1242 
1243  mapper->interp_arr(sfab, 0, dfab, 0, ncomp, dbx_cc, ratio, mfab,
1244  cgeom, fgeom, bcr, idummy1, idummy2, RunOn::Gpu);
1245 
1246  post_interp(dfab, dbx_cc, 0, ncomp);
1247  }
1248  }
1249 
1250  for (int d=0; d<AMREX_SPACEDIM; ++d)
1251  {
1252  if (mf[d] != mf_local[d]) {
1253  amrex::Copy(*mf[d], *mf_local[d], 0, dcomp_adj, ncomp, nghost);
1254  }
1255 
1256  fbc[d](*mf[d], dcomp, ncomp, nghost, time, fbccomp);
1257  }
1258 }
1259 
1260 template <typename MF, typename Interp>
1261 std::enable_if_t<IsFabArray<MF>::value>
1262 InterpFromCoarseLevel (MF& mf, IntVect const& nghost,
1263  IntVect const& nghost_outside_domain,
1264  const MF& cmf, int scomp, int dcomp, int ncomp,
1265  const Geometry& cgeom, const Geometry& fgeom,
1266  const IntVect& ratio, Interp* mapper,
1267  const Vector<BCRec>& bcs, int bcscomp)
1268 {
1269  PhysBCFunctUseCoarseGhost erfbc(cmf,nghost,nghost_outside_domain,ratio,mapper);
1270  InterpFromCoarseLevel(mf, nghost, Real(0.0), cmf, scomp, dcomp, ncomp,
1271  cgeom, fgeom, erfbc, 0, erfbc, 0, ratio, mapper,
1272  bcs, bcscomp);
1273 }
1274 
1275 template <typename MF>
1276 std::enable_if_t<IsFabArray<MF>::value>
1277 FillPatchSingleLevel (MF& mf, IntVect const& nghost, Real time,
1278  const Vector<MF*>& smf, IntVect const& snghost,
1279  const Vector<Real>& stime, int scomp, int dcomp, int ncomp,
1280  const Geometry& geom)
1281 {
1282  PhysBCFunctUseCoarseGhost erfbc(snghost);
1283  FillPatchSingleLevel(mf, nghost, time, smf, stime, scomp, dcomp, ncomp, geom,
1284  erfbc, 0);
1285 }
1286 
1287 template <typename MF, typename Interp>
1288 std::enable_if_t<IsFabArray<MF>::value>
1289 FillPatchTwoLevels (MF& mf, IntVect const& nghost,
1290  IntVect const& nghost_outside_domain, Real time,
1291  const Vector<MF*>& cmf, const Vector<Real>& ct,
1292  const Vector<MF*>& fmf, const Vector<Real>& ft,
1293  int scomp, int dcomp, int ncomp,
1294  const Geometry& cgeom, const Geometry& fgeom,
1295  const IntVect& ratio, Interp* mapper,
1296  const Vector<BCRec>& bcs, int bcscomp)
1297 {
1298  PhysBCFunctUseCoarseGhost erfbc(*cmf[0], nghost, nghost_outside_domain, ratio,
1299  mapper);
1300  FillPatchTwoLevels(mf, nghost, time, cmf, ct, fmf, ft, scomp, dcomp, ncomp,
1301  cgeom, fgeom, erfbc, 0, erfbc, 0, ratio, mapper,
1302  bcs, bcscomp);
1303 }
1304 
1305 template <typename MF, typename BC, typename Interp>
1306 std::enable_if_t<IsFabArray<MF>::value>
1307 FillPatchNLevels (MF& mf, int level, const IntVect& nghost, Real time,
1308  const Vector<Vector<MF*>>& smf, const Vector<Vector<Real>>& st,
1309  int scomp, int dcomp, int ncomp,
1310  const Vector<Geometry>& geom,
1311  Vector<BC>& bc, int bccomp,
1312  const Vector<IntVect>& ratio,
1313  Interp* mapper,
1314  const Vector<BCRec>& bcr, int bcrcomp)
1315 {
1316  BL_PROFILE("FillPatchNLevels");
1317 
1318  // FillPatchTwoLevels relies on mf's valid region being inside the
1319  // domain at periodic boundaries. But when we create the coarsened
1320  // BoxArray using mapper->CoarseBox, the resulting BoxArray might
1321  // violate that requirement. If that happens, we need to create a second
1322  // version of the BoxArray that is safe for FillPatchTwoLevels.
1323 
1324  auto get_clayout = [&] () -> std::tuple<BoxArray,BoxArray,DistributionMapping>
1325  {
1326  if (level == 0) {
1327  return std::make_tuple(BoxArray(), BoxArray(), DistributionMapping());
1328  } else {
1329  BoxArray const& ba = mf.boxArray();
1330  auto const& typ = ba.ixType();
1331  std::map<int,Vector<Box>> extra_boxes_map;
1332  BoxList cbl(typ);
1333  cbl.reserve(ba.size());
1334  for (int i = 0, N = int(ba.size()); i < N; ++i) {
1335  Box const& cbox = mapper->CoarseBox(amrex::grow(ba[i],nghost),ratio[level-1]);
1336  cbl.push_back(cbox);
1337  Box gdomain = geom[level-1].growNonPeriodicDomain(cbox.length());
1338  gdomain.convert(typ);
1339  if (!gdomain.contains(cbox)) {
1340  auto& extra_boxes = extra_boxes_map[i];
1341  auto const& pshift = geom[level-1].periodicity().shiftIntVect();
1342  for (auto const& piv : pshift) {
1343  auto const& ibox = amrex::shift(cbox,piv) & gdomain;
1344  if (ibox.ok()) {
1345  extra_boxes.push_back(ibox);
1346  }
1347  }
1348  }
1349  }
1350 
1351  BoxArray cba2;
1352  DistributionMapping dm2;
1353  if (!extra_boxes_map.empty()) {
1354  BoxList cbl2 = cbl;
1355  auto& lbox = cbl2.data();
1356  DistributionMapping const& dm = mf.DistributionMap();
1357  Vector<int> procmap2 = dm.ProcessorMap();
1358  for (auto const& [i, vb] : extra_boxes_map) {
1359  lbox[i] = vb[0];
1360  for (int j = 1, nj = int(vb.size()); j < nj; ++j) {
1361  lbox.push_back(vb[j]);
1362  procmap2.push_back(dm[i]);
1363  }
1364  }
1365  cba2 = BoxArray(std::move(cbl2));
1366  dm2 = DistributionMapping(std::move(procmap2));
1367  }
1368 
1369  return std::make_tuple(BoxArray(std::move(cbl)), cba2, dm2);
1370  }
1371  };
1372 
1373 #ifdef AMREX_USE_EB
1374  EB2::IndexSpace const* index_space = EB2::TopIndexSpaceIfPresent();
1375 #else
1376  EB2::IndexSpace const* index_space = nullptr;
1377 #endif
1378 
1379  AMREX_ALWAYS_ASSERT(level < int(geom.size()) &&
1380  level < int(bc.size()) &&
1381  level < int(ratio.size()+1));
1382  if (level == 0) {
1383  FillPatchSingleLevel(mf, nghost, time, smf[0], st[0], scomp, dcomp, ncomp, geom[0],
1384  bc[0], bccomp);
1385  } else if (level >= int(smf.size()))
1386  {
1387  auto const& [ba1, ba2, dm2] = get_clayout();
1388  MF cmf1, cmf2;
1389 #ifdef AMREX_USE_EB
1390  if (index_space) {
1391  auto factory = makeEBFabFactory(index_space, geom[level-1], ba1,
1392  mf.DistributionMap(), {0,0,0},
1393  EBSupport::basic);
1394  cmf1.define(ba1, mf.DistributionMap(), ncomp, 0, MFInfo(), *factory);
1395  if (!ba2.empty()) {
1396  auto factory2 = makeEBFabFactory(index_space, geom[level-1], ba2,
1397  dm2, {0,0,0},
1398  EBSupport::basic);
1399  cmf2.define(ba2, dm2, ncomp, 0, MFInfo(), *factory2);
1400  }
1401  } else
1402 #endif
1403  {
1404  cmf1.define(ba1, mf.DistributionMap(), ncomp, 0);
1405  if (!ba2.empty()) {
1406  cmf2.define(ba2, dm2, ncomp, 0);
1407  }
1408  }
1409 
1410  MF* p_mf_inside = (ba2.empty()) ? &cmf1 : &cmf2;
1411  FillPatchNLevels(*p_mf_inside, level-1, IntVect(0), time, smf, st, scomp, 0, ncomp,
1412  geom, bc, bccomp, ratio, mapper, bcr, bcrcomp);
1413  if (&cmf1 != p_mf_inside) {
1414  cmf1.ParallelCopy(*p_mf_inside, geom[level-1].periodicity());
1415  }
1416  Box domain_g = geom[level].growPeriodicDomain(nghost);
1417  domain_g.convert(mf.ixType());
1418  FillPatchInterp(mf, dcomp, cmf1, 0, ncomp, nghost, geom[level-1], geom[level],
1419  domain_g, ratio[level-1], mapper, bcr, bcrcomp);
1420  } else {
1421  NullInterpHook<typename MF::FABType::value_type> hook{};
1422  int error_code = detail::FillPatchTwoLevels_doit(mf, nghost, time,
1423  smf[level-1], st[level-1],
1424  smf[level ], st[level ],
1425  scomp, dcomp, ncomp,
1426  geom[level-1], geom[level],
1427  bc[level-1], bccomp,
1428  bc[level ], bccomp,
1429  ratio[level-1], mapper, bcr, bcrcomp,
1430  hook, hook, index_space, true);
1431  if (error_code == 0) { return; }
1432 
1433  auto const& [ba1, ba2, dm2] = get_clayout();
1434  MF cmf_tmp;
1435 #ifdef AMREX_USE_EB
1436  if (index_space) {
1437  if (ba2.empty()) {
1438  auto factory = makeEBFabFactory(index_space, geom[level-1], ba1,
1439  mf.DistributionMap(), {0,0,0},
1440  EBSupport::basic);
1441  cmf_tmp.define(ba1, mf.DistributionMap(), ncomp, 0, MFInfo(), *factory);
1442  } else {
1443  auto factory = makeEBFabFactory(index_space, geom[level-1], ba2,
1444  dm2, {0,0,0},
1445  EBSupport::basic);
1446  cmf_tmp.define(ba2, dm2, ncomp, 0, MFInfo(), *factory);
1447  }
1448  } else
1449 #endif
1450  {
1451  if (ba2.empty()) {
1452  cmf_tmp.define(ba1, mf.DistributionMap(), ncomp, 0);
1453  } else {
1454  cmf_tmp.define(ba2, dm2, ncomp, 0);
1455  }
1456  }
1457 
1458  FillPatchNLevels(cmf_tmp, level-1, IntVect(0), time, smf, st, scomp, 0, ncomp,
1459  geom, bc, bccomp, ratio, mapper, bcr, bcrcomp);
1460 
1461  Vector<MF*> cmf{&cmf_tmp};
1462  Vector<MF*> fmf = smf[level];
1463  Vector<MF> fmf_raii;
1464  if (scomp != 0) {
1465  for (auto const* p : fmf) {
1466  fmf_raii.emplace_back(*p, amrex::make_alias, scomp, ncomp);
1467  }
1468  }
1469 
1470  detail::FillPatchTwoLevels_doit(mf, nghost, time,
1471  cmf, {time},
1472  fmf, st[level],
1473  0, dcomp, ncomp,
1474  geom[level-1], geom[level],
1475  bc[level-1], bccomp,
1476  bc[level ], bccomp,
1477  ratio[level-1], mapper, bcr, bccomp,
1478  hook, hook, index_space);
1479  }
1480 }
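
A hedged usage sketch for FillPatchNLevels (not part of the original header): it fills dst on the given level using data from all levels up to that level, recursing to coarser levels as needed. The wrapper and parameter names are hypothetical, and no physical boundary conditions are applied in this sketch.

#include <AMReX_FillPatchUtil.H>
#include <AMReX_PhysBCFunct.H>
#include <AMReX_MFInterpolater.H>

// Hypothetical wrapper around FillPatchNLevels.
void fill_nlevels (amrex::MultiFab& dst, int level, amrex::Real t,
                   amrex::Vector<amrex::Vector<amrex::MultiFab*>> const& data,
                   amrex::Vector<amrex::Vector<amrex::Real>> const& data_time,
                   amrex::Vector<amrex::Geometry> const& geom,
                   amrex::Vector<amrex::IntVect> const& ref_ratio,
                   amrex::Vector<amrex::BCRec> const& bcrec)
{
    amrex::Vector<amrex::PhysBCFunctNoOp> bcs(geom.size());
    amrex::FillPatchNLevels(dst, level, dst.nGrowVect(), t, data, data_time,
                            0, 0, dst.nComp(), geom, bcs, 0,
                            ref_ratio, &amrex::mf_cell_cons_interp, bcrec, 0);
}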
1481 
1482 }
1483 
1484 #endif
MF make_mf_fine_patch(FabArrayBase::FPinfo const &fpc, int ncomp)
Definition: AMReX_FillPatchUtil_I.H:335
void mf_set_domain_bndry(MF &mf, Geometry const &geom)
Definition: AMReX_FillPatchUtil_I.H:379
MF make_mf_refined_patch(FabArrayBase::FPinfo const &fpc, int ncomp, IndexType idx_type, IntVect ratio)
Definition: AMReX_FillPatchUtil_I.H:357
MF make_mf_crse_patch(FabArrayBase::FPinfo const &fpc, int ncomp)
Definition: AMReX_FillPatchUtil_I.H:313
MF make_mf_crse_mask(FabArrayBase::FPinfo const &fpc, int ncomp, IndexType idx_type, IntVect ratio)
Definition: AMReX_FillPatchUtil_I.H:368
std::enable_if_t< IsFabArray< MF >::value, int > FillPatchTwoLevels_doit(MF &mf, IntVect const &nghost, Real time, const Vector< MF * > &cmf, const Vector< Real > &ct, const Vector< MF * > &fmf, const Vector< Real > &ft, int scomp, int dcomp, int ncomp, const Geometry &cgeom, const Geometry &fgeom, BC &cbc, int cbccomp, BC &fbc, int fbccomp, const IntVect &ratio, Interp *mapper, const Vector< BCRec > &bcs, int bcscomp, const PreInterpHook &pre_interp, const PostInterpHook &post_interp, EB2::IndexSpace const *index_space, bool return_error_code=false)
Definition: AMReX_FillPatchUtil_I.H:452
auto call_interp_hook(F const &f, MF &mf, int icomp, int ncomp) -> decltype(f(mf[0], Box(), icomp, ncomp))
Definition: AMReX_FillPatchUtil_I.H:10
bool ProperlyNested(const IntVect &ratio, const IntVect &blocking_factor, int ngrow, const IndexType &boxType, Interp *mapper)
Test if AMR grids are properly nested.
Definition: AMReX_FillPatchUtil_I.H:33
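A minimal usage sketch for ProperlyNested (the ratio, blocking factor, ghost width, and interpolater below are illustrative assumptions, not values taken from this file): an application can verify the nesting requirement before relying on two-level fill patching.

    // Sketch: does a blocking factor of 8 leave enough coarse cells under the
    // fine-grid buffer for cell-conservative interpolation with 2 ghost cells?
    bool ok = amrex::ProperlyNested(amrex::IntVect(2),               // refinement ratio
                                    amrex::IntVect(8),               // blocking factor
                                    2,                               // ngrow
                                    amrex::IndexType::TheCellType(),
                                    &amrex::cell_cons_interp);
    AMREX_ALWAYS_ASSERT(ok);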
std::enable_if_t< IsFabArray< MF >::value > FillPatchSingleLevel(MF &mf, IntVect const &nghost, Real time, const Vector< MF * > &smf, const Vector< Real > &stime, int scomp, int dcomp, int ncomp, const Geometry &geom, BC &physbcf, int bcfcomp)
FillPatch with data from the current level.
Definition: AMReX_FillPatchUtil_I.H:73
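A minimal calling sketch for FillPatchSingleLevel, with illustrative names (S_fill, S_old, S_new, geom, physbc, and the times are assumed to be owned by the application; they are not defined in this file): fill a patch at an intermediate time by linear interpolation between two state snapshots on the same level.

    amrex::Vector<amrex::MultiFab*> smf  {&S_old, &S_new};
    amrex::Vector<amrex::Real>      stime{t_old, t_new};
    amrex::FillPatchSingleLevel(S_fill, amrex::IntVect(2), time, smf, stime,
                                0,                  // scomp: first source component
                                0,                  // dcomp: first destination component
                                S_fill.nComp(),     // ncomp
                                geom, physbc,
                                0);                 // bcfcomp: first component in physbc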
std::enable_if_t< IsFabArray< MF >::value &&!std::is_same_v< Interp, MFInterpolater > > InterpFace(Interp *interp, MF const &mf_crse_patch, int crse_comp, MF &mf_refined_patch, int fine_comp, int ncomp, const IntVect &ratio, const iMF &solve_mask, const Geometry &crse_geom, const Geometry &fine_geom, int bcscomp, RunOn gpu_or_cpu, const Vector< BCRec > &bcs)
Definition: AMReX_FillPatchUtil_I.H:253
std::enable_if_t< IsFabArray< MF >::value > FillPatchTwoLevels(MF &mf, IntVect const &nghost, Real time, const Vector< MF * > &cmf, const Vector< Real > &ct, const Vector< MF * > &fmf, const Vector< Real > &ft, int scomp, int dcomp, int ncomp, const Geometry &cgeom, const Geometry &fgeom, BC &cbc, int cbccomp, BC &fbc, int fbccomp, const IntVect &ratio, Interp *mapper, const Vector< BCRec > &bcs, int bcscomp, const PreInterpHook &pre_interp={}, const PostInterpHook &post_interp={})
FillPatch with data from the current level and the level below.
Definition: AMReX_FillPatchUtil_I.H:796
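A minimal calling sketch for FillPatchTwoLevels, with illustrative names (the coarse/fine state MultiFabs, geometries, boundary functors cbc/fbc, and the Vector<BCRec> bcrec are assumptions owned by the application): fill four ghost cells on the fine level, falling back to time-interpolated, spatially interpolated coarse data where no fine data exists.

    amrex::Vector<amrex::MultiFab*> cmf{&S_crse_old, &S_crse_new};
    amrex::Vector<amrex::Real>      ct {t_old, t_new};
    amrex::Vector<amrex::MultiFab*> fmf{&S_fine_old, &S_fine_new};
    amrex::Vector<amrex::Real>      ft {t_old, t_new};
    amrex::FillPatchTwoLevels(S_fine_fill, amrex::IntVect(4), time,   // needs >= 4 ghost cells
                              cmf, ct, fmf, ft,
                              0, 0, S_fine_fill.nComp(),
                              cgeom, fgeom,
                              cbc, 0,                      // coarse physical BC functor
                              fbc, 0,                      // fine physical BC functor
                              amrex::IntVect(2),           // refinement ratio
                              &amrex::cell_cons_interp,    // spatial interpolater
                              bcrec, 0);                   // Vector<BCRec>, bcscomp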
std::enable_if_t< IsFabArray< MF >::value > InterpFromCoarseLevel(MF &mf, Real time, const MF &cmf, int scomp, int dcomp, int ncomp, const Geometry &cgeom, const Geometry &fgeom, BC &cbc, int cbccomp, BC &fbc, int fbccomp, const IntVect &ratio, Interp *mapper, const Vector< BCRec > &bcs, int bcscomp, const PreInterpHook &pre_interp={}, const PostInterpHook &post_interp={})
Fill with interpolation of coarse level data.
Definition: AMReX_FillPatchUtil_I.H:984
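A minimal calling sketch for InterpFromCoarseLevel, with illustrative names (S_fine, S_crse, the geometries, BC functors, and bcrec are assumptions owned by the application): initialize a newly created fine-level MultiFab entirely from coarse data, for example when a finer level first appears during regridding.

    amrex::InterpFromCoarseLevel(S_fine, time, S_crse,
                                 0, 0, S_fine.nComp(),
                                 cgeom, fgeom,
                                 cbc, 0, fbc, 0,
                                 amrex::IntVect(2),          // refinement ratio
                                 &amrex::cell_cons_interp,   // spatial interpolater
                                 bcrec, 0);                  // Vector<BCRec>, bcscomp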
std::enable_if_t< IsFabArray< MF >::value > FillPatchNLevels(MF &mf, int level, const IntVect &nghost, Real time, const Vector< Vector< MF * >> &smf, const Vector< Vector< Real >> &st, int scomp, int dcomp, int ncomp, const Vector< Geometry > &geom, Vector< BC > &bc, int bccomp, const Vector< IntVect > &ratio, Interp *mapper, const Vector< BCRec > &bcr, int bcrcomp)
FillPatch with data from AMR levels.
Definition: AMReX_FillPatchUtil_I.H:1307
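A minimal calling sketch for FillPatchNLevels, with illustrative names (smf, st, geoms, physbcs, ratios, and bcrec are per-level containers assumed to be owned by the application): fill a patch on level lev using data from that level and, where needed, successively coarser levels.

    // smf[k] holds the state MultiFab pointers on level k, st[k] the matching times.
    amrex::FillPatchNLevels(S_fill, lev, amrex::IntVect(2), time,
                            smf, st,
                            0, 0, S_fill.nComp(),
                            geoms,                       // Vector<Geometry>
                            physbcs, 0,                  // Vector<BC>, bccomp
                            ratios,                      // Vector<IntVect> refinement ratios
                            &amrex::cell_cons_interp,
                            bcrec, 0);                   // Vector<BCRec>, bcrcomp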