#ifndef AMREX_FillPatchUtil_I_H_
#define AMREX_FillPatchUtil_I_H_
#include <AMReX_Config.H>
// Apply a pre/post-interpolation hook FAB by FAB (overload selected when the
// hook is callable with (fab, box, icomp, ncomp)).
template <typename F, typename MF>
auto call_interp_hook (F const& f, MF& mf, int icomp, int ncomp)
    -> decltype(f(mf[0], Box(), icomp, ncomp))
{
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
    // ... (MFIter loop over mf elided in this listing)
        const Box& dbx = dfab.box();
        f(dfab, dbx, icomp, ncomp);
    // ...
}

// Overload selected when the hook is callable with the whole FabArray.
template <typename F, typename MF>
auto call_interp_hook (F const& f, MF& mf, int icomp, int ncomp)
    -> decltype(f(mf, icomp, ncomp))
{
    // ...
}
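// Illustrative sketch (not part of this header): the two overloads above let
// pre_interp/post_interp hooks be written either per-FAB or for the whole
// FabArray. Under that assumption, a per-FAB no-op hook could look like:
//
//     auto post_interp = [] (FArrayBox& fab, Box const& bx, int icomp, int ncomp)
//     {
//         amrex::ignore_unused(fab, bx, icomp, ncomp);
//     };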
template <typename Interp>
bool ProperlyNested (const IntVect& ratio, const IntVect& blocking_factor, int ngrow,
                     const IndexType& boxType, Interp* mapper)
{
    int ratio_max = ratio[0];
#if (AMREX_SPACEDIM > 1)
    ratio_max = std::max(ratio_max, ratio[1]);
#endif
#if (AMREX_SPACEDIM == 3)
    ratio_max = std::max(ratio_max, ratio[2]);
#endif

    // There are at least this many coarse buffer cells between a fine grid and
    // the next coarse/fine boundary.
    const IntVect& nbuf = blocking_factor / ratio_max;

    // ... (construction of a model crse_box and fine_box elided in this listing)
    fine_box.refine(ratio_max);
    // ...
    const Box& fine_box_coarsened = mapper->CoarseBox(fine_box, ratio_max);
    return crse_box.contains(fine_box_coarsened);
}
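// Usage sketch (illustrative; the refinement ratio, blocking factor, and ghost
// width are assumed values): check whether cell-centered data with 4 ghost
// cells can be filled from the coarse level with cell_cons_interp.
//
//     IntVect ratio(2), blocking_factor(8);
//     bool ok = ProperlyNested(ratio, blocking_factor, 4,
//                              IndexType::TheCellType(), &cell_cons_interp);
//     AMREX_ALWAYS_ASSERT(ok);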
// Convenience overload; it forwards to the main FillPatchSingleLevel below
// (leading parameters elided in this listing).
template <typename MF, typename BC>
std::enable_if_t<IsFabArray<MF>::value>
FillPatchSingleLevel (/* ... */
                      int scomp, int dcomp, int ncomp,
                      /* ... */ BC& physbcf, int bcfcomp)
{
    FillPatchSingleLevel(/* ... */ geom, physbcf, bcfcomp);
}
template <typename MF, typename BC>
std::enable_if_t<IsFabArray<MF>::value>
FillPatchSingleLevel (MF& mf, IntVect const& nghost, Real time,
                      const Vector<MF*>& smf, const Vector<Real>& stime,
                      int scomp, int dcomp, int ncomp,
                      const Geometry& geom, BC& physbcf, int bcfcomp)
{
    // ...
    if constexpr (std::is_same_v<BC,PhysBCFunctUseCoarseGhost>) {
        src_ghost = physbcf.fp1_src_ghost;
    }
    // ...

    if (smf.size() == 1)
    {
        if (&mf == smf[0] && scomp == dcomp) {
            mf.FillBoundary(dcomp, ncomp, nghost, geom.periodicity());
        } else {
            mf.ParallelCopy(*smf[0], scomp, dcomp, ncomp, src_ghost, nghost, geom.periodicity());
        }
    }
    else if (smf.size() == 2)
    {
        // Two time levels: interpolate linearly in time into dmf, which is
        // either mf itself or a temporary with the source layout.
        // ...
        if (mf.boxArray() == smf[0]->boxArray() &&
            mf.DistributionMap() == smf[0]->DistributionMap())
        // ...
                       MFInfo(), smf[0]->Factory());
        // ...
        if ((dmf != smf[0] && dmf != smf[1]) || scomp != dcomp)
        {
            // ...
            if constexpr (std::is_same_v<BC,PhysBCFunctUseCoarseGhost>) {
                interp_ghost = physbcf.fp1_src_ghost;
            }
            // ...
            interp_ghost.min(nghost);
            // ...
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
            // ... (MFIter loop)
            {
                const Box& bx = mfi.growntilebox(interp_ghost);
                const Real t0 = stime[0];
                const Real t1 = stime[1];
                auto const sfab0 = smf[0]->array(mfi);
                auto const sfab1 = smf[1]->array(mfi);
                auto       dfab  = dmf->array(mfi);

                // ... (time matches the first level: copy it)
                        dfab(i,j,k,n+destcomp) = sfab0(i,j,k,n+scomp);
                // ... (time matches the second level: copy it)
                        dfab(i,j,k,n+destcomp) = sfab1(i,j,k,n+scomp);
                // ... (otherwise blend with weights that sum to one)
                    Real alpha = (t1-time)/(t1-t0);
                    Real beta  = (time-t0)/(t1-t0);
                        dfab(i,j,k,n+destcomp) = alpha*sfab0(i,j,k,n+scomp)
                            +                     beta*sfab1(i,j,k,n+scomp);
                // ... (degenerate case t0 == t1)
                        dfab(i,j,k,n+destcomp) = sfab0(i,j,k,n+scomp);
            }
        }

        // ... (copy the time-interpolated data back into mf if needed)
            mf.FillBoundary(dcomp, ncomp, nghost, geom.periodicity());
        // ...
            mf.ParallelCopy(*dmf, 0, dcomp, ncomp, src_ghost, nghost, geom.periodicity());
        // ...
    }
    else {
        amrex::Abort("FillPatchSingleLevel: high-order interpolation in time not implemented yet");
    }

    physbcf(mf, dcomp, ncomp, nghost, time, bcfcomp);
}
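// Usage sketch (illustrative; S_old/S_new, t_old/t_new, and physbc are assumed
// to be the caller's state MultiFabs, their times, and a PhysBCFunct-style
// boundary functor):
//
//     Vector<MultiFab*> smf{&S_old, &S_new};
//     Vector<Real>      stime{t_old, t_new};
//     FillPatchSingleLevel(S_target, IntVect(2), time, smf, stime,
//                          0, 0, S_target.nComp(), geom, physbc, 0);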
void FillPatchInterp (MultiFab& mf_fine_patch, int fcomp,
                      MultiFab const& mf_crse_patch, int ccomp, int ncomp,
                      IntVect const& ng, const Geometry& cgeom, const Geometry& fgeom,
                      Box const& dest_domain, const IntVect& ratio,
                      MFInterpolater* mapper, const Vector<BCRec>& bcs, int bcscomp);
template <typename MF, typename Interp>
std::enable_if_t<IsFabArray<MF>::value && !std::is_same_v<Interp,MFInterpolater>>
FillPatchInterp (/* ... */)
{
    // ...
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
    // ... (MFIter loop)
    {
        auto& sfab = mf_crse_patch[mfi];
        const Box& sbx = sfab.box();
        // ...
        auto& dfab = mf_fine_patch[mfi];
        // ...
        mapper->interp(sfab, ccomp, dfab, fcomp, ncomp, dbx, ratio,
                       cgeom, fgeom, bcr, idummy, idummy, RunOn::Gpu);
    }
}
template <typename MF>
std::enable_if_t<IsFabArray<MF>::value>
FillPatchInterp (/* ... */)
{
    // Dispatches to one of the FillPatchInterp implementations above with the
    // same trailing arguments.
    // ...
            ncomp, ng, cgeom, fgeom, dest_domain, ratio,
    // ...
            ncomp, ng, cgeom, fgeom, dest_domain, ratio,
    // ...
}
template <typename MF, typename iMF, typename Interp>
std::enable_if_t<IsFabArray<MF>::value && !std::is_same_v<Interp,MFInterpolater>>
InterpFace (Interp* interp,
            MF const& mf_crse_patch, int crse_comp,
            MF& mf_refined_patch, int fine_comp,
            int ncomp, const IntVect& ratio,
            const iMF& solve_mask, const Geometry& crse_geom, const Geometry& fine_geom,
            int bcscomp, RunOn gpu_or_cpu, const Vector<BCRec>& bcs)
{
    // ... (MFIter loop)
    {
        auto& sfab = mf_crse_patch[mfi];
        const Box& sbx = sfab.box();
        auto& dfab = mf_refined_patch[mfi];
        Box const& dbx = dfab.box();
        auto& ifab = solve_mask[mfi];
        // ...
        interp->interp_face(sfab, crse_comp, dfab, fine_comp, ncomp,
                            dbx, ratio, ifab, crse_geom, fine_geom,
                            bcr, bcscomp, gpu_or_cpu);
    }
}
template <typename MF, typename iMF>
std::enable_if_t<IsFabArray<MF>::value>
InterpFace (/* ... */,
            MF const& mf_crse_patch, int crse_comp,
            MF& mf_refined_patch, int fine_comp,
            int ncomp, const IntVect& ratio,
            const iMF& solve_mask, const Geometry& crse_geom, const Geometry& fine_geom,
            int bccomp, RunOn gpu_or_cpu, /* ... */)
{
    // Both branches forward to an InterpFace implementation with the same
    // trailing arguments.
    // ...
                   mf_crse_patch, crse_comp, mf_refined_patch, fine_comp,
                   ncomp, ratio, solve_mask, crse_geom, fine_geom, bccomp,
    // ...
                   mf_crse_patch, crse_comp, mf_refined_patch, fine_comp,
                   ncomp, ratio, solve_mask, crse_geom, fine_geom, bccomp,
    // ...
}
// Helpers that build the temporary patch FabArrays described by a
// FabArrayBase::FPinfo. The first set is enabled when MF is built on
// FArrayBox (so the factories stored in the FPinfo can be used); the second
// set covers all other FAB types. Most bodies are elided in this listing.

template <typename MF,
          std::enable_if_t<std::is_same_v<typename MF::FABType::value_type, FArrayBox>,int> = 0>
MF make_mf_crse_patch (FabArrayBase::FPinfo const& fpc, int ncomp)
{
    // ... (defines mf_crse_patch on fpc.ba_crse_patch / fpc.dm_patch with fpc.fact_crse_patch)
    return mf_crse_patch;
}

template <typename MF,
          std::enable_if_t<std::is_same_v<typename MF::FABType::value_type, FArrayBox>,int> = 0>
MF make_mf_crse_patch (FabArrayBase::FPinfo const& fpc, int ncomp, IndexType idx_type)
{
    // ...
    return mf_crse_patch;
}

template <typename MF,
          std::enable_if_t<std::is_same_v<typename MF::FABType::value_type, FArrayBox>,int> = 0>
MF make_mf_fine_patch (FabArrayBase::FPinfo const& fpc, int ncomp)
{
    // ...
    return mf_fine_patch;
}

template <typename MF,
          std::enable_if_t<std::is_same_v<typename MF::FABType::value_type, FArrayBox>,int> = 0>
MF make_mf_fine_patch (FabArrayBase::FPinfo const& fpc, int ncomp, IndexType idx_type)
{
    // ...
    return mf_fine_patch;
}

template <typename MF,
          std::enable_if_t<std::is_same_v<typename MF::FABType::value_type, FArrayBox>,int> = 0>
MF make_mf_refined_patch (FabArrayBase::FPinfo const& fpc, int ncomp, IndexType idx_type, IntVect ratio)
{
    // ...
    return mf_refined_patch;
}

template <typename MF,
          std::enable_if_t<std::is_same_v<typename MF::FABType::value_type, FArrayBox>,int> = 0>
MF make_mf_crse_mask (FabArrayBase::FPinfo const& fpc, int ncomp, IndexType idx_type, IntVect ratio)
{
    // ...
}

template <typename MF,
          std::enable_if_t<std::is_same_v<typename MF::FABType::value_type, FArrayBox>,int> = 0>
void mf_set_domain_bndry (MF& mf, Geometry const& geom)
{
    mf.setDomainBndry(std::numeric_limits<Real>::quiet_NaN(), geom);
}

template <typename MF,
          std::enable_if_t<!std::is_same_v<typename MF::FABType::value_type, FArrayBox>,int> = 0>
MF make_mf_crse_patch (FabArrayBase::FPinfo const& fpc, int ncomp)
{
    // ...
}

template <typename MF,
          std::enable_if_t<!std::is_same_v<typename MF::FABType::value_type, FArrayBox>,int> = 0>
MF make_mf_crse_patch (FabArrayBase::FPinfo const& fpc, int ncomp, IndexType idx_type)
{
    return MF(amrex::convert(fpc.ba_crse_patch, idx_type), fpc.dm_patch, ncomp, 0);
}

template <typename MF,
          std::enable_if_t<!std::is_same_v<typename MF::FABType::value_type, FArrayBox>,int> = 0>
MF make_mf_fine_patch (FabArrayBase::FPinfo const& fpc, int ncomp)
{
    return MF(fpc.ba_fine_patch, fpc.dm_patch, ncomp, 0);
}

template <typename MF,
          std::enable_if_t<!std::is_same_v<typename MF::FABType::value_type, FArrayBox>,int> = 0>
MF make_mf_fine_patch (FabArrayBase::FPinfo const& fpc, int ncomp, IndexType idx_type)
{
    return MF(amrex::convert(fpc.ba_fine_patch, idx_type), fpc.dm_patch, ncomp, 0);
}

template <typename MF,
          std::enable_if_t<!std::is_same_v<typename MF::FABType::value_type, FArrayBox>,int> = 0>
MF make_mf_refined_patch (FabArrayBase::FPinfo const& fpc, int ncomp, IndexType idx_type, IntVect ratio)
{
    // ...
}

template <typename MF,
          std::enable_if_t<!std::is_same_v<typename MF::FABType::value_type, FArrayBox>,int> = 0>
MF make_mf_crse_mask (FabArrayBase::FPinfo const& fpc, int ncomp, IndexType idx_type, IntVect ratio)
{
    // ...
}

template <typename MF,
          std::enable_if_t<!std::is_same_v<typename MF::FABType::value_type, FArrayBox>,int> = 0>
void mf_set_domain_bndry (MF&, Geometry const&)
{
    // ...
}
template <typename MF, typename BC, typename Interp,
          typename PreInterpHook, typename PostInterpHook>
std::enable_if_t<IsFabArray<MF>::value, int>
FillPatchTwoLevels_doit (MF& mf, IntVect const& nghost, Real time,
                         const Vector<MF*>& cmf, const Vector<Real>& ct,
                         const Vector<MF*>& fmf, const Vector<Real>& ft,
                         int scomp, int dcomp, int ncomp,
                         const Geometry& cgeom, const Geometry& fgeom,
                         BC& cbc, int cbccomp,
                         BC& fbc, int fbccomp,
                         const IntVect& ratio, Interp* mapper,
                         const Vector<BCRec>& bcs, int bcscomp,
                         const PreInterpHook& pre_interp,
                         const PostInterpHook& post_interp,
                         EB2::IndexSpace const* index_space,
                         bool return_error_code = false)
{
    // ...
    int success_code = return_error_code ? 0 : -1;
    int failure_code = 1;
    // ...

    if (nghost.max() > 0 || mf.getBDKey() != fmf[0]->getBDKey())
    {
        if ( AMREX_D_TERM(   mf.ixType().nodeCentered(0),
                           + mf.ixType().nodeCentered(1),
                           + mf.ixType().nodeCentered(2) ) == 1 )
        {
            // Face-centered data: interpolate onto a refined patch with a
            // solve mask.
            // ...
                amrex::Abort("This interpolater has not yet implemented a version for face-based data");
            // ...
                     mf.DistributionMap(), ncomp, nghost, MFInfo().SetAlloc(false) );
            // ...
            if (return_error_code) {
                // ... (may return failure_code here)
            }
            // ...
            MF mf_crse_patch = make_mf_crse_patch<MF> (fpc, ncomp, mf.boxArray().ixType());
            // ...
            MF mf_refined_patch = make_mf_refined_patch<MF> (fpc, ncomp, mf.boxArray().ixType(), ratio);
            auto solve_mask = make_mf_crse_mask<iMultiFab>(fpc, ncomp, mf.boxArray().ixType(), ratio);

            if constexpr (std::is_same_v<BC,PhysBCFunctUseCoarseGhost>) {
                cbc.fp1_src_ghost = cbc.cghost;
            }
            // ... (fill the coarse patch)
                                 cgeom, cbc, cbccomp);

            if constexpr (std::is_same_v<BC,PhysBCFunctUseCoarseGhost>) {
                fbc.fp1_src_ghost = IntVect(0);
            }
            // ... (fill the refined patch from the fine data)
                                 fgeom, fbc, fbccomp);

            // ...
                     ncomp, nghost, MFInfo().SetAlloc(false) );
            MF mf_solution(amrex::coarsen(mf_refined_patch.boxArray(), ratio),
                           mf_refined_patch.DistributionMap(),
                           ncomp, 0, MFInfo().SetAlloc(false) );
            // ...
            solve_mask.setVal(1);
            solve_mask.setVal(0, mask_cpc, 0, 1);
            // ...
            InterpFace(mapper, mf_crse_patch, 0, mf_refined_patch, 0, ncomp,
                       ratio, solve_mask, cgeom, fgeom, bcscomp, RunOn::Gpu, bcs);
            // ...
            bool aliasing = false;
            for (auto const& fmf_a : fmf) {
                aliasing = aliasing || (&mf == fmf_a);
            }
            if (aliasing) {
                mf.ParallelCopyToGhost(mf_refined_patch, 0, dcomp, ncomp, /* ... */);
            } else {
                mf.ParallelCopy(mf_refined_patch, 0, dcomp, ncomp, /* ... */);
            }
        }
        else
        {
            // Cell-centered (or nodal) data: fill a coarse patch and
            // interpolate it into a fine patch.
            // ...
            if (return_error_code) {
                BoxArray const& cba = cmf[0]->boxArray();
                // ... (may return failure_code here)
            }
            // ...
            MF mf_crse_patch = make_mf_crse_patch<MF>(fpc, ncomp);
            // ...
            if constexpr (std::is_same_v<BC,PhysBCFunctUseCoarseGhost>) {
                cbc.fp1_src_ghost = cbc.cghost;
            }
            // ... (fill the coarse patch)
            MF mf_fine_patch = make_mf_fine_patch<MF>(fpc, ncomp);
            // ...
            for (int i = 0; i < AMREX_SPACEDIM; ++i) {
                // ...
                    fdomain_g.grow(i, nghost[i]);
                // ...
                if constexpr (std::is_same_v<BC,PhysBCFunctUseCoarseGhost>) {
                    // ...
                    fdomain_g.grow(i, fbc.nghost_outside_domain[i]);
                }
            }
            // ... (FillPatchInterp from mf_crse_patch into mf_fine_patch)
                            ncomp, IntVect(0), cgeom, fgeom,
                            fdomain_g, ratio, mapper, bcs, bcscomp);
            // ...
            mf.ParallelCopy(mf_fine_patch, 0, dcomp, ncomp, IntVect{0}, nghost);
        }
    }

    if constexpr (std::is_same_v<BC, PhysBCFunctUseCoarseGhost>) {
        fbc.fp1_src_ghost = IntVect(0);
    }
    // ... (FillPatchSingleLevel with the fine-level data, then return success_code)
                         fgeom, fbc, fbccomp);
    // ...
}
// FillPatchTwoLevels_doit for face-centered data stored as
// Array<MF*,AMREX_SPACEDIM>; the overall structure mirrors the version above
// (leading parameters elided in this listing).
template <typename MF, typename BC, typename Interp,
          typename PreInterpHook, typename PostInterpHook>
std::enable_if_t<IsFabArray<MF>::value>
FillPatchTwoLevels_doit (/* ... */
                         int scomp, int dcomp, int ncomp,
                         /* ... */
                         const PreInterpHook& pre_interp,
                         const PostInterpHook& post_interp,
                         /* ... */)
{
    BL_PROFILE("FillPatchTwoLevels (Array<MF*>)");

    using FAB  = typename MF::FABType::value_type;
    using iFAB = typename iMultiFab::FABType::value_type;

    // ... (assert that each component of mf is face-centered in its own direction)
                 && mf[1]->ixType().nodeCentered(1),
                 && mf[2]->ixType().nodeCentered(2)));
    // ...

    if (nghost.max() > 0 || mf[0]->getBDKey() != fmf[0][0]->getBDKey())
    {
        // ...
        for (int d = 0; d < AMREX_SPACEDIM; ++d)
        {
            mf_crse_patch[d]    = make_mf_crse_patch<MF>   (fpc, ncomp, mf[d]->boxArray().ixType());
            mf_refined_patch[d] = make_mf_refined_patch<MF>(fpc, ncomp, mf[d]->boxArray().ixType(), ratio);
            solve_mask[d]       = make_mf_crse_mask<iMultiFab>(fpc, ncomp, mf[d]->boxArray().ixType(), ratio);

            // Fill the coarse patch from the coarse-level time levels ...
            for (const auto& mfab : cmf) { cmf_time.push_back(mfab[d]); }
            // ... (FillPatchSingleLevel)
                                 cgeom, cbc[d], cbccomp[d]);

            // ... and the refined patch from the fine-level time levels.
            for (const auto& mfab : fmf) { fmf_time.push_back(mfab[d]); }
            // ...
                                 fgeom, fbc[d], fbccomp[d]);

            // ...
                       ncomp, nghost, MFInfo().SetAlloc(false) );
            // ...
                       ncomp, 0, MFInfo().SetAlloc(false) );
            // ...
            solve_mask[d].setVal(1);
            solve_mask[d].setVal(0, mask_cpc, 0, 1);
        }

        // ...
#ifdef AMREX_USE_OMP
#pragma omp parallel if (cc && Gpu::notInLaunchRegion() )
#endif
        // ... (MFIter loop gathering the per-direction FABs)
            {
                // ...
                                  &(mf_crse_patch[1][mfi]),
                                  &(mf_crse_patch[2][mfi]) )};
                // ...
                                  &(mf_refined_patch[1][mfi]),
                                  &(mf_refined_patch[2][mfi]) )};
                // ...
                                  &(solve_mask[1][mfi]),
                                  &(solve_mask[2][mfi]) )};
                // ...
                for (int d = 0; d < AMREX_SPACEDIM; ++d)
                {
                    // ... (setBC fills bcr_d for direction d)
                           bcscomp[d],0,ncomp,bcs[d],bcr_d);
                    for (int n = 0; n < ncomp; ++n) { bcr[n][d] = bcr_d[n]; }
                }
                // ...
                pre_interp(sfab, sbx_cc, 0, ncomp);
                // ...
                mapper->interp_arr(sfab, 0, dfab, 0, ncomp, dbx_cc, ratio, mfab,
                                   cgeom, fgeom, bcr, idummy, idummy, RunOn::Gpu);
                // ...
                post_interp(dfab, dbx_cc, 0, ncomp);
            }

        for (int d = 0; d < AMREX_SPACEDIM; ++d)
        {
            bool aliasing = false;
            for (auto const& fmf_a : fmf) {
                aliasing = aliasing || (mf[d] == fmf_a[d]);
            }
            if (aliasing) {
                mf[d]->ParallelCopyToGhost(mf_refined_patch[d], 0, dcomp, ncomp, /* ... */);
            } else {
                mf[d]->ParallelCopy(mf_refined_patch[d], 0, dcomp, ncomp, /* ... */);
            }
        }
    }

    for (int d = 0; d < AMREX_SPACEDIM; ++d)
    {
        // Finally fill from the fine-level data and apply physical BCs.
        for (auto const& ffab : fmf) { fmf_time.push_back(ffab[d]); }
        // ... (FillPatchSingleLevel)
                             fgeom, fbc[d], fbccomp[d]);
    }
}
template <typename MF, typename BC, typename Interp,
          typename PreInterpHook, typename PostInterpHook>
std::enable_if_t<IsFabArray<MF>::value>
FillPatchTwoLevels (MF& mf, IntVect const& nghost, Real time,
                    const Vector<MF*>& cmf, const Vector<Real>& ct,
                    const Vector<MF*>& fmf, const Vector<Real>& ft,
                    int scomp, int dcomp, int ncomp,
                    const Geometry& cgeom, const Geometry& fgeom,
                    BC& cbc, int cbccomp, BC& fbc, int fbccomp,
                    const IntVect& ratio, Interp* mapper,
                    const Vector<BCRec>& bcs, int bcscomp,
                    const PreInterpHook& pre_interp,
                    const PostInterpHook& post_interp)
{
    // ... (picks up the EB index space if one is present)
    FillPatchTwoLevels_doit(mf,nghost,time,cmf,ct,fmf,ft,
                            scomp,dcomp,ncomp,cgeom,fgeom,
                            cbc,cbccomp,fbc,fbccomp,ratio,mapper,bcs,bcscomp,
                            pre_interp,post_interp,index_space);
}
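// Usage sketch (illustrative; crse_old/crse_new and fine_old/fine_new are
// assumed to be the coarse- and fine-level states bracketing `time`, cbc/fbc
// PhysBCFunct-style functors, and bcs one BCRec per component):
//
//     Vector<MultiFab*> cmf{&crse_old, &crse_new}; Vector<Real> ct{t_old, t_new};
//     Vector<MultiFab*> fmf{&fine_old, &fine_new}; Vector<Real> ft{t_old, t_new};
//     FillPatchTwoLevels(S_fine, IntVect(2), time, cmf, ct, fmf, ft,
//                        0, 0, S_fine.nComp(), cgeom, fgeom, cbc, 0, fbc, 0,
//                        IntVect(2), &cell_cons_interp, bcs, 0);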
// Another FillPatchTwoLevels overload (leading parameters elided in this
// listing); it forwards to FillPatchTwoLevels_doit with the same trailing
// arguments.
template <typename MF, typename BC, typename Interp,
          typename PreInterpHook, typename PostInterpHook>
std::enable_if_t<IsFabArray<MF>::value>
FillPatchTwoLevels (/* ... */
                    int scomp, int dcomp, int ncomp,
                    /* ... */
                    BC& cbc, int cbccomp, BC& fbc, int fbccomp,
                    /* ... */
                    const PreInterpHook& pre_interp,
                    const PostInterpHook& post_interp)
{
    // ...
    FillPatchTwoLevels_doit(/* ... */
                            scomp,dcomp,ncomp,cgeom,fgeom,
                            cbc,cbccomp,fbc,fbccomp,ratio,mapper,bcs,bcscomp,
                            pre_interp,post_interp,index_space);
}
// Another FillPatchTwoLevels overload (leading parameters elided in this
// listing); it also forwards to FillPatchTwoLevels_doit.
template <typename MF, typename BC, typename Interp,
          typename PreInterpHook, typename PostInterpHook>
std::enable_if_t<IsFabArray<MF>::value>
FillPatchTwoLevels (/* ... */
                    int scomp, int dcomp, int ncomp,
                    /* ... */
                    const PreInterpHook& pre_interp,
                    const PostInterpHook& post_interp)
{
    // ...
    FillPatchTwoLevels_doit(/* ... */
                            scomp,dcomp,ncomp,cgeom,fgeom,
                            cbc,cbccomp,fbc,fbccomp,ratio,mapper,bcs,bcscomp,
                            pre_interp,post_interp,index_space);
}
// FillPatchTwoLevels overload for face-centered Array<MF*,AMREX_SPACEDIM>
// data (leading parameters elided in this listing); it forwards to the
// Array version of FillPatchTwoLevels_doit.
template <typename MF, typename BC, typename Interp,
          typename PreInterpHook, typename PostInterpHook>
std::enable_if_t<IsFabArray<MF>::value>
FillPatchTwoLevels (/* ... */
                    int scomp, int dcomp, int ncomp,
                    /* ... */
                    const PreInterpHook& pre_interp,
                    const PostInterpHook& post_interp)
{
    // ...
    FillPatchTwoLevels_doit(/* ... */
                            scomp,dcomp,ncomp,cgeom,fgeom,
                            cbc,cbccomp_arr,fbc,fbccomp_arr,ratio,mapper,bcs,bcscomp_arr,
                            pre_interp,post_interp,index_space);
}
// Another face-centered FillPatchTwoLevels overload (leading parameters
// elided in this listing).
template <typename MF, typename BC, typename Interp,
          typename PreInterpHook, typename PostInterpHook>
std::enable_if_t<IsFabArray<MF>::value>
FillPatchTwoLevels (/* ... */
                    int scomp, int dcomp, int ncomp,
                    /* ... */
                    const PreInterpHook& pre_interp,
                    const PostInterpHook& post_interp)
{
    // ...
    FillPatchTwoLevels_doit(/* ... */
                            scomp,dcomp,ncomp,cgeom,fgeom,
                            cbc,cbccomp_arr,fbc,fbccomp_arr,ratio,mapper,bcs,bcscomp_arr,
                            pre_interp,post_interp,index_space);
}
// FillPatchTwoLevels overload taking an explicit EB2::IndexSpace.
template <typename MF, typename BC, typename Interp,
          typename PreInterpHook, typename PostInterpHook>
std::enable_if_t<IsFabArray<MF>::value>
FillPatchTwoLevels (/* ... */
                    const EB2::IndexSpace& index_space,
                    const Vector<MF*>& cmf, const Vector<Real>& ct,
                    const Vector<MF*>& fmf, const Vector<Real>& ft,
                    int scomp, int dcomp, int ncomp,
                    const Geometry& cgeom, const Geometry& fgeom,
                    BC& cbc, int cbccomp,
                    BC& fbc, int fbccomp,
                    /* ... */
                    const Vector<BCRec>& bcs, int bcscomp,
                    const PreInterpHook& pre_interp,
                    const PostInterpHook& post_interp)
{
    FillPatchTwoLevels_doit(/* ... */
                            scomp,dcomp,ncomp,cgeom,fgeom,
                            cbc,cbccomp,fbc,fbccomp,ratio,mapper,bcs,bcscomp,
                            pre_interp,post_interp,&index_space);
}
// Another FillPatchTwoLevels overload taking an explicit EB2::IndexSpace.
template <typename MF, typename BC, typename Interp,
          typename PreInterpHook, typename PostInterpHook>
std::enable_if_t<IsFabArray<MF>::value>
FillPatchTwoLevels (/* ... */
                    const EB2::IndexSpace& index_space,
                    const Vector<MF*>& cmf, const Vector<Real>& ct,
                    const Vector<MF*>& fmf, const Vector<Real>& ft,
                    int scomp, int dcomp, int ncomp,
                    const Geometry& cgeom, const Geometry& fgeom,
                    BC& cbc, int cbccomp,
                    BC& fbc, int fbccomp,
                    /* ... */
                    const Vector<BCRec>& bcs, int bcscomp,
                    const PreInterpHook& pre_interp,
                    const PostInterpHook& post_interp)
{
    FillPatchTwoLevels_doit(/* ... */
                            scomp,dcomp,ncomp,cgeom,fgeom,
                            cbc,cbccomp,fbc,fbccomp,ratio,mapper,bcs,bcscomp,
                            pre_interp,post_interp,&index_space);
}
template <typename MF, typename BC, typename Interp,
          typename PreInterpHook, typename PostInterpHook>
std::enable_if_t<IsFabArray<MF>::value>
InterpFromCoarseLevel (MF& mf, Real time,
                       const MF& cmf, int scomp, int dcomp, int ncomp,
                       const Geometry& cgeom, const Geometry& fgeom,
                       BC& cbc, int cbccomp,
                       BC& fbc, int fbccomp,
                       const IntVect& ratio, Interp* mapper,
                       const Vector<BCRec>& bcs, int bcscomp,
                       const PreInterpHook& pre_interp,
                       const PostInterpHook& post_interp)
{
    // ... (forwards to the nghost-taking overload below)
                          cbc,cbccomp,fbc,fbccomp,ratio,mapper,bcs,bcscomp,
                          pre_interp,post_interp);
}
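// Usage sketch (illustrative; S_crse is assumed to hold the coarse data at
// `time` and S_fine is the destination on a newly created fine level):
//
//     InterpFromCoarseLevel(S_fine, time, S_crse, 0, 0, S_fine.nComp(),
//                           cgeom, fgeom, cbc, 0, fbc, 0,
//                           IntVect(2), &cell_cons_interp, bcs, 0);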
// Another InterpFromCoarseLevel overload (leading parameters elided in this
// listing); it forwards to the main implementation below.
template <typename MF, typename BC, typename Interp,
          typename PreInterpHook, typename PostInterpHook>
std::enable_if_t<IsFabArray<MF>::value>
InterpFromCoarseLevel (/* ... */
                       const PreInterpHook& pre_interp,
                       const PostInterpHook& post_interp)
{
    InterpFromCoarseLevel(/* ... */
                          cbc,cbccomp,fbc,fbccomp,ratio,mapper,bcs,bcscomp,
                          pre_interp,post_interp);
}
template <typename MF, typename BC, typename Interp,
          typename PreInterpHook, typename PostInterpHook>
std::enable_if_t<IsFabArray<MF>::value>
InterpFromCoarseLevel (MF& mf, IntVect const& nghost, Real time,
                       const MF& cmf, int scomp, int dcomp, int ncomp,
                       const Geometry& cgeom, const Geometry& fgeom,
                       BC& cbc, int cbccomp,
                       BC& fbc, int fbccomp,
                       const IntVect& ratio, Interp* mapper,
                       const Vector<BCRec>& bcs, int bcscomp,
                       const PreInterpHook& pre_interp,
                       const PostInterpHook& post_interp)
{
    // ...
    using FAB = typename MF::FABType::value_type;
    // ...
    const BoxArray& ba = mf.boxArray();
    // ...

    // Grow the fine domain box by the requested ghost cells (and, for
    // PhysBCFunctUseCoarseGhost, by the ghost cells outside the domain).
    for (int i = 0; i < AMREX_SPACEDIM; ++i) {
        // ...
            fdomain_g.grow(i, nghost[i]);
        // ...
        if constexpr (std::is_same_v<BC, PhysBCFunctUseCoarseGhost>) {
            // ...
            fdomain_g.grow(i, fbc.nghost_outside_domain[i]);
        }
    }
    // ...

    IntVect send_ghost(0), recv_ghost(0);
    if constexpr (std::is_same_v<BC, PhysBCFunctUseCoarseGhost>) {
        mf_crse_patch.define(amrex::coarsen(ba,ratio), dm, ncomp, fbc.src_ghost);
        send_ghost = fbc.cghost;
        recv_ghost = fbc.src_ghost;
    } else {
        // Coarsen each destination box with the interpolater's coarsener.
        for (int i = 0, N = ba.size(); i < N; ++i)
        {
            // ...
            ba_crse_patch.set(i, coarsener.doit(bx));
        }
        // ...
            mf_crse_patch.define(ba_crse_patch, dm, ncomp, 0, MFInfo(), *factory);
        // ...
            mf_crse_patch.define(ba_crse_patch, dm, ncomp, 0);
        // ...
    }

    mf_crse_patch.ParallelCopy(cmf, scomp, 0, ncomp, send_ghost, recv_ghost, /* ... */);
    // ...
    cbc(mf_crse_patch, 0, ncomp, mf_crse_patch.nGrowVect(), time, cbccomp);
    // ...
    FillPatchInterp(mf, dcomp, mf_crse_patch, 0, ncomp, nghost, cgeom, fgeom, fdomain_g,
                    ratio, mapper, bcs, bcscomp);

#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
    // ... (MFIter loop applying the post-interpolation hook)
    {
        FAB& dfab = mf[mfi];
        Box dfab_bx = dfab.box();
        dfab_bx.grow(nghost-mf.nGrowVect());
        const Box& dbx = dfab_bx & fdomain_g;
        // ...
        post_interp(dfab, dbx, dcomp, ncomp);
    }

    fbc(mf, dcomp, ncomp, nghost, time, fbccomp);
}
// InterpFromCoarseLevel for face-centered data stored as
// Array<MF*,AMREX_SPACEDIM> (leading parameters elided in this listing).
template <typename MF, typename BC, typename Interp,
          typename PreInterpHook, typename PostInterpHook>
std::enable_if_t<IsFabArray<MF>::value>
InterpFromCoarseLevel (/* ... */
                       const PreInterpHook& pre_interp,
                       const PostInterpHook& post_interp)
{
    // ...
    using FAB  = typename MF::FABType::value_type;
    using iFAB = typename iMultiFab::FABType::value_type;
    // ...
    const BoxArray& ba = mf[0]->boxArray();
    // ... (assert that each component of mf is face-centered in its own direction)
                 && mf[1]->ixType().nodeCentered(1),
                 && mf[2]->ixType().nodeCentered(2)));
    // ...

    // Round the ghost width up to a multiple of the refinement ratio.
    for (int d = 0; d < AMREX_SPACEDIM; ++d) {
        if (nghost[d] % ratio[d] != 0) {
            nghost_adj[d] += ratio[d] - (nghost[d] % ratio[d]);
        }
    }
    // ...
    int dcomp_adj = dcomp;
    // If extra ghost cells were added, interpolate into temporaries first.
    if (! nghost.allGE(nghost_adj)) {
        for (int d = 0; d < AMREX_SPACEDIM; ++d) {
            mf_temp[d] = std::make_unique<MF>(mf[d]->boxArray(),
                                              /* ... */);
            mf_local[d] = mf_temp[d].get();
        }
        // ...
    }

    Box fdomain_g(fdomain);
    for (int d = 0; d < AMREX_SPACEDIM; ++d) {
        // ...
            fdomain_g.grow(d, nghost_adj[d]);
        // ...
    }

    // Coarsen the (possibly grown) fine boxes to make the coarse patch.
    for (int i = 0, N = ba.size(); i < N; ++i)
    {
        // ...
        ba_crse_patch.set(i, coarsener.doit(bx));
    }
    // ...

    for (int d = 0; d < AMREX_SPACEDIM; ++d)
    {
        // ...
            mf_crse_patch[d].define(ba_crse_idxed, dm, ncomp, 0, MFInfo(), *crse_factory);
        // ...
            mf_crse_patch[d].define(ba_crse_idxed, dm, ncomp, 0);
        // ...
        mf_crse_patch[d].ParallelCopy(*(cmf[d]), scomp, 0, ncomp, cgeom.periodicity());
        cbc[d](mf_crse_patch[d], 0, ncomp, mf_crse_patch[d].nGrowVect(), time, cbccomp);
    }

    int idummy1 = 0, idummy2 = 0;
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
    // ... (MFIter loop gathering the per-direction FABs)
        {
            // ...
                              &(mf_crse_patch[1][mfi]),
                              &(mf_crse_patch[2][mfi]) )};
            // ...
                              &(*mf_local[1])[mfi],
                              &(*mf_local[2])[mfi] )};
            // ...
            const Box& dbx_cc = dfab_cc & fdomain_g;
            // ...
            for (int d = 0; d < AMREX_SPACEDIM; ++d)
            {
                // ... (setBC fills bcr_d for direction d)
                       bcscomp,0,ncomp,bcs[d],bcr_d);
                for (int n = 0; n < ncomp; ++n) { bcr[n][d] = bcr_d[n]; }
            }
            // ...
            pre_interp(sfab, sbx_cc, 0, ncomp);
            // ...
            mapper->interp_arr(sfab, 0, dfab, 0, ncomp, dbx_cc, ratio, mfab,
                               cgeom, fgeom, bcr, idummy1, idummy2, RunOn::Gpu);
            // ...
            post_interp(dfab, dbx_cc, 0, ncomp);
        }

    for (int d = 0; d < AMREX_SPACEDIM; ++d)
    {
        // ...
        if (mf[d] != mf_local[d]) {
            amrex::Copy(*mf[d], *mf_local[d], 0, dcomp_adj, ncomp, nghost);
        }
        // ...
        fbc[d](*mf[d], dcomp, ncomp, nghost, time, fbccomp);
    }
}
// InterpFromCoarseLevel variant that takes the number of ghost cells to fill
// outside the domain; it builds a boundary functor erfbc and forwards it for
// both the coarse and fine BC slots (elided parameters marked below).
template <typename MF, typename Interp>
std::enable_if_t<IsFabArray<MF>::value>
InterpFromCoarseLevel (/* ... */
                       IntVect const& nghost_outside_domain,
                       const MF& cmf, int scomp, int dcomp, int ncomp,
                       /* ... */
                       const IntVect& ratio, Interp* mapper,
                       /* ... */)
{
    // ...
                          cgeom, fgeom, erfbc, 0, erfbc, 0, ratio, mapper,
    // ...
}
// FillPatchSingleLevel overload (most parameters and the body are elided in
// this listing).
template <typename MF>
std::enable_if_t<IsFabArray<MF>::value>
FillPatchSingleLevel (/* ... */
                      const Vector<Real>& stime, int scomp, int dcomp, int ncomp,
                      /* ... */)
{
    // ...
}
// FillPatchTwoLevels variant that takes the number of ghost cells to fill
// outside the domain; it forwards with the same boundary functor (erfbc) for
// both levels.
template <typename MF, typename Interp>
std::enable_if_t<IsFabArray<MF>::value>
FillPatchTwoLevels (/* ... */
                    IntVect const& nghost_outside_domain, Real time,
                    /* ... */
                    int scomp, int dcomp, int ncomp,
                    /* ... */
                    const IntVect& ratio, Interp* mapper,
                    /* ... */)
{
    // ...
    FillPatchTwoLevels(mf, nghost, time, cmf, ct, fmf, ft, scomp, dcomp, ncomp,
                       cgeom, fgeom, erfbc, 0, erfbc, 0, ratio, mapper,
                       /* ... */);
}
template <typename MF, typename BC, typename Interp>
std::enable_if_t<IsFabArray<MF>::value>
FillPatchNLevels (MF& mf, int level, const IntVect& nghost, Real time,
                  const Vector<Vector<MF*>>& smf, const Vector<Vector<Real>>& st,
                  int scomp, int dcomp, int ncomp,
                  const Vector<Geometry>& geom, Vector<BC>& bc, int bccomp,
                  const Vector<IntVect>& ratio, Interp* mapper,
                  const Vector<BCRec>& bcr, int bcrcomp)
{
    // ...

    // Compute the coarse-level layout needed to fill mf, including boxes
    // shifted across periodic boundaries.
    auto get_clayout = [&] () -> std::tuple<BoxArray,BoxArray,DistributionMapping>
    {
        // ...
        BoxArray const& ba = mf.boxArray();
        auto const& typ = ba.ixType();
        std::map<int,Vector<Box>> extra_boxes_map;
        // ...
        for (int i = 0, N = int(ba.size()); i < N; ++i) {
            Box const& cbox = mapper->CoarseBox(amrex::grow(ba[i],nghost), ratio[level-1]);
            // ...
            Box gdomain = geom[level-1].growNonPeriodicDomain(cbox.length());
            // ...
            auto& extra_boxes = extra_boxes_map[i];
            auto const& pshift = geom[level-1].periodicity().shiftIntVect();
            for (auto const& piv : pshift) {
                // ...
                extra_boxes.push_back(ibox);
                // ...
            }
        }
        // ...
        if (!extra_boxes_map.empty()) {
            // ...
            auto& lbox = cbl2.data();
            // ...
            for (auto const& [i, vb] : extra_boxes_map) {
                // ...
                for (int j = 1, nj = int(vb.size()); j < nj; ++j) {
                    lbox.push_back(vb[j]);
                    procmap2.push_back(dm[i]);
                }
            }
            // ...
        }
        // ...
    };

    // ... (sanity checks)
                        level < int(bc.size()) &&
                        level < int(ratio.size()+1));

    // ...
    } else if (level >= int(smf.size()))
    {
        // No data at this level: fill everything by interpolation from level-1.
        auto const& [ba1, ba2, dm2] = get_clayout();
        // ...
                           mf.DistributionMap(), {0,0,0},
        // ...
            cmf1.define(ba1, mf.DistributionMap(), ncomp, 0, MFInfo(), *factory);
        // ...
            cmf2.define(ba2, dm2, ncomp, 0, MFInfo(), *factory2);
        // ...
            cmf1.define(ba1, mf.DistributionMap(), ncomp, 0);
        // ...
            cmf2.define(ba2, dm2, ncomp, 0);
        // ...
        MF* p_mf_inside = (ba2.empty()) ? &cmf1 : &cmf2;
        // ... (recursive FillPatchNLevels into the coarse patch)
                         geom, bc, bccomp, ratio, mapper, bcr, bcrcomp);
        if (&cmf1 != p_mf_inside) {
            cmf1.ParallelCopy(*p_mf_inside, geom[level-1].periodicity());
        }
        // ...
        Box domain_g = geom[level].growPeriodicDomain(nghost);
        domain_g.convert(mf.ixType());
        FillPatchInterp(mf, dcomp, cmf1, 0, ncomp, nghost, geom[level-1], geom[level],
                        domain_g, ratio[level-1], mapper, bcr, bcrcomp);
    }
    else
    {
        // Try the ordinary two-level fill first; fall back to building a
        // coarse patch recursively if that fails.
        // ...
        int error_code = FillPatchTwoLevels_doit(mf, nghost, time,
                                                 smf[level-1], st[level-1],
                                                 smf[level  ], st[level  ],
                                                 scomp, dcomp, ncomp,
                                                 geom[level-1], geom[level],
                                                 bc[level-1], bccomp,
                                                 bc[level  ], bccomp,
                                                 ratio[level-1], mapper, bcr, bcrcomp,
                                                 hook, hook, index_space, true);
        if (error_code == 0) { return; }

        auto const& [ba1, ba2, dm2] = get_clayout();
        // ...
                           mf.DistributionMap(), {0,0,0},
        // ...
            cmf_tmp.define(ba1, mf.DistributionMap(), ncomp, 0, MFInfo(), *factory);
        // ...
            cmf_tmp.define(ba2, dm2, ncomp, 0, MFInfo(), *factory);
        // ...
            cmf_tmp.define(ba1, mf.DistributionMap(), ncomp, 0);
        // ...
            cmf_tmp.define(ba2, dm2, ncomp, 0);
        // ... (recursive FillPatchNLevels into cmf_tmp)
                         geom, bc, bccomp, ratio, mapper, bcr, bcrcomp);
        // ...
        for (auto const* p : fmf) {
            // ...
        }
        // ... (two-level fill using cmf_tmp as the coarse data)
                           geom[level-1], geom[level],
                           bc[level-1], bccomp,
                           /* ... */
                           ratio[level-1], mapper, bcr, bccomp,
                           hook, hook, index_space);
    }
}
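// Usage sketch (illustrative; the Vectors are assumed to hold one entry per
// AMR level, with smf[lev]/st[lev] being that level's available time levels,
// geoms/ref_ratios the per-level Geometry and refinement ratios, and
// bc_functs/bcrecs the boundary functors and BCRec data):
//
//     FillPatchNLevels(S_patch, lev, IntVect(2), time, smf, st,
//                      0, 0, S_patch.nComp(), geoms, bc_functs, 0,
//                      ref_ratios, &cell_cons_interp, bcrecs, 0);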