Block-Structured AMR Software Framework
Loading...
Searching...
No Matches
AMReX_FillPatcher.H
Go to the documentation of this file.
1#ifndef AMREX_FILLPATCHER_H_
2#define AMREX_FILLPATCHER_H_
#include <AMReX_Config.H>

#include <AMReX_FillPatchUtil.H>
#include <AMReX_GpuDevice.H>

#include <utility>

13namespace amrex {
14
76template <class MF = MultiFab>
78{
79public:
80
95 FillPatcher (BoxArray const& fba, DistributionMapping const& fdm,
96 Geometry const& fgeom,
97 BoxArray const& cba, DistributionMapping const& cdm, // NOLINT
98 Geometry const& cgeom,
99 IntVect const& nghost, int ncomp, InterpBase* interp,
100#ifdef AMREX_USE_EB
101 EB2::IndexSpace const* eb_index_space = EB2::TopIndexSpaceIfPresent());
102#else
103 EB2::IndexSpace const* eb_index_space = nullptr);
104#endif
105
129 template <typename BC,
130 typename PreInterpHook=NullInterpHook<MF>,
131 typename PostInterpHook=NullInterpHook<MF> >
132 void fill (MF& mf, IntVect const& nghost, Real time,
133 Vector<MF*> const& cmf, Vector<Real> const& ct,
134 Vector<MF*> const& fmf, Vector<Real> const& ft,
135 int scomp, int dcomp, int ncomp,
136 BC& cbc, int cbccomp, BC& fbc, int fbccomp,
137 Vector<BCRec> const& bcs, int bcscomp,
138 PreInterpHook const& pre_interp = {},
139 PostInterpHook const& post_interp = {});
140
160 template <typename BC,
161 typename PreInterpHook=NullInterpHook<MF>,
162 typename PostInterpHook=NullInterpHook<MF> >
163 void fillCoarseFineBoundary (MF& mf, IntVect const& nghost, Real time,
164 Vector<MF*> const& cmf,
165 Vector<Real> const& ct,
166 int scomp, int dcomp, int ncomp,
167 BC& cbc, int cbccomp,
168 Vector<BCRec> const& bcs, int bcscomp,
169 PreInterpHook const& pre_interp = {},
170 PostInterpHook const& post_interp = {});
171
181 template <std::size_t order>
182 void storeRKCoarseData (Real time, Real dt, MF const& S_old,
183 Array<MF,order> const& RK_k);
184
201 template <typename BC>
202 void fillRK (int stage, int iteration, int ncycle, MF& mf, Real time,
203 BC& cbc, BC& fbc, Vector<BCRec> const& bcs);
204
205private:
206
207 BoxArray m_fba;
208 BoxArray m_cba;
211 Geometry m_fgeom;
212 Geometry m_cgeom;
213 IntVect m_nghost;
214 int m_ncomp;
215 InterpBase* m_interp;
216 EB2::IndexSpace const* m_eb_index_space = nullptr;
217 MF m_sfine;
218 IntVect m_ratio;
220 std::unique_ptr<MF> m_cf_crse_data_tmp;
221 std::unique_ptr<MF> m_cf_fine_data;
222 Real m_dt_coarse = std::numeric_limits<Real>::lowest();
223
224 FabArrayBase::FPinfo const& getFPinfo ();
225};
226
227template <class MF>
229 Geometry const& fgeom,
230 BoxArray const& cba, DistributionMapping const& cdm, // NOLINT
231 Geometry const& cgeom,
232 IntVect const& nghost, int ncomp, InterpBase* interp,
233 EB2::IndexSpace const* eb_index_space)
234 : m_fba(fba),
235 m_cba(cba),
236 m_fdm(fdm),
237 m_cdm(cdm),
238 m_fgeom(fgeom),
239 m_cgeom(cgeom),
240 m_nghost(nghost),
241 m_ncomp(ncomp),
242 m_interp(interp),
243 m_eb_index_space(eb_index_space),
244 m_sfine(fba, fdm, 1, nghost, MFInfo().SetAlloc(false))
245{
246 static_assert(IsFabArray<MF>::value,
247 "FillPatcher<MF>: MF must be FabArray type");
249
250 for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
251 m_ratio[idim] = m_fgeom.Domain().length(idim) / m_cgeom.Domain().length(idim);
252 }
253 AMREX_ASSERT(m_fgeom.Domain() == amrex::refine(m_cgeom.Domain(),m_ratio));
254}
255
256template <class MF>
257template <typename BC, typename PreInterpHook, typename PostInterpHook>
258void
259FillPatcher<MF>::fill (MF& mf, IntVect const& nghost, Real time,
260 Vector<MF*> const& cmf, Vector<Real> const& ct,
261 Vector<MF*> const& fmf, Vector<Real> const& ft,
262 int scomp, int dcomp, int ncomp,
263 BC& cbc, int cbccomp,
264 BC& fbc, int fbccomp,
265 Vector<BCRec> const& bcs, int bcscomp,
266 PreInterpHook const& pre_interp,
267 PostInterpHook const& post_interp)
268{
269 BL_PROFILE("FillPatcher::fill()");
270
271 AMREX_ALWAYS_ASSERT(!cmf.empty() && cmf.size() == ct.size() &&
272 !fmf.empty() && fmf.size() == ft.size() &&
273 m_fba == fmf[0]->boxArray() && m_fdm == fmf[0]->DistributionMap());
274
275 fillCoarseFineBoundary(mf, nghost, time, cmf, ct, scomp, dcomp, ncomp,
276 cbc, cbccomp, bcs, bcscomp, pre_interp, post_interp);
277
278 FillPatchSingleLevel(mf, nghost, time, fmf, ft, scomp, dcomp, ncomp,
279 m_fgeom, fbc, fbccomp);
280}
281
282template <class MF>
285{
286 const InterpolaterBoxCoarsener& coarsener = m_interp->BoxCoarsener(m_ratio);
287 return FabArrayBase::TheFPinfo(m_sfine, m_sfine, m_nghost, coarsener,
288 m_fgeom, m_cgeom, m_eb_index_space);
289}
290
291template <class MF>
292template <typename BC, typename PreInterpHook, typename PostInterpHook>
293void
295 Vector<MF*> const& cmf,
296 Vector<Real> const& ct,
297 int scomp, int dcomp, int ncomp,
298 BC& cbc, int cbccomp,
299 Vector<BCRec> const& bcs, int bcscomp,
300 PreInterpHook const& pre_interp,
301 PostInterpHook const& post_interp)
302{
303 BL_PROFILE("FillPatcher::fillCFB");
304
305 AMREX_ALWAYS_ASSERT(!cmf.empty() && cmf.size() == ct.size() &&
306 nghost.allLE(m_nghost) &&
307 m_fba == mf.boxArray() &&
308 m_fdm == mf.DistributionMap() &&
309 m_cba == cmf[0]->boxArray() &&
310 m_cdm == cmf[0]->DistributionMap() &&
311 m_ncomp >= ncomp &&
312 m_ncomp == cmf[0]->nComp());
313
314 auto const& fpc = getFPinfo();
315
316 if ( ! fpc.ba_crse_patch.empty())
317 {
318 if (m_cf_fine_data == nullptr) {
319 m_cf_fine_data = std::make_unique<MF>
320 (detail::make_mf_fine_patch<MF>(fpc, m_ncomp));
321 }
322
323 int ncmfs = cmf.size();
324 for (int icmf = 0; icmf < ncmfs; ++icmf) {
325 Real t = ct[icmf];
326 auto it = std::find_if(m_cf_crse_data.begin(), m_cf_crse_data.end(),
327 [=] (auto const& x) {
328 return amrex::almostEqual(x.first,t,5);
329 });
330
331 if (it == std::end(m_cf_crse_data)) {
332 MF mf_crse_patch = detail::make_mf_crse_patch<MF>(fpc, m_ncomp);
333 mf_crse_patch.ParallelCopy(*cmf[icmf], m_cgeom.periodicity());
334
335 std::pair<Real,std::unique_ptr<MF>> tmp;
336 tmp.first = t;
337 tmp.second = std::make_unique<MF>(std::move(mf_crse_patch));
338 m_cf_crse_data.push_back(std::move(tmp));
339 }
340 }
341
342 if (m_cf_crse_data_tmp == nullptr) {
343 m_cf_crse_data_tmp = std::make_unique<MF>
344 (detail::make_mf_crse_patch<MF>(fpc, m_ncomp));
345 }
346
347 int const ng_space_interp = 8; // Need to be big enough
348 Box domain = m_cgeom.growPeriodicDomain(ng_space_interp);
349 domain.convert(mf.ixType());
350
351 int idata = -1;
352 if (m_cf_crse_data.size() == 1) {
353 idata = 0;
354 } else if (m_cf_crse_data.size() == 2) {
355 Real const teps = std::abs(m_cf_crse_data[1].first -
356 m_cf_crse_data[0].first) * 1.e-3_rt;
357 if (time > m_cf_crse_data[0].first - teps &&
358 time < m_cf_crse_data[0].first + teps) {
359 idata = 0;
360 } else if (time > m_cf_crse_data[1].first - teps &&
361 time < m_cf_crse_data[1].first + teps) {
362 idata = 1;
363 } else {
364 idata = 2;
365 }
366 }
367
368 if (idata == 0 || idata == 1) {
369 auto const& dst = m_cf_crse_data_tmp->arrays();
370 auto const& src = m_cf_crse_data[idata].second->const_arrays();
371 amrex::ParallelFor(*m_cf_crse_data_tmp, IntVect(0), ncomp,
372 [=] AMREX_GPU_DEVICE (int bi, int i, int j, int k, int n) noexcept
373 {
374 if (domain.contains(i,j,k)) {
375 dst[bi](i,j,k,n) = src[bi](i,j,k,n+scomp);
376 }
377 });
378 } else if (idata == 2) {
379 Real t0 = m_cf_crse_data[0].first;
380 Real t1 = m_cf_crse_data[1].first;
381 Real alpha = (t1-time)/(t1-t0);
382 Real beta = (time-t0)/(t1-t0);
383 auto const& a = m_cf_crse_data_tmp->arrays();
384 auto const& a0 = m_cf_crse_data[0].second->const_arrays();
385 auto const& a1 = m_cf_crse_data[1].second->const_arrays();
386 amrex::ParallelFor(*m_cf_crse_data_tmp, IntVect(0), ncomp,
387 [=] AMREX_GPU_DEVICE (int bi, int i, int j, int k, int n) noexcept
388 {
389 if (domain.contains(i,j,k)) {
390 a[bi](i,j,k,n)
391 = alpha*a0[bi](i,j,k,scomp+n)
392 + beta*a1[bi](i,j,k,scomp+n);
393 }
394 });
395 }
396 else
397 {
398 amrex::Abort("FillPatcher: High order interpolation in time not supported. Or FillPatcher was not properly deleted.");
399 }
400 // Sync required: user callback cbc may read fab on host (e.g. if pinned)
402
403 cbc(*m_cf_crse_data_tmp, 0, ncomp, m_cf_crse_data_tmp->nGrowVect(), time, cbccomp);
404
405 detail::call_interp_hook(pre_interp, *m_cf_crse_data_tmp, 0, ncomp);
406
407 FillPatchInterp(*m_cf_fine_data, scomp, *m_cf_crse_data_tmp, 0,
408 ncomp, IntVect(0), m_cgeom, m_fgeom,
409 amrex::grow(amrex::convert(m_fgeom.Domain(),
410 mf.ixType()),nghost),
411 m_ratio, m_interp, bcs, bcscomp);
412
413 detail::call_interp_hook(post_interp, *m_cf_fine_data, scomp, ncomp);
414
415 mf.ParallelCopy(*m_cf_fine_data, scomp, dcomp, ncomp, IntVect{0}, nghost);
416 }
417}
418
419template <typename MF>
420template <std::size_t order>
421void FillPatcher<MF>::storeRKCoarseData (Real /*time*/, Real dt, MF const& S_old,
422 Array<MF,order> const& RK_k)
423{
424 BL_PROFILE("FillPatcher::storeRKCoarseData()");
425 m_dt_coarse = dt;
426 m_cf_crse_data.resize(order+1);
427
428 auto const& fpc = getFPinfo();
429
430 for (auto& tmf : m_cf_crse_data) {
431 tmf.first = std::numeric_limits<Real>::lowest(); // because we don't need it
432 tmf.second = std::make_unique<MF>(detail::make_mf_crse_patch<MF>(fpc, m_ncomp));
433 }
434 m_cf_crse_data[0].second->ParallelCopy(S_old, m_cgeom.periodicity());
435 for (std::size_t i = 0; i < order; ++i) {
436 m_cf_crse_data[i+1].second->ParallelCopy(RK_k[i], m_cgeom.periodicity());
437 }
438}
439
440template <typename MF>
441template <typename BC>
442void FillPatcher<MF>::fillRK (int stage, int iteration, int ncycle,
443 MF& mf, Real time, BC& cbc, BC& fbc,
444 Vector<BCRec> const& bcs)
445{
446 BL_PROFILE("FillPatcher::fillRK()");
447 int rk_order = m_cf_crse_data.size()-1;
448 if (rk_order != 3 && rk_order != 4) {
449 amrex::Abort("FillPatcher: unsupported RK order "+std::to_string(rk_order));
450 return;
451 }
452 AMREX_ASSERT(stage > 0 && stage <= rk_order);
453
454 auto const& fpc = getFPinfo();
455 if (m_cf_crse_data_tmp == nullptr) {
456 m_cf_crse_data_tmp = std::make_unique<MF>
457 (detail::make_mf_crse_patch<MF>(fpc, m_ncomp));
458 }
459
460 auto const& u = m_cf_crse_data_tmp->arrays();
461 auto const& u0 = m_cf_crse_data[0].second->const_arrays();
462 auto const& k1 = m_cf_crse_data[1].second->const_arrays();
463 auto const& k2 = m_cf_crse_data[2].second->const_arrays();
464 auto const& k3 = m_cf_crse_data[3].second->const_arrays();
465
466 Real dtc = m_dt_coarse;
467 Real r = Real(1) / Real(ncycle);
468 Real xsi = Real(iteration-1) / Real(ncycle);
469
470 int const ng_space_interp = 8; // Need to be big enough
471 Box cdomain = m_cgeom.growPeriodicDomain(ng_space_interp);
472 cdomain.convert(m_cf_crse_data_tmp->ixType());
473
474 if (rk_order == 3) {
475 // coefficients for U
476 Real b1 = xsi - Real(5./6.)*xsi*xsi;
477 Real b2 = Real(1./6.)*xsi*xsi;
478 Real b3 = Real(2./3)*xsi*xsi;
479 // coefficients for Ut
480 Real c1 = Real(1.) - Real(5./3.)*xsi;
481 Real c2 = Real(1./3.)*xsi;
482 Real c3 = Real(4./3.)*xsi;
483 // coefficients for Utt
484 constexpr Real d1 = Real(-5./3.);
485 constexpr Real d2 = Real(1./3.);
486 constexpr Real d3 = Real(4./3.);
487 if (stage == 1) {
488 amrex::ParallelFor(*m_cf_crse_data_tmp, IntVect(0), m_ncomp,
489 [=] AMREX_GPU_DEVICE (int bi, int i, int j, int k, int n) noexcept
490 {
491 if (cdomain.contains(i,j,k)) {
492 Real kk1 = k1[bi](i,j,k,n);
493 Real kk2 = k2[bi](i,j,k,n);
494 Real kk3 = k3[bi](i,j,k,n);
495 Real uu = b1*kk1 + b2*kk2 + b3*kk3;
496 u[bi](i,j,k,n) = u0[bi](i,j,k,n) + dtc*uu;
497 }
498 });
499 } else if (stage == 2) {
500 amrex::ParallelFor(*m_cf_crse_data_tmp, IntVect(0), m_ncomp,
501 [=] AMREX_GPU_DEVICE (int bi, int i, int j, int k, int n) noexcept
502 {
503 if (cdomain.contains(i,j,k)) {
504 Real kk1 = k1[bi](i,j,k,n);
505 Real kk2 = k2[bi](i,j,k,n);
506 Real kk3 = k3[bi](i,j,k,n);
507 Real uu = b1*kk1 + b2*kk2 + b3*kk3;
508 Real ut = c1*kk1 + c2*kk2 + c3*kk3;
509 u[bi](i,j,k,n) = u0[bi](i,j,k,n) + dtc*(uu + r*ut);
510 }
511 });
512 } else if (stage == 3) {
513 amrex::ParallelFor(*m_cf_crse_data_tmp, IntVect(0), m_ncomp,
514 [=] AMREX_GPU_DEVICE (int bi, int i, int j, int k, int n) noexcept
515 {
516 if (cdomain.contains(i,j,k)) {
517 Real kk1 = k1[bi](i,j,k,n);
518 Real kk2 = k2[bi](i,j,k,n);
519 Real kk3 = k3[bi](i,j,k,n);
520 Real uu = b1*kk1 + b2*kk2 + b3*kk3;
521 Real ut = c1*kk1 + c2*kk2 + c3*kk3;
522 Real utt = d1*kk1 + d2*kk2 + d3*kk3;
523 u[bi](i,j,k,n) = u0[bi](i,j,k,n) + dtc*
524 (uu + Real(0.5)*r*ut + Real(0.25)*r*r*utt);
525 }
526 });
527 }
528 } else if (rk_order == 4) {
529 auto const& k4 = m_cf_crse_data[4].second->const_arrays();
530 Real xsi2 = xsi*xsi;
531 Real xsi3 = xsi2*xsi;
532 // coefficients for U
533 Real b1 = xsi - Real(1.5)*xsi2 + Real(2./3.)*xsi3;
534 Real b2 = xsi2 - Real(2./3.)*xsi3;
535 Real b3 = b2;
536 Real b4 = Real(-0.5)*xsi2 + Real(2./3.)*xsi3;
537 // coefficients for Ut
538 Real c1 = Real(1.) - Real(3.)*xsi + Real(2.)*xsi2;
539 Real c2 = Real(2.)*xsi - Real(2.)*xsi2;
540 Real c3 = c2;
541 Real c4 = -xsi + Real(2.)*xsi2;
542 // coefficients for Utt
543 Real d1 = Real(-3.) + Real(4.)*xsi;
544 Real d2 = Real( 2.) - Real(4.)*xsi;
545 Real d3 = d2;
546 Real d4 = Real(-1.) + Real(4.)*xsi;
547 // coefficients for Uttt
548 constexpr Real e1 = Real( 4.);
549 constexpr Real e2 = Real(-4.);
550 constexpr Real e3 = Real(-4.);
551 constexpr Real e4 = Real( 4.);
552 if (stage == 1) {
553 amrex::ParallelFor(*m_cf_crse_data_tmp, IntVect(0), m_ncomp,
554 [=] AMREX_GPU_DEVICE (int bi, int i, int j, int k, int n) noexcept
555 {
556 if (cdomain.contains(i,j,k)) {
557 Real kk1 = k1[bi](i,j,k,n);
558 Real kk2 = k2[bi](i,j,k,n);
559 Real kk3 = k3[bi](i,j,k,n);
560 Real kk4 = k4[bi](i,j,k,n);
561 Real uu = b1*kk1 + b2*kk2 + b3*kk3 + b4*kk4;
562 u[bi](i,j,k,n) = u0[bi](i,j,k,n) + dtc*uu;
563 }
564 });
565 } else if (stage == 2) {
566 amrex::ParallelFor(*m_cf_crse_data_tmp, IntVect(0), m_ncomp,
567 [=] AMREX_GPU_DEVICE (int bi, int i, int j, int k, int n) noexcept
568 {
569 if (cdomain.contains(i,j,k)) {
570 Real kk1 = k1[bi](i,j,k,n);
571 Real kk2 = k2[bi](i,j,k,n);
572 Real kk3 = k3[bi](i,j,k,n);
573 Real kk4 = k4[bi](i,j,k,n);
574 Real uu = b1*kk1 + b2*kk2 + b3*kk3 + b4*kk4;
575 Real ut = c1*kk1 + c2*kk2 + c3*kk3 + c4*kk4;
576 u[bi](i,j,k,n) = u0[bi](i,j,k,n) + dtc*(uu + Real(0.5)*r*ut);
577 }
578 });
579 } else if (stage == 3 || stage == 4) {
580 Real r2 = r*r;
581 Real r3 = r2*r;
582 Real at = (stage == 3) ? Real(0.5)*r : r;
583 Real att = (stage == 3) ? Real(0.25)*r2 : Real(0.5)*r2;
584 Real attt = (stage == 3) ? Real(0.0625)*r3 : Real(0.125)*r3;
585 Real akk = (stage == 3) ? Real(-4.) : Real(4.);
586 amrex::ParallelFor(*m_cf_crse_data_tmp, IntVect(0), m_ncomp,
587 [=] AMREX_GPU_DEVICE (int bi, int i, int j, int k, int n) noexcept
588 {
589 if (cdomain.contains(i,j,k)) {
590 Real kk1 = k1[bi](i,j,k,n);
591 Real kk2 = k2[bi](i,j,k,n);
592 Real kk3 = k3[bi](i,j,k,n);
593 Real kk4 = k4[bi](i,j,k,n);
594 Real uu = b1*kk1 + b2*kk2 + b3*kk3 + b4*kk4;
595 Real ut = c1*kk1 + c2*kk2 + c3*kk3 + c4*kk4;
596 Real utt = d1*kk1 + d2*kk2 + d3*kk3 + d4*kk4;
597 Real uttt = e1*kk1 + e2*kk2 + e3*kk3 + e4*kk4;
598 u[bi](i,j,k,n) = u0[bi](i,j,k,n) + dtc *
599 (uu + at*ut + att*utt + attt*(uttt+akk*(kk3-kk2)));
600 }
601 });
602 }
603 }
604 // Sync required: user callback cbc may read fab on host (e.g. if pinned)
606
607 cbc(*m_cf_crse_data_tmp, 0, m_ncomp, m_cf_crse_data_tmp->nGrowVect(), time, 0);
608
609 if (m_cf_fine_data == nullptr) {
610 m_cf_fine_data = std::make_unique<MF>(detail::make_mf_fine_patch<MF>(fpc, m_ncomp));
611 }
612
613 FillPatchInterp(*m_cf_fine_data, 0, *m_cf_crse_data_tmp, 0,
614 m_ncomp, IntVect(0), m_cgeom, m_fgeom,
615 amrex::grow(amrex::convert(m_fgeom.Domain(),
616 mf.ixType()),m_nghost),
617 m_ratio, m_interp, bcs, 0);
618
619 // xxxxx We can optimize away this ParallelCopy by making a special fpinfo.
620 mf.ParallelCopy(*m_cf_fine_data, 0, 0, m_ncomp, IntVect(0), m_nghost);
621
622 mf.FillBoundary(m_fgeom.periodicity());
623 fbc(mf, 0, m_ncomp, m_nghost, time, 0);
624}
625
626}
627
628#endif
#define BL_PROFILE(a)
Definition AMReX_BLProfiler.H:551
#define AMREX_ASSERT(EX)
Definition AMReX_BLassert.H:38
#define AMREX_ALWAYS_ASSERT(EX)
Definition AMReX_BLassert.H:50
High-level FillPatch helpers for AMR coarse-to-fine synchronization.
#define AMREX_GPU_DEVICE
Definition AMReX_GpuQualifiers.H:18
A collection of Boxes stored in an Array.
Definition AMReX_BoxArray.H:568
IndexType ixType() const noexcept
Return index type of this BoxArray.
Definition AMReX_BoxArray.H:858
__host__ __device__ IntVectND< dim > length() const noexcept
Return the length of the BoxND.
Definition AMReX_Box.H:154
__host__ __device__ BoxND & convert(IndexTypeND< dim > typ) noexcept
Convert the BoxND from the current type into the argument type. This may change the BoxND coordinates...
Definition AMReX_Box.H:974
__host__ __device__ bool contains(const IntVectND< dim > &p) const noexcept
Return true if argument is contained within BoxND.
Definition AMReX_Box.H:212
Calculates the distribution of FABs to MPI processes.
Definition AMReX_DistributionMapping.H:43
Definition AMReX_EB2.H:28
static const FPinfo & TheFPinfo(const FabArrayBase &srcfa, const FabArrayBase &dstfa, const IntVect &dstng, const BoxConverter &coarsener, const Geometry &fgeom, const Geometry &cgeom, const EB2::IndexSpace *)
Definition AMReX_FabArrayBase.cpp:2065
FillPatcher is for filling a fine level MultiFab/FabArray.
Definition AMReX_FillPatcher.H:78
void fillCoarseFineBoundary(MF &mf, IntVect const &nghost, Real time, Vector< MF * > const &cmf, Vector< Real > const &ct, int scomp, int dcomp, int ncomp, BC &cbc, int cbccomp, Vector< BCRec > const &bcs, int bcscomp, PreInterpHook const &pre_interp={}, PostInterpHook const &post_interp={})
Function to fill data at coarse/fine boundary only.
Definition AMReX_FillPatcher.H:294
void fill(MF &mf, IntVect const &nghost, Real time, Vector< MF * > const &cmf, Vector< Real > const &ct, Vector< MF * > const &fmf, Vector< Real > const &ft, int scomp, int dcomp, int ncomp, BC &cbc, int cbccomp, BC &fbc, int fbccomp, Vector< BCRec > const &bcs, int bcscomp, PreInterpHook const &pre_interp={}, PostInterpHook const &post_interp={})
Function to fill data.
Definition AMReX_FillPatcher.H:259
void fillRK(int stage, int iteration, int ncycle, MF &mf, Real time, BC &cbc, BC &fbc, Vector< BCRec > const &bcs)
Fill ghost cells of fine AMR level for RK3 and RK4.
Definition AMReX_FillPatcher.H:442
void storeRKCoarseData(Real time, Real dt, MF const &S_old, Array< MF, order > const &RK_k)
Store coarse AMR level data for RK3 and RK4.
Definition AMReX_FillPatcher.H:421
FillPatcher(BoxArray const &fba, DistributionMapping const &fdm, Geometry const &fgeom, BoxArray const &cba, DistributionMapping const &cdm, Geometry const &cgeom, IntVect const &nghost, int ncomp, InterpBase *interp, EB2::IndexSpace const *eb_index_space=EB2::TopIndexSpaceIfPresent())
Constructor of FillPatcher.
Definition AMReX_FillPatcher.H:228
Rectangular problem domain geometry.
Definition AMReX_Geometry.H:74
const Box & Domain() const noexcept
Returns our rectangular domain.
Definition AMReX_Geometry.H:211
__host__ __device__ bool cellCentered() const noexcept
True if the IndexTypeND is CELL based in all directions.
Definition AMReX_IndexType.H:104
__host__ __device__ bool nodeCentered() const noexcept
True if the IndexTypeND is NODE based in all directions.
Definition AMReX_IndexType.H:110
__host__ __device__ constexpr bool allLE(const IntVectND< dim > &rhs) const noexcept
Returns true if this is less than or equal to argument for all components. NOTE: This is NOT a strict...
Definition AMReX_IntVect.H:400
Definition AMReX_InterpBase.H:34
Definition AMReX_InterpBase.H:20
This class is a thin wrapper around std::vector. Unlike vector, Vector::operator[] provides bound che...
Definition AMReX_Vector.H:28
Long size() const noexcept
Definition AMReX_Vector.H:53
amrex_real Real
Floating Point Type for Fields.
Definition AMReX_REAL.H:79
__host__ __device__ BoxND< dim > grow(const BoxND< dim > &b, int i) noexcept
Grow BoxND in all directions by given amount.
Definition AMReX_Box.H:1280
__host__ __device__ BoxND< dim > refine(const BoxND< dim > &b, int ref_ratio) noexcept
Definition AMReX_Box.H:1459
std::array< T, N > Array
Definition AMReX_Array.H:26
const IndexSpace * TopIndexSpaceIfPresent() noexcept
Definition AMReX_EB2.cpp:93
void streamSynchronize() noexcept
Definition AMReX_GpuDevice.H:310
Definition AMReX_Amr.cpp:49
__host__ __device__ BoxND< dim > convert(const BoxND< dim > &b, const IntVectND< dim > &typ) noexcept
Return a BoxND with different type.
Definition AMReX_Box.H:1558
std::enable_if_t< std::is_integral_v< T > > ParallelFor(TypeList< CTOs... > ctos, std::array< int, sizeof...(CTOs)> const &runtime_options, T N, F &&f)
Definition AMReX_CTOParallelForImpl.H:193
std::enable_if_t< IsFabArray< MF >::value > FillPatchSingleLevel(MF &mf, IntVect const &nghost, Real time, const Vector< MF * > &smf, const Vector< Real > &stime, int scomp, int dcomp, int ncomp, const Geometry &geom, BC &physbcf, int bcfcomp)
FillPatch with data from the current level.
Definition AMReX_FillPatchUtil_I.H:75
IntVectND< 3 > IntVect
IntVect is an alias for amrex::IntVectND instantiated with AMREX_SPACEDIM.
Definition AMReX_BaseFwd.H:33
void Abort(const std::string &msg)
Print out message to cerr and exit via abort().
Definition AMReX.cpp:240
void FillPatchInterp(MultiFab &mf_fine_patch, int fcomp, MultiFab const &mf_crse_patch, int ccomp, int ncomp, IntVect const &ng, const Geometry &cgeom, const Geometry &fgeom, Box const &dest_domain, const IntVect &ratio, MFInterpolater *mapper, const Vector< BCRec > &bcs, int bcscomp)
Helper that applies a MFInterpolater to fill a fine patch from a coarse patch.
Definition AMReX_FillPatchUtil.cpp:140
Definition AMReX_FabArrayBase.H:305
Definition AMReX_TypeTraits.H:29
FabArray memory allocation information.
Definition AMReX_FabArray.H:66
Definition AMReX_FillPatchUtil.H:39