#ifndef AMREX_ML_ABECLAPLACIAN_H_
#define AMREX_ML_ABECLAPLACIAN_H_
#include <AMReX_Config.H>
#include <AMReX_MLCellABecLap.H>
#include <AMReX_MLABecLap_K.H>

namespace amrex {

template <typename MF>
class MLABecLaplacianT
    : public MLCellABecLapT<MF>
{
public:
    using FAB = typename MF::fab_type;
    using RT  = typename MF::value_type;
    using Location = typename MLLinOpT<MF>::Location;
    // ...

    template <typename T1, typename T2,
              std::enable_if_t<std::is_convertible_v<T1,typename MF::value_type> &&
                               std::is_convertible_v<T2,typename MF::value_type>, int> = 0>
    void setScalars (T1 a, T2 b) noexcept;

    template <typename AMF,
              std::enable_if_t<IsFabArray<AMF>::value &&
                               std::is_convertible_v<typename AMF::value_type,
                                                     typename MF::value_type>, int> = 0>
    void setACoeffs (int amrlev, const AMF& alpha);

    template <typename T,
              std::enable_if_t<std::is_convertible_v<T,typename MF::value_type>, int> = 0>
    void setACoeffs (int amrlev, T alpha);
    template <typename AMF,
              std::enable_if_t<IsFabArray<AMF>::value &&
                               std::is_convertible_v<typename AMF::value_type,
                                                     typename MF::value_type>, int> = 0>
    void setBCoeffs (int amrlev, const Array<AMF const*,AMREX_SPACEDIM>& beta);

    template <typename T,
              std::enable_if_t<std::is_convertible_v<T,typename MF::value_type>, int> = 0>
    void setBCoeffs (int amrlev, T beta);

    template <typename T,
              std::enable_if_t<std::is_convertible_v<T,typename MF::value_type>, int> = 0>
    void setBCoeffs (int amrlev, Vector<T> const& beta);

    [[nodiscard]] int getNComp () const override { return m_ncomp; }
    void Fapply (int amrlev, int mglev, MF& out, const MF& in) const override;
    void Fsmooth (int amrlev, int mglev, MF& sol, const MF& rhs, int redblack) const override;
    void FFlux (int amrlev, const MFIter& mfi,
                const Array<FAB*,AMREX_SPACEDIM>& flux,
                const FAB& sol, Location /*loc*/,
                int face_only=0) const override;

    void normalize (int amrlev, int mglev, MF& mf) const override;
    [[nodiscard]] MF const* getACoeffs (int amrlev, int mglev) const final
        { return &(m_a_coeffs[amrlev][mglev]); }

    [[nodiscard]] std::unique_ptr<MLLinOpT<MF>> makeNLinOp (int) const final;
    // ...
    static void FFlux (Box const& box, Real const* dxinv, RT bscalar,
                       Array<FAB const*, AMREX_SPACEDIM> const& bcoef,
                       Array<FAB*,AMREX_SPACEDIM> const& flux,
                       FAB const& sol, int face_only, int ncomp);
    // ...

    void define_ab_coeffs ();

    void update_singular_flags ();

    // ...
};
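// Usage sketch (not part of the original header): a minimal, hedged example of
// driving the interface declared above, assuming the usual
// MLABecLaplacian = MLABecLaplacianT<MultiFab> alias and MLMG from
// <AMReX_MLMG.H>. The geometry/grids/dmap objects, the coefficient MultiFabs
// `acoef`/`bcoef`, and the solution/rhs MultiFabs `phi`/`rhs` are
// caller-supplied placeholders.
//
//   MLABecLaplacian mlabec({geom}, {grids}, {dmap});
//   mlabec.setDomainBC({AMREX_D_DECL(LinOpBCType::Dirichlet,
//                                    LinOpBCType::Dirichlet,
//                                    LinOpBCType::Dirichlet)},
//                      {AMREX_D_DECL(LinOpBCType::Neumann,
//                                    LinOpBCType::Neumann,
//                                    LinOpBCType::Neumann)});
//   mlabec.setLevelBC(0, &phi);
//   mlabec.setScalars(ascalar, bscalar);   // operator: ascalar*a - bscalar*div(b grad)
//   mlabec.setACoeffs(0, acoef);           // cell-centered, single component
//   mlabec.setBCoeffs(0, amrex::GetArrOfConstPtrs(bcoef)); // face-centered, one per direction
//   MLMG mlmg(mlabec);
//   mlmg.solve({&phi}, {&rhs}, 1.e-10, 0.0);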
template <typename MF>
MLABecLaplacianT<MF>::MLABecLaplacianT (/* geom, grids, dmap, info, factory, ncomp */)
{
    define(a_geom, a_grids, a_dmap, a_info, a_factory, a_ncomp);
}

template <typename MF>
MLABecLaplacianT<MF>::MLABecLaplacianT (/* geom, grids, dmap, overset mask, info, factory, ncomp */)
{
    define(a_geom, a_grids, a_dmap, a_overset_mask, a_info, a_factory, a_ncomp);
}
template <typename MF>
void MLABecLaplacianT<MF>::define (/* geom, grids, dmap, info, factory, ncomp */)
{
    // ...
    this->m_ncomp = a_ncomp;
    // ...
}

template <typename MF>
void MLABecLaplacianT<MF>::define (/* ... overset-mask variant ... */)
{
    BL_PROFILE("MLABecLaplacian::define(overset)");
    this->m_ncomp = a_ncomp;
    // ...
}
template <typename MF>
void MLABecLaplacianT<MF>::define_ab_coeffs ()
{
    m_a_coeffs.resize(this->m_num_amr_levels);
    m_b_coeffs.resize(this->m_num_amr_levels);
    for (int amrlev = 0; amrlev < this->m_num_amr_levels; ++amrlev)
    {
        m_a_coeffs[amrlev].resize(this->m_num_mg_levels[amrlev]);
        m_b_coeffs[amrlev].resize(this->m_num_mg_levels[amrlev]);
        for (int mglev = 0; mglev < this->m_num_mg_levels[amrlev]; ++mglev)
        {
            m_a_coeffs[amrlev][mglev].define
                (this->m_grids[amrlev][mglev], this->m_dmap[amrlev][mglev],
                 1, 0, MFInfo(), *(this->m_factory[amrlev][mglev]));
            for (int idim = 0; idim < AMREX_SPACEDIM; ++idim)
            {
                const BoxArray& ba = amrex::convert(this->m_grids[amrlev][mglev],
                                                    IntVect::TheDimensionVector(idim));
                m_b_coeffs[amrlev][mglev][idim].define
                    (ba, this->m_dmap[amrlev][mglev], m_ncomp, 0, MFInfo(),
                     *(this->m_factory[amrlev][mglev]));
            }
        }
    }
}
template <typename MF>
template <typename T1, typename T2,
          std::enable_if_t<std::is_convertible_v<T1,typename MF::value_type> &&
                           std::is_convertible_v<T2,typename MF::value_type>, int>>
void
MLABecLaplacianT<MF>::setScalars (T1 a, T2 b) noexcept
{
    m_a_scalar = RT(a);
    m_b_scalar = RT(b);
    if (m_a_scalar == RT(0.0)) {
        for (int amrlev = 0; amrlev < this->m_num_amr_levels; ++amrlev) {
            m_a_coeffs[amrlev][0].setVal(RT(0.0));
        }
        m_acoef_set = true;
    }
    m_scalars_set = true;
}
template <typename MF>
template <typename AMF,
          std::enable_if_t<IsFabArray<AMF>::value &&
                           std::is_convertible_v<typename AMF::value_type,
                                                 typename MF::value_type>, int>>
void
MLABecLaplacianT<MF>::setACoeffs (int amrlev, const AMF& alpha)
{
    AMREX_ASSERT_WITH_MESSAGE(alpha.nComp() == 1,
        "MLABecLaplacian::setACoeffs: alpha is supposed to be single component.");
    m_a_coeffs[amrlev][0].LocalCopy(alpha, 0, 0, 1, IntVect(0));
    m_needs_update = true;
    m_acoef_set = true;
}
template <typename MF>
template <typename T,
          std::enable_if_t<std::is_convertible_v<T,typename MF::value_type>, int>>
void
MLABecLaplacianT<MF>::setACoeffs (int amrlev, T alpha)
{
    m_a_coeffs[amrlev][0].setVal(RT(alpha));
    m_needs_update = true;
    m_acoef_set = true;
}
template <typename MF>
template <typename AMF,
          std::enable_if_t<IsFabArray<AMF>::value &&
                           std::is_convertible_v<typename AMF::value_type,
                                                 typename MF::value_type>, int>>
void
MLABecLaplacianT<MF>::setBCoeffs (int amrlev,
                                  const Array<AMF const*,AMREX_SPACEDIM>& beta)
{
    const int ncomp = this->getNComp();
    AMREX_ALWAYS_ASSERT(beta[0]->nComp() == 1 || beta[0]->nComp() == ncomp);
    if (beta[0]->nComp() == ncomp) {
        for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
            for (int icomp = 0; icomp < ncomp; ++icomp) {
                m_b_coeffs[amrlev][0][idim].LocalCopy(*beta[idim], icomp, icomp, 1, IntVect(0));
            }
        }
    } else {
        for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
            for (int icomp = 0; icomp < ncomp; ++icomp) {
                m_b_coeffs[amrlev][0][idim].LocalCopy(*beta[idim], 0, icomp, 1, IntVect(0));
            }
        }
    }
    m_needs_update = true;
}
template <typename MF>
template <typename T,
          std::enable_if_t<std::is_convertible_v<T,typename MF::value_type>, int>>
void
MLABecLaplacianT<MF>::setBCoeffs (int amrlev, T beta)
{
    for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
        m_b_coeffs[amrlev][0][idim].setVal(RT(beta));
    }
    m_needs_update = true;
}
template <typename MF>
template <typename T,
          std::enable_if_t<std::is_convertible_v<T,typename MF::value_type>, int>>
void
MLABecLaplacianT<MF>::setBCoeffs (int amrlev, Vector<T> const& beta)
{
    const int ncomp = this->getNComp();
    for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
        for (int icomp = 0; icomp < ncomp; ++icomp) {
            m_b_coeffs[amrlev][0][idim].setVal(RT(beta[icomp]));
        }
    }
    m_needs_update = true;
}
template <typename MF>
void
MLABecLaplacianT<MF>::update ()
{
    // ...
#if (AMREX_SPACEDIM != 3)
    applyMetricTermsCoeffs();
#endif
    applyRobinBCTermsCoeffs();
    // ...
    update_singular_flags();
    m_needs_update = false;
}
template <typename MF>
void
MLABecLaplacianT<MF>::prepareForSolve ()
{
    BL_PROFILE("MLABecLaplacian::prepareForSolve()");
    // ...
#if (AMREX_SPACEDIM != 3)
    applyMetricTermsCoeffs();
#endif
    applyRobinBCTermsCoeffs();
    // ...
    update_singular_flags();
    m_needs_update = false;
}
template <typename MF>
void
MLABecLaplacianT<MF>::applyMetricTermsCoeffs ()
{
#if (AMREX_SPACEDIM != 3)
    for (int alev = 0; alev < this->m_num_amr_levels; ++alev)
    {
        const int mglev = 0;
        this->applyMetricTerm(alev, mglev, m_a_coeffs[alev][mglev]);
        for (int idim = 0; idim < AMREX_SPACEDIM; ++idim)
        {
            this->applyMetricTerm(alev, mglev, m_b_coeffs[alev][mglev][idim]);
        }
    }
#endif
}
namespace detail {

template <typename LP>
void applyRobinBCTermsCoeffs (LP& linop)
{
    using RT = typename LP::RT;

    const int ncomp = linop.getNComp();
    bool reset_alpha = false;
    if (linop.m_a_scalar == RT(0.0)) {
        linop.m_a_scalar = RT(1.0);
        reset_alpha = true;
    }
    const RT bovera = linop.m_b_scalar/linop.m_a_scalar;

    // ...
    AMREX_ALWAYS_ASSERT_WITH_MESSAGE(linop.m_scalars_set && linop.m_acoef_set,
        "To reuse solver With Robin BC, one must re-call setScalars (and setACoeffs if the scalar is not zero)");

    linop.m_scalars_set = false;
    linop.m_acoef_set = false;

    for (int amrlev = 0; amrlev < linop.NAMRLevels(); ++amrlev) {
        const int mglev = 0;
        const Box& domain = linop.Geom(amrlev,mglev).Domain();
        const RT dxi = static_cast<RT>(linop.Geom(amrlev,mglev).InvCellSize(0));
        const RT dyi = static_cast<RT>((AMREX_SPACEDIM >= 2) ? linop.Geom(amrlev,mglev).InvCellSize(1) : Real(1.0));
        const RT dzi = static_cast<RT>((AMREX_SPACEDIM == 3) ? linop.Geom(amrlev,mglev).InvCellSize(2) : Real(1.0));

        if (reset_alpha) {
            linop.m_a_coeffs[amrlev][mglev].setVal(RT(0.0));
        }

        MFItInfo mfi_info;
        if (Gpu::notInLaunchRegion()) { mfi_info.EnableTiling().SetDynamic(true); }

#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
        for (MFIter mfi(linop.m_a_coeffs[amrlev][mglev], mfi_info); mfi.isValid(); ++mfi)
        {
            const Box& vbx = mfi.validbox();
            auto const& afab = linop.m_a_coeffs[amrlev][mglev].array(mfi);
            for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
                auto const& bfab = linop.m_b_coeffs[amrlev][mglev][idim].const_array(mfi);
                const Box blo = amrex::adjCellLo(vbx, idim);
                const Box bhi = amrex::adjCellHi(vbx, idim);
                bool outside_domain_lo = !(domain.contains(blo));
                bool outside_domain_hi = !(domain.contains(bhi));
                if ((!outside_domain_lo) && (!outside_domain_hi)) { continue; }
                for (int icomp = 0; icomp < ncomp; ++icomp) {
                    auto const& rbc = (*(linop.m_robin_bcval[amrlev]))[mfi].const_array(icomp*3);
                    if (outside_domain_lo) {
                        if (idim == 0) {
                            RT fac = bovera*dxi*dxi;
                            // ... for each boundary-adjacent cell (i,j,k) in blo:
                            RT B = (rbc(i,j,k,1)*dxi - rbc(i,j,k,0)*RT(0.5))
                                 / (rbc(i,j,k,1)*dxi + rbc(i,j,k,0)*RT(0.5));
                            afab(i+1,j,k,icomp) += fac*bfab(i+1,j,k,icomp)*(RT(1.0)-B);
                        } else if (idim == 1) {
                            RT fac = bovera*dyi*dyi;
                            // ...
                            RT B = (rbc(i,j,k,1)*dyi - rbc(i,j,k,0)*RT(0.5))
                                 / (rbc(i,j,k,1)*dyi + rbc(i,j,k,0)*RT(0.5));
                            afab(i,j+1,k,icomp) += fac*bfab(i,j+1,k,icomp)*(RT(1.0)-B);
                        } else {
                            RT fac = bovera*dzi*dzi;
                            // ...
                            RT B = (rbc(i,j,k,1)*dzi - rbc(i,j,k,0)*RT(0.5))
                                 / (rbc(i,j,k,1)*dzi + rbc(i,j,k,0)*RT(0.5));
                            afab(i,j,k+1,icomp) += fac*bfab(i,j,k+1,icomp)*(RT(1.0)-B);
                        }
                    }
                    if (outside_domain_hi) {
                        if (idim == 0) {
                            RT fac = bovera*dxi*dxi;
                            // ... for each boundary-adjacent cell (i,j,k) in bhi:
                            RT B = (rbc(i,j,k,1)*dxi - rbc(i,j,k,0)*RT(0.5))
                                 / (rbc(i,j,k,1)*dxi + rbc(i,j,k,0)*RT(0.5));
                            afab(i-1,j,k,icomp) += fac*bfab(i,j,k,icomp)*(RT(1.0)-B);
                        } else if (idim == 1) {
                            RT fac = bovera*dyi*dyi;
                            // ...
                            RT B = (rbc(i,j,k,1)*dyi - rbc(i,j,k,0)*RT(0.5))
                                 / (rbc(i,j,k,1)*dyi + rbc(i,j,k,0)*RT(0.5));
                            afab(i,j-1,k,icomp) += fac*bfab(i,j,k,icomp)*(RT(1.0)-B);
                        } else {
                            RT fac = bovera*dzi*dzi;
                            // ...
                            RT B = (rbc(i,j,k,1)*dzi - rbc(i,j,k,0)*RT(0.5))
                                 / (rbc(i,j,k,1)*dzi + rbc(i,j,k,0)*RT(0.5));
                            afab(i,j,k-1,icomp) += fac*bfab(i,j,k,icomp)*(RT(1.0)-B);
                        }
                    }
                }
            }
        }
    }
}

} // namespace detail
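// Explanatory note (not in the original source) on the ghost-cell elimination
// performed above: a Robin condition  rbc0*phi + rbc1*dphi/dn = rbc2  on a domain
// face is discretized with the ghost value
//   phi_ghost = A + B*phi_interior,   B = (rbc1/h - rbc0/2) / (rbc1/h + rbc0/2),
// which matches the B computed above with h = 1/dxi (resp. dyi, dzi). The part of
// the boundary flux proportional to phi_interior is then folded into the cell's
// diagonal: (b_scalar/a_scalar)*(1/h^2)*b_face*(1 - B) is added to the a
// coefficient, which is exactly the fac*bfab*(1-B) increment accumulated into afab.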
template <typename MF>
void
MLABecLaplacianT<MF>::applyRobinBCTermsCoeffs ()
{
    if (this->hasRobinBC()) {
        detail::applyRobinBCTermsCoeffs(*this);
    }
}
template <typename MF>
void
MLABecLaplacianT<MF>::averageDownCoeffs ()
{
    BL_PROFILE("MLABecLaplacian::averageDownCoeffs()");

    for (int amrlev = this->m_num_amr_levels-1; amrlev > 0; --amrlev)
    {
        auto& fine_a_coeffs = m_a_coeffs[amrlev];
        auto& fine_b_coeffs = m_b_coeffs[amrlev];

        averageDownCoeffsSameAmrLevel(amrlev, fine_a_coeffs, fine_b_coeffs);
        averageDownCoeffsToCoarseAmrLevel(amrlev);
    }

    averageDownCoeffsSameAmrLevel(0, m_a_coeffs[0], m_b_coeffs[0]);
}
template <typename MF>
void
MLABecLaplacianT<MF>::averageDownCoeffsSameAmrLevel (int amrlev, Vector<MF>& a,
                                                     Vector<Array<MF,AMREX_SPACEDIM>>& b)
{
    int nmglevs = a.size();
    for (int mglev = 1; mglev < nmglevs; ++mglev)
    {
        IntVect ratio = (amrlev > 0) ? IntVect(this->mg_coarsen_ratio)
                                     : this->mg_coarsen_ratio_vec[mglev-1];

        if (m_a_scalar == 0.0)
        {
            a[mglev].setVal(RT(0.0));
        }
        else
        {
            // ... average a[mglev-1] down to a[mglev] with `ratio`
        }

        // ... average the face-based b[mglev-1] down to b[mglev] with `ratio`
    }

    for (int mglev = 1; mglev < nmglevs; ++mglev)
    {
        if (this->m_overset_mask[amrlev][mglev]) {
            const RT fac = static_cast<RT>(1 << mglev);
            const RT osfac = RT(2.0)*fac/(fac+RT(1.0));
            const int ncomp = this->getNComp();
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
            for (MFIter mfi(a[mglev], TilingIfNotGPU()); mfi.isValid(); ++mfi)
            {
                AMREX_D_TERM(Box const& xbx = mfi.nodaltilebox(0);,
                             Box const& ybx = mfi.nodaltilebox(1);,
                             Box const& zbx = mfi.nodaltilebox(2));
                AMREX_D_TERM(auto const& bx = b[mglev][0].array(mfi);,
                             auto const& by = b[mglev][1].array(mfi);,
                             auto const& bz = b[mglev][2].array(mfi));
                auto const& osm = this->m_overset_mask[amrlev][mglev]->const_array(mfi);
#if defined(AMREX_USE_CUDA) && defined(_WIN32)
                // ... launch over xbx/ybx/zbx (t_xbx/t_ybx/t_zbx), one direction at a time:
                overset_rescale_bcoef_x(t_xbx, bx, osm, ncomp, osfac);
#if (AMREX_SPACEDIM >= 2)
                overset_rescale_bcoef_y(t_ybx, by, osm, ncomp, osfac);
#endif
#if (AMREX_SPACEDIM == 3)
                overset_rescale_bcoef_z(t_zbx, bz, osm, ncomp, osfac);
#endif
#else
                // ... single host/device launch over xbx/ybx/zbx:
                overset_rescale_bcoef_x(t_xbx, bx, osm, ncomp, osfac);
                overset_rescale_bcoef_y(t_ybx, by, osm, ncomp, osfac);
                overset_rescale_bcoef_z(t_zbx, bz, osm, ncomp, osfac);
#endif
            }
        }
    }
}
template <typename MF>
void
MLABecLaplacianT<MF>::averageDownCoeffsToCoarseAmrLevel (int flev)
{
    auto& fine_a_coeffs = m_a_coeffs[flev  ].back();
    auto& fine_b_coeffs = m_b_coeffs[flev  ].back();
    auto& crse_a_coeffs = m_a_coeffs[flev-1].front();
    auto& crse_b_coeffs = m_b_coeffs[flev-1].front();

    if (m_a_scalar != 0.0) {
        // ... average fine_a_coeffs down onto crse_a_coeffs
    }

    amrex::average_down_faces(amrex::GetArrOfConstPtrs(fine_b_coeffs),
                              amrex::GetArrOfPtrs(crse_b_coeffs),
                              IntVect(this->mg_coarsen_ratio),
                              this->m_geom[flev-1][0]);
}
template <typename MF>
void
MLABecLaplacianT<MF>::update_singular_flags ()
{
    m_is_singular.clear();
    m_is_singular.resize(this->m_num_amr_levels, false);
    auto itlo = std::find(this->m_lobc[0].begin(), this->m_lobc[0].end(), BCType::Dirichlet);
    auto ithi = std::find(this->m_hibc[0].begin(), this->m_hibc[0].end(), BCType::Dirichlet);
    if (itlo == this->m_lobc[0].end() && ithi == this->m_hibc[0].end())
    {   // No Dirichlet face anywhere on the domain boundary
        for (int alev = 0; alev < this->m_num_amr_levels; ++alev)
        {
            if (this->m_domain_covered[alev] && !this->m_overset_mask[alev][0])
            {
                if (m_a_scalar == Real(0.0))
                {
                    m_is_singular[alev] = true;
                }
                else
                {
                    RT asum = m_a_coeffs[alev].back().sum(0,IntVect(0));
                    RT amax = m_a_coeffs[alev].back().norminf(0,1,IntVect(0));
                    m_is_singular[alev] = (std::abs(asum) <= amax * RT(1.e-12));
                }
            }
        }
    }

    if (!m_is_singular[0] && this->m_needs_coarse_data_for_bc /* && ... */)
    {
        bool lev0_a_is_zero = false;
        if (m_a_scalar == Real(0.0)) {
            lev0_a_is_zero = true;
        } else {
            RT asum = m_a_coeffs[0].back().sum(0,IntVect(0));
            RT amax = m_a_coeffs[0].back().norminf(0,1,IntVect(0));
            bool a_is_almost_zero = std::abs(asum) <= amax * RT(1.e-12);
            if (a_is_almost_zero) { lev0_a_is_zero = true; }
        }

        if (lev0_a_is_zero) {
            auto bbox = this->m_grids[0][0].minimalBox();
            for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
                // ... (adjust bbox according to the domain BCs in this direction)
            }
            if (this->m_geom[0][0].Domain().contains(bbox)) {
                m_is_singular[0] = true;
            }
        }
    }
}
template <typename MF>
void
MLABecLaplacianT<MF>::Fapply (int amrlev, int mglev, MF& out, const MF& in) const
{
    BL_PROFILE("MLABecLaplacian::Fapply()");

    const MF& acoef = m_a_coeffs[amrlev][mglev];
    AMREX_D_TERM(const MF& bxcoef = m_b_coeffs[amrlev][mglev][0];,
                 const MF& bycoef = m_b_coeffs[amrlev][mglev][1];,
                 const MF& bzcoef = m_b_coeffs[amrlev][mglev][2];);

    const GpuArray<RT,AMREX_SPACEDIM> dxinv
        {AMREX_D_DECL(static_cast<RT>(this->m_geom[amrlev][mglev].InvCellSize(0)),
                      static_cast<RT>(this->m_geom[amrlev][mglev].InvCellSize(1)),
                      static_cast<RT>(this->m_geom[amrlev][mglev].InvCellSize(2)))};

    const RT ascalar = m_a_scalar;
    const RT bscalar = m_b_scalar;

    const int ncomp = this->getNComp();

    if (Gpu::inLaunchRegion()) {
        const auto& xma = in.const_arrays();
        const auto& yma = out.arrays();
        const auto& ama = acoef.arrays();
        AMREX_D_TERM(const auto& bxma = bxcoef.const_arrays();,
                     const auto& byma = bycoef.const_arrays();,
                     const auto& bzma = bzcoef.const_arrays(););
        if (this->m_overset_mask[amrlev][mglev]) {
            const auto& osmma = this->m_overset_mask[amrlev][mglev]->const_arrays();
            // ... ParallelFor over boxes and components:
            mlabeclap_adotx_os(i,j,k,n, yma[box_no], xma[box_no], ama[box_no],
                               AMREX_D_DECL(bxma[box_no],byma[box_no],bzma[box_no]),
                               osmma[box_no], dxinv, ascalar, bscalar);
        } else {
            // ... ParallelFor over boxes and components:
            mlabeclap_adotx(i,j,k,n, yma[box_no], xma[box_no], ama[box_no],
                            AMREX_D_DECL(bxma[box_no],byma[box_no],bzma[box_no]),
                            dxinv, ascalar, bscalar);
        }
        Gpu::streamSynchronize();
    } else {
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
        for (MFIter mfi(out, TilingIfNotGPU()); mfi.isValid(); ++mfi)
        {
            const Box& bx = mfi.tilebox();
            const auto& xfab = in.array(mfi);
            const auto& yfab = out.array(mfi);
            const auto& afab = acoef.array(mfi);
            AMREX_D_TERM(const auto& bxfab = bxcoef.array(mfi);,
                         const auto& byfab = bycoef.array(mfi);,
                         const auto& bzfab = bzcoef.array(mfi););
            if (this->m_overset_mask[amrlev][mglev]) {
                const auto& osm = this->m_overset_mask[amrlev][mglev]->const_array(mfi);
                // ... loop over bx and ncomp:
                mlabeclap_adotx_os(i,j,k,n, yfab, xfab, afab, AMREX_D_DECL(bxfab,byfab,bzfab),
                                   osm, dxinv, ascalar, bscalar);
            } else {
                // ... loop over bx and ncomp:
                mlabeclap_adotx(i,j,k,n, yfab, xfab, afab, AMREX_D_DECL(bxfab,byfab,bzfab),
                                dxinv, ascalar, bscalar);
            }
        }
    }
}
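// Explanatory note (not in the original source): both branches above evaluate the
// same second-order, cell-centered stencil,
//   out(i,j,k) = ascalar*a(i,j,k)*in(i,j,k)
//              - bscalar * sum_d [ b_d(hi)*(in(hi)-in(i,j,k)) - b_d(lo)*(in(i,j,k)-in(lo)) ] / dx_d^2,
// with face-centered b coefficients and inverse cell sizes dxinv; the _os variant
// additionally zeroes the result in cells excluded by the overset mask.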
template <typename MF>
void
MLABecLaplacianT<MF>::Fsmooth (int amrlev, int mglev, MF& sol, const MF& rhs, int redblack) const
{
    BL_PROFILE("MLABecLaplacian::Fsmooth()");

    bool regular_coarsening = true;
    if (amrlev == 0 && mglev > 0) {
        regular_coarsening = this->mg_coarsen_ratio_vec[mglev-1] == this->mg_coarsen_ratio;
    }

    MF Ax;
    if (! this->m_use_gauss_seidel && regular_coarsening) {
        Ax.define(sol.boxArray(), sol.DistributionMap(), sol.nComp(), 0);
        Fapply(amrlev, mglev, Ax, sol);
    }

    const MF& acoef = m_a_coeffs[amrlev][mglev];
    AMREX_D_TERM(const MF& bxcoef = m_b_coeffs[amrlev][mglev][0];,
                 const MF& bycoef = m_b_coeffs[amrlev][mglev][1];,
                 const MF& bzcoef = m_b_coeffs[amrlev][mglev][2];);
    const auto& undrrelxr = this->m_undrrelxr[amrlev][mglev];
    const auto& maskvals  = this->m_maskvals [amrlev][mglev];

    OrientationIter oitr;

    const auto& f0 = undrrelxr[oitr()]; ++oitr;
    const auto& f1 = undrrelxr[oitr()]; ++oitr;
#if (AMREX_SPACEDIM > 1)
    const auto& f2 = undrrelxr[oitr()]; ++oitr;
    const auto& f3 = undrrelxr[oitr()]; ++oitr;
#if (AMREX_SPACEDIM > 2)
    const auto& f4 = undrrelxr[oitr()]; ++oitr;
    const auto& f5 = undrrelxr[oitr()]; ++oitr;
#endif
#endif

    const auto& mm0 = maskvals[0];
    const auto& mm1 = maskvals[1];
#if (AMREX_SPACEDIM > 1)
    const auto& mm2 = maskvals[2];
    const auto& mm3 = maskvals[3];
#if (AMREX_SPACEDIM > 2)
    const auto& mm4 = maskvals[4];
    const auto& mm5 = maskvals[5];
#endif
#endif

    const int nc = this->getNComp();
    const Real* h = this->m_geom[amrlev][mglev].CellSize();
    AMREX_D_TERM(const RT dhx = m_b_scalar/static_cast<RT>(h[0]*h[0]);,
                 const RT dhy = m_b_scalar/static_cast<RT>(h[1]*h[1]);,
                 const RT dhz = m_b_scalar/static_cast<RT>(h[2]*h[2]));
    const RT alpha = m_a_scalar;

    if (Gpu::inLaunchRegion()
        && (this->m_overset_mask[amrlev][mglev] || regular_coarsening))
    {
        // ... (mask MultiArray4s, with the same AMREX_SPACEDIM > 1 / > 2 guards)

        const auto& solnma = sol.arrays();
        const auto& rhsma  = rhs.const_arrays();
        const auto& ama    = acoef.const_arrays();
        AMREX_D_TERM(const auto& bxma = bxcoef.const_arrays();,
                     const auto& byma = bycoef.const_arrays();,
                     const auto& bzma = bzcoef.const_arrays(););

        const auto& f0ma = f0.const_arrays();
        const auto& f1ma = f1.const_arrays();
#if (AMREX_SPACEDIM > 1)
        const auto& f2ma = f2.const_arrays();
        const auto& f3ma = f3.const_arrays();
#if (AMREX_SPACEDIM > 2)
        const auto& f4ma = f4.const_arrays();
        const auto& f5ma = f5.const_arrays();
#endif
#endif

        if (this->m_overset_mask[amrlev][mglev]) {
            const auto& osmma = this->m_overset_mask[amrlev][mglev]->const_arrays();
            if (this->m_use_gauss_seidel) {
                // ... ParallelFor over boxes and nc components:
                Box vbx(ama[box_no]);
                abec_gsrb_os(i,j,k,n, solnma[box_no], rhsma[box_no], alpha, ama[box_no],
                             /* dhx/dhy/dhz, b, mask, and f arrays ... */
                             osmma[box_no], vbx, redblack);
            } else {
                const auto& axma = Ax.const_arrays();
                // ... ParallelFor over boxes and nc components:
                Box vbx(ama[box_no]);
                abec_jacobi_os(i,j,k,n, solnma[box_no], rhsma[box_no], axma[box_no],
                               /* ... */
                               osmma[box_no], vbx);
            }
        } else if (regular_coarsening) {
            if (this->m_use_gauss_seidel) {
                // ... ParallelFor over boxes and nc components:
                Box vbx(ama[box_no]);
                abec_gsrb(i,j,k,n, solnma[box_no], rhsma[box_no], alpha, ama[box_no],
                          /* ... */ vbx, redblack);
            } else {
                const auto& axma = Ax.const_arrays();
                // ... ParallelFor over boxes and nc components:
                Box vbx(ama[box_no]);
                abec_jacobi(i,j,k,n, solnma[box_no], rhsma[box_no], axma[box_no],
                            /* ... */ vbx);
            }
        }
        Gpu::streamSynchronize();
    }
    else
    {
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
        for (MFIter mfi(sol, TilingIfNotGPU()); mfi.isValid(); ++mfi)
        {
            const auto& m0 = mm0.array(mfi);
            const auto& m1 = mm1.array(mfi);
#if (AMREX_SPACEDIM > 1)
            const auto& m2 = mm2.array(mfi);
            const auto& m3 = mm3.array(mfi);
#if (AMREX_SPACEDIM > 2)
            const auto& m4 = mm4.array(mfi);
            const auto& m5 = mm5.array(mfi);
#endif
#endif

            const Box& tbx = mfi.tilebox();
            const Box& vbx = mfi.validbox();
            const auto& solnfab = sol.array(mfi);
            const auto& rhsfab  = rhs.const_array(mfi);
            const auto& afab    = acoef.const_array(mfi);

            AMREX_D_TERM(const auto& bxfab = bxcoef.const_array(mfi);,
                         const auto& byfab = bycoef.const_array(mfi);,
                         const auto& bzfab = bzcoef.const_array(mfi););

            const auto& f0fab = f0.const_array(mfi);
            const auto& f1fab = f1.const_array(mfi);
#if (AMREX_SPACEDIM > 1)
            const auto& f2fab = f2.const_array(mfi);
            const auto& f3fab = f3.const_array(mfi);
#if (AMREX_SPACEDIM > 2)
            const auto& f4fab = f4.const_array(mfi);
            const auto& f5fab = f5.const_array(mfi);
#endif
#endif

            if (this->m_overset_mask[amrlev][mglev]) {
                const auto& osm = this->m_overset_mask[amrlev][mglev]->const_array(mfi);
                if (this->m_use_gauss_seidel) {
                    // ... loop over tbx and nc:
                    abec_gsrb_os(i,j,k,n, solnfab, rhsfab, alpha, afab,
                                 /* ... */ osm, vbx, redblack);
                } else {
                    const auto& axfab = Ax.const_array(mfi);
                    // ... loop over tbx and nc:
                    abec_jacobi_os(i,j,k,n, solnfab, rhsfab, axfab,
                                   /* ... */ osm, vbx);
                }
            } else if (regular_coarsening) {
                if (this->m_use_gauss_seidel) {
                    // ... loop over tbx and nc:
                    abec_gsrb(i,j,k,n, solnfab, rhsfab, alpha, afab,
                              /* ... */ vbx, redblack);
                } else {
                    const auto& axfab = Ax.const_array(mfi);
                    // ... loop over tbx and nc:
                    abec_jacobi(i,j,k,n, solnfab, rhsfab, axfab,
                                /* ... */ vbx);
                }
            } else {
                // Semi-coarsened level: use the line solver (CPU path only).
                abec_gsrb_with_line_solve(tbx, solnfab, rhsfab, alpha, afab,
                                          /* ... */ vbx, redblack, nc);
            }
        }
    }
}
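// Explanatory note (not in the original source): the smoother above chooses among
// three kernels. With an overset mask or regular (isotropic) coarsening it runs
// either red-black Gauss-Seidel (abec_gsrb / abec_gsrb_os, using the `redblack`
// parity) or a Jacobi sweep (abec_jacobi / abec_jacobi_os, which needs the
// precomputed Ax = A*sol); for semi-coarsened levels, where mg_coarsen_ratio_vec
// differs from the regular ratio, it falls back to abec_gsrb_with_line_solve.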
template <typename MF>
void
MLABecLaplacianT<MF>::FFlux (int amrlev, const MFIter& mfi,
                             const Array<FAB*,AMREX_SPACEDIM>& flux,
                             const FAB& sol, Location, int face_only) const
{
    const int mglev = 0;
    const Box& box = mfi.tilebox();
    const Real* dxinv = this->m_geom[amrlev][mglev].InvCellSize();
    const int ncomp = this->getNComp();
    FFlux(box, dxinv, m_b_scalar,
          Array<FAB const*,AMREX_SPACEDIM>{{AMREX_D_DECL(&(m_b_coeffs[amrlev][mglev][0][mfi]),
                                                         &(m_b_coeffs[amrlev][mglev][1][mfi]),
                                                         &(m_b_coeffs[amrlev][mglev][2][mfi]))}},
          flux, sol, face_only, ncomp);
}
template <typename MF>
void
MLABecLaplacianT<MF>::FFlux (Box const& box, Real const* dxinv, RT bscalar,
                             Array<FAB const*, AMREX_SPACEDIM> const& bcoef,
                             Array<FAB*,AMREX_SPACEDIM> const& flux,
                             FAB const& sol, int face_only, int ncomp)
{
    AMREX_D_TERM(const auto bx = bcoef[0]->const_array();,
                 const auto by = bcoef[1]->const_array();,
                 const auto bz = bcoef[2]->const_array(););
    AMREX_D_TERM(const auto& fxarr = flux[0]->array();,
                 const auto& fyarr = flux[1]->array();,
                 const auto& fzarr = flux[2]->array(););
    const auto& solarr = sol.array();

    if (face_only)
    {
        RT fac = bscalar*static_cast<RT>(dxinv[0]);
        // ... (launch over the boundary x-faces, tbox)
        int blen = box.length(0);
        mlabeclap_flux_xface(tbox, fxarr, solarr, bx, fac, blen, ncomp);
#if (AMREX_SPACEDIM >= 2)
        fac = bscalar*static_cast<RT>(dxinv[1]);
        // ... (boundary y-faces)
        blen = box.length(1);
        mlabeclap_flux_yface(tbox, fyarr, solarr, by, fac, blen, ncomp);
#endif
#if (AMREX_SPACEDIM == 3)
        fac = bscalar*static_cast<RT>(dxinv[2]);
        // ... (boundary z-faces)
        blen = box.length(2);
        mlabeclap_flux_zface(tbox, fzarr, solarr, bz, fac, blen, ncomp);
#endif
    }
    else
    {
        RT fac = bscalar*static_cast<RT>(dxinv[0]);
        // ... (launch over all x-faces, tbox)
        mlabeclap_flux_x(tbox, fxarr, solarr, bx, fac, ncomp);
#if (AMREX_SPACEDIM >= 2)
        fac = bscalar*static_cast<RT>(dxinv[1]);
        // ... (all y-faces)
        mlabeclap_flux_y(tbox, fyarr, solarr, by, fac, ncomp);
#endif
#if (AMREX_SPACEDIM == 3)
        fac = bscalar*static_cast<RT>(dxinv[2]);
        // ... (all z-faces)
        mlabeclap_flux_z(tbox, fzarr, solarr, bz, fac, ncomp);
#endif
    }
}
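// Explanatory note (not in the original source): the fluxes written above are the
// face-centered quantities used for refluxing and diagnostics,
//   F_d = -bscalar * b_d * (phi(i) - phi(i-1)) / dx_d
// on each d-face. With face_only != 0 only the outermost face planes of the box
// in each direction are filled (the `blen` offset jumps from the low to the high
// face); otherwise every face of the box is filled.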
template <typename MF>
void
MLABecLaplacianT<MF>::normalize (int amrlev, int mglev, MF& mf) const
{
    BL_PROFILE("MLABecLaplacian::normalize()");

    const auto& acoef = m_a_coeffs[amrlev][mglev];
    AMREX_D_TERM(const auto& bxcoef = m_b_coeffs[amrlev][mglev][0];,
                 const auto& bycoef = m_b_coeffs[amrlev][mglev][1];,
                 const auto& bzcoef = m_b_coeffs[amrlev][mglev][2];);

    const GpuArray<RT,AMREX_SPACEDIM> dxinv
        {AMREX_D_DECL(static_cast<RT>(this->m_geom[amrlev][mglev].InvCellSize(0)),
                      static_cast<RT>(this->m_geom[amrlev][mglev].InvCellSize(1)),
                      static_cast<RT>(this->m_geom[amrlev][mglev].InvCellSize(2)))};

    const RT ascalar = m_a_scalar;
    const RT bscalar = m_b_scalar;

    const int ncomp = getNComp();

    if (Gpu::inLaunchRegion()) {
        const auto& ma = mf.arrays();
        const auto& ama = acoef.const_arrays();
        AMREX_D_TERM(const auto& bxma = bxcoef.const_arrays();,
                     const auto& byma = bycoef.const_arrays();,
                     const auto& bzma = bzcoef.const_arrays(););
        // ... ParallelFor over boxes and components:
        mlabeclap_normalize(i,j,k,n, ma[box_no], ama[box_no],
                            AMREX_D_DECL(bxma[box_no],byma[box_no],bzma[box_no]),
                            dxinv, ascalar, bscalar);
        Gpu::streamSynchronize();
    } else {
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
        for (MFIter mfi(mf, TilingIfNotGPU()); mfi.isValid(); ++mfi)
        {
            const Box& bx = mfi.tilebox();
            const auto& fab = mf.array(mfi);
            const auto& afab = acoef.array(mfi);
            AMREX_D_TERM(const auto& bxfab = bxcoef.array(mfi);,
                         const auto& byfab = bycoef.array(mfi);,
                         const auto& bzfab = bzcoef.array(mfi););
            // ... loop over bx and ncomp:
            mlabeclap_normalize(i,j,k,n, fab, afab, AMREX_D_DECL(bxfab,byfab,bzfab),
                                dxinv, ascalar, bscalar);
        }
    }
}
template <typename MF>
bool
MLABecLaplacianT<MF>::supportNSolve () const
{
    bool support = false;
    if (this->m_overset_mask[0][0]) {
        // support becomes true only if the bottom level can still be coarsened
        // (domain coarsenable down to this->mg_domain_min_width). ...
    }
    return support;
}
template <typename MF>
std::unique_ptr<MLLinOpT<MF>>
MLABecLaplacianT<MF>::makeNLinOp (int /*grid_size*/) const
{
    if (this->m_overset_mask[0][0] == nullptr) { return nullptr; }

    const Geometry& geom = this->m_geom[0].back();
    const BoxArray& ba = this->m_grids[0].back();
    const DistributionMapping& dm = this->m_dmap[0].back();

    std::unique_ptr<MLLinOpT<MF>> r
        {new MLABecLaplacianT<MF>({geom}, {ba}, {dm})};

    auto* nop = dynamic_cast<MLABecLaplacianT<MF>*>(r.get());
    if (!nop) { return nullptr; }

    nop->m_parent = this;

    nop->setMaxOrder(this->maxorder);
    nop->setVerbose(this->verbose);

    nop->setDomainBC(this->m_lobc, this->m_hibc);

    if (this->needsCoarseDataForBC())
    {
        const Real* dx0 = this->m_geom[0][0].CellSize();
        RealVect fac(this->m_coarse_data_crse_ratio);
        // ... (compute the coarse/fine BC location cbloc from dx0 and fac)
        nop->setCoarseFineBCLocation(cbloc);
    }

    nop->setScalars(m_a_scalar, m_b_scalar);

    MF const& alpha_bottom = m_a_coeffs[0].back();
    iMultiFab const& osm_bottom = *(this->m_overset_mask[0].back());
    const int ncomp = alpha_bottom.nComp();
    MF alpha(ba, dm, ncomp, 0);

    RT a_max = alpha_bottom.norminf(0, ncomp, IntVect(0), true, true);
    const int ncomp_b = m_b_coeffs[0].back()[0].nComp();
    AMREX_D_TERM(RT bx_max = m_b_coeffs[0].back()[0].norminf(0,ncomp_b,IntVect(0),true,true);,
                 RT by_max = m_b_coeffs[0].back()[1].norminf(0,ncomp_b,IntVect(0),true,true);,
                 RT bz_max = m_b_coeffs[0].back()[2].norminf(0,ncomp_b,IntVect(0),true,true));
    const Real* dxinv = geom.InvCellSize();
    RT huge_alpha = RT(1.e30) *
        amrex::max(a_max*std::abs(m_a_scalar),
                   AMREX_D_DECL(std::abs(m_b_scalar)*bx_max*dxinv[0]*dxinv[0],
                                std::abs(m_b_scalar)*by_max*dxinv[1]*dxinv[1],
                                std::abs(m_b_scalar)*bz_max*dxinv[2]*dxinv[2]));
    ParallelAllReduce::Max(huge_alpha, ParallelContext::CommunicatorSub());

    // Outside the overset region the a coefficient is replaced by a huge value,
    // which effectively pins those cells in the N-Solve operator.
    if (Gpu::inLaunchRegion()) {
        auto const& ama = alpha.arrays();
        auto const& abotma = alpha_bottom.const_arrays();
        auto const& mma = osm_bottom.const_arrays();
        // ... ParallelFor over boxes and components:
        if (mma[box_no](i,j,k)) {
            ama[box_no](i,j,k,n) = abotma[box_no](i,j,k,n);
        } else {
            ama[box_no](i,j,k,n) = huge_alpha;
        }
        Gpu::streamSynchronize();
    } else {
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
        for (MFIter mfi(alpha, TilingIfNotGPU()); mfi.isValid(); ++mfi)
        {
            Box const& bx = mfi.tilebox();
            auto const& a = alpha.array(mfi);
            auto const& abot = alpha_bottom.const_array(mfi);
            auto const& m = osm_bottom.const_array(mfi);
            // ... loop over bx and ncomp:
            if (m(i,j,k)) {
                a(i,j,k,n) = abot(i,j,k,n);
            } else {
                a(i,j,k,n) = huge_alpha;
            }
        }
    }

    nop->setACoeffs(0, alpha);
    nop->setBCoeffs(0, GetArrOfConstPtrs(m_b_coeffs[0].back()));

    return r;
}
template <typename MF>
void
MLABecLaplacianT<MF>::copyNSolveSolution (MF& dst, MF const& src) const
{
    if (this->m_overset_mask[0].back() == nullptr) { return; }

    const int ncomp = dst.nComp();

    if (Gpu::inLaunchRegion()) {
        auto const& dstma = dst.arrays();
        auto const& srcma = src.const_arrays();
        auto const& mma = this->m_overset_mask[0].back()->const_arrays();
        // ... ParallelFor over boxes and components:
        if (mma[box_no](i,j,k)) {
            dstma[box_no](i,j,k,n) = srcma[box_no](i,j,k,n);
        } else {
            dstma[box_no](i,j,k,n) = RT(0.0);
        }
    } else {
#ifdef AMREX_USE_OMP
#pragma omp parallel if (Gpu::notInLaunchRegion())
#endif
        for (MFIter mfi(dst, TilingIfNotGPU()); mfi.isValid(); ++mfi)
        {
            Box const& bx = mfi.tilebox();
            auto const& dfab = dst.array(mfi);
            auto const& sfab = src.const_array(mfi);
            auto const& m = this->m_overset_mask[0].back()->const_array(mfi);
            // ... loop over bx and ncomp:
            if (m(i,j,k)) {
                dfab(i,j,k,n) = sfab(i,j,k,n);
            } else {
                dfab(i,j,k,n) = RT(0.0);
            }
        }
    }
}

// ...

using MLABecLaplacian = MLABecLaplacianT<MultiFab>;

}

#endif