AMReX — Block-Structured AMR Software Framework
Reference listing of AMReX_MLLinOp.H (auto-generated documentation page; navigation chrome removed). Go to the documentation of this file.
1#ifndef AMREX_ML_LINOP_H_
2#define AMREX_ML_LINOP_H_
3#include <AMReX_Config.H>
4
5#if defined(AMREX_USE_HYPRE) && (AMREX_SPACEDIM > 1)
6#include <AMReX_Hypre.H>
8#endif
9
10#if defined(AMREX_USE_PETSC) && (AMREX_SPACEDIM > 1)
11#include <AMReX_PETSc.H>
12#endif
13
14#ifdef AMREX_USE_EB
16#include <AMReX_MultiCutFab.H>
17#endif
18
19#include <AMReX_Any.H>
20#include <AMReX_BndryRegister.H>
21#include <AMReX_FabDataType.H>
22#include <AMReX_MLMGBndry.H>
23#include <AMReX_MultiFab.H>
24#include <AMReX_MultiFabUtil.H>
25
26#include <algorithm>
27#include <string>
28
29namespace amrex {
30
/** \brief Solvers available at the coarsest ("bottom") multigrid level.
 *
 * NOTE(review): the enumerator list was lost when this header was extracted
 * to text; it is restored here from upstream AMReX. The only enumerator
 * directly evidenced by this file is `bicgstab` (returned by
 * getDefaultBottomSolver below) -- confirm the full list against the
 * upstream header.
 */
enum class BottomSolver : int {
    Default,   ///< Let the MLMG driver choose
    smoother,  ///< Apply the MG smoother only
    bicgstab,  ///< BiCGStab (default choice in this file)
    cg,        ///< Conjugate gradient
    bicgcg,    ///< BiCGStab, falling back to CG
    cgbicg,    ///< CG, falling back to BiCGStab
    hypre,     ///< HYPRE interface (requires AMREX_USE_HYPRE)
    petsc      ///< PETSc interface (requires AMREX_USE_PETSC)
};
34
35struct LPInfo
36{
37 bool do_agglomeration = true;
38 bool do_consolidation = true;
39 bool do_semicoarsening = false;
40 int agg_grid_size = -1;
41 int con_grid_size = -1;
42 int con_ratio = 2;
43 int con_strategy = 3;
44 bool has_metric_term = true;
49 bool deterministic = false;
50
51 LPInfo& setAgglomeration (bool x) noexcept { do_agglomeration = x; return *this; }
52 LPInfo& setConsolidation (bool x) noexcept { do_consolidation = x; return *this; }
53 LPInfo& setSemicoarsening (bool x) noexcept { do_semicoarsening = x; return *this; }
54 LPInfo& setAgglomerationGridSize (int x) noexcept { agg_grid_size = x; return *this; }
55 LPInfo& setConsolidationGridSize (int x) noexcept { con_grid_size = x; return *this; }
56 LPInfo& setConsolidationRatio (int x) noexcept { con_ratio = x; return *this; }
57 LPInfo& setConsolidationStrategy (int x) noexcept { con_strategy = x; return *this; }
58 LPInfo& setMetricTerm (bool x) noexcept { has_metric_term = x; return *this; }
59 LPInfo& setMaxCoarseningLevel (int n) noexcept { max_coarsening_level = n; return *this; }
60 LPInfo& setMaxSemicoarseningLevel (int n) noexcept { max_semicoarsening_level = n; return *this; }
61 LPInfo& setSemicoarseningDirection (int n) noexcept { semicoarsening_direction = n; return *this; }
62 LPInfo& setHiddenDirection (int n) noexcept { hidden_direction = n; return *this; }
63 LPInfo& setDeterministic (bool x) noexcept { deterministic = x; return *this; }
64
65 [[nodiscard]] bool hasHiddenDimension () const noexcept {
66 return hidden_direction >=0 && hidden_direction < AMREX_SPACEDIM;
67 }
68
69 static constexpr int getDefaultAgglomerationGridSize () {
70#ifdef AMREX_USE_GPU
71 return 32;
72#else
73 return AMREX_D_PICK(32, 16, 8);
74#endif
75 }
76
77 static constexpr int getDefaultConsolidationGridSize () {
78#ifdef AMREX_USE_GPU
79 return 32;
80#else
81 return AMREX_D_PICK(32, 16, 8);
82#endif
83 }
84};
85
// Forward declarations of the solver drivers granted friend access below
// (defined in their own headers; only the names are needed here).
92
93template <typename T> class MLMGT;
94template <typename T> class MLCGSolverT;
95template <typename T> class MLPoissonT;
96template <typename T> class MLABecLaplacianT;
97template <typename T> class GMRESMLMGT;
98
// NOTE(review): this is a Doxygen text dump; the leading integers are page
// artifacts, and the "class MLLinOpT" head line (original line 101) was lost
// in extraction -- restore it from the upstream header before compiling.
100template <typename MF>
102{
103public:
104
105 template <typename T> friend class MLMGT;
106 template <typename T> friend class MLCGSolverT;
107 template <typename T> friend class MLPoissonT;
108 template <typename T> friend class MLABecLaplacianT;
109 template <typename T> friend class GMRESMLMGT;
110
111 using MFType = MF;
// NOTE(review): using-declarations for FAB and RT (original lines 112-118)
// appear to have been dropped by the extractor; both names are referenced by
// members below -- confirm against the upstream header.
114
119
// Default-constructible; copy and move are deleted (the operator holds
// per-level hierarchy state that must not be duplicated).
120 MLLinOpT () = default;
121 virtual ~MLLinOpT () = default;
122
123 MLLinOpT (const MLLinOpT<MF>&) = delete;
124 MLLinOpT (MLLinOpT<MF>&&) = delete;
127
// Build the AMR/MG hierarchy for this operator from the given geometry,
// grids, distribution maps and fab factories (implementation further below).
128 void define (const Vector<Geometry>& a_geom,
129 const Vector<BoxArray>& a_grids,
130 const Vector<DistributionMapping>& a_dmap,
131 const LPInfo& a_info,
132 const Vector<FabFactory<FAB> const*>& a_factory,
133 bool eb_limit_coarsening = true);
134
135 [[nodiscard]] virtual std::string name () const { return std::string("Unspecified"); }
136
// NOTE(review): the leading lines of the two setDomainBC overloads and of the
// BC-location setter were lost in extraction; only the trailing parameter
// lines remain below -- restore from the upstream header.
148 const Array<BCType,AMREX_SPACEDIM>& hibc) noexcept;
149
160 const Vector<Array<BCType,AMREX_SPACEDIM> >& hibc) noexcept;
161
172 const Array<Real,AMREX_SPACEDIM>& hi_bcloc) noexcept;
173
// True iff coarse (external) data is needed to fill this operator's BC;
// set in defineBC() when level-0 grids do not cover the whole domain.
181 [[nodiscard]] bool needsCoarseDataForBC () const noexcept { return m_needs_coarse_data_for_bc; }
182
// Supply coarse-level data (at the given coarse/fine ratio) used for the
// coarse/fine boundary of the solve.
204 void setCoarseFineBC (const MF* crse, int crse_ratio,
205 LinOpBCType bc_type = LinOpBCType::Dirichlet) noexcept;
206
207 void setCoarseFineBC (const MF* crse, IntVect const& crse_ratio,
208 LinOpBCType bc_type = LinOpBCType::Dirichlet) noexcept;
209
// Overloads accepting a different MultiFab-like type AMF (disabled when
// AMF is the same type as MF).
210 template <typename AMF, std::enable_if_t<!std::is_same_v<MF,AMF>,int> = 0>
211 void setCoarseFineBC (const AMF* crse, int crse_ratio,
212 LinOpBCType bc_type = LinOpBCType::Dirichlet) noexcept;
213
214 template <typename AMF, std::enable_if_t<!std::is_same_v<MF,AMF>,int> = 0>
215 void setCoarseFineBC (const AMF* crse, IntVect const& crse_ratio,
216 LinOpBCType bc_type = LinOpBCType::Dirichlet) noexcept;
217
// Pure virtual: derived operators consume per-level boundary data; the
// robinbc_* arguments presumably carry the Robin a/b/f coefficient fields
// (see robin_*_raii members) -- confirm semantics upstream.
236 virtual void setLevelBC (int /*amrlev*/, const MF* /*levelbcdata*/,
237 const MF* /*robinbc_a*/ = nullptr,
238 const MF* /*robinbc_b*/ = nullptr,
239 const MF* /*robinbc_f*/ = nullptr) = 0;
240
241 template <typename AMF, std::enable_if_t<!std::is_same_v<MF,AMF> &&
242 IsMultiFabLike_v<AMF>, int> = 0>
243 void setLevelBC (int amrlev, const AMF* levelbcdata,
244 const AMF* robinbc_a = nullptr,
245 const AMF* robinbc_b = nullptr,
246 const AMF* robinbc_f = nullptr);
247
// Runtime knobs and basic queries.
249 void setVerbose (int v) noexcept { verbose = v; }
250
252 void setMaxOrder (int o) noexcept { maxorder = o; }
254 [[nodiscard]] int getMaxOrder () const noexcept { return maxorder; }
255
261 [[nodiscard]] bool getEnforceSingularSolvable () const noexcept { return enforceSingularSolvable; }
262
263 [[nodiscard]] virtual BottomSolver getDefaultBottomSolver () const { return BottomSolver::bicgstab; }
264
// Number of solution components (scalar by default) and ghost cells.
266 [[nodiscard]] virtual int getNComp () const { return 1; }
267
268 [[nodiscard]] virtual int getNGrow (int /*a_lev*/ = 0, int /*mg_lev*/ = 0) const { return 0; }
269
271 [[nodiscard]] virtual bool needsUpdate () const { return false; }
273 virtual void update () {}
274
// Core MG transfer operators: must be provided by derived operators.
284 virtual void restriction (int amrlev, int cmglev, MF& crse, MF& fine) const = 0;
285
294 virtual void interpolation (int amrlev, int fmglev, MF& fine, const MF& crse) const = 0;
295
// Optional hooks: the defaults abort, so a derived class must override them
// before the corresponding feature (FMG cycle / composite solve) is used.
304 virtual void interpAssign (int amrlev, int fmglev, MF& fine, MF& crse) const
305 {
306 amrex::ignore_unused(amrlev, fmglev, fine, crse);
307 amrex::Abort("MLLinOpT::interpAssign: Must be implemented for FMG cycle");
308 }
309
318 virtual void interpolationAmr (int famrlev, MF& fine, const MF& crse,
319 IntVect const& nghost) const
320 {
321 amrex::ignore_unused(famrlev, fine, crse, nghost);
322 amrex::Abort("MLLinOpT::interpolationAmr: Must be implemented for composite solves across multiple AMR levels");
323 }
324
334 virtual void averageDownSolutionRHS (int camrlev, MF& crse_sol, MF& crse_rhs,
335 const MF& fine_sol, const MF& fine_rhs)
336 {
337 amrex::ignore_unused(camrlev, crse_sol, crse_rhs, fine_sol, fine_rhs);
338 amrex::Abort("MLLinOpT::averageDownSolutionRHS: Must be implemented for composite solves across multiple AMR levels");
339 }
340
// Apply the operator (out = L(in)) and smooth: both mandatory.
352 virtual void apply (int amrlev, int mglev, MF& out, MF& in, BCMode bc_mode,
353 StateMode s_mode, const MLMGBndryT<MF>* bndry=nullptr) const = 0;
354
365 virtual void smooth (int amrlev, int mglev, MF& sol, const MF& rhs,
366 bool skip_fillboundary, int niter) const = 0;
367
// No-op by default.
369 virtual void normalize (int amrlev, int mglev, MF& mf) const {
370 amrex::ignore_unused(amrlev, mglev, mf);
371 }
372
// Residual computations: mandatory for any concrete operator.
382 virtual void solutionResidual (int amrlev, MF& resid, MF& x, const MF& b,
383 const MF* crse_bcdata=nullptr) = 0;
384
385 virtual void prepareForFluxes (int /*amrlev*/, const MF* /*crse_bcdata*/ = nullptr) {}
386
398 virtual void correctionResidual (int amrlev, int mglev, MF& resid, MF& x, const MF& b,
399 BCMode bc_mode, const MF* crse_bcdata=nullptr) = 0;
400
// Composite-solve and flux hooks: defaults abort until overridden.
412 virtual void reflux (int crse_amrlev,
413 MF& res, const MF& crse_sol, const MF& crse_rhs,
414 MF& fine_res, MF& fine_sol, const MF& fine_rhs) const
415 {
416 amrex::ignore_unused(crse_amrlev, res, crse_sol, crse_rhs, fine_res,
417 fine_sol, fine_rhs);
418 amrex::Abort("MLLinOpT::reflux: Must be implemented for composite solves across multiple AMR levels");
419 }
420
429 virtual void compFlux (int amrlev, const Array<MF*,AMREX_SPACEDIM>& fluxes,
430 MF& sol, Location loc) const
431 {
432 amrex::ignore_unused(amrlev, fluxes, sol, loc);
433 amrex::Abort("AMReX_MLLinOp::compFlux::How did we get here?");
434 }
435
444 virtual void compGrad (int amrlev, const Array<MF*,AMREX_SPACEDIM>& grad,
445 MF& sol, Location loc) const
446 {
447 amrex::ignore_unused(amrlev, grad, sol, loc);
448 amrex::Abort("AMReX_MLLinOp::compGrad::How did we get here?");
449 }
450
// Optional RHS transformations: all no-ops unless a derived class overrides.
452 virtual void applyMetricTerm (int /*amrlev*/, int /*mglev*/, MF& /*rhs*/) const {}
454 virtual void unapplyMetricTerm (int /*amrlev*/, int /*mglev*/, MF& /*rhs*/) const {}
455
457 virtual void unimposeNeumannBC (int /*amrlev*/, MF& /*rhs*/) const {}
458
460 virtual void applyInhomogNeumannTerm (int /*amrlev*/, MF& /*rhs*/) const {}
461
463 virtual void applyOverset (int /*amlev*/, MF& /*rhs*/) const {}
464
466 [[nodiscard]] virtual bool scaleRHS (int /*amrlev*/, MF* /*rhs*/) const {
467 return false;
468 }
469
// Singular-system helpers: offset is empty / ignored by default.
471 virtual Vector<RT> getSolvabilityOffset (int /*amrlev*/, int /*mglev*/,
472 MF const& /*rhs*/) const { return {}; }
473
475 virtual void fixSolvabilityByOffset (int /*amrlev*/, int /*mglev*/, MF& /*rhs*/,
476 Vector<RT> const& /*offset*/) const {}
477
// Mandatory setup hook plus optional preconditioner/GMRES support.
478 virtual void prepareForSolve () = 0;
479
480 virtual void preparePrecond () {}
481
484 virtual void setDirichletNodesToZero (int /*amrlev*/, int /*mglev*/,
485 MF& /*mf*/) const
486 {
487 amrex::Warning("This function might need to be implemented for GMRES to work with this LinOp.");
488 }
489
491 [[nodiscard]] virtual bool isSingular (int amrlev) const = 0;
493 [[nodiscard]] virtual bool isBottomSingular () const = 0;
494
496 virtual RT xdoty (int amrlev, int mglev, const MF& x, const MF& y, bool local) const = 0;
497
499
500 virtual RT norm2Precond (Vector<MF const*> const& x) const;
501
// N-Solve is unsupported unless a derived class provides makeNLinOp.
502 virtual std::unique_ptr<MLLinOpT<MF>> makeNLinOp (int /*grid_size*/) const
503 {
504 amrex::Abort("MLLinOp::makeNLinOp: N-Solve not supported");
505 return nullptr;
506 }
507
508 virtual void getFluxes (const Vector<Array<MF*,AMREX_SPACEDIM> >& /*a_flux*/,
509 const Vector<MF*>& /*a_sol*/,
510 Location /*a_loc*/) const {
511 amrex::Abort("MLLinOp::getFluxes: How did we get here?");
512 }
513 virtual void getFluxes (const Vector<MF*>& /*a_flux*/,
514 const Vector<MF*>& /*a_sol*/) const {
515 amrex::Abort("MLLinOp::getFluxes: How did we get here?");
516 }
517
518#ifdef AMREX_USE_EB
519 virtual void getEBFluxes (const Vector<MF*>& /*a_flux*/,
520 const Vector<MF*>& /*a_sol*/) const {
521 amrex::Abort("MLLinOp::getEBFluxes: How did we get here?");
522 }
523#endif
524
// External bottom-solver factories (HYPRE/PETSc); defaults abort.
525#if defined(AMREX_USE_HYPRE) && (AMREX_SPACEDIM > 1)
526 [[nodiscard]] virtual std::unique_ptr<Hypre> makeHypre (Hypre::Interface /*hypre_interface*/) const {
527 amrex::Abort("MLLinOp::makeHypre: How did we get here?");
528 return {nullptr};
529 }
530 [[nodiscard]] virtual std::unique_ptr<HypreNodeLap> makeHypreNodeLap(
531 int /*bottom_verbose*/,
532 const std::string& /* options_namespace */) const
533 {
534 amrex::Abort("MLLinOp::makeHypreNodeLap: How did we get here?");
535 return {nullptr};
536 }
537#endif
538
539#if defined(AMREX_USE_PETSC) && (AMREX_SPACEDIM > 1)
540 [[nodiscard]] virtual std::unique_ptr<PETScABecLap> makePETSc () const {
541 amrex::Abort("MLLinOp::makePETSc: How did we get here?");
542 return {nullptr};
543 }
544#endif
545
546 [[nodiscard]] virtual bool supportNSolve () const { return false; }
547
548 virtual void copyNSolveSolution (MF&, MF const&) const {}
549
550 virtual void postSolve (Vector<MF*> const& /*sol*/) const {}
551
552 [[nodiscard]] virtual RT normInf (int amrlev, MF const& mf, bool local) const = 0;
553
554 virtual void averageDownAndSync (Vector<MF>& sol) const = 0;
555
556 virtual void avgDownResAmr (int clev, MF& cres, MF const& fres) const
557 {
558 amrex::ignore_unused(clev, cres, fres);
559 amrex::Abort("MLLinOpT::avgDownResAmr: Must be implemented for composite solves across multiple AMR levels");
560 }
561
562 // This function is needed for FMG cycle, but not V-cycle.
563 virtual void avgDownResMG (int clev, MF& cres, MF const& fres) const;
564
// Toggle the flag consulted while applying BCs in preconditioner mode.
565 virtual void beginPrecondBC () { m_precond_mode = true; }
566 virtual void endPrecondBC () { m_precond_mode = false; }
567
568 [[nodiscard]] bool isMFIterSafe (int amrlev, int mglev1, int mglev2) const;
569
// Hierarchy queries: number of AMR levels, MG levels per AMR level.
571 [[nodiscard]] int NAMRLevels () const noexcept { return m_num_amr_levels; }
572
574 [[nodiscard]] int NMGLevels (int amrlev) const noexcept { return m_num_mg_levels[amrlev]; }
575
576 [[nodiscard]] const Geometry& Geom (int amr_lev, int mglev=0) const noexcept { return m_geom[amr_lev][mglev]; }
577
578 // BC
581 // Need to save the original copy because we change the BC type to
582 // Neumann for inhomogeneous Neumann and Robin.
585
586protected:
587
// NOTE(review): many protected data-member declarations (m_geom, m_grids,
// m_dmap, m_lobc/m_hibc, m_bottom_comm, info, ...) were dropped from this
// Doxygen dump -- only the lines below survived; restore from upstream.
588 static constexpr int mg_coarsen_ratio = 2;
589 static constexpr int mg_box_min_width = 2;
591
593
594 int verbose = 0;
595
596 int maxorder = 3;
597
599
602
604 const MLLinOpT<MF>* m_parent = nullptr;
605
607
608 bool m_do_agglomeration = false;
609 bool m_do_consolidation = false;
610
613
620
// RAII wrapper that frees its MPI communicator on destruction; non-copyable
// and non-movable so the communicator is freed exactly once.
624 struct CommContainer {
625 MPI_Comm comm;
626 CommContainer (MPI_Comm m) noexcept : comm(m) {}
627 CommContainer (const CommContainer&) = delete;
628 CommContainer (CommContainer&&) = delete;
629 void operator= (const CommContainer&) = delete;
630 void operator= (CommContainer&&) = delete;
631 ~CommContainer () { // NOLINT(modernize-use-equals-default)
632#ifdef BL_USE_MPI
633 if (comm != MPI_COMM_NULL) { MPI_Comm_free(&comm); }
634#endif
635 }
636 };
638 std::unique_ptr<CommContainer> m_raii_comm;
639
642
647 const MF* m_coarse_data_for_bc = nullptr;
649
650 bool m_precond_mode = false;
651
// Refinement-ratio accessors (filled by defineGrids).
653 [[nodiscard]] const Vector<int>& AMRRefRatio () const noexcept { return m_amr_ref_ratio; }
654
656 [[nodiscard]] int AMRRefRatio (int amr_lev) const noexcept { return m_amr_ref_ratio[amr_lev]; }
657
658 [[nodiscard]] FabFactory<FAB> const* Factory (int amr_lev, int mglev=0) const noexcept {
659 return m_factory[amr_lev][mglev].get();
660 }
661
// NOTE(review): the return statements of LoBC/HiBC (original lines 663/668)
// were lost in extraction; only the continuation lines remain.
662 [[nodiscard]] GpuArray<BCType,AMREX_SPACEDIM> LoBC (int icomp = 0) const noexcept {
664 m_lobc[icomp][1],
665 m_lobc[icomp][2])}};
666 }
667 [[nodiscard]] GpuArray<BCType,AMREX_SPACEDIM> HiBC (int icomp = 0) const noexcept {
669 m_hibc[icomp][1],
670 m_hibc[icomp][2])}};
671 }
672
// BC-type queries; hasBC scans the *original* (pre-normalization) arrays.
673 [[nodiscard]] bool hasBC (BCType bct) const noexcept;
674 [[nodiscard]] bool hasInhomogNeumannBC () const noexcept;
675 [[nodiscard]] bool hasRobinBC () const noexcept;
676
677 [[nodiscard]] virtual bool supportRobinBC () const noexcept { return false; }
678 [[nodiscard]] virtual bool supportInhomogNeumannBC () const noexcept { return false; }
679
// Bottom solver is "active" on this rank iff it participates in the
// (possibly reduced) bottom communicator; trivially true without MPI.
680#ifdef BL_USE_MPI
681 [[nodiscard]] bool isBottomActive () const noexcept { return m_bottom_comm != MPI_COMM_NULL; }
682#else
683 [[nodiscard]] bool isBottomActive () const noexcept { return true; }
684#endif
685 [[nodiscard]] MPI_Comm BottomCommunicator () const noexcept { return m_bottom_comm; }
686 [[nodiscard]] MPI_Comm Communicator () const noexcept { return m_default_comm; }
687
688 void setCoarseFineBCLocation (const RealVect& cloc) noexcept { m_coarse_bc_loc = cloc; }
689
690 [[nodiscard]] bool doAgglomeration () const noexcept { return m_do_agglomeration; }
691 [[nodiscard]] bool doConsolidation () const noexcept { return m_do_consolidation; }
692 [[nodiscard]] bool doSemicoarsening () const noexcept { return m_do_semicoarsening; }
693
694 [[nodiscard]] bool isCellCentered () const noexcept { return m_ixtype == 0; }
695
// Restriction needs one ghost cell for nodal data, none for cell-centered.
696 [[nodiscard]] virtual IntVect getNGrowVectRestriction () const {
697 return isCellCentered() ? IntVect(0) : IntVect(1);
698 }
699
// Factories for hierarchy-shaped temporaries used by the MG drivers.
700 virtual void make (Vector<Vector<MF> >& mf, IntVect const& ng) const;
701
702 [[nodiscard]] virtual MF make (int amrlev, int mglev, IntVect const& ng) const;
703
704 [[nodiscard]] virtual MF makeAlias (MF const& mf) const;
705
706 [[nodiscard]] virtual MF makeCoarseMG (int amrlev, int mglev, IntVect const& ng) const;
707
708 [[nodiscard]] virtual MF makeCoarseAmr (int famrlev, IntVect const& ng) const;
709
710 [[nodiscard]] virtual std::unique_ptr<FabFactory<FAB> > makeFactory (int /*amrlev*/, int /*mglev*/) const {
711 return std::make_unique<DefaultFabFactory<FAB>>();
712 }
713
714 virtual void resizeMultiGrid (int new_size);
715
// Hidden-dimension support: treat a 3d problem as a 2d slab by dropping one
// direction (selected via LPInfo::hidden_direction).
716 [[nodiscard]] bool hasHiddenDimension () const noexcept { return info.hasHiddenDimension(); }
717 [[nodiscard]] int hiddenDirection () const noexcept { return info.hidden_direction; }
718 [[nodiscard]] Box compactify (Box const& b) const noexcept;
719
// Rebuild an Array4 with the hidden direction removed (the remaining two
// directions shift down); identity when no direction is hidden.
720 template <typename T>
721 [[nodiscard]] Array4<T> compactify (Array4<T> const& a) const noexcept
722 {
723 if (info.hidden_direction == 0) {
724 return Array4<T>(a.dataPtr(), {a.begin.y,a.begin.z,0}, {a.end.y,a.end.z,1}, a.nComp());
725 } else if (info.hidden_direction == 1) {
726 return Array4<T>(a.dataPtr(), {a.begin.x,a.begin.z,0}, {a.end.x,a.end.z,1}, a.nComp());
727 } else if (info.hidden_direction == 2) {
728 return Array4<T>(a.dataPtr(), {a.begin.x,a.begin.y,0}, {a.end.x,a.end.y,1}, a.nComp());
729 } else {
730 return a;
731 }
732 }
733
// Pick the first/second non-hidden component of (dx, dy, dz).
734 template <typename T>
735 [[nodiscard]] T get_d0 (T const& dx, T const& dy, T const&) const noexcept
736 {
737 if (info.hidden_direction == 0) {
738 return dy;
739 } else {
740 return dx;
741 }
742 }
743
744 template <typename T>
745 [[nodiscard]] T get_d1 (T const&, T const& dy, T const& dz) const noexcept
746 {
747 if (info.hidden_direction == 0 || info.hidden_direction == 1) {
748 return dz;
749 } else {
750 return dy;
751 }
752 }
753
754private:
755
756 void defineGrids (const Vector<Geometry>& a_geom,
757 const Vector<BoxArray>& a_grids,
758 const Vector<DistributionMapping>& a_dmap,
759 const Vector<FabFactory<FAB> const*>& a_factory);
760 void defineBC ();
// NOTE(review): the leading lines of the DMap helper declarations
// (makeAgglomeratedDMap/makeConsolidatedDMap/makeSubCommunicator, used by
// defineGrids below) were lost in extraction; only a trailing line remains.
763 int ratio, int strategy);
765
766 virtual void checkPoint (std::string const& /*file_name*/) const {
767 amrex::Abort("MLLinOp:checkPoint: not implemented");
768 }
769
774};
775
// define(): record the LPInfo options (resolving defaulted grid sizes),
// optionally cap coarsening for EB geometry, then build grids and BC state.
776template <typename MF>
777void
// NOTE(review): the signature line "MLLinOpT<MF>::define (const
// Vector<Geometry>& a_geom," (original line 778) was lost in extraction.
779 const Vector<BoxArray>& a_grids,
780 const Vector<DistributionMapping>& a_dmap,
781 const LPInfo& a_info,
782 const Vector<FabFactory<FAB> const*>& a_factory,
783 [[maybe_unused]] bool eb_limit_coarsening)
784{
785 BL_PROFILE("MLLinOp::define()");
786
787 info = a_info;
// NOTE(review): the condition line guarding this GPU branch (original line
// 789, presumably a Gpu launch-region test) was lost in extraction.
788#ifdef AMREX_USE_GPU
790 {
791 if (info.agg_grid_size <= 0) { info.agg_grid_size = AMREX_D_PICK(32, 16, 8); }
792 if (info.con_grid_size <= 0) { info.con_grid_size = AMREX_D_PICK(32, 16, 8); }
793 }
794 else
795#endif
796 {
797 if (info.agg_grid_size <= 0) { info.agg_grid_size = LPInfo::getDefaultAgglomerationGridSize(); }
798 if (info.con_grid_size <= 0) { info.con_grid_size = LPInfo::getDefaultConsolidationGridSize(); }
799 }
800
// With embedded boundaries, the EB factory limits how far we may coarsen.
801#ifdef AMREX_USE_EB
802 if (!a_factory.empty() && eb_limit_coarsening) {
803 const auto *f = dynamic_cast<EBFArrayBoxFactory const*>(a_factory[0]);
804 if (f) {
805 info.max_coarsening_level = std::min(info.max_coarsening_level,
806 f->maxCoarseningLevel());
807 }
808 }
809#endif
810 defineGrids(a_geom, a_grids, a_dmap, a_factory);
811 defineBC();
812}
813
// defineGrids(): construct the full AMR/MG hierarchy of geometries, box
// arrays, distribution maps and factories, including agglomeration or
// consolidation of the coarsest AMR level.
814template <typename MF>
815void
// NOTE(review): the signature line "MLLinOpT<MF>::defineGrids (const
// Vector<Geometry>& a_geom," (original line 816) was lost in extraction.
817 const Vector<BoxArray>& a_grids,
818 const Vector<DistributionMapping>& a_dmap,
819 const Vector<FabFactory<FAB> const*>& a_factory)
820{
821 BL_PROFILE("MLLinOp::defineGrids()");
822
// A non-trivial EB geometry requires wider coarse domains.
823#ifdef AMREX_USE_EB
824 if ( ! a_factory.empty() ) {
825 auto const* ebf = dynamic_cast<EBFArrayBoxFactory const*>(a_factory[0]);
826 if (ebf && !(ebf->isAllRegular())) { // Has non-trivial EB
827 mg_domain_min_width = 4;
828 }
829 }
830#endif
831
// Count only AMR levels that actually have grids.
832 m_num_amr_levels = 0;
833 for (int amrlev = 0; amrlev < a_geom.size(); amrlev++) {
834 if (!a_grids[amrlev].empty()) {
835 m_num_amr_levels++;
836 }
837 }
838
839 m_amr_ref_ratio.resize(m_num_amr_levels);
840 m_num_mg_levels.resize(m_num_amr_levels);
841
842 m_geom.resize(m_num_amr_levels);
843 m_grids.resize(m_num_amr_levels);
844 m_dmap.resize(m_num_amr_levels);
845 m_factory.resize(m_num_amr_levels);
846
847 m_default_comm = ParallelContext::CommunicatorSub();
848
849 const RealBox& rb = a_geom[0].ProbDomain();
850 const int coord = a_geom[0].Coord();
851 const Array<int,AMREX_SPACEDIM>& is_per = a_geom[0].isPeriodic();
852
// Per-direction coarsening constraints; a hidden direction is excluded from
// coarsening and width checks entirely.
853 IntVect mg_coarsen_ratio_v(mg_coarsen_ratio);
854 IntVect mg_box_min_width_v(mg_box_min_width);
855 IntVect mg_domain_min_width_v(mg_domain_min_width);
856 if (hasHiddenDimension()) {
857 AMREX_ASSERT_WITH_MESSAGE(AMREX_SPACEDIM == 3 && m_num_amr_levels == 1,
858 "Hidden direction only supported for 3d level solve");
859 mg_coarsen_ratio_v[info.hidden_direction] = 1;
860 mg_box_min_width_v[info.hidden_direction] = 0;
861 mg_domain_min_width_v[info.hidden_direction] = 0;
862 }
863
// Fine AMR levels: each gets MG levels by repeated coarsening (at most two
// extra) until the next-coarser AMR domain is reached; the accumulated ratio
// also determines the AMR refinement ratio to the level below.
864 // fine amr levels
865 for (int amrlev = m_num_amr_levels-1; amrlev > 0; --amrlev)
866 {
867 m_num_mg_levels[amrlev] = 1;
868 m_geom[amrlev].push_back(a_geom[amrlev]);
869 m_grids[amrlev].push_back(a_grids[amrlev]);
870 m_dmap[amrlev].push_back(a_dmap[amrlev]);
871 if (amrlev < a_factory.size()) {
872 m_factory[amrlev].emplace_back(a_factory[amrlev]->clone());
873 } else {
874 m_factory[amrlev].push_back(std::make_unique<DefaultFabFactory<FAB>>());
875 }
876
877 IntVect rr = mg_coarsen_ratio_v;
878 const Box& dom = a_geom[amrlev].Domain();
879 for (int i = 0; i < 2; ++i)
880 {
881 if (!dom.coarsenable(rr)) { amrex::Abort("MLLinOp: Uncoarsenable domain"); }
882
883 const Box& cdom = amrex::coarsen(dom,rr);
884 if (cdom == a_geom[amrlev-1].Domain()) { break; }
885
886 ++(m_num_mg_levels[amrlev]);
887
888 m_geom[amrlev].emplace_back(cdom, rb, coord, is_per);
889
890 m_grids[amrlev].push_back(a_grids[amrlev]);
891 AMREX_ASSERT(m_grids[amrlev].back().coarsenable(rr));
892 m_grids[amrlev].back().coarsen(rr);
893
894 m_dmap[amrlev].push_back(a_dmap[amrlev]);
895
896 rr *= mg_coarsen_ratio_v;
897 }
898
// With a hidden direction, rr[hidden] stays 1, so read the ratio from a
// coarsened direction instead of component 0.
899#if (AMREX_SPACEDIM > 1)
900 if (hasHiddenDimension()) {
901 m_amr_ref_ratio[amrlev-1] = rr[AMREX_SPACEDIM-info.hidden_direction];
902 } else
903#endif
904 {
905 m_amr_ref_ratio[amrlev-1] = rr[0];
906 }
907 }
908
// Coarsest AMR level starts with the user's grids; more MG levels are added
// by the agglomeration/consolidation logic below.
909 // coarsest amr level
910 m_num_mg_levels[0] = 1;
911 m_geom[0].push_back(a_geom[0]);
912 m_grids[0].push_back(a_grids[0]);
913 m_dmap[0].push_back(a_dmap[0]);
914 if (!a_factory.empty()) {
915 m_factory[0].emplace_back(a_factory[0]->clone());
916 } else {
917 m_factory[0].push_back(std::make_unique<DefaultFabFactory<FAB>>());
918 }
919
// A level "covers the domain" when its boxes hold as many points as the
// (compactified) domain box; coverage propagates upward only while the
// levels below are covered.
920 m_domain_covered.resize(m_num_amr_levels, false);
921 auto npts0 = m_grids[0][0].numPts();
922 m_domain_covered[0] = (npts0 == compactify(m_geom[0][0].Domain()).numPts());
923 for (int amrlev = 1; amrlev < m_num_amr_levels; ++amrlev)
924 {
925 if (!m_domain_covered[amrlev-1]) { break; }
926 m_domain_covered[amrlev] = (m_grids[amrlev][0].numPts() ==
927 compactify(m_geom[amrlev][0].Domain()).numPts());
928 }
929
// Agglomeration is possible only when level 0 has multiple boxes and those
// boxes fill a single rectangular region (the whole domain, or their
// minimal box when it contains exactly npts0 points).
930 Box aggbox;
931 bool aggable = false;
932
933 if (m_grids[0][0].size() > 1 && info.do_agglomeration)
934 {
935 if (m_domain_covered[0])
936 {
937 aggbox = m_geom[0][0].Domain();
938 if (hasHiddenDimension()) {
939 aggbox.makeSlab(hiddenDirection(), m_grids[0][0][0].smallEnd(hiddenDirection()));
940 }
941 aggable = true;
942 }
943 else
944 {
945 aggbox = m_grids[0][0].minimalBox();
946 aggable = (aggbox.numPts() == npts0);
947 }
948 }
949
950 bool agged = false;
951 bool coned = false;
952 int agg_lev = 0, con_lev = 0;
953
954 AMREX_ALWAYS_ASSERT( ! (info.do_semicoarsening && info.hasHiddenDimension())
955 && info.semicoarsening_direction >= -1
956 && info.semicoarsening_direction < AMREX_SPACEDIM );
957
// Agglomeration path: first compute the chain of coarsenable domain/bound
// boxes (honoring semicoarsening), flagging levels whose average box size
// drops below ~agg_grid_size^d; then build grids, regenerating the BoxArray
// (and a fresh DistributionMapping) from the flagged level onward.
958 if (info.do_agglomeration && aggable)
959 {
960 Box dbx = m_geom[0][0].Domain();
961 Box bbx = aggbox;
962 Real const nbxs = static_cast<Real>(m_grids[0][0].size());
963 Real const threshold_npts = static_cast<Real>(AMREX_D_TERM(info.agg_grid_size,
964 *info.agg_grid_size,
965 *info.agg_grid_size));
966 Vector<Box> domainboxes{dbx};
967 Vector<Box> boundboxes{bbx};
968 Vector<int> agg_flag{false};
969 Vector<IntVect> accum_coarsen_ratio{IntVect(1)};
970 int numsclevs = 0;
971
972 for (int lev = 0; lev < info.max_coarsening_level; ++lev)
973 {
974 IntVect rr_level = mg_coarsen_ratio_v;
975 bool const do_semicoarsening_level = info.do_semicoarsening
976 && numsclevs < info.max_semicoarsening_level;
977 if (do_semicoarsening_level
978 && info.semicoarsening_direction != -1)
979 {
980 rr_level[info.semicoarsening_direction] = 1;
981 }
982 IntVect is_coarsenable;
983 for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
984 IntVect rr_dir(1);
985 rr_dir[idim] = rr_level[idim];
986 is_coarsenable[idim] = dbx.coarsenable(rr_dir, mg_domain_min_width_v)
987 && bbx.coarsenable(rr_dir, mg_box_min_width_v)
988 if (!is_coarsenable[idim] && do_semicoarsening_level
989 && info.semicoarsening_direction == -1)
990 {
991 is_coarsenable[idim] = true;
992 rr_level[idim] = 1;
993 }
994 }
995 if (is_coarsenable != IntVect(1) || rr_level == IntVect(1)) {
996 break;
997 }
998 if (do_semicoarsening_level && info.semicoarsening_direction == -1) {
999 // make sure there is at most one direction that is not coarsened
1000 int n_ones = AMREX_D_TERM( static_cast<int>(rr_level[0] == 1),
1001 + static_cast<int>(rr_level[1] == 1),
1002 + static_cast<int>(rr_level[2] == 1));
1003 if (n_ones > 1) { break; }
1004 }
1005 if (rr_level != mg_coarsen_ratio_v) {
1006 ++numsclevs;
1007 }
1008
1009 accum_coarsen_ratio.push_back(accum_coarsen_ratio.back()*rr_level);
1010 domainboxes.push_back(dbx.coarsen(rr_level));
1011 boundboxes.push_back(bbx.coarsen(rr_level));
1012 bool to_agg = (bbx.d_numPts() / nbxs) < 0.999*threshold_npts;
1013 agg_flag.push_back(to_agg);
1014 }
1015
1016 for (int lev = 1, nlevs = static_cast<int>(domainboxes.size()); lev < nlevs; ++lev) {
1017 if (!agged && !agg_flag[lev] &&
1018 a_grids[0].coarsenable(accum_coarsen_ratio[lev], mg_box_min_width_v))
1019 {
1020 m_grids[0].push_back(amrex::coarsen(a_grids[0], accum_coarsen_ratio[lev]));
1021 m_dmap[0].push_back(a_dmap[0]);
1022 } else {
1023 IntVect cr = domainboxes[lev-1].length() / domainboxes[lev].length();
1024 if (!m_grids[0].back().coarsenable(cr)) {
1025 break; // average_down would fail if fine boxarray is not coarsenable.
1026 }
1027 m_grids[0].emplace_back(boundboxes[lev]);
1028 IntVect max_grid_size(info.agg_grid_size);
1029 if (info.do_semicoarsening && info.max_semicoarsening_level >= lev
1030 && info.semicoarsening_direction != -1)
1031 {
1032 IntVect blen = amrex::enclosedCells(boundboxes[lev]).size();
1033 AMREX_D_TERM(int mgs_0 = (max_grid_size[0]+blen[0]-1) / blen[0];,
1034 int mgs_1 = (max_grid_size[1]+blen[1]-1) / blen[1];,
1035 int mgs_2 = (max_grid_size[2]+blen[2]-1) / blen[2]);
1036 max_grid_size[info.semicoarsening_direction]
1037 *= AMREX_D_TERM(mgs_0, *mgs_1, *mgs_2);
1038 }
1039 m_grids[0].back().maxSize(max_grid_size);
1040 m_dmap[0].push_back(DistributionMapping());
1041 if (!agged) {
1042 agged = true;
1043 agg_lev = lev;
1044 }
1045 }
1046 m_geom[0].emplace_back(domainboxes[lev],rb,coord,is_per);
1047 }
1048 }
// Consolidation path (no agglomeration): coarsen the existing BoxArray
// level by level; once the average points-per-rank falls below
// ~con_grid_size^d, switch to an empty DistributionMapping that
// makeConsolidatedDMap will fill with fewer active ranks.
1049 else
1050 {
1051 Long consolidation_threshold = 0;
1052 Real avg_npts = 0.0;
1053 if (info.do_consolidation) {
1054 avg_npts = static_cast<Real>(a_grids[0].d_numPts()) / static_cast<Real>(ParallelContext::NProcsSub());
1055 consolidation_threshold = AMREX_D_TERM(Long(info.con_grid_size),
1056 *info.con_grid_size,
1057 *info.con_grid_size);
1058 }
1059
1060 Box const& dom0 = a_geom[0].Domain();
1061 IntVect rr_vec(1);
1062 int numsclevs = 0;
1063 for (int lev = 0; lev < info.max_coarsening_level; ++lev)
1064 {
1065 IntVect rr_level = mg_coarsen_ratio_v;
1066 bool do_semicoarsening_level = info.do_semicoarsening
1067 && numsclevs < info.max_semicoarsening_level;
1068 if (do_semicoarsening_level
1069 && info.semicoarsening_direction != -1)
1070 {
1071 rr_level[info.semicoarsening_direction] = 1;
1072 }
1073 IntVect is_coarsenable;
1074 for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
1075 IntVect rr_dir(1);
1076 rr_dir[idim] = rr_vec[idim] * rr_level[idim];
1077 is_coarsenable[idim] = dom0.coarsenable(rr_dir, mg_domain_min_width_v)
1078 && a_grids[0].coarsenable(rr_dir, mg_box_min_width_v);
1079 if (!is_coarsenable[idim] && do_semicoarsening_level
1080 && info.semicoarsening_direction == -1)
1081 {
1082 is_coarsenable[idim] = true;
1083 rr_level[idim] = 1;
1084 }
1085 }
1086 if (is_coarsenable != IntVect(1) || rr_level == IntVect(1)) {
1087 break;
1088 }
1089 if (do_semicoarsening_level && info.semicoarsening_direction == -1) {
1090 // make sure there is at most one direction that is not coarsened
1091 int n_ones = AMREX_D_TERM( static_cast<int>(rr_level[0] == 1),
1092 + static_cast<int>(rr_level[1] == 1),
1093 + static_cast<int>(rr_level[2] == 1));
1094 if (n_ones > 1) { break; }
1095 }
1096 if (rr_level != mg_coarsen_ratio_v) {
1097 ++numsclevs;
1098 }
1099 rr_vec *= rr_level;
1100
1101 m_geom[0].emplace_back(amrex::coarsen(dom0, rr_vec), rb, coord, is_per);
1102 m_grids[0].push_back(amrex::coarsen(a_grids[0], rr_vec));
1103
1104 if (info.do_consolidation)
1105 {
1106 if (avg_npts/static_cast<Real>(AMREX_D_TERM(rr_vec[0], *rr_vec[1], *rr_vec[2]))
1107 < Real(0.999)*static_cast<Real>(consolidation_threshold))
1108 {
1109 coned = true;
1110 con_lev = m_dmap[0].size();
1111 m_dmap[0].push_back(DistributionMapping());
1112 }
1113 else
1114 {
1115 m_dmap[0].push_back(m_dmap[0].back());
1116 }
1117 }
1118 else
1119 {
1120 m_dmap[0].push_back(a_dmap[0]);
1121 }
1122 }
1123 }
1124
// Finalize: record level counts and per-level coarsening ratios, build the
// agglomerated/consolidated DistributionMappings and the (possibly reduced)
// bottom communicator, then create factories for the added MG levels.
1125 m_num_mg_levels[0] = m_grids[0].size();
1126
1127 for (int mglev = 0; mglev < m_num_mg_levels[0] - 1; mglev++){
1128 const Box& fine_domain = m_geom[0][mglev].Domain();
1129 const Box& crse_domain = m_geom[0][mglev+1].Domain();
1130 mg_coarsen_ratio_vec.push_back(fine_domain.length()/crse_domain.length());
1131 }
1132
// Special case: with ref ratio 4 and no MG levels built, seed a ratio of 2.
1133 for (int amrlev = 0; amrlev < m_num_amr_levels; ++amrlev) {
1134 if (AMRRefRatio(amrlev) == 4 && mg_coarsen_ratio_vec.empty()) {
1135 mg_coarsen_ratio_vec.push_back(IntVect(2));
1136 }
1137 }
1138
1139 if (agged)
1140 {
1141 makeAgglomeratedDMap(m_grids[0], m_dmap[0]);
1142 }
1143 else if (coned)
1144 {
1145 makeConsolidatedDMap(m_grids[0], m_dmap[0], info.con_ratio, info.con_strategy);
1146 }
1147
// The bottom solver runs on a sub-communicator only when ranks were dropped
// by agglomeration/consolidation.
1148 if (agged || coned)
1149 {
1150 m_bottom_comm = makeSubCommunicator(m_dmap[0].back());
1151 }
1152 else
1153 {
1154 m_bottom_comm = m_default_comm;
1155 }
1156
1157 m_do_agglomeration = agged;
1158 m_do_consolidation = coned;
1159
1160 if (verbose > 1) {
1161 if (agged) {
1162 Print() << "MLLinOp::defineGrids(): agglomerated AMR level 0 starting at MG level "
1163 << agg_lev << " of " << m_num_mg_levels[0] << "\n";
1164 } else if (coned) {
1165 Print() << "MLLinOp::defineGrids(): consolidated AMR level 0 starting at MG level "
1166 << con_lev << " of " << m_num_mg_levels[0]
1167 << " (ratio = " << info.con_ratio << ")" << "\n";
1168 } else {
1169 Print() << "MLLinOp::defineGrids(): no agglomeration or consolidation of AMR level 0\n";
1170 }
1171 }
1172
1173 for (int amrlev = 0; amrlev < m_num_amr_levels; ++amrlev)
1174 {
1175 for (int mglev = 1; mglev < m_num_mg_levels[amrlev]; ++mglev)
1176 {
1177 m_factory[amrlev].emplace_back(makeFactory(amrlev,mglev));
1178 }
1179 }
1180
1181 for (int amrlev = 1; amrlev < m_num_amr_levels; ++amrlev)
1182 {
1183 AMREX_ASSERT_WITH_MESSAGE(m_grids[amrlev][0].coarsenable(m_amr_ref_ratio[amrlev-1]),
1184 "MLLinOp: grids not coarsenable between AMR levels");
1185 }
1186}
1187
// defineBC(): record whether external coarse data is needed for the BC and
// pre-size the per-AMR-level BC data holders.
1188template <typename MF>
1189void
// NOTE(review): the signature line "MLLinOpT<MF>::defineBC ()" (original
// line 1190) was lost in extraction.
1191{
// Coarse data is required iff level-0 grids do not cover the whole domain.
1192 m_needs_coarse_data_for_bc = !m_domain_covered[0];
1193
// Per their names these members presumably keep copies of user-supplied
// level-BC and Robin coefficient data alive -- confirm in the template
// setLevelBC overload defined elsewhere.
1194 levelbc_raii.resize(m_num_amr_levels);
1195 robin_a_raii.resize(m_num_amr_levels);
1196 robin_b_raii.resize(m_num_amr_levels);
1197 robin_f_raii.resize(m_num_amr_levels);
1198}
1199
// Scalar setDomainBC: replicate the single per-direction BC arrays across
// all components and forward to the Vector overload below.
1200template <typename MF>
1201void
// NOTE(review): the signature line "MLLinOpT<MF>::setDomainBC (const
// Array<BCType,AMREX_SPACEDIM>& a_lobc," (original line 1202) was lost in
// extraction.
1203 const Array<BCType,AMREX_SPACEDIM>& a_hibc) noexcept
1204{
1205 const int ncomp = getNComp();
1206 setDomainBC(Vector<Array<BCType,AMREX_SPACEDIM> >(ncomp,a_lobc),
1207 Vector<Array<BCType,AMREX_SPACEDIM> >(ncomp,a_hibc));
1208}
1209
// Vector setDomainBC: store per-component BCs, validate against domain
// periodicity, then normalize inhomogeneous-Neumann/Robin to Neumann for
// the internal solve (the originals are kept in m_*_orig).
1210template <typename MF>
1211void
// NOTE(review): the signature line "MLLinOpT<MF>::setDomainBC (const
// Vector<Array<BCType,AMREX_SPACEDIM> >& a_lobc," (original line 1212) was
// lost in extraction.
1213 const Vector<Array<BCType,AMREX_SPACEDIM> >& a_hibc) noexcept
1214{
1215 const int ncomp = getNComp();
1216 AMREX_ASSERT_WITH_MESSAGE(ncomp == a_lobc.size() && ncomp == a_hibc.size(),
1217 "MLLinOp::setDomainBC: wrong size");
1218 m_lobc = a_lobc;
1219 m_hibc = a_hibc;
1220 m_lobc_orig = m_lobc;
1221 m_hibc_orig = m_hibc;
1222 for (int icomp = 0; icomp < ncomp; ++icomp) {
1223 for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
// Periodic BC must match the geometry's periodicity exactly.
1224 if (m_geom[0][0].isPeriodic(idim)) {
1225 AMREX_ALWAYS_ASSERT(m_lobc[icomp][idim] == BCType::Periodic &&
1226 m_hibc[icomp][idim] == BCType::Periodic);
1227 } else {
1228 AMREX_ALWAYS_ASSERT(m_lobc[icomp][idim] != BCType::Periodic &&
1229 m_hibc[icomp][idim] != BCType::Periodic);
1230 }
1231
1232 if (m_lobc[icomp][idim] == LinOpBCType::inhomogNeumann ||
1233 m_lobc[icomp][idim] == LinOpBCType::Robin)
1234 {
1235 m_lobc[icomp][idim] = LinOpBCType::Neumann;
1236 }
1237
1238 if (m_hibc[icomp][idim] == LinOpBCType::inhomogNeumann ||
1239 m_hibc[icomp][idim] == LinOpBCType::Robin)
1240 {
1241 m_hibc[icomp][idim] = LinOpBCType::Neumann;
1242 }
1243 }
1244 }
1245
// A hidden direction always behaves as Neumann.
1246 if (hasHiddenDimension()) {
1247 const int hd = hiddenDirection();
1248 for (int n = 0; n < ncomp; ++n) {
1249 m_lobc[n][hd] = LinOpBCType::Neumann;
1250 m_hibc[n][hd] = LinOpBCType::Neumann;
1251 }
1252 }
1253
// Reject BC types the concrete operator does not support.
1254 if (hasInhomogNeumannBC() && !supportInhomogNeumannBC()) {
1255 amrex::Abort("Inhomogeneous Neumann BC not supported");
1256 }
1257 if (hasRobinBC() && !supportRobinBC()) {
1258 amrex::Abort("Robin BC not supported");
1259 }
1260}
1261
1262template <typename MF>
1263bool
1264MLLinOpT<MF>::hasBC (BCType bct) const noexcept
1265{
1266 int ncomp = m_lobc_orig.size();
1267 for (int n = 0; n < ncomp; ++n) {
1268 for (int idim = 0; idim <AMREX_SPACEDIM; ++idim) {
1269 if (m_lobc_orig[n][idim] == bct || m_hibc_orig[n][idim] == bct) {
1270 return true;
1271 }
1272 }
1273 }
1274 return false;
1275}
1276
1277template <typename MF>
1278bool
1280{
1281 return hasBC(BCType::inhomogNeumann);
1282}
1283
1284template <typename MF>
1285bool
1287{
1288 return hasBC(BCType::Robin);
1289}
1290
1291template <typename MF>
1292Box
1293MLLinOpT<MF>::compactify (Box const& b) const noexcept
1294{
1295#if (AMREX_SPACEDIM == 3)
1296 if (info.hasHiddenDimension()) {
1297 const auto& lo = b.smallEnd();
1298 const auto& hi = b.bigEnd();
1299 if (info.hidden_direction == 0) {
1300 return Box(IntVect(lo[1],lo[2],0), IntVect(hi[1],hi[2],0), b.ixType());
1301 } else if (info.hidden_direction == 1) {
1302 return Box(IntVect(lo[0],lo[2],0), IntVect(hi[0],hi[2],0), b.ixType());
1303 } else {
1304 return Box(IntVect(lo[0],lo[1],0), IntVect(hi[0],hi[1],0), b.ixType());
1305 }
1306 } else
1307#endif
1308 {
1309 return b;
1310 }
1311}
1312
1313template <typename MF>
1314void
1317{
1318 BL_PROFILE("MLLinOp::makeAgglomeratedDMap");
1319
1320 BL_ASSERT(!dm[0].empty());
1321 for (int i = 1, N=static_cast<int>(ba.size()); i < N; ++i)
1322 {
1323 if (dm[i].empty())
1324 {
1325 const std::vector< std::vector<int> >& sfc = DistributionMapping::makeSFC(ba[i]);
1326
1327 const int nprocs = ParallelContext::NProcsSub();
1328 AMREX_ASSERT(static_cast<int>(sfc.size()) == nprocs);
1329
1330 Vector<int> pmap(ba[i].size());
1331 for (int iproc = 0; iproc < nprocs; ++iproc) {
1332 int grank = ParallelContext::local_to_global_rank(iproc);
1333 for (int ibox : sfc[iproc]) {
1334 pmap[ibox] = grank;
1335 }
1336 }
1337 dm[i].define(std::move(pmap));
1338 }
1339 }
1340}
1341
1342template <typename MF>
1343void
1346 int ratio, int strategy)
1347{
1348 BL_PROFILE("MLLinOp::makeConsolidatedDMap()");
1349
1350 int factor = 1;
1351 BL_ASSERT(!dm[0].empty());
1352 for (int i = 1, N=static_cast<int>(ba.size()); i < N; ++i)
1353 {
1354 if (dm[i].empty())
1355 {
1356 factor *= ratio;
1357
1358 const int nprocs = ParallelContext::NProcsSub();
1359 const auto& pmap_fine = dm[i-1].ProcessorMap();
1360 Vector<int> pmap(pmap_fine.size());
1361 ParallelContext::global_to_local_rank(pmap.data(), pmap_fine.data(), static_cast<int>(pmap.size()));
1362 if (strategy == 1) {
1363 for (auto& x: pmap) {
1364 x /= ratio;
1365 }
1366 } else if (strategy == 2) {
1367 int nprocs_con = static_cast<int>(std::ceil(static_cast<Real>(nprocs)
1368 / static_cast<Real>(factor)));
1369 for (auto& x: pmap) {
1370 auto d = std::div(x,nprocs_con);
1371 x = d.rem;
1372 }
1373 } else if (strategy == 3) {
1374 if (factor == ratio) {
1375 const std::vector< std::vector<int> >& sfc = DistributionMapping::makeSFC(ba[i]);
1376 for (int iproc = 0; iproc < nprocs; ++iproc) {
1377 for (int ibox : sfc[iproc]) {
1378 pmap[ibox] = iproc;
1379 }
1380 }
1381 }
1382 for (auto& x: pmap) {
1383 x /= ratio;
1384 }
1385 }
1386
1388 dm[i].define(std::move(pmap));
1389 } else {
1390 Vector<int> pmap_g(pmap.size());
1391 ParallelContext::local_to_global_rank(pmap_g.data(), pmap.data(), static_cast<int>(pmap.size()));
1392 dm[i].define(std::move(pmap_g));
1393 }
1394 }
1395 }
1396}
1397
1398template <typename MF>
1401{
1402 BL_PROFILE("MLLinOp::makeSubCommunicator()");
1403
1404#ifdef BL_USE_MPI
1405
1406 Vector<int> newgrp_ranks = dm.ProcessorMap();
1407 std::sort(newgrp_ranks.begin(), newgrp_ranks.end());
1408 auto last = std::unique(newgrp_ranks.begin(), newgrp_ranks.end());
1409 newgrp_ranks.erase(last, newgrp_ranks.end());
1410
1411 MPI_Comm newcomm;
1412 MPI_Group defgrp, newgrp;
1413 MPI_Comm_group(m_default_comm, &defgrp);
1415 MPI_Group_incl(defgrp, static_cast<int>(newgrp_ranks.size()), newgrp_ranks.data(), &newgrp);
1416 } else {
1417 Vector<int> local_newgrp_ranks(newgrp_ranks.size());
1418 ParallelContext::global_to_local_rank(local_newgrp_ranks.data(),
1419 newgrp_ranks.data(), static_cast<int>(newgrp_ranks.size()));
1420 MPI_Group_incl(defgrp, static_cast<int>(local_newgrp_ranks.size()), local_newgrp_ranks.data(), &newgrp);
1421 }
1422
1423 MPI_Comm_create(m_default_comm, newgrp, &newcomm);
1424
1425 m_raii_comm = std::make_unique<CommContainer>(newcomm);
1426
1427 MPI_Group_free(&defgrp);
1428 MPI_Group_free(&newgrp);
1429
1430 return newcomm;
1431#else
1433 return m_default_comm;
1434#endif
1435}
1436
1437template <typename MF>
1438void
1440 const Array<Real,AMREX_SPACEDIM>& hi_bcloc) noexcept
1441{
1442 m_domain_bloc_lo = lo_bcloc;
1443 m_domain_bloc_hi = hi_bcloc;
1444}
1445
1446template <typename MF>
1447void
1448MLLinOpT<MF>::setCoarseFineBC (const MF* crse, int crse_ratio,
1449 LinOpBCType bc_type) noexcept
1450{
1451 setCoarseFineBC(crse, IntVect(crse_ratio), bc_type);
1452}
1453
1454template <typename MF>
1455void
1456MLLinOpT<MF>::setCoarseFineBC (const MF* crse, IntVect const& crse_ratio,
1457 LinOpBCType bc_type) noexcept
1458{
1459 m_coarse_data_for_bc = crse;
1460 m_coarse_data_crse_ratio = crse_ratio;
1461 m_coarse_fine_bc_type = bc_type;
1462}
1463
1464template <typename MF>
1465template <typename AMF, std::enable_if_t<!std::is_same_v<MF,AMF>,int>>
1466void
1467MLLinOpT<MF>::setCoarseFineBC (const AMF* crse, int crse_ratio,
1468 LinOpBCType bc_type) noexcept
1469{
1470 setCoarseFineBC(crse, IntVect(crse_ratio), bc_type);
1471}
1472
1473template <typename MF>
1474template <typename AMF, std::enable_if_t<!std::is_same_v<MF,AMF>,int>>
1475void
1476MLLinOpT<MF>::setCoarseFineBC (const AMF* crse, IntVect const& crse_ratio,
1477 LinOpBCType bc_type) noexcept
1478{
1479 m_coarse_data_for_bc_raii = MF(crse->boxArray(), crse->DistributionMap(),
1480 crse->nComp(), crse->nGrowVect());
1481 m_coarse_data_for_bc_raii.LocalCopy(*crse, 0, 0, crse->nComp(),
1482 crse->nGrowVect());
1483 m_coarse_data_for_bc = &m_coarse_data_for_bc_raii;
1484 m_coarse_data_crse_ratio = crse_ratio;
1485 m_coarse_fine_bc_type = bc_type;
1486}
1487
1488template <typename MF>
1489void
1491{
1492 mf.clear();
1493 mf.resize(m_num_amr_levels);
1494 for (int alev = 0; alev < m_num_amr_levels; ++alev) {
1495 mf[alev].resize(m_num_mg_levels[alev]);
1496 for (int mlev = 0; mlev < m_num_mg_levels[alev]; ++mlev) {
1497 mf[alev][mlev] = make(alev, mlev, ng);
1498 }
1499 }
1500}
1501
1502template <typename MF>
1503MF
1504MLLinOpT<MF>::make (int amrlev, int mglev, IntVect const& ng) const
1505{
1506 if constexpr (IsMultiFabLike_v<MF>) {
1507 return MF(amrex::convert(m_grids[amrlev][mglev], m_ixtype),
1508 m_dmap[amrlev][mglev], getNComp(), ng, MFInfo(),
1509 *m_factory[amrlev][mglev]);
1510 } else {
1511 amrex::ignore_unused(amrlev, mglev, ng);
1512 amrex::Abort("MLLinOpT::make: how did we get here?");
1513 return {};
1514 }
1515}
1516
1517template <typename MF>
1518MF
1519MLLinOpT<MF>::makeAlias (MF const& mf) const
1520{
1521 if constexpr (IsMultiFabLike_v<MF>) {
1522 return MF(mf, amrex::make_alias, 0, mf.nComp());
1523 } else {
1525 amrex::Abort("MLLinOpT::makeAlias: how did we get here?");
1526 return {};
1527 }
1528}
1529
1530template <typename MF>
1531MF
1532MLLinOpT<MF>::makeCoarseMG (int amrlev, int mglev, IntVect const& ng) const
1533{
1534 if constexpr (IsMultiFabLike_v<MF>) {
1535 BoxArray cba = m_grids[amrlev][mglev];
1536 IntVect ratio = (amrlev > 0) ? IntVect(2) : mg_coarsen_ratio_vec[mglev];
1537 cba.coarsen(ratio);
1538 cba.convert(m_ixtype);
1539 return MF(cba, m_dmap[amrlev][mglev], getNComp(), ng);
1540 } else {
1541 amrex::ignore_unused(amrlev, mglev, ng);
1542 amrex::Abort("MLLinOpT::makeCoarseMG: how did we get here?");
1543 return {};
1544 }
1545}
1546
1547template <typename MF>
1548MF
1549MLLinOpT<MF>::makeCoarseAmr (int famrlev, IntVect const& ng) const
1550{
1551 if constexpr (IsMultiFabLike_v<MF>) {
1552 BoxArray cba = m_grids[famrlev][0];
1553 IntVect ratio(AMRRefRatio(famrlev-1));
1554 cba.coarsen(ratio);
1555 cba.convert(m_ixtype);
1556 return MF(cba, m_dmap[famrlev][0], getNComp(), ng);
1557 } else {
1558 amrex::ignore_unused(famrlev, ng);
1559 amrex::Abort("MLLinOpT::makeCoarseAmr: how did we get here?");
1560 return {};
1561 }
1562}
1563
1564template <typename MF>
1565void
1567{
1568 if (new_size <= 0 || new_size >= m_num_mg_levels[0]) { return; }
1569
1570 m_num_mg_levels[0] = new_size;
1571
1572 m_geom[0].resize(new_size);
1573 m_grids[0].resize(new_size);
1574 m_dmap[0].resize(new_size);
1575 m_factory[0].resize(new_size);
1576
1577 if (m_bottom_comm != m_default_comm) {
1578 m_bottom_comm = makeSubCommunicator(m_dmap[0].back());
1579 }
1580}
1581
1582template <typename MF>
1583void
1584MLLinOpT<MF>::avgDownResMG (int clev, MF& cres, MF const& fres) const
1585{
1586 amrex::ignore_unused(clev, cres, fres);
1587 if constexpr (amrex::IsFabArray<MF>::value) {
1588 const int ncomp = this->getNComp();
1589#ifdef AMREX_USE_EB
1590 if (!fres.isAllRegular()) {
1591 if constexpr (std::is_same<MF,MultiFab>()) {
1592 amrex::EB_average_down(fres, cres, 0, ncomp,
1593 mg_coarsen_ratio_vec[clev-1]);
1594 } else {
1595 amrex::Abort("EB_average_down only works with MultiFab");
1596 }
1597 } else
1598#endif
1599 {
1600 amrex::average_down(fres, cres, 0, ncomp, mg_coarsen_ratio_vec[clev-1]);
1601 }
1602 } else {
1603 amrex::Abort("For non-FabArray, MLLinOpT<MF>::avgDownResMG should be overridden.");
1604 }
1605}
1606
1607template <typename MF>
1608bool
1609MLLinOpT<MF>::isMFIterSafe (int amrlev, int mglev1, int mglev2) const
1610{
1611 return m_dmap[amrlev][mglev1] == m_dmap[amrlev][mglev2]
1612 && BoxArray::SameRefs(m_grids[amrlev][mglev1], m_grids[amrlev][mglev2]);
1613}
1614
1615template <typename MF>
1616template <typename AMF, std::enable_if_t<!std::is_same_v<MF,AMF> &&
1617 IsMultiFabLike_v<AMF>, int> >
1618void
1619MLLinOpT<MF>::setLevelBC (int amrlev, const AMF* levelbcdata,
1620 const AMF* robinbc_a, const AMF* robinbc_b,
1621 const AMF* robinbc_f)
1622{
1623 const int ncomp = this->getNComp();
1624 if (levelbcdata) {
1625 levelbc_raii[amrlev] = std::make_unique<MF>(levelbcdata->boxArray(),
1626 levelbcdata->DistributionMap(),
1627 ncomp, levelbcdata->nGrowVect());
1628 levelbc_raii[amrlev]->LocalCopy(*levelbcdata, 0, 0, ncomp,
1629 levelbcdata->nGrowVect());
1630 } else {
1631 levelbc_raii[amrlev].reset();
1632 }
1633
1634 if (robinbc_a) {
1635 robin_a_raii[amrlev] = std::make_unique<MF>(robinbc_a->boxArray(),
1636 robinbc_a->DistributionMap(),
1637 ncomp, robinbc_a->nGrowVect());
1638 robin_a_raii[amrlev]->LocalCopy(*robinbc_a, 0, 0, ncomp,
1639 robinbc_a->nGrowVect());
1640 } else {
1641 robin_a_raii[amrlev].reset();
1642 }
1643
1644 if (robinbc_b) {
1645 robin_b_raii[amrlev] = std::make_unique<MF>(robinbc_b->boxArray(),
1646 robinbc_b->DistributionMap(),
1647 ncomp, robinbc_b->nGrowVect());
1648 robin_b_raii[amrlev]->LocalCopy(*robinbc_b, 0, 0, ncomp,
1649 robinbc_b->nGrowVect());
1650 } else {
1651 robin_b_raii[amrlev].reset();
1652 }
1653
1654 if (robinbc_f) {
1655 robin_f_raii[amrlev] = std::make_unique<MF>(robinbc_f->boxArray(),
1656 robinbc_f->DistributionMap(),
1657 ncomp, robinbc_f->nGrowVect());
1658 robin_f_raii[amrlev]->LocalCopy(*robinbc_f, 0, 0, ncomp,
1659 robinbc_f->nGrowVect());
1660 } else {
1661 robin_f_raii[amrlev].reset();
1662 }
1663
1664 this->setLevelBC(amrlev, levelbc_raii[amrlev].get(), robin_a_raii[amrlev].get(),
1665 robin_b_raii[amrlev].get(), robin_f_raii[amrlev].get());
1666}
1667
1668template <typename MF>
1669auto
1671{
1672 AMREX_ALWAYS_ASSERT(NAMRLevels() == 1);
1673 return xdoty(0,0,*x[0],*y[0],false);
1674}
1675
1676template <typename MF>
1677auto
1679{
1680 AMREX_ALWAYS_ASSERT(NAMRLevels() == 1);
1681 auto r = xdoty(0,0,*x[0],*x[0],false);
1682 return std::sqrt(r);
1683}
1684
1685extern template class MLLinOpT<MultiFab>;
1686
1689
1690}
1691
1692#endif
#define BL_PROFILE(a)
Definition AMReX_BLProfiler.H:551
#define BL_ASSERT(EX)
Definition AMReX_BLassert.H:39
#define AMREX_ASSERT_WITH_MESSAGE(EX, MSG)
Definition AMReX_BLassert.H:37
#define AMREX_ASSERT(EX)
Definition AMReX_BLassert.H:38
#define AMREX_ALWAYS_ASSERT(EX)
Definition AMReX_BLassert.H:50
Array4< Real > fine
Definition AMReX_InterpFaceRegister.cpp:90
Array4< Real const > crse
Definition AMReX_InterpFaceRegister.cpp:92
#define AMREX_D_TERM(a, b, c)
Definition AMReX_SPACE.H:172
#define AMREX_D_PICK(a, b, c)
Definition AMReX_SPACE.H:173
#define AMREX_D_DECL(a, b, c)
Definition AMReX_SPACE.H:171
A collection of Boxes stored in an Array.
Definition AMReX_BoxArray.H:567
static bool SameRefs(const BoxArray &lhs, const BoxArray &rhs)
whether two BoxArrays share the same data
Definition AMReX_BoxArray.H:840
BoxArray & coarsen(int refinement_ratio)
Coarsen each Box in the BoxArray to the specified ratio.
Definition AMReX_BoxArray.cpp:672
BoxArray & convert(IndexType typ)
Apply Box::convert(IndexType) to each Box in the BoxArray.
Definition AMReX_BoxArray.cpp:813
__host__ __device__ BoxND & makeSlab(int direction, int slab_index) noexcept
Definition AMReX_Box.H:826
__host__ __device__ Long numPts() const noexcept
Return the number of points contained in the BoxND.
Definition AMReX_Box.H:356
__host__ __device__ IntVectND< dim > length() const noexcept
Return the length of the BoxND.
Definition AMReX_Box.H:154
__host__ __device__ bool coarsenable(const IntVectND< dim > &refrat, const IntVectND< dim > &min_width) const noexcept
Return whether this Box is coarsenable.
Definition AMReX_Box.H:770
__host__ __device__ double d_numPts() const noexcept
Return the number of points contained in the BoxND. This is intended for use only in diagnostic messa...
Definition AMReX_Box.H:376
__host__ __device__ BoxND & coarsen(int ref_ratio) noexcept
Coarsen BoxND by given (positive) refinement ratio. NOTE: if type(dir) = CELL centered: lo <- lo/rati...
Definition AMReX_Box.H:722
__host__ __device__ const IntVectND< dim > & smallEnd() const &noexcept
Return the inclusive lower bound of the box.
Definition AMReX_Box.H:111
Definition AMReX_FabFactory.H:76
Calculates the distribution of FABs to MPI processes.
Definition AMReX_DistributionMapping.H:43
static DistributionMapping makeSFC(const MultiFab &weight, bool sort=true)
Definition AMReX_DistributionMapping.cpp:1768
const Vector< int > & ProcessorMap() const noexcept
Returns a constant reference to the mapping of boxes in the underlying BoxArray to the CPU that holds...
Definition AMReX_DistributionMapping.cpp:47
Definition AMReX_EBFabFactory.H:25
Definition AMReX_FabFactory.H:50
Solve using GMRES with multigrid as preconditioner.
Definition AMReX_GMRES_MLMG.H:22
Rectangular problem domain geometry.
Definition AMReX_Geometry.H:74
Interface
Definition AMReX_Hypre.H:21
__host__ static __device__ constexpr std::size_t size() noexcept
Definition AMReX_IntVect.H:733
__host__ __device__ IntVectND< new_dim > resize(int fill_extra=0) const noexcept
Returns a new IntVectND of size new_dim by either shrinking or expanding this IntVectND.
Definition AMReX_IntVect.H:776
Definition AMReX_MLABecLaplacian.H:15
Definition AMReX_MLCGSolver.H:12
Definition AMReX_MLLinOp.H:102
const MF * m_coarse_data_for_bc
Definition AMReX_MLLinOp.H:647
Vector< Vector< std::unique_ptr< FabFactory< FAB > > > > m_factory
Definition AMReX_MLLinOp.H:618
virtual void avgDownResMG(int clev, MF &cres, MF const &fres) const
Definition AMReX_MLLinOp.H:1584
int NAMRLevels() const noexcept
Return the number of AMR levels.
Definition AMReX_MLLinOp.H:571
bool m_do_consolidation
Definition AMReX_MLLinOp.H:609
bool isCellCentered() const noexcept
Definition AMReX_MLLinOp.H:694
IntVect m_ixtype
Definition AMReX_MLLinOp.H:606
void setVerbose(int v) noexcept
Set verbosity.
Definition AMReX_MLLinOp.H:249
bool isMFIterSafe(int amrlev, int mglev1, int mglev2) const
Definition AMReX_MLLinOp.H:1609
RealVect m_coarse_bc_loc
Definition AMReX_MLLinOp.H:646
virtual bool needsUpdate() const
Does it need update if it's reused?
Definition AMReX_MLLinOp.H:271
virtual void interpolation(int amrlev, int fmglev, MF &fine, const MF &crse) const =0
Add interpolated coarse MG level data to fine MG level data.
virtual void setLevelBC(int, const MF *, const MF *=nullptr, const MF *=nullptr, const MF *=nullptr)=0
Set boundary conditions for given level. For cell-centered solves only.
virtual MF make(int amrlev, int mglev, IntVect const &ng) const
Definition AMReX_MLLinOp.H:1504
FabFactory< FAB > const * Factory(int amr_lev, int mglev=0) const noexcept
Definition AMReX_MLLinOp.H:658
Array< Real, 3 > m_domain_bloc_hi
Definition AMReX_MLLinOp.H:641
T get_d0(T const &dx, T const &dy, T const &) const noexcept
Definition AMReX_MLLinOp.H:735
MPI_Comm BottomCommunicator() const noexcept
Definition AMReX_MLLinOp.H:685
void setEnforceSingularSolvable(bool o) noexcept
Definition AMReX_MLLinOp.H:258
MPI_Comm Communicator() const noexcept
Definition AMReX_MLLinOp.H:686
int mg_domain_min_width
Definition AMReX_MLLinOp.H:590
void setMaxOrder(int o) noexcept
Set order of interpolation at coarse/fine boundary.
Definition AMReX_MLLinOp.H:252
virtual void compGrad(int amrlev, const Array< MF *, 3 > &grad, MF &sol, Location loc) const
Compute gradients of the solution.
Definition AMReX_MLLinOp.H:444
virtual void interpAssign(int amrlev, int fmglev, MF &fine, MF &crse) const
Overwrite fine MG level data with interpolated coarse data.
Definition AMReX_MLLinOp.H:304
virtual std::string name() const
Definition AMReX_MLLinOp.H:135
void setCoarseFineBC(const AMF *crse, IntVect const &crse_ratio, LinOpBCType bc_type=LinOpBCType::Dirichlet) noexcept
Definition AMReX_MLLinOp.H:1476
GpuArray< BCType, 3 > LoBC(int icomp=0) const noexcept
Definition AMReX_MLLinOp.H:662
bool doAgglomeration() const noexcept
Definition AMReX_MLLinOp.H:690
Vector< std::unique_ptr< MF > > levelbc_raii
Definition AMReX_MLLinOp.H:770
MF m_coarse_data_for_bc_raii
Definition AMReX_MLLinOp.H:648
virtual void setDirichletNodesToZero(int, int, MF &) const
Definition AMReX_MLLinOp.H:484
MLLinOpT< MF > & operator=(const MLLinOpT< MF > &)=delete
std::unique_ptr< CommContainer > m_raii_comm
Definition AMReX_MLLinOp.H:638
bool m_do_semicoarsening
Definition AMReX_MLLinOp.H:611
bool hasRobinBC() const noexcept
Definition AMReX_MLLinOp.H:1286
Vector< Array< BCType, 3 > > m_hibc
Definition AMReX_MLLinOp.H:580
virtual void resizeMultiGrid(int new_size)
Definition AMReX_MLLinOp.H:1566
Vector< Vector< BoxArray > > m_grids
Definition AMReX_MLLinOp.H:616
virtual MF makeCoarseAmr(int famrlev, IntVect const &ng) const
Definition AMReX_MLLinOp.H:1549
Vector< std::unique_ptr< MF > > robin_f_raii
Definition AMReX_MLLinOp.H:773
bool m_do_agglomeration
Definition AMReX_MLLinOp.H:608
void setCoarseFineBC(const AMF *crse, int crse_ratio, LinOpBCType bc_type=LinOpBCType::Dirichlet) noexcept
Definition AMReX_MLLinOp.H:1467
virtual MF makeAlias(MF const &mf) const
Definition AMReX_MLLinOp.H:1519
Array4< T > compactify(Array4< T > const &a) const noexcept
Definition AMReX_MLLinOp.H:721
static constexpr int mg_coarsen_ratio
Definition AMReX_MLLinOp.H:588
virtual void copyNSolveSolution(MF &, MF const &) const
Definition AMReX_MLLinOp.H:548
virtual void solutionResidual(int amrlev, MF &resid, MF &x, const MF &b, const MF *crse_bcdata=nullptr)=0
Compute residual for solution.
int getMaxOrder() const noexcept
Get order of interpolation at coarse/fine boundary.
Definition AMReX_MLLinOp.H:254
virtual int getNComp() const
Return number of components.
Definition AMReX_MLLinOp.H:266
virtual void getFluxes(const Vector< MF * > &, const Vector< MF * > &) const
Definition AMReX_MLLinOp.H:513
void defineBC()
Definition AMReX_MLLinOp.H:1190
void setCoarseFineBCLocation(const RealVect &cloc) noexcept
Definition AMReX_MLLinOp.H:688
Vector< int > m_amr_ref_ratio
Definition AMReX_MLLinOp.H:601
MPI_Comm m_default_comm
Definition AMReX_MLLinOp.H:621
static void makeAgglomeratedDMap(const Vector< BoxArray > &ba, Vector< DistributionMapping > &dm)
Definition AMReX_MLLinOp.H:1315
bool isBottomActive() const noexcept
Definition AMReX_MLLinOp.H:683
virtual Vector< RT > getSolvabilityOffset(int, int, MF const &) const
get offset for fixing solvability
Definition AMReX_MLLinOp.H:471
void defineGrids(const Vector< Geometry > &a_geom, const Vector< BoxArray > &a_grids, const Vector< DistributionMapping > &a_dmap, const Vector< FabFactory< FAB > const * > &a_factory)
Definition AMReX_MLLinOp.H:816
virtual void getFluxes(const Vector< Array< MF *, 3 > > &, const Vector< MF * > &, Location) const
Definition AMReX_MLLinOp.H:508
virtual BottomSolver getDefaultBottomSolver() const
Definition AMReX_MLLinOp.H:263
typename FabDataType< MF >::fab_type FAB
Definition AMReX_MLLinOp.H:112
virtual RT normInf(int amrlev, MF const &mf, bool local) const =0
Vector< int > m_num_mg_levels
Definition AMReX_MLLinOp.H:603
bool hasBC(BCType bct) const noexcept
Definition AMReX_MLLinOp.H:1264
static void makeConsolidatedDMap(const Vector< BoxArray > &ba, Vector< DistributionMapping > &dm, int ratio, int strategy)
Definition AMReX_MLLinOp.H:1344
void setDomainBC(const Vector< Array< BCType, 3 > > &lobc, const Vector< Array< BCType, 3 > > &hibc) noexcept
Boundary of the whole domain.
Definition AMReX_MLLinOp.H:1212
Vector< Vector< DistributionMapping > > m_dmap
Definition AMReX_MLLinOp.H:617
int verbose
Definition AMReX_MLLinOp.H:594
virtual void checkPoint(std::string const &) const
Definition AMReX_MLLinOp.H:766
IntVect m_coarse_data_crse_ratio
Definition AMReX_MLLinOp.H:645
virtual void correctionResidual(int amrlev, int mglev, MF &resid, MF &x, const MF &b, BCMode bc_mode, const MF *crse_bcdata=nullptr)=0
Compute residual for the residual-correction form, resid = b - L(x)
const Vector< int > & AMRRefRatio() const noexcept
Return AMR refinement ratios.
Definition AMReX_MLLinOp.H:653
MLLinOpT(MLLinOpT< MF > &&)=delete
Vector< Array< BCType, 3 > > m_hibc_orig
Definition AMReX_MLLinOp.H:584
void setCoarseFineBC(const MF *crse, int crse_ratio, LinOpBCType bc_type=LinOpBCType::Dirichlet) noexcept
Set coarse/fine boundary conditions. For cell-centered solves only.
Definition AMReX_MLLinOp.H:1448
virtual void apply(int amrlev, int mglev, MF &out, MF &in, BCMode bc_mode, StateMode s_mode, const MLMGBndryT< MF > *bndry=nullptr) const =0
Apply the linear operator, out = L(in)
bool needsCoarseDataForBC() const noexcept
Needs coarse data for bc?
Definition AMReX_MLLinOp.H:181
typename FabDataType< MF >::value_type RT
Definition AMReX_MLLinOp.H:113
virtual void update()
Update for reuse.
Definition AMReX_MLLinOp.H:273
Vector< Array< BCType, 3 > > m_lobc_orig
Definition AMReX_MLLinOp.H:583
bool m_precond_mode
Definition AMReX_MLLinOp.H:650
virtual std::unique_ptr< FabFactory< FAB > > makeFactory(int, int) const
Definition AMReX_MLLinOp.H:710
virtual void applyMetricTerm(int, int, MF &) const
apply metric terms if there are any
Definition AMReX_MLLinOp.H:452
virtual void unapplyMetricTerm(int, int, MF &) const
unapply metric terms if there are any
Definition AMReX_MLLinOp.H:454
bool hasHiddenDimension() const noexcept
Definition AMReX_MLLinOp.H:716
virtual bool isBottomSingular() const =0
Is the bottom of MG singular?
virtual void reflux(int crse_amrlev, MF &res, const MF &crse_sol, const MF &crse_rhs, MF &fine_res, MF &fine_sol, const MF &fine_rhs) const
Reflux at AMR coarse/fine boundary.
Definition AMReX_MLLinOp.H:412
virtual IntVect getNGrowVectRestriction() const
Definition AMReX_MLLinOp.H:696
Vector< std::unique_ptr< MF > > robin_b_raii
Definition AMReX_MLLinOp.H:772
virtual void compFlux(int amrlev, const Array< MF *, 3 > &fluxes, MF &sol, Location loc) const
Compute fluxes.
Definition AMReX_MLLinOp.H:429
static constexpr int mg_box_min_width
Definition AMReX_MLLinOp.H:589
virtual RT dotProductPrecond(Vector< MF const * > const &x, Vector< MF const * > const &y) const
Definition AMReX_MLLinOp.H:1670
virtual MF makeCoarseMG(int amrlev, int mglev, IntVect const &ng) const
Definition AMReX_MLLinOp.H:1532
virtual void fixSolvabilityByOffset(int, int, MF &, Vector< RT > const &) const
fix solvability by subtracting offset from RHS
Definition AMReX_MLLinOp.H:475
const Geometry & Geom(int amr_lev, int mglev=0) const noexcept
Definition AMReX_MLLinOp.H:576
virtual void endPrecondBC()
Definition AMReX_MLLinOp.H:566
int hiddenDirection() const noexcept
Definition AMReX_MLLinOp.H:717
void setDomainBCLoc(const Array< Real, 3 > &lo_bcloc, const Array< Real, 3 > &hi_bcloc) noexcept
Set location of domain boundaries.
Definition AMReX_MLLinOp.H:1439
Vector< Array< BCType, 3 > > m_lobc
Definition AMReX_MLLinOp.H:579
Vector< int > m_domain_covered
Definition AMReX_MLLinOp.H:619
const MLLinOpT< MF > * m_parent
Definition AMReX_MLLinOp.H:604
bool doSemicoarsening() const noexcept
Definition AMReX_MLLinOp.H:692
virtual bool supportNSolve() const
Definition AMReX_MLLinOp.H:546
virtual bool supportRobinBC() const noexcept
Definition AMReX_MLLinOp.H:677
virtual void normalize(int amrlev, int mglev, MF &mf) const
Divide mf by the diagonal component of the operator. Used by bicgstab.
Definition AMReX_MLLinOp.H:369
virtual void applyInhomogNeumannTerm(int, MF &) const
Extra terms introduced when we treat inhomogeneous Nuemann BC as homogeneous.
Definition AMReX_MLLinOp.H:460
virtual void avgDownResAmr(int clev, MF &cres, MF const &fres) const
Definition AMReX_MLLinOp.H:556
Vector< Vector< Geometry > > m_geom
first Vector is for amr level and second is mg level
Definition AMReX_MLLinOp.H:615
virtual RT norm2Precond(Vector< MF const * > const &x) const
Definition AMReX_MLLinOp.H:1678
MLLinOpT(const MLLinOpT< MF > &)=delete
virtual bool scaleRHS(int, MF *) const
scale RHS to fix solvability
Definition AMReX_MLLinOp.H:466
virtual void averageDownAndSync(Vector< MF > &sol) const =0
void setCoarseFineBC(const MF *crse, IntVect const &crse_ratio, LinOpBCType bc_type=LinOpBCType::Dirichlet) noexcept
Definition AMReX_MLLinOp.H:1456
MLLinOpT()=default
Box compactify(Box const &b) const noexcept
Definition AMReX_MLLinOp.H:1293
bool m_needs_coarse_data_for_bc
Definition AMReX_MLLinOp.H:643
GpuArray< BCType, 3 > HiBC(int icomp=0) const noexcept
Definition AMReX_MLLinOp.H:667
int maxorder
Definition AMReX_MLLinOp.H:596
void define(const Vector< Geometry > &a_geom, const Vector< BoxArray > &a_grids, const Vector< DistributionMapping > &a_dmap, const LPInfo &a_info, const Vector< FabFactory< FAB > const * > &a_factory, bool eb_limit_coarsening=true)
Definition AMReX_MLLinOp.H:778
Vector< IntVect > mg_coarsen_ratio_vec
Definition AMReX_MLLinOp.H:612
MF MFType
Definition AMReX_MLLinOp.H:111
virtual void preparePrecond()
Definition AMReX_MLLinOp.H:480
virtual ~MLLinOpT()=default
virtual void averageDownSolutionRHS(int camrlev, MF &crse_sol, MF &crse_rhs, const MF &fine_sol, const MF &fine_rhs)
Average-down data from fine AMR level to coarse AMR level.
Definition AMReX_MLLinOp.H:334
LPInfo info
Definition AMReX_MLLinOp.H:592
MPI_Comm makeSubCommunicator(const DistributionMapping &dm)
Definition AMReX_MLLinOp.H:1400
int NMGLevels(int amrlev) const noexcept
Return the number of MG levels at given AMR level.
Definition AMReX_MLLinOp.H:574
T get_d1(T const &, T const &dy, T const &dz) const noexcept
Definition AMReX_MLLinOp.H:745
bool enforceSingularSolvable
Definition AMReX_MLLinOp.H:598
void setLevelBC(int amrlev, const AMF *levelbcdata, const AMF *robinbc_a=nullptr, const AMF *robinbc_b=nullptr, const AMF *robinbc_f=nullptr)
Definition AMReX_MLLinOp.H:1619
virtual RT xdoty(int amrlev, int mglev, const MF &x, const MF &y, bool local) const =0
x dot y, used by the bottom solver
virtual void interpolationAmr(int famrlev, MF &fine, const MF &crse, IntVect const &nghost) const
Interpolation between AMR levels.
Definition AMReX_MLLinOp.H:318
bool doConsolidation() const noexcept
Definition AMReX_MLLinOp.H:691
virtual std::unique_ptr< MLLinOpT< MF > > makeNLinOp(int) const
Definition AMReX_MLLinOp.H:502
virtual bool supportInhomogNeumannBC() const noexcept
Definition AMReX_MLLinOp.H:678
virtual void prepareForFluxes(int, const MF *=nullptr)
Definition AMReX_MLLinOp.H:385
LinOpBCType m_coarse_fine_bc_type
Definition AMReX_MLLinOp.H:644
Array< Real, 3 > m_domain_bloc_lo
Definition AMReX_MLLinOp.H:640
virtual bool isSingular(int amrlev) const =0
Is it singular on given AMR level?
virtual void postSolve(Vector< MF * > const &) const
Definition AMReX_MLLinOp.H:550
int AMRRefRatio(int amr_lev) const noexcept
Return AMR refinement ratio at given AMR level.
Definition AMReX_MLLinOp.H:656
MPI_Comm m_bottom_comm
Definition AMReX_MLLinOp.H:622
virtual void restriction(int amrlev, int cmglev, MF &crse, MF &fine) const =0
Restriction onto coarse MG level.
virtual void beginPrecondBC()
Definition AMReX_MLLinOp.H:565
virtual void applyOverset(int, MF &) const
for overset solver only
Definition AMReX_MLLinOp.H:463
virtual void getEBFluxes(const Vector< MF * > &, const Vector< MF * > &) const
Definition AMReX_MLLinOp.H:519
virtual void unimposeNeumannBC(int, MF &) const
This is needed for our nodal projection solver.
Definition AMReX_MLLinOp.H:457
bool getEnforceSingularSolvable() const noexcept
Definition AMReX_MLLinOp.H:261
virtual void prepareForSolve()=0
virtual void smooth(int amrlev, int mglev, MF &sol, const MF &rhs, bool skip_fillboundary, int niter) const =0
Smooth.
virtual void make(Vector< Vector< MF > > &mf, IntVect const &ng) const
Definition AMReX_MLLinOp.H:1490
bool hasInhomogNeumannBC() const noexcept
Definition AMReX_MLLinOp.H:1279
Vector< std::unique_ptr< MF > > robin_a_raii
Definition AMReX_MLLinOp.H:771
void setDomainBC(const Array< BCType, 3 > &lobc, const Array< BCType, 3 > &hibc) noexcept
Boundary of the whole domain.
Definition AMReX_MLLinOp.H:1202
virtual int getNGrow(int=0, int=0) const
Definition AMReX_MLLinOp.H:268
int m_num_amr_levels
Definition AMReX_MLLinOp.H:600
Definition AMReX_MLMGBndry.H:12
Definition AMReX_MLMG.H:17
Definition AMReX_MLPoisson.H:19
This class provides the user with a few print options.
Definition AMReX_Print.H:35
A Box with real dimensions.
Definition AMReX_RealBox.H:26
This class is a thin wrapper around std::vector. Unlike vector, Vector::operator[] provides bound che...
Definition AMReX_Vector.H:28
Long size() const noexcept
Definition AMReX_Vector.H:53
amrex_real Real
Floating Point Type for Fields.
Definition AMReX_REAL.H:79
amrex_long Long
Definition AMReX_INT.H:30
__host__ __device__ BoxND< dim > coarsen(const BoxND< dim > &b, int ref_ratio) noexcept
Coarsen BoxND by given (positive) coarsening ratio.
Definition AMReX_Box.H:1409
std::array< T, N > Array
Definition AMReX_Array.H:25
bool notInLaunchRegion() noexcept
Definition AMReX_GpuControl.H:93
MPI_Comm CommunicatorSub() noexcept
sub-communicator for current frame
Definition AMReX_ParallelContext.H:70
int local_to_global_rank(int rank) noexcept
translate between local rank and global rank
Definition AMReX_ParallelContext.H:95
int global_to_local_rank(int rank) noexcept
Definition AMReX_ParallelContext.H:98
int NProcsSub() noexcept
number of ranks in current frame
Definition AMReX_ParallelContext.H:74
MPI_Comm Communicator() noexcept
Definition AMReX_ParallelDescriptor.H:212
int MPI_Comm
Definition AMReX_ccse-mpi.H:51
int MPI_Group
Definition AMReX_ccse-mpi.H:52
static constexpr int MPI_COMM_NULL
Definition AMReX_ccse-mpi.H:59
Definition AMReX_Amr.cpp:49
@ make_alias
Definition AMReX_MakeType.H:7
__host__ __device__ void ignore_unused(const Ts &...)
This shuts up the compiler about unused variables.
Definition AMReX.H:138
__host__ __device__ BoxND< dim > convert(const BoxND< dim > &b, const IntVectND< dim > &typ) noexcept
Return a BoxND with different type.
Definition AMReX_Box.H:1558
__host__ __device__ constexpr GpuTupleElement< I, GpuTuple< Ts... > >::type & get(GpuTuple< Ts... > &tup) noexcept
Definition AMReX_Tuple.H:186
void average_down(const MultiFab &S_fine, MultiFab &S_crse, const Geometry &fgeom, const Geometry &cgeom, int scomp, int ncomp, int rr)
Definition AMReX_MultiFabUtil.cpp:336
BoxND< 3 > Box
Box is an alias for amrex::BoxND instantiated with AMREX_SPACEDIM.
Definition AMReX_BaseFwd.H:27
LinOpBCType
Definition AMReX_LO_BCTYPES.H:22
void EB_average_down(const MultiFab &S_fine, MultiFab &S_crse, const MultiFab &vol_fine, const MultiFab &vfrac_fine, int scomp, int ncomp, const IntVect &ratio)
Definition AMReX_EBMultiFabUtil.cpp:336
__host__ __device__ BoxND< dim > enclosedCells(const BoxND< dim > &b, int dir) noexcept
Return a BoxND with CELL based coordinates in direction dir that is enclosed by b....
Definition AMReX_Box.H:1586
BottomSolver
Definition AMReX_MLLinOp.H:31
IntVectND< 3 > IntVect
IntVect is an alias for amrex::IntVectND instantiated with AMREX_SPACEDIM.
Definition AMReX_BaseFwd.H:30
void Warning(const std::string &msg)
Print out warning message to cerr.
Definition AMReX.cpp:236
void Abort(const std::string &msg)
Print out message to cerr and exit via abort().
Definition AMReX.cpp:230
int verbose
Definition AMReX_DistributionMapping.cpp:36
std::unique_ptr< Hypre > makeHypre(const BoxArray &grids, const DistributionMapping &dmap, const Geometry &geom, MPI_Comm comm_, Hypre::Interface interface, const iMultiFab *overset_mask)
Definition AMReX_Hypre.cpp:12
Definition AMReX_Array4.H:61
Definition AMReX_FabDataType.H:9
Fixed-size array that can be used on GPU.
Definition AMReX_Array.H:40
Definition AMReX_TypeTraits.H:29
Definition AMReX_MLLinOp.H:36
LPInfo & setConsolidationRatio(int x) noexcept
Definition AMReX_MLLinOp.H:56
int con_strategy
Definition AMReX_MLLinOp.H:43
bool do_semicoarsening
Definition AMReX_MLLinOp.H:39
bool has_metric_term
Definition AMReX_MLLinOp.H:44
LPInfo & setConsolidationGridSize(int x) noexcept
Definition AMReX_MLLinOp.H:55
int max_semicoarsening_level
Definition AMReX_MLLinOp.H:46
bool hasHiddenDimension() const noexcept
Definition AMReX_MLLinOp.H:65
int con_ratio
Definition AMReX_MLLinOp.H:42
bool do_consolidation
Definition AMReX_MLLinOp.H:38
int con_grid_size
Definition AMReX_MLLinOp.H:41
LPInfo & setSemicoarsening(bool x) noexcept
Definition AMReX_MLLinOp.H:53
LPInfo & setHiddenDirection(int n) noexcept
Definition AMReX_MLLinOp.H:62
LPInfo & setConsolidation(bool x) noexcept
Definition AMReX_MLLinOp.H:52
LPInfo & setSemicoarseningDirection(int n) noexcept
Definition AMReX_MLLinOp.H:61
LPInfo & setMaxSemicoarseningLevel(int n) noexcept
Definition AMReX_MLLinOp.H:60
bool do_agglomeration
Definition AMReX_MLLinOp.H:37
LPInfo & setMetricTerm(bool x) noexcept
Definition AMReX_MLLinOp.H:58
bool deterministic
Enable deterministic mode for GPU operations.
Definition AMReX_MLLinOp.H:49
static constexpr int getDefaultConsolidationGridSize()
Definition AMReX_MLLinOp.H:77
LPInfo & setConsolidationStrategy(int x) noexcept
Definition AMReX_MLLinOp.H:57
int max_coarsening_level
Definition AMReX_MLLinOp.H:45
int agg_grid_size
Definition AMReX_MLLinOp.H:40
static constexpr int getDefaultAgglomerationGridSize()
Definition AMReX_MLLinOp.H:69
int hidden_direction
Definition AMReX_MLLinOp.H:48
LPInfo & setAgglomerationGridSize(int x) noexcept
Definition AMReX_MLLinOp.H:54
LPInfo & setMaxCoarseningLevel(int n) noexcept
Definition AMReX_MLLinOp.H:59
LPInfo & setDeterministic(bool x) noexcept
Definition AMReX_MLLinOp.H:63
LPInfo & setAgglomeration(bool x) noexcept
Definition AMReX_MLLinOp.H:51
int semicoarsening_direction
Definition AMReX_MLLinOp.H:47
Definition AMReX_MLLinOp.H:87
StateMode
Definition AMReX_MLLinOp.H:89
BCMode
Definition AMReX_MLLinOp.H:88
Location
Definition AMReX_MLLinOp.H:90
FabArray memory allocation information.
Definition AMReX_FabArray.H:66