Block-Structured AMR Software Framework
AMReX_MLLinOp.H
Go to the documentation of this file.
1 #ifndef AMREX_ML_LINOP_H_
2 #define AMREX_ML_LINOP_H_
3 #include <AMReX_Config.H>
4 
5 #if defined(AMREX_USE_HYPRE) && (AMREX_SPACEDIM > 1)
6 #include <AMReX_Hypre.H>
7 #include <AMReX_HypreNodeLap.H>
8 #endif
9 
10 #if defined(AMREX_USE_PETSC) && (AMREX_SPACEDIM > 1)
11 #include <AMReX_PETSc.H>
12 #endif
13 
14 #ifdef AMREX_USE_EB
15 #include <AMReX_EBMultiFabUtil.H>
16 #include <AMReX_MultiCutFab.H>
17 #endif
18 
19 #include <AMReX_Any.H>
20 #include <AMReX_BndryRegister.H>
21 #include <AMReX_FabDataType.H>
22 #include <AMReX_MLMGBndry.H>
23 #include <AMReX_MultiFabUtil.H>
24 
25 #include <algorithm>
26 #include <string>
27 
28 namespace amrex {
29 
// Selector for the solver used on the coarsest ("bottom") level of the
// multigrid hierarchy (see getDefaultBottomSolver(), which returns
// BottomSolver::bicgstab).
// NOTE(review): the enumerator list (Doxygen line 31) is elided in this
// extract; the visible body is empty. Confirm the full enumerator set
// against the complete header before editing.
30 enum class BottomSolver : int {
32 };
33 
// Tuning knobs for building the multigrid hierarchy of a linear operator.
// Setters use the fluent (method-chaining) style: each returns *this.
// NOTE(review): the declarations of max_coarsening_level,
// max_semicoarsening_level and semicoarsening_direction (Doxygen lines
// 44-46) are elided in this extract, although the setters below assign
// to them — confirm their types/defaults against the complete header.
34 struct LPInfo
35 {
36  bool do_agglomeration = true;
37  bool do_consolidation = true;
38  bool do_semicoarsening = false;
 // agg_grid_size/con_grid_size <= 0 means "use the default"; they are
 // filled in by MLLinOpT::define() via the getDefault*GridSize() helpers.
39  int agg_grid_size = -1;
40  int con_grid_size = -1;
41  int con_ratio = 2;
42  int con_strategy = 3;
43  bool has_metric_term = true;
 // hidden_direction < 0 (or >= AMREX_SPACEDIM) means no hidden dimension;
 // see hasHiddenDimension() below.
47  int hidden_direction = -1;
48 
49  LPInfo& setAgglomeration (bool x) noexcept { do_agglomeration = x; return *this; }
50  LPInfo& setConsolidation (bool x) noexcept { do_consolidation = x; return *this; }
51  LPInfo& setSemicoarsening (bool x) noexcept { do_semicoarsening = x; return *this; }
52  LPInfo& setAgglomerationGridSize (int x) noexcept { agg_grid_size = x; return *this; }
53  LPInfo& setConsolidationGridSize (int x) noexcept { con_grid_size = x; return *this; }
54  LPInfo& setConsolidationRatio (int x) noexcept { con_ratio = x; return *this; }
55  LPInfo& setConsolidationStrategy (int x) noexcept { con_strategy = x; return *this; }
56  LPInfo& setMetricTerm (bool x) noexcept { has_metric_term = x; return *this; }
57  LPInfo& setMaxCoarseningLevel (int n) noexcept { max_coarsening_level = n; return *this; }
58  LPInfo& setMaxSemicoarseningLevel (int n) noexcept { max_semicoarsening_level = n; return *this; }
59  LPInfo& setSemicoarseningDirection (int n) noexcept { semicoarsening_direction = n; return *this; }
60  LPInfo& setHiddenDirection (int n) noexcept { hidden_direction = n; return *this; }
61 
 // True when hidden_direction names a valid coordinate direction, i.e. a
 // 3d solve that is effectively 2d in the remaining directions.
62  [[nodiscard]] bool hasHiddenDimension () const noexcept {
63  return hidden_direction >=0 && hidden_direction < AMREX_SPACEDIM;
64  }
65 
 // Default agglomeration grid size: 32 on GPU; on CPU it shrinks with
 // dimensionality (32 in 1d, 16 in 2d, 8 in 3d via AMREX_D_PICK).
66  static constexpr int getDefaultAgglomerationGridSize () {
67 #ifdef AMREX_USE_GPU
68  return 32;
69 #else
70  return AMREX_D_PICK(32, 16, 8);
71 #endif
72  }
73 
 // Default consolidation grid size: same policy as agglomeration above.
74  static constexpr int getDefaultConsolidationGridSize () {
75 #ifdef AMREX_USE_GPU
76  return 32;
77 #else
78  return AMREX_D_PICK(32, 16, 8);
79 #endif
80  }
81 };
82 
// Scoping struct for the mode enums used throughout MLLinOpT:
//  - BCMode: whether boundary values are treated as zero (Homogeneous)
//    or taken from user data (Inhomogeneous);
//  - StateMode: whether the vector being operated on is the full Solution
//    or a multigrid Correction.
// NOTE(review): the struct's name line (Doxygen line 83) and at least one
// more member (line 87) are elided in this extract — confirm the name and
// the full member list against the complete header.
84 {
85  enum struct BCMode { Homogeneous, Inhomogeneous };
86  enum struct StateMode { Solution, Correction };
88 };
89 
90 template <typename T> class MLMGT;
91 template <typename T> class MLCGSolverT;
92 template <typename T> class MLPoissonT;
93 template <typename T> class MLABecLaplacianT;
94 template <typename T> class GMRESMLMGT;
95 
// Abstract base class for AMReX linear operators solved by MLMG-family
// multigrid solvers. MF is the "multifab" data type; FAB and RT (the
// scalar/real type) are derived from it via FabDataType<MF>. Concrete
// operators (MLPoissonT, MLABecLaplacianT, ...) implement the pure
// virtual apply/smooth/residual interface; the friend solver classes
// drive the multigrid cycles through the protected interface.
// NOTE(review): this is a Doxygen extract — many member-variable
// declarations and documentation blocks are elided (gaps in the inner
// line numbers). Elision points are flagged below.
96 template <typename MF>
97 class MLLinOpT
98 {
99 public:
100 
 // Solver drivers need access to the protected multigrid interface.
101  template <typename T> friend class MLMGT;
102  template <typename T> friend class MLCGSolverT;
103  template <typename T> friend class MLPoissonT;
104  template <typename T> friend class MLABecLaplacianT;
105  template <typename T> friend class GMRESMLMGT;
106 
107  using MFType = MF;
108  using FAB = typename FabDataType<MF>::fab_type;
109  using RT = typename FabDataType<MF>::value_type;
110 
111  using BCType = LinOpBCType;
115 
 // Non-copyable, non-movable: concrete operators are defined in place
 // via define() and referenced by the solvers.
116  MLLinOpT () = default;
117  virtual ~MLLinOpT () = default;
118 
119  MLLinOpT (const MLLinOpT<MF>&) = delete;
120  MLLinOpT (MLLinOpT<MF>&&) = delete;
123 
 // Build the operator's AMR/MG hierarchy (geometry, grids, distribution
 // maps, fab factories). Implemented out of line below.
124  void define (const Vector<Geometry>& a_geom,
125  const Vector<BoxArray>& a_grids,
126  const Vector<DistributionMapping>& a_dmap,
127  const LPInfo& a_info,
128  const Vector<FabFactory<FAB> const*>& a_factory,
129  bool eb_limit_coarsening = true);
130 
131  [[nodiscard]] virtual std::string name () const { return std::string("Unspecified"); }
132 
 // NOTE(review): the first lines of the two setDomainBC declarations and
 // of setDomainBCLoc (Doxygen lines 133-143, 146-155, 158-167) are
 // elided; only the trailing parameter lines are visible here.
144  const Array<BCType,AMREX_SPACEDIM>& hibc) noexcept;
145 
156  const Vector<Array<BCType,AMREX_SPACEDIM> >& hibc) noexcept;
157 
168  const Array<Real,AMREX_SPACEDIM>& hi_bcloc) noexcept;
169 
 // True when the coarsest AMR level's grids do not cover the whole
 // domain, so Dirichlet data from a coarser (external) level is needed.
177  [[nodiscard]] bool needsCoarseDataForBC () const noexcept { return m_needs_coarse_data_for_bc; }
178 
 // Supply coarse-level data for the coarse/fine boundary of AMR level 0.
200  void setCoarseFineBC (const MF* crse, int crse_ratio,
201  LinOpBCType bc_type = LinOpBCType::Dirichlet) noexcept;
202 
203  void setCoarseFineBC (const MF* crse, IntVect const& crse_ratio,
204  LinOpBCType bc_type = LinOpBCType::Dirichlet) noexcept;
205 
 // Convenience overloads accepting a different multifab type AMF.
206  template <typename AMF, std::enable_if_t<!std::is_same_v<MF,AMF>,int> = 0>
207  void setCoarseFineBC (const AMF* crse, int crse_ratio,
208  LinOpBCType bc_type = LinOpBCType::Dirichlet) noexcept;
209 
210  template <typename AMF, std::enable_if_t<!std::is_same_v<MF,AMF>,int> = 0>
211  void setCoarseFineBC (const AMF* crse, IntVect const& crse_ratio,
212  LinOpBCType bc_type = LinOpBCType::Dirichlet) noexcept;
213 
 // Supply Dirichlet/Robin boundary data for an AMR level. Must be
 // implemented by concrete operators.
232  virtual void setLevelBC (int /*amrlev*/, const MF* /*levelbcdata*/,
233  const MF* /*robinbc_a*/ = nullptr,
234  const MF* /*robinbc_b*/ = nullptr,
235  const MF* /*robinbc_f*/ = nullptr) = 0;
236 
237  template <typename AMF, std::enable_if_t<!std::is_same_v<MF,AMF>,int> = 0>
238  void setLevelBC (int amrlev, const AMF* levelbcdata,
239  const AMF* robinbc_a = nullptr,
240  const AMF* robinbc_b = nullptr,
241  const AMF* robinbc_f = nullptr);
242 
244  void setVerbose (int v) noexcept { verbose = v; }
245 
 // Order of the polynomial interpolation used at domain boundaries.
247  void setMaxOrder (int o) noexcept { maxorder = o; }
249  [[nodiscard]] int getMaxOrder () const noexcept { return maxorder; }
250 
253  void setEnforceSingularSolvable (bool o) noexcept { enforceSingularSolvable = o; }
256  [[nodiscard]] bool getEnforceSingularSolvable () const noexcept { return enforceSingularSolvable; }
257 
258  [[nodiscard]] virtual BottomSolver getDefaultBottomSolver () const { return BottomSolver::bicgstab; }
259 
 // Number of solution components (default: scalar operator).
261  [[nodiscard]] virtual int getNComp () const { return 1; }
262 
263  [[nodiscard]] virtual int getNGrow (int /*a_lev*/ = 0, int /*mg_lev*/ = 0) const { return 0; }
264 
 // Hook for operators whose coefficients change between solves.
266  [[nodiscard]] virtual bool needsUpdate () const { return false; }
268  virtual void update () {}
269 
 // Restrict the fine MG-level residual to the next coarser MG level.
279  virtual void restriction (int amrlev, int cmglev, MF& crse, MF& fine) const = 0;
280 
 // Interpolate the coarse correction and add it to the fine level.
289  virtual void interpolation (int amrlev, int fmglev, MF& fine, const MF& crse) const = 0;
290 
 // Interpolate and assign (not add); only required for FMG cycles, so
 // the default aborts with a diagnostic.
299  virtual void interpAssign (int amrlev, int fmglev, MF& fine, MF& crse) const
300  {
301  amrex::ignore_unused(amrlev, fmglev, fine, crse);
302  amrex::Abort("MLLinOpT::interpAssign: Must be implemented for FMG cycle");
303  }
304 
 // Interpolation across AMR (not MG) levels; required only for composite
 // (multi-level) solves, hence the aborting default.
313  virtual void interpolationAmr (int famrlev, MF& fine, const MF& crse,
314  IntVect const& nghost) const
315  {
316  amrex::ignore_unused(famrlev, fine, crse, nghost);
317  amrex::Abort("MLLinOpT::interpolationAmr: Must be implemented for composite solves across multiple AMR levels");
318  }
319 
329  virtual void averageDownSolutionRHS (int camrlev, MF& crse_sol, MF& crse_rhs,
330  const MF& fine_sol, const MF& fine_rhs)
331  {
332  amrex::ignore_unused(camrlev, crse_sol, crse_rhs, fine_sol, fine_rhs);
333  amrex::Abort("MLLinOpT::averageDownSolutionRHS: Must be implemented for composite solves across multiple AMR levels");
334  }
335 
 // Apply the operator: out = L(in), honoring the given BC and state mode.
347  virtual void apply (int amrlev, int mglev, MF& out, MF& in, BCMode bc_mode,
348  StateMode s_mode, const MLMGBndryT<MF>* bndry=nullptr) const = 0;
349 
 // One smoothing (relaxation) sweep on sol for the given rhs.
359  virtual void smooth (int amrlev, int mglev, MF& sol, const MF& rhs,
360  bool skip_fillboundary=false) const = 0;
361 
 // Optional diagonal normalization; no-op by default.
363  virtual void normalize (int amrlev, int mglev, MF& mf) const {
364  amrex::ignore_unused(amrlev, mglev, mf);
365  }
366 
 // resid = b - L(x) in Solution mode (inhomogeneous BC).
376  virtual void solutionResidual (int amrlev, MF& resid, MF& x, const MF& b,
377  const MF* crse_bcdata=nullptr) = 0;
378 
379  virtual void prepareForFluxes (int /*amrlev*/, const MF* /*crse_bcdata*/ = nullptr) {}
380 
 // resid = b - L(x) in Correction mode (bc_mode selects homogeneous BC).
392  virtual void correctionResidual (int amrlev, int mglev, MF& resid, MF& x, const MF& b,
393  BCMode bc_mode, const MF* crse_bcdata=nullptr) = 0;
394 
 // Flux-register correction of the coarse residual at the coarse/fine
 // interface; composite-solve only, hence the aborting default.
406  virtual void reflux (int crse_amrlev,
407  MF& res, const MF& crse_sol, const MF& crse_rhs,
408  MF& fine_res, MF& fine_sol, const MF& fine_rhs) const
409  {
410  amrex::ignore_unused(crse_amrlev, res, crse_sol, crse_rhs, fine_res,
411  fine_sol, fine_rhs);
412  amrex::Abort("MLLinOpT::reflux: Must be implemented for composite solves across multiple AMR levels");
413  }
414 
423  virtual void compFlux (int /*amrlev*/, const Array<MF*,AMREX_SPACEDIM>& /*fluxes*/,
424  MF& /*sol*/, Location /*loc*/) const
425  {
426  amrex::Abort("AMReX_MLLinOp::compFlux::How did we get here?");
427  }
428 
437  virtual void compGrad (int /*amrlev*/, const Array<MF*,AMREX_SPACEDIM>& /*grad*/,
438  MF& /*sol*/, Location /*loc*/) const
439  {
440  amrex::Abort("AMReX_MLLinOp::compGrad::How did we get here?");
441  }
442 
 // Metric-term hooks for non-Cartesian coordinates; no-ops by default.
444  virtual void applyMetricTerm (int /*amrlev*/, int /*mglev*/, MF& /*rhs*/) const {}
446  virtual void unapplyMetricTerm (int /*amrlev*/, int /*mglev*/, MF& /*rhs*/) const {}
447 
449  virtual void unimposeNeumannBC (int /*amrlev*/, MF& /*rhs*/) const {}
450 
452  virtual void applyInhomogNeumannTerm (int /*amrlev*/, MF& /*rhs*/) const {}
453 
455  virtual void applyOverset (int /*amlev*/, MF& /*rhs*/) const {}
456 
458  virtual void scaleRHS (int /*amrlev*/, MF& /*rhs*/) const {}
459 
 // Offset used to make a singular problem solvable; empty by default.
461  virtual Vector<RT> getSolvabilityOffset (int /*amrlev*/, int /*mglev*/,
462  MF const& /*rhs*/) const { return {}; }
463 
465  virtual void fixSolvabilityByOffset (int /*amrlev*/, int /*mglev*/, MF& /*rhs*/,
466  Vector<RT> const& /*offset*/) const {}
467 
468  virtual void prepareForSolve () = 0;
469 
470  virtual void prepareForGMRES () {}
471 
474  virtual void setDirichletNodesToZero (int /*amrlev*/, int /*mglev*/,
475  MF& /*mf*/) const
476  {
477  amrex::Warning("This function might need to be implemented for GMRES to work with this LinOp.");
478  }
479 
 // Singularity queries: a singular operator (e.g. all-Neumann/periodic)
 // needs solvability fixes.
481  [[nodiscard]] virtual bool isSingular (int amrlev) const = 0;
483  [[nodiscard]] virtual bool isBottomSingular () const = 0;
484 
 // Dot product of x and y; when local is true the MPI reduction is the
 // caller's responsibility.
486  virtual RT xdoty (int amrlev, int mglev, const MF& x, const MF& y, bool local) const = 0;
487 
488  virtual std::unique_ptr<MLLinOpT<MF>> makeNLinOp (int /*grid_size*/) const
489  {
490  amrex::Abort("MLLinOp::makeNLinOp: N-Solve not supported");
491  return nullptr;
492  }
493 
494  virtual void getFluxes (const Vector<Array<MF*,AMREX_SPACEDIM> >& /*a_flux*/,
495  const Vector<MF*>& /*a_sol*/,
496  Location /*a_loc*/) const {
497  amrex::Abort("MLLinOp::getFluxes: How did we get here?");
498  }
499  virtual void getFluxes (const Vector<MF*>& /*a_flux*/,
500  const Vector<MF*>& /*a_sol*/) const {
501  amrex::Abort("MLLinOp::getFluxes: How did we get here?");
502  }
503 
504 #ifdef AMREX_USE_EB
505  virtual void getEBFluxes (const Vector<MF*>& /*a_flux*/,
506  const Vector<MF*>& /*a_sol*/) const {
507  amrex::Abort("MLLinOp::getEBFluxes: How did we get here?");
508  }
509 #endif
510 
 // External bottom-solver factories; overridden by operators that
 // support Hypre/PETSc bottom solves.
511 #if defined(AMREX_USE_HYPRE) && (AMREX_SPACEDIM > 1)
512  [[nodiscard]] virtual std::unique_ptr<Hypre> makeHypre (Hypre::Interface /*hypre_interface*/) const {
513  amrex::Abort("MLLinOp::makeHypre: How did we get here?");
514  return {nullptr};
515  }
516  [[nodiscard]] virtual std::unique_ptr<HypreNodeLap> makeHypreNodeLap(
517  int /*bottom_verbose*/,
518  const std::string& /* options_namespace */) const
519  {
520  amrex::Abort("MLLinOp::makeHypreNodeLap: How did we get here?");
521  return {nullptr};
522  }
523 #endif
524 
525 #if defined(AMREX_USE_PETSC) && (AMREX_SPACEDIM > 1)
526  [[nodiscard]] virtual std::unique_ptr<PETScABecLap> makePETSc () const {
527  amrex::Abort("MLLinOp::makePETSc: How did we get here?");
528  return {nullptr};
529  }
530 #endif
531 
532  [[nodiscard]] virtual bool supportNSolve () const { return false; }
533 
534  virtual void copyNSolveSolution (MF&, MF const&) const {}
535 
536  virtual void postSolve (Vector<MF>& /*sol*/) const {}
537 
538  [[nodiscard]] virtual RT normInf (int amrlev, MF const& mf, bool local) const = 0;
539 
540  virtual void averageDownAndSync (Vector<MF>& sol) const = 0;
541 
542  virtual void avgDownResAmr (int clev, MF& cres, MF const& fres) const
543  {
544  amrex::ignore_unused(clev, cres, fres);
545  amrex::Abort("MLLinOpT::avgDownResAmr: Must be implemented for composite solves across multiple AMR levels");
546  }
547 
548  // This function is needed for FMG cycle, but not V-cycle.
549  virtual void avgDownResMG (int clev, MF& cres, MF const& fres) const;
550 
551  [[nodiscard]] bool isMFIterSafe (int amrlev, int mglev1, int mglev2) const;
552 
 // Hierarchy extents and per-level geometry accessors.
554  [[nodiscard]] int NAMRLevels () const noexcept { return m_num_amr_levels; }
555 
557  [[nodiscard]] int NMGLevels (int amrlev) const noexcept { return m_num_mg_levels[amrlev]; }
558 
559  [[nodiscard]] const Geometry& Geom (int amr_lev, int mglev=0) const noexcept { return m_geom[amr_lev][mglev]; }
560 
561  // BC
564  // Need to save the original copy because we change the BC type to
565  // Neumann for inhomogeneous Neumann and Robin.
 // NOTE(review): the m_lobc/m_hibc (and *_orig) declarations themselves
 // (Doxygen lines 562-567) are elided in this extract.
568 
569 protected:
 // NOTE(review): many protected member-variable declarations in this
 // region (Doxygen lines 573-605, e.g. mg_domain_min_width, info,
 // enforceSingularSolvable, m_num_amr_levels, m_num_mg_levels, m_ixtype,
 // m_geom/m_grids/m_dmap/m_factory, m_default_comm, m_bottom_comm) are
 // elided; they are referenced by the visible code below and by the
 // out-of-line definitions further down.
570 
 // Standard multigrid coarsening ratio and minimum box width.
571  static constexpr int mg_coarsen_ratio = 2;
572  static constexpr int mg_box_min_width = 2;
574 
576 
577  int verbose = 0;
578 
 // Interpolation order used at physical boundaries.
579  int maxorder = 3;
580 
582 
585 
587  const MLLinOpT<MF>* m_parent = nullptr;
588 
590 
591  bool m_do_agglomeration = false;
592  bool m_do_consolidation = false;
593 
594  bool m_do_semicoarsening = false;
596 
603 
 // RAII wrapper that owns a sub-communicator and frees it on destruction.
606  struct CommContainer {
608  CommContainer (MPI_Comm m) noexcept : comm(m) {}
609  CommContainer (const CommContainer&) = delete;
611  void operator= (const CommContainer&) = delete;
612  void operator= (CommContainer&&) = delete;
613  ~CommContainer () { // NOLINT(modernize-use-equals-default)
614 #ifdef BL_USE_MPI
615  if (comm != MPI_COMM_NULL) { MPI_Comm_free(&comm); }
616 #endif
617  }
618  };
619  std::unique_ptr<CommContainer> m_raii_comm;
620 
623 
625  LinOpBCType m_coarse_fine_bc_type = LinOpBCType::Dirichlet;
628  const MF* m_coarse_data_for_bc = nullptr;
630 
 // Refinement ratios between consecutive AMR levels.
632  [[nodiscard]] const Vector<int>& AMRRefRatio () const noexcept { return m_amr_ref_ratio; }
633 
635  [[nodiscard]] int AMRRefRatio (int amr_lev) const noexcept { return m_amr_ref_ratio[amr_lev]; }
636 
637  [[nodiscard]] FabFactory<FAB> const* Factory (int amr_lev, int mglev=0) const noexcept {
638  return m_factory[amr_lev][mglev].get();
639  }
640 
 // NOTE(review): the first line of each return statement in LoBC/HiBC
 // (Doxygen lines 642 and 647, opening the AMREX_D_DECL(...) call) is
 // elided in this extract; only the trailing arguments are visible.
641  [[nodiscard]] GpuArray<BCType,AMREX_SPACEDIM> LoBC (int icomp = 0) const noexcept {
643  m_lobc[icomp][1],
644  m_lobc[icomp][2])}};
645  }
646  [[nodiscard]] GpuArray<BCType,AMREX_SPACEDIM> HiBC (int icomp = 0) const noexcept {
648  m_hibc[icomp][1],
649  m_hibc[icomp][2])}};
650  }
651 
652  [[nodiscard]] bool hasBC (BCType bct) const noexcept;
653  [[nodiscard]] bool hasInhomogNeumannBC () const noexcept;
654  [[nodiscard]] bool hasRobinBC () const noexcept;
655 
656  [[nodiscard]] virtual bool supportRobinBC () const noexcept { return false; }
657  [[nodiscard]] virtual bool supportInhomogNeumannBC () const noexcept { return false; }
658 
 // "Bottom active" = this rank participates in the bottom-solver
 // communicator (always true in serial builds).
659 #ifdef BL_USE_MPI
660  [[nodiscard]] bool isBottomActive () const noexcept { return m_bottom_comm != MPI_COMM_NULL; }
661 #else
662  [[nodiscard]] bool isBottomActive () const noexcept { return true; }
663 #endif
664  [[nodiscard]] MPI_Comm BottomCommunicator () const noexcept { return m_bottom_comm; }
665  [[nodiscard]] MPI_Comm Communicator () const noexcept { return m_default_comm; }
666 
667  void setCoarseFineBCLocation (const RealVect& cloc) noexcept { m_coarse_bc_loc = cloc; }
668 
669  [[nodiscard]] bool doAgglomeration () const noexcept { return m_do_agglomeration; }
670  [[nodiscard]] bool doConsolidation () const noexcept { return m_do_consolidation; }
671  [[nodiscard]] bool doSemicoarsening () const noexcept { return m_do_semicoarsening; }
672 
673  [[nodiscard]] bool isCellCentered () const noexcept { return m_ixtype == 0; }
674 
 // Cell-centered restriction needs no ghost cells; nodal needs one.
675  [[nodiscard]] virtual IntVect getNGrowVectRestriction () const {
676  return isCellCentered() ? IntVect(0) : IntVect(1);
677  }
678 
 // Factories for temporaries shaped like the hierarchy's levels.
679  virtual void make (Vector<Vector<MF> >& mf, IntVect const& ng) const;
680 
681  [[nodiscard]] virtual MF make (int amrlev, int mglev, IntVect const& ng) const;
682 
683  [[nodiscard]] virtual MF makeAlias (MF const& mf) const;
684 
685  [[nodiscard]] virtual MF makeCoarseMG (int amrlev, int mglev, IntVect const& ng) const;
686 
687  [[nodiscard]] virtual MF makeCoarseAmr (int famrlev, IntVect const& ng) const;
688 
689  [[nodiscard]] virtual std::unique_ptr<FabFactory<FAB> > makeFactory (int /*amrlev*/, int /*mglev*/) const {
690  return std::make_unique<DefaultFabFactory<FAB>>();
691  }
692 
693  virtual void resizeMultiGrid (int new_size);
694 
695  [[nodiscard]] bool hasHiddenDimension () const noexcept { return info.hasHiddenDimension(); }
696  [[nodiscard]] int hiddenDirection () const noexcept { return info.hidden_direction; }
697  [[nodiscard]] Box compactify (Box const& b) const noexcept;
698 
 // Project a 3d Array4 onto the 2d plane orthogonal to the hidden
 // direction; identity when there is no hidden dimension.
699  template <typename T>
700  [[nodiscard]] Array4<T> compactify (Array4<T> const& a) const noexcept
701  {
702  if (info.hidden_direction == 0) {
703  return Array4<T>(a.dataPtr(), {a.begin.y,a.begin.z,0}, {a.end.y,a.end.z,1}, a.nComp());
704  } else if (info.hidden_direction == 1) {
705  return Array4<T>(a.dataPtr(), {a.begin.x,a.begin.z,0}, {a.end.x,a.end.z,1}, a.nComp());
706  } else if (info.hidden_direction == 2) {
707  return Array4<T>(a.dataPtr(), {a.begin.x,a.begin.y,0}, {a.end.x,a.end.y,1}, a.nComp());
708  } else {
709  return a;
710  }
711  }
712 
 // First non-hidden coordinate's spacing (dy if x is hidden, else dx).
713  template <typename T>
714  [[nodiscard]] T get_d0 (T const& dx, T const& dy, T const&) const noexcept
715  {
716  if (info.hidden_direction == 0) {
717  return dy;
718  } else {
719  return dx;
720  }
721  }
722 
 // Second non-hidden coordinate's spacing.
723  template <typename T>
724  [[nodiscard]] T get_d1 (T const&, T const& dy, T const& dz) const noexcept
725  {
726  if (info.hidden_direction == 0 || info.hidden_direction == 1) {
727  return dz;
728  } else {
729  return dy;
730  }
731  }
732 
733 private:
734 
 // Builds the AMR/MG hierarchy; defined out of line below.
735  void defineGrids (const Vector<Geometry>& a_geom,
736  const Vector<BoxArray>& a_grids,
737  const Vector<DistributionMapping>& a_dmap,
738  const Vector<FabFactory<FAB> const*>& a_factory);
739  void defineBC ();
 // NOTE(review): the declarations preceding this trailing parameter line
 // (Doxygen lines 740-741, presumably makeAgglomeratedDMap /
 // makeConsolidatedDMap used by defineGrids below) are elided.
742  int ratio, int strategy);
744 
745  virtual void checkPoint (std::string const& /*file_name*/) const {
746  amrex::Abort("MLLinOp:checkPoint: not implemented");
747  }
748 
 // NOTE(review): private member declarations at Doxygen lines 749-752
 // (the levelbc_raii/robin_*_raii holders resized in defineBC below)
 // are elided in this extract.
753 };
754 
// Out-of-line definition of MLLinOpT<MF>::define: stores the LPInfo,
// resolves defaulted agglomeration/consolidation grid sizes, optionally
// clamps the max coarsening level to what the EB factory allows, then
// builds the grid hierarchy and BC bookkeeping.
// NOTE(review): the signature's first line (Doxygen line 757,
// "MLLinOpT<MF>::define (const Vector<Geometry>& a_geom,") is elided in
// this extract, as is the GPU-branch condition at line 768.
755 template <typename MF>
756 void
758  const Vector<BoxArray>& a_grids,
759  const Vector<DistributionMapping>& a_dmap,
760  const LPInfo& a_info,
761  const Vector<FabFactory<FAB> const*>& a_factory,
762  [[maybe_unused]] bool eb_limit_coarsening)
763 {
764  BL_PROFILE("MLLinOp::define()");
765 
766  info = a_info;
 // NOTE(review): the condition guarding this GPU branch (Doxygen line
 // 768) is elided — presumably a Gpu::inLaunchRegion()-style test;
 // confirm against the full header.
767 #ifdef AMREX_USE_GPU
769  {
 // <= 0 means "use default" (see LPInfo::setAgglomerationGridSize).
770  if (info.agg_grid_size <= 0) { info.agg_grid_size = AMREX_D_PICK(32, 16, 8); }
771  if (info.con_grid_size <= 0) { info.con_grid_size = AMREX_D_PICK(32, 16, 8); }
772  }
773  else
774 #endif
775  {
776  if (info.agg_grid_size <= 0) { info.agg_grid_size = LPInfo::getDefaultAgglomerationGridSize(); }
777  if (info.con_grid_size <= 0) { info.con_grid_size = LPInfo::getDefaultConsolidationGridSize(); }
778  }
779 
 // With embedded boundaries, do not coarsen beyond what the EB factory
 // can represent.
780 #ifdef AMREX_USE_EB
781  if (!a_factory.empty() && eb_limit_coarsening) {
782  const auto *f = dynamic_cast<EBFArrayBoxFactory const*>(a_factory[0]);
783  if (f) {
784  info.max_coarsening_level = std::min(info.max_coarsening_level,
785  f->maxCoarseningLevel());
786  }
787  }
788 #endif
789  defineGrids(a_geom, a_grids, a_dmap, a_factory);
790  defineBC();
791 }
792 
// Out-of-line definition of MLLinOpT<MF>::defineGrids: builds the full
// AMR + multigrid hierarchy (geometry, BoxArrays, DistributionMappings,
// fab factories) and decides whether the coarsest AMR level's MG levels
// get agglomerated (boxes merged and redistributed) or consolidated
// (boxes kept, ranks reduced), creating a bottom sub-communicator when so.
// NOTE(review): the signature's first line (Doxygen line 795,
// "MLLinOpT<MF>::defineGrids (const Vector<Geometry>& a_geom,") is
// elided in this extract.
793 template <typename MF>
794 void
796  const Vector<BoxArray>& a_grids,
797  const Vector<DistributionMapping>& a_dmap,
798  const Vector<FabFactory<FAB> const*>& a_factory)
799 {
800  BL_PROFILE("MLLinOp::defineGrids()");
801 
 // Non-trivial EB geometry needs a wider minimum domain on coarse levels.
802 #ifdef AMREX_USE_EB
803  if ( ! a_factory.empty() ) {
804  auto const* ebf = dynamic_cast<EBFArrayBoxFactory const*>(a_factory[0]);
805  if (ebf && !(ebf->isAllRegular())) { // Has non-trivial EB
806  mg_domain_min_width = 4;
807  }
808  }
809 #endif
810 
 // Count only the AMR levels that actually have grids.
811  m_num_amr_levels = 0;
812  for (int amrlev = 0; amrlev < a_geom.size(); amrlev++) {
813  if (!a_grids[amrlev].empty()) {
814  m_num_amr_levels++;
815  }
816  }
817 
818  m_amr_ref_ratio.resize(m_num_amr_levels);
819  m_num_mg_levels.resize(m_num_amr_levels);
820 
821  m_geom.resize(m_num_amr_levels);
822  m_grids.resize(m_num_amr_levels);
823  m_dmap.resize(m_num_amr_levels);
824  m_factory.resize(m_num_amr_levels);
825 
826  m_default_comm = ParallelContext::CommunicatorSub();
827 
828  const RealBox& rb = a_geom[0].ProbDomain();
829  const int coord = a_geom[0].Coord();
830  const Array<int,AMREX_SPACEDIM>& is_per = a_geom[0].isPeriodic();
831 
 // Per-direction coarsening/width constraints; the hidden direction (if
 // any) is never coarsened and has no width requirement.
832  IntVect mg_coarsen_ratio_v(mg_coarsen_ratio);
833  IntVect mg_box_min_width_v(mg_box_min_width);
834  IntVect mg_domain_min_width_v(mg_domain_min_width);
835  if (hasHiddenDimension()) {
836  AMREX_ASSERT_WITH_MESSAGE(AMREX_SPACEDIM == 3 && m_num_amr_levels == 1,
837  "Hidden direction only supported for 3d level solve");
838  mg_coarsen_ratio_v[info.hidden_direction] = 1;
839  mg_box_min_width_v[info.hidden_direction] = 0;
840  mg_domain_min_width_v[info.hidden_direction] = 0;
841  }
842 
 // Fine AMR levels get at most a few MG levels, stopping once the
 // coarsened domain matches the next coarser AMR level's domain.
843  // fine amr levels
844  for (int amrlev = m_num_amr_levels-1; amrlev > 0; --amrlev)
845  {
846  m_num_mg_levels[amrlev] = 1;
847  m_geom[amrlev].push_back(a_geom[amrlev]);
848  m_grids[amrlev].push_back(a_grids[amrlev]);
849  m_dmap[amrlev].push_back(a_dmap[amrlev]);
850  if (amrlev < a_factory.size()) {
851  m_factory[amrlev].emplace_back(a_factory[amrlev]->clone());
852  } else {
853  m_factory[amrlev].push_back(std::make_unique<DefaultFabFactory<FAB>>());
854  }
855 
856  IntVect rr = mg_coarsen_ratio_v;
857  const Box& dom = a_geom[amrlev].Domain();
858  for (int i = 0; i < 2; ++i)
859  {
860  if (!dom.coarsenable(rr)) { amrex::Abort("MLLinOp: Uncoarsenable domain"); }
861 
862  const Box& cdom = amrex::coarsen(dom,rr);
863  if (cdom == a_geom[amrlev-1].Domain()) { break; }
864 
865  ++(m_num_mg_levels[amrlev]);
866 
867  m_geom[amrlev].emplace_back(cdom, rb, coord, is_per);
868 
869  m_grids[amrlev].push_back(a_grids[amrlev]);
870  AMREX_ASSERT(m_grids[amrlev].back().coarsenable(rr));
871  m_grids[amrlev].back().coarsen(rr);
872 
873  m_dmap[amrlev].push_back(a_dmap[amrlev]);
874 
875  rr *= mg_coarsen_ratio_v;
876  }
877 
 // The accumulated ratio rr now equals the AMR refinement ratio; with
 // a hidden direction, read it from a non-hidden component.
878 #if (AMREX_SPACEDIM > 1)
879  if (hasHiddenDimension()) {
880  m_amr_ref_ratio[amrlev-1] = rr[AMREX_SPACEDIM-info.hidden_direction];
881  } else
882 #endif
883  {
884  m_amr_ref_ratio[amrlev-1] = rr[0];
885  }
886  }
887 
888  // coarsest amr level
889  m_num_mg_levels[0] = 1;
890  m_geom[0].push_back(a_geom[0]);
891  m_grids[0].push_back(a_grids[0]);
892  m_dmap[0].push_back(a_dmap[0]);
893  if (!a_factory.empty()) {
894  m_factory[0].emplace_back(a_factory[0]->clone());
895  } else {
896  m_factory[0].push_back(std::make_unique<DefaultFabFactory<FAB>>());
897  }
898 
 // A level is "covered" when its grids tile the whole (compactified)
 // domain; coverage propagates upward only while lower levels are covered.
899  m_domain_covered.resize(m_num_amr_levels, false);
900  auto npts0 = m_grids[0][0].numPts();
901  m_domain_covered[0] = (npts0 == compactify(m_geom[0][0].Domain()).numPts());
902  for (int amrlev = 1; amrlev < m_num_amr_levels; ++amrlev)
903  {
904  if (!m_domain_covered[amrlev-1]) { break; }
905  m_domain_covered[amrlev] = (m_grids[amrlev][0].numPts() ==
906  compactify(m_geom[amrlev][0].Domain()).numPts());
907  }
908 
 // Agglomeration is possible only when the level-0 boxes exactly fill a
 // single box (the domain, or their minimal bounding box).
909  Box aggbox;
910  bool aggable = false;
911 
912  if (m_grids[0][0].size() > 1 && info.do_agglomeration)
913  {
914  if (m_domain_covered[0])
915  {
916  aggbox = m_geom[0][0].Domain();
917  if (hasHiddenDimension()) {
918  aggbox.makeSlab(hiddenDirection(), m_grids[0][0][0].smallEnd(hiddenDirection()));
919  }
920  aggable = true;
921  }
922  else
923  {
924  aggbox = m_grids[0][0].minimalBox();
925  aggable = (aggbox.numPts() == npts0);
926  }
927  }
928 
929  bool agged = false;
930  bool coned = false;
931  int agg_lev = 0, con_lev = 0;
932 
933  AMREX_ALWAYS_ASSERT( ! (info.do_semicoarsening && info.hasHiddenDimension())
934  && info.semicoarsening_direction >= -1
935  && info.semicoarsening_direction < AMREX_SPACEDIM );
936 
 // Branch 1: agglomeration path. First plan the coarsening schedule
 // (domainboxes/boundboxes/agg_flag), then build the MG levels from it.
937  if (info.do_agglomeration && aggable)
938  {
939  Box dbx = m_geom[0][0].Domain();
940  Box bbx = aggbox;
941  Real const nbxs = static_cast<Real>(m_grids[0][0].size());
942  Real const threshold_npts = static_cast<Real>(AMREX_D_TERM(info.agg_grid_size,
943  *info.agg_grid_size,
944  *info.agg_grid_size));
945  Vector<Box> domainboxes{dbx};
946  Vector<Box> boundboxes{bbx};
947  Vector<int> agg_flag{false};
948  Vector<IntVect> accum_coarsen_ratio{IntVect(1)};
949  int numsclevs = 0;
950 
951  for (int lev = 0; lev < info.max_coarsening_level; ++lev)
952  {
953  IntVect rr_level = mg_coarsen_ratio_v;
954  bool const do_semicoarsening_level = info.do_semicoarsening
955  && numsclevs < info.max_semicoarsening_level;
956  if (do_semicoarsening_level
957  && info.semicoarsening_direction != -1)
958  {
959  rr_level[info.semicoarsening_direction] = 1;
960  }
 // Per-direction coarsenability test; with auto semicoarsening
 // (direction == -1) a failing direction is simply not coarsened.
961  IntVect is_coarsenable;
962  for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
963  IntVect rr_dir(1);
964  rr_dir[idim] = rr_level[idim];
965  is_coarsenable[idim] = dbx.coarsenable(rr_dir, mg_domain_min_width_v)
966  && bbx.coarsenable(rr_dir, mg_box_min_width_v);
967  if (!is_coarsenable[idim] && do_semicoarsening_level
968  && info.semicoarsening_direction == -1)
969  {
970  is_coarsenable[idim] = true;
971  rr_level[idim] = 1;
972  }
973  }
974  if (is_coarsenable != IntVect(1) || rr_level == IntVect(1)) {
975  break;
976  }
977  if (do_semicoarsening_level && info.semicoarsening_direction == -1) {
978  // make sure there is at most one direction that is not coarsened
979  int n_ones = AMREX_D_TERM( static_cast<int>(rr_level[0] == 1),
980  + static_cast<int>(rr_level[1] == 1),
981  + static_cast<int>(rr_level[2] == 1));
982  if (n_ones > 1) { break; }
983  }
984  if (rr_level != mg_coarsen_ratio_v) {
985  ++numsclevs;
986  }
987 
988  accum_coarsen_ratio.push_back(accum_coarsen_ratio.back()*rr_level);
989  domainboxes.push_back(dbx.coarsen(rr_level));
990  boundboxes.push_back(bbx.coarsen(rr_level));
 // Agglomerate once the average points-per-box falls below (most of)
 // the target grid-size volume.
991  bool to_agg = (bbx.d_numPts() / nbxs) < 0.999*threshold_npts;
992  agg_flag.push_back(to_agg);
993  }
994 
995  for (int lev = 1, nlevs = static_cast<int>(domainboxes.size()); lev < nlevs; ++lev) {
996  if (!agged && !agg_flag[lev] &&
997  a_grids[0].coarsenable(accum_coarsen_ratio[lev], mg_box_min_width_v))
998  {
 // Keep the original decomposition, just coarsened.
999  m_grids[0].push_back(amrex::coarsen(a_grids[0], accum_coarsen_ratio[lev]));
1000  m_dmap[0].push_back(a_dmap[0]);
1001  } else {
1002  IntVect cr = domainboxes[lev-1].length() / domainboxes[lev].length();
1003  if (!m_grids[0].back().coarsenable(cr)) {
1004  break; // average_down would fail if fine boxarray is not coarsenable.
1005  }
 // Rebuild the level as one big box chopped to max_grid_size; its
 // DistributionMapping is filled later by makeAgglomeratedDMap.
1006  m_grids[0].emplace_back(boundboxes[lev]);
1007  IntVect max_grid_size(info.agg_grid_size);
1008  if (info.do_semicoarsening && info.max_semicoarsening_level >= lev
1009  && info.semicoarsening_direction != -1)
1010  {
1011  IntVect blen = amrex::enclosedCells(boundboxes[lev]).size();
1012  AMREX_D_TERM(int mgs_0 = (max_grid_size[0]+blen[0]-1) / blen[0];,
1013  int mgs_1 = (max_grid_size[1]+blen[1]-1) / blen[1];,
1014  int mgs_2 = (max_grid_size[2]+blen[2]-1) / blen[2]);
1015  max_grid_size[info.semicoarsening_direction]
1016  *= AMREX_D_TERM(mgs_0, *mgs_1, *mgs_2);
1017  }
1018  m_grids[0].back().maxSize(max_grid_size);
1019  m_dmap[0].push_back(DistributionMapping());
1020  if (!agged) {
1021  agged = true;
1022  agg_lev = lev;
1023  }
1024  }
1025  m_geom[0].emplace_back(domainboxes[lev],rb,coord,is_per);
1026  }
1027  }
 // Branch 2: no agglomeration — coarsen the original BoxArray in place,
 // optionally consolidating onto fewer ranks when boxes get small.
1028  else
1029  {
1030  Long consolidation_threshold = 0;
1031  Real avg_npts = 0.0;
1032  if (info.do_consolidation) {
1033  avg_npts = static_cast<Real>(a_grids[0].d_numPts()) / static_cast<Real>(ParallelContext::NProcsSub());
1034  consolidation_threshold = AMREX_D_TERM(Long(info.con_grid_size),
1035  *info.con_grid_size,
1036  *info.con_grid_size);
1037  }
1038 
1039  Box const& dom0 = a_geom[0].Domain();
1040  IntVect rr_vec(1);
1041  int numsclevs = 0;
1042  for (int lev = 0; lev < info.max_coarsening_level; ++lev)
1043  {
1044  IntVect rr_level = mg_coarsen_ratio_v;
1045  bool do_semicoarsening_level = info.do_semicoarsening
1046  && numsclevs < info.max_semicoarsening_level;
1047  if (do_semicoarsening_level
1048  && info.semicoarsening_direction != -1)
1049  {
1050  rr_level[info.semicoarsening_direction] = 1;
1051  }
1052  IntVect is_coarsenable;
1053  for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
1054  IntVect rr_dir(1);
1055  rr_dir[idim] = rr_vec[idim] * rr_level[idim];
1056  is_coarsenable[idim] = dom0.coarsenable(rr_dir, mg_domain_min_width_v)
1057  && a_grids[0].coarsenable(rr_dir, mg_box_min_width_v);
1058  if (!is_coarsenable[idim] && do_semicoarsening_level
1059  && info.semicoarsening_direction == -1)
1060  {
1061  is_coarsenable[idim] = true;
1062  rr_level[idim] = 1;
1063  }
1064  }
1065  if (is_coarsenable != IntVect(1) || rr_level == IntVect(1)) {
1066  break;
1067  }
1068  if (do_semicoarsening_level && info.semicoarsening_direction == -1) {
1069  // make sure there is at most one direction that is not coarsened
1070  int n_ones = AMREX_D_TERM( static_cast<int>(rr_level[0] == 1),
1071  + static_cast<int>(rr_level[1] == 1),
1072  + static_cast<int>(rr_level[2] == 1));
1073  if (n_ones > 1) { break; }
1074  }
1075  if (rr_level != mg_coarsen_ratio_v) {
1076  ++numsclevs;
1077  }
1078  rr_vec *= rr_level;
1079 
1080  m_geom[0].emplace_back(amrex::coarsen(dom0, rr_vec), rb, coord, is_per);
1081  m_grids[0].push_back(amrex::coarsen(a_grids[0], rr_vec));
1082 
1083  if (info.do_consolidation)
1084  {
 // Consolidate once the average points-per-rank drops below (most
 // of) the target; the empty DM is filled by makeConsolidatedDMap.
1085  if (avg_npts/static_cast<Real>(AMREX_D_TERM(rr_vec[0], *rr_vec[1], *rr_vec[2]))
1086  < Real(0.999)*static_cast<Real>(consolidation_threshold))
1087  {
1088  coned = true;
1089  con_lev = m_dmap[0].size();
1090  m_dmap[0].push_back(DistributionMapping());
1091  }
1092  else
1093  {
1094  m_dmap[0].push_back(m_dmap[0].back());
1095  }
1096  }
1097  else
1098  {
1099  m_dmap[0].push_back(a_dmap[0]);
1100  }
1101  }
1102  }
1103 
1104  m_num_mg_levels[0] = m_grids[0].size();
1105 
 // Record the actual per-level MG coarsening ratios (may differ per
 // direction under semicoarsening).
1106  for (int mglev = 0; mglev < m_num_mg_levels[0] - 1; mglev++){
1107  const Box& fine_domain = m_geom[0][mglev].Domain();
1108  const Box& crse_domain = m_geom[0][mglev+1].Domain();
1109  mg_coarsen_ratio_vec.push_back(fine_domain.length()/crse_domain.length());
1110  }
1111 
1112  for (int amrlev = 0; amrlev < m_num_amr_levels; ++amrlev) {
1113  if (AMRRefRatio(amrlev) == 4 && mg_coarsen_ratio_vec.empty()) {
1114  mg_coarsen_ratio_vec.push_back(IntVect(2));
1115  }
1116  }
1117 
 // Fill the DistributionMappings that were left empty above.
1118  if (agged)
1119  {
1120  makeAgglomeratedDMap(m_grids[0], m_dmap[0]);
1121  }
1122  else if (coned)
1123  {
1124  makeConsolidatedDMap(m_grids[0], m_dmap[0], info.con_ratio, info.con_strategy);
1125  }
1126 
 // Bottom solves run on a sub-communicator when ranks were dropped.
1127  if (agged || coned)
1128  {
1129  m_bottom_comm = makeSubCommunicator(m_dmap[0].back());
1130  }
1131  else
1132  {
1133  m_bottom_comm = m_default_comm;
1134  }
1135 
1136  m_do_agglomeration = agged;
1137  m_do_consolidation = coned;
1138 
1139  if (verbose > 1) {
1140  if (agged) {
1141  Print() << "MLLinOp::defineGrids(): agglomerated AMR level 0 starting at MG level "
1142  << agg_lev << " of " << m_num_mg_levels[0] << "\n";
1143  } else if (coned) {
1144  Print() << "MLLinOp::defineGrids(): consolidated AMR level 0 starting at MG level "
1145  << con_lev << " of " << m_num_mg_levels[0]
1146  << " (ratio = " << info.con_ratio << ")" << "\n";
1147  } else {
1148  Print() << "MLLinOp::defineGrids(): no agglomeration or consolidation of AMR level 0\n";
1149  }
1150  }
1151 
 // Factories for the derived (coarsened) MG levels.
1152  for (int amrlev = 0; amrlev < m_num_amr_levels; ++amrlev)
1153  {
1154  for (int mglev = 1; mglev < m_num_mg_levels[amrlev]; ++mglev)
1155  {
1156  m_factory[amrlev].emplace_back(makeFactory(amrlev,mglev));
1157  }
1158  }
1159 
1160  for (int amrlev = 1; amrlev < m_num_amr_levels; ++amrlev)
1161  {
1162  AMREX_ASSERT_WITH_MESSAGE(m_grids[amrlev][0].coarsenable(m_amr_ref_ratio[amrlev-1]),
1163  "MLLinOp: grids not coarsenable between AMR levels");
1164  }
1165 }
1166 
// defineBC: finish boundary-condition bookkeeping after the grids are defined.
// NOTE(review): the signature line ("MLLinOpT<MF>::defineBC ()", original line
// 1169) was dropped by this rendering; the index entry "void defineBC()"
// confirms the name.
1167 template <typename MF>
1168 void
1170 {
     // Coarse data for the level-0 BC is needed only when the level-0 grids
     // do not cover the whole domain.
1171  m_needs_coarse_data_for_bc = !m_domain_covered[0];
1172 
     // Per-AMR-level holders for copies of user-supplied level BC data and
     // Robin coefficients (filled later by the templated setLevelBC overload).
1173  levelbc_raii.resize(m_num_amr_levels);
1174  robin_a_raii.resize(m_num_amr_levels);
1175  robin_b_raii.resize(m_num_amr_levels);
1176  robin_f_raii.resize(m_num_amr_levels);
1177 }
1178 
// setDomainBC (single-Array overload): broadcast the same lo/hi domain BCs to
// every component, then forward to the per-component Vector overload.
// NOTE(review): the first signature line (original line 1181) was dropped by
// this rendering.
1179 template <typename MF>
1180 void
1182  const Array<BCType,AMREX_SPACEDIM>& a_hibc) noexcept
1183 {
1184  const int ncomp = getNComp();
     // Replicate the one Array ncomp times so all components share these BCs.
1185  setDomainBC(Vector<Array<BCType,AMREX_SPACEDIM> >(ncomp,a_lobc),
1186  Vector<Array<BCType,AMREX_SPACEDIM> >(ncomp,a_hibc));
1187 }
1188 
// setDomainBC (per-component overload): validate and store the domain boundary
// conditions for every component and direction.
// NOTE(review): the first signature line (original line 1191) was dropped by
// this rendering.
1189 template <typename MF>
1190 void
1192  const Vector<Array<BCType,AMREX_SPACEDIM> >& a_hibc) noexcept
1193 {
1194  const int ncomp = getNComp();
1195  AMREX_ASSERT_WITH_MESSAGE(ncomp == a_lobc.size() && ncomp == a_hibc.size(),
1196  "MLLinOp::setDomainBC: wrong size");
     // Keep both the working copy (m_lobc/m_hibc, remapped below) and the
     // original user request (m_lobc_orig/m_hibc_orig, used by hasBC()).
1197  m_lobc = a_lobc;
1198  m_hibc = a_hibc;
1199  m_lobc_orig = m_lobc;
1200  m_hibc_orig = m_hibc;
1201  for (int icomp = 0; icomp < ncomp; ++icomp) {
1202  for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) {
     // Periodic BCs must match the Geometry's periodicity exactly,
     // on both sides of each periodic direction.
1203  if (m_geom[0][0].isPeriodic(idim)) {
1204  AMREX_ALWAYS_ASSERT(m_lobc[icomp][idim] == BCType::Periodic &&
1205  m_hibc[icomp][idim] == BCType::Periodic);
1206  } else {
1207  AMREX_ALWAYS_ASSERT(m_lobc[icomp][idim] != BCType::Periodic &&
1208  m_hibc[icomp][idim] != BCType::Periodic);
1209  }
1210 
     // In the working copy, inhomogeneous-Neumann and Robin BCs are
     // treated as (homogeneous) Neumann; the inhomogeneous/Robin parts
     // are handled elsewhere (see hasInhomogNeumannBC/hasRobinBC checks
     // below, which consult the *_orig arrays).
1211  if (m_lobc[icomp][idim] == LinOpBCType::inhomogNeumann ||
1212  m_lobc[icomp][idim] == LinOpBCType::Robin)
1213  {
1214  m_lobc[icomp][idim] = LinOpBCType::Neumann;
1215  }
1216 
1217  if (m_hibc[icomp][idim] == LinOpBCType::inhomogNeumann ||
1218  m_hibc[icomp][idim] == LinOpBCType::Robin)
1219  {
1220  m_hibc[icomp][idim] = LinOpBCType::Neumann;
1221  }
1222  }
1223  }
1224 
     // Along a hidden (inactive) dimension, force Neumann on both sides.
1225  if (hasHiddenDimension()) {
1226  const int hd = hiddenDirection();
1227  for (int n = 0; n < ncomp; ++n) {
1228  m_lobc[n][hd] = LinOpBCType::Neumann;
1229  m_hibc[n][hd] = LinOpBCType::Neumann;
1230  }
1231  }
1232 
     // Fail fast if the concrete operator cannot handle the requested BCs.
1233  if (hasInhomogNeumannBC() && !supportInhomogNeumannBC()) {
1234  amrex::Abort("Inhomogeneous Neumann BC not supported");
1235  }
1236  if (hasRobinBC() && !supportRobinBC()) {
1237  amrex::Abort("Robin BC not supported");
1238  }
1239 }
1240 
// hasBC: return true if any component, direction, or side of the domain was
// originally given BC type bct. Checks the *_orig arrays, i.e. the user's
// request before setDomainBC remapped inhomogNeumann/Robin to Neumann.
1241 template <typename MF>
1242 bool
1243 MLLinOpT<MF>::hasBC (BCType bct) const noexcept
1244 {
1245  int ncomp = m_lobc_orig.size();
1246  for (int n = 0; n < ncomp; ++n) {
1247  for (int idim = 0; idim <AMREX_SPACEDIM; ++idim) {
1248  if (m_lobc_orig[n][idim] == bct || m_hibc_orig[n][idim] == bct) {
1249  return true;
1250  }
1251  }
1252  }
1253  return false;
1254 }
1255 
// hasInhomogNeumannBC: true if any side of any component was originally given
// an inhomogeneous Neumann BC. NOTE(review): the signature line (original line
// 1258) was dropped by this rendering; the index lists it as
// "bool hasInhomogNeumannBC() const noexcept".
1256 template <typename MF>
1257 bool
1259 {
1260  return hasBC(BCType::inhomogNeumann);
1261 }
1262 
// hasRobinBC: true if any side of any component was originally given a Robin
// boundary condition (checked against the pre-remap *_orig arrays via hasBC).
1263 template <typename MF>
1264 bool
1265 MLLinOpT<MF>::hasRobinBC () const noexcept
1266 {
1267  return hasBC(BCType::Robin);
1268 }
1269 
// compactify: in 3D with a hidden (inactive) dimension, project Box b onto the
// plane of the two active directions, placing the result in the k=0 slab while
// preserving the box's index type. In 2D/1D, or without a hidden dimension,
// the box is returned unchanged.
1270 template <typename MF>
1271 Box
1272 MLLinOpT<MF>::compactify (Box const& b) const noexcept
1273 {
1274 #if (AMREX_SPACEDIM == 3)
1275  if (info.hasHiddenDimension()) {
1276  const auto& lo = b.smallEnd();
1277  const auto& hi = b.bigEnd();
     // Keep the two non-hidden directions; collapse the third to index 0.
1278  if (info.hidden_direction == 0) {
1279  return Box(IntVect(lo[1],lo[2],0), IntVect(hi[1],hi[2],0), b.ixType());
1280  } else if (info.hidden_direction == 1) {
1281  return Box(IntVect(lo[0],lo[2],0), IntVect(hi[0],hi[2],0), b.ixType());
1282  } else {
1283  return Box(IntVect(lo[0],lo[1],0), IntVect(hi[0],hi[1],0), b.ixType());
1284  }
1285  } else
1286 #endif
1287  {
1288  return b;
1289  }
1290 }
1291 
// makeAgglomeratedDMap: for every MG level i >= 1 whose DistributionMapping is
// still empty, build one from a space-filling-curve (SFC) partition of ba[i],
// translating each local rank in the sub-communicator to its global rank.
// NOTE(review): the signature lines (original lines 1294-1295) were dropped by
// this rendering; the index lists it as
// "static void makeAgglomeratedDMap(const Vector<BoxArray>&, Vector<DistributionMapping>&)".
1292 template <typename MF>
1293 void
1296 {
1297  BL_PROFILE("MLLinOp::makeAgglomeratedDMap");
1298 
     // Level 0's dmap must already exist; only missing coarser levels are built.
1299  BL_ASSERT(!dm[0].empty());
1300  for (int i = 1, N=static_cast<int>(ba.size()); i < N; ++i)
1301  {
1302  if (dm[i].empty())
1303  {
     // makeSFC returns, per local rank, the list of box indices it owns.
1304  const std::vector< std::vector<int> >& sfc = DistributionMapping::makeSFC(ba[i]);
1305 
1306  const int nprocs = ParallelContext::NProcsSub();
1307  AMREX_ASSERT(static_cast<int>(sfc.size()) == nprocs);
1308 
     // pmap[ibox] = global rank owning box ibox.
1309  Vector<int> pmap(ba[i].size());
1310  for (int iproc = 0; iproc < nprocs; ++iproc) {
1311  int grank = ParallelContext::local_to_global_rank(iproc);
1312  for (int ibox : sfc[iproc]) {
1313  pmap[ibox] = grank;
1314  }
1315  }
1316  dm[i].define(std::move(pmap));
1317  }
1318  }
1319 }
1320 
// makeConsolidatedDMap: for every MG level i >= 1 whose DistributionMapping is
// empty, derive one from the finer level's map, shrinking the set of owning
// ranks by `ratio` per level according to `strategy`:
//   1: divide each owner rank by ratio,
//   2: fold ranks modulo ceil(nprocs/factor),
//   3: re-layout by SFC on the first consolidated level, then divide by ratio.
// NOTE(review): the signature lines (original lines 1323-1324) and an interior
// line (original line 1366 -- the condition whose else-branch follows at 1368,
// presumably distinguishing the sub-communicator from the global communicator)
// were dropped by this rendering.
1321 template <typename MF>
1322 void
1325  int ratio, int strategy)
1326 {
1327  BL_PROFILE("MLLinOp::makeConsolidatedDMap()");
1328 
     // factor = cumulative consolidation ratio accumulated across levels.
1329  int factor = 1;
1330  BL_ASSERT(!dm[0].empty());
1331  for (int i = 1, N=static_cast<int>(ba.size()); i < N; ++i)
1332  {
1333  if (dm[i].empty())
1334  {
1335  factor *= ratio;
1336 
1337  const int nprocs = ParallelContext::NProcsSub();
     // Start from the finer level's owner map, in local-rank numbering.
1338  const auto& pmap_fine = dm[i-1].ProcessorMap();
1339  Vector<int> pmap(pmap_fine.size());
1340  ParallelContext::global_to_local_rank(pmap.data(), pmap_fine.data(), static_cast<int>(pmap.size()));
1341  if (strategy == 1) {
1342  for (auto& x: pmap) {
1343  x /= ratio;
1344  }
1345  } else if (strategy == 2) {
1346  int nprocs_con = static_cast<int>(std::ceil(static_cast<Real>(nprocs)
1347  / static_cast<Real>(factor)))
1348  for (auto& x: pmap) {
1349  auto d = std::div(x,nprocs_con);
1350  x = d.rem;
1351  }
1352  } else if (strategy == 3) {
     // On the first consolidated level only, redistribute along an SFC
     // before dividing, to keep spatial locality of ownership.
1353  if (factor == ratio) {
1354  const std::vector< std::vector<int> >& sfc = DistributionMapping::makeSFC(ba[i]);
1355  for (int iproc = 0; iproc < nprocs; ++iproc) {
1356  for (int ibox : sfc[iproc]) {
1357  pmap[ibox] = iproc;
1358  }
1359  }
1360  }
1361  for (auto& x: pmap) {
1362  x /= ratio;
1363  }
1364  }
1365 
1367  dm[i].define(std::move(pmap));
1368  } else {
     // Translate local ranks back to global ranks before defining the map.
1369  Vector<int> pmap_g(pmap.size());
1370  ParallelContext::local_to_global_rank(pmap_g.data(), pmap.data(), static_cast<int>(pmap.size()));
1371  dm[i].define(std::move(pmap_g));
1372  }
1373  }
1374  }
1375 }
1376 
// makeSubCommunicator: create an MPI communicator containing exactly the ranks
// that own boxes in dm (deduplicated), for use as the bottom-solve
// communicator. Ownership of the new communicator is kept in m_raii_comm so it
// is freed with the operator. Without MPI, the default communicator is
// returned unchanged.
// NOTE(review): this rendering dropped the signature line (original line 1379),
// the condition at original line 1393 (whose else-branch at 1395 translates
// global to local ranks -- presumably a check for the sub- vs global
// communicator), and original line 1411 (likely an ignore_unused in the
// non-MPI branch).
1377 template <typename MF>
1378 MPI_Comm
1380 {
1381  BL_PROFILE("MLLinOp::makeSubCommunicator()");
1382 
1383 #ifdef BL_USE_MPI
1384 
     // Unique, sorted list of ranks appearing in the distribution map.
1385  Vector<int> newgrp_ranks = dm.ProcessorMap();
1386  std::sort(newgrp_ranks.begin(), newgrp_ranks.end());
1387  auto last = std::unique(newgrp_ranks.begin(), newgrp_ranks.end());
1388  newgrp_ranks.erase(last, newgrp_ranks.end());
1389 
1390  MPI_Comm newcomm;
1391  MPI_Group defgrp, newgrp;
1392  MPI_Comm_group(m_default_comm, &defgrp);
1394  MPI_Group_incl(defgrp, static_cast<int>(newgrp_ranks.size()), newgrp_ranks.data(), &newgrp);
1395  } else {
     // Ranks must be expressed relative to m_default_comm's group.
1396  Vector<int> local_newgrp_ranks(newgrp_ranks.size());
1397  ParallelContext::global_to_local_rank(local_newgrp_ranks.data(),
1398  newgrp_ranks.data(), static_cast<int>(newgrp_ranks.size()));
1399  MPI_Group_incl(defgrp, static_cast<int>(local_newgrp_ranks.size()), local_newgrp_ranks.data(), &newgrp);
1400  }
1401 
1402  MPI_Comm_create(m_default_comm, newgrp, &newcomm);
1403 
     // RAII holder frees the communicator when the operator is destroyed.
1404  m_raii_comm = std::make_unique<CommContainer>(newcomm);
1405 
1406  MPI_Group_free(&defgrp);
1407  MPI_Group_free(&newgrp);
1408 
1409  return newcomm;
1410 #else
1412  return m_default_comm;
1413 #endif
1414 }
1415 
// setDomainBCLoc: record the physical locations of the lo/hi domain
// boundaries. NOTE(review): the first signature line (original line 1418) was
// dropped by this rendering.
1416 template <typename MF>
1417 void
1419  const Array<Real,AMREX_SPACEDIM>& hi_bcloc) noexcept
1420 {
1421  m_domain_bloc_lo = lo_bcloc;
1422  m_domain_bloc_hi = hi_bcloc;
1423 }
1424 
// setCoarseFineBC (scalar-ratio overload): convenience wrapper that promotes
// the int refinement ratio to an IntVect and forwards.
1425 template <typename MF>
1426 void
1427 MLLinOpT<MF>::setCoarseFineBC (const MF* crse, int crse_ratio,
1428  LinOpBCType bc_type) noexcept
1429 {
1430  setCoarseFineBC(crse, IntVect(crse_ratio), bc_type);
1431 }
1432 
// setCoarseFineBC (IntVect overload): store a non-owning pointer to the coarse
// data supplying the coarse/fine boundary condition, the coarsening ratio, and
// the BC type. The caller must keep *crse alive while this operator is used.
1433 template <typename MF>
1434 void
1435 MLLinOpT<MF>::setCoarseFineBC (const MF* crse, IntVect const& crse_ratio,
1436  LinOpBCType bc_type) noexcept
1437 {
1438  m_coarse_data_for_bc = crse;
1439  m_coarse_data_crse_ratio = crse_ratio;
1440  m_coarse_fine_bc_type = bc_type;
1441 }
1442 
// setCoarseFineBC (foreign fab type, scalar ratio): wrapper that promotes the
// int ratio to an IntVect and forwards to the converting AMF overload.
1443 template <typename MF>
1444 template <typename AMF, std::enable_if_t<!std::is_same_v<MF,AMF>,int>>
1445 void
1446 MLLinOpT<MF>::setCoarseFineBC (const AMF* crse, int crse_ratio,
1447  LinOpBCType bc_type) noexcept
1448 {
1449  setCoarseFineBC(crse, IntVect(crse_ratio), bc_type);
1450 }
1451 
// setCoarseFineBC (foreign fab type AMF != MF): copy the coarse data into an
// MF held by this operator (m_coarse_data_for_bc_raii), so no lifetime burden
// falls on the caller, then record ratio and BC type as in the MF overload.
1452 template <typename MF>
1453 template <typename AMF, std::enable_if_t<!std::is_same_v<MF,AMF>,int>>
1454 void
1455 MLLinOpT<MF>::setCoarseFineBC (const AMF* crse, IntVect const& crse_ratio,
1456  LinOpBCType bc_type) noexcept
1457 {
     // Build an MF with the same layout and copy the AMF data into it.
1458  m_coarse_data_for_bc_raii = MF(crse->boxArray(), crse->DistributionMap(),
1459  crse->nComp(), crse->nGrowVect());
1460  m_coarse_data_for_bc_raii.LocalCopy(*crse, 0, 0, crse->nComp(),
1461  crse->nGrowVect());
1462  m_coarse_data_for_bc = &m_coarse_data_for_bc_raii;
1463  m_coarse_data_crse_ratio = crse_ratio;
1464  m_coarse_fine_bc_type = bc_type;
1465 }
1466 
// make (hierarchy overload): size mf as [AMR level][MG level] and fill each
// slot via the single-level make(). NOTE(review): the signature line (original
// line 1469) was dropped by this rendering; the index lists it as
// "virtual void make(Vector<Vector<MF>>& mf, IntVect const& ng) const".
1467 template <typename MF>
1468 void
1470 {
1471  mf.clear();
1472  mf.resize(m_num_amr_levels);
1473  for (int alev = 0; alev < m_num_amr_levels; ++alev) {
1474  mf[alev].resize(m_num_mg_levels[alev]);
1475  for (int mlev = 0; mlev < m_num_mg_levels[alev]; ++mlev) {
1476  mf[alev][mlev] = make(alev, mlev, ng);
1477  }
1478  }
1479 }
1480 
// make: construct an MF on the (amrlev, mglev) grids, converted to this
// operator's index type, with ncomp components and ng ghost cells, using the
// level's fab factory. Only valid for MultiFab-like MF; otherwise aborts
// (concrete non-FabArray operators must override).
1481 template <typename MF>
1482 MF
1483 MLLinOpT<MF>::make (int amrlev, int mglev, IntVect const& ng) const
1484 {
1485  if constexpr (IsMultiFabLike_v<MF>) {
1486  return MF(amrex::convert(m_grids[amrlev][mglev], m_ixtype),
1487  m_dmap[amrlev][mglev], getNComp(), ng, MFInfo(),
1488  *m_factory[amrlev][mglev]);
1489  } else {
1490  amrex::ignore_unused(amrlev, mglev, ng);
1491  amrex::Abort("MLLinOpT::make: how did we get here?");
1492  return {};
1493  }
1494 }
1495 
// makeAlias: return a non-owning alias MF sharing mf's data (all components,
// starting at component 0). Aborts for non-MultiFab-like MF.
// NOTE(review): original line 1503 (likely an ignore_unused(mf) in the else
// branch) was dropped by this rendering.
1496 template <typename MF>
1497 MF
1498 MLLinOpT<MF>::makeAlias (MF const& mf) const
1499 {
1500  if constexpr (IsMultiFabLike_v<MF>) {
1501  return MF(mf, amrex::make_alias, 0, mf.nComp());
1502  } else {
1504  amrex::Abort("MLLinOpT::makeAlias: how did we get here?");
1505  return {};
1506  }
1507 }
1508 
// makeCoarseMG: build an MF on a coarsened version of the (amrlev, mglev)
// grids -- ratio 2 on refined AMR levels, otherwise the per-level MG ratio
// recorded in mg_coarsen_ratio_vec -- keeping the fine level's distribution
// map. Aborts for non-MultiFab-like MF.
1509 template <typename MF>
1510 MF
1511 MLLinOpT<MF>::makeCoarseMG (int amrlev, int mglev, IntVect const& ng) const
1512 {
1513  if constexpr (IsMultiFabLike_v<MF>) {
1514  BoxArray cba = m_grids[amrlev][mglev];
     // mg_coarsen_ratio_vec only describes AMR level 0's MG hierarchy.
1515  IntVect ratio = (amrlev > 0) ? IntVect(2) : mg_coarsen_ratio_vec[mglev];
1516  cba.coarsen(ratio);
1517  cba.convert(m_ixtype);
1518  return MF(cba, m_dmap[amrlev][mglev], getNComp(), ng);
1519  } else {
1520  amrex::ignore_unused(amrlev, mglev, ng);
1521  amrex::Abort("MLLinOpT::makeCoarseMG: how did we get here?");
1522  return {};
1523  }
1524 }
1525 
// makeCoarseAmr: build an MF on famrlev's grids coarsened by the AMR
// refinement ratio to level famrlev-1, keeping famrlev's distribution map.
// Aborts for non-MultiFab-like MF.
1526 template <typename MF>
1527 MF
1528 MLLinOpT<MF>::makeCoarseAmr (int famrlev, IntVect const& ng) const
1529 {
1530  if constexpr (IsMultiFabLike_v<MF>) {
1531  BoxArray cba = m_grids[famrlev][0];
1532  IntVect ratio(AMRRefRatio(famrlev-1));
1533  cba.coarsen(ratio);
1534  cba.convert(m_ixtype);
1535  return MF(cba, m_dmap[famrlev][0], getNComp(), ng);
1536  } else {
1537  amrex::ignore_unused(famrlev, ng);
1538  amrex::Abort("MLLinOpT::makeCoarseAmr: how did we get here?");
1539  return {};
1540  }
1541 }
1542 
// resizeMultiGrid: truncate AMR level 0's MG hierarchy to new_size levels
// (no-op if new_size is non-positive or would not shrink it), and rebuild the
// bottom communicator if a dedicated one was in use, since the coarsest dmap
// changed. NOTE(review): the signature line (original line 1545) was dropped
// by this rendering; the index lists it as
// "virtual void resizeMultiGrid(int new_size)".
1543 template <typename MF>
1544 void
1546 {
1547  if (new_size <= 0 || new_size >= m_num_mg_levels[0]) { return; }
1548 
1549  m_num_mg_levels[0] = new_size;
1550 
1551  m_geom[0].resize(new_size);
1552  m_grids[0].resize(new_size);
1553  m_dmap[0].resize(new_size);
1554  m_factory[0].resize(new_size);
1555 
     // A bottom communicator different from the default was built from the
     // old coarsest-level dmap; rebuild it for the new coarsest level.
1556  if (m_bottom_comm != m_default_comm) {
1557  m_bottom_comm = makeSubCommunicator(m_dmap[0].back());
1558  }
1559 }
1560 
// avgDownResMG: average the fine-MG-level residual fres down to the coarse
// level cres using the recorded coarsening ratio for level clev-1. With EB
// support enabled and cut cells present, the EB-aware average is used (only
// available for MultiFab). Non-FabArray MF types must override this.
1561 template <typename MF>
1562 void
1563 MLLinOpT<MF>::avgDownResMG (int clev, MF& cres, MF const& fres) const
1564 {
     // Silences unused-parameter warnings on the abort-only instantiations.
1565  amrex::ignore_unused(clev, cres, fres);
1566  if constexpr (amrex::IsFabArray<MF>::value) {
1567  const int ncomp = this->getNComp();
1568 #ifdef AMREX_USE_EB
1569  if (!fres.isAllRegular()) {
1570  if constexpr (std::is_same<MF,MultiFab>()) {
1571  amrex::EB_average_down(fres, cres, 0, ncomp,
1572  mg_coarsen_ratio_vec[clev-1]);
1573  } else {
1574  amrex::Abort("EB_average_down only works with MultiFab");
1575  }
1576  } else
1577 #endif
1578  {
1579  amrex::average_down(fres, cres, 0, ncomp, mg_coarsen_ratio_vec[clev-1]);
1580  }
1581  } else {
1582  amrex::Abort("For non-FabArray, MLLinOpT<MF>::avgDownResMG should be overridden.");
1583  }
1584 }
1585 
// isMFIterSafe: true if the two MG levels share the same distribution mapping
// and literally the same BoxArray data, so one MFIter can traverse data from
// both levels safely.
1586 template <typename MF>
1587 bool
1588 MLLinOpT<MF>::isMFIterSafe (int amrlev, int mglev1, int mglev2) const
1589 {
1590  return m_dmap[amrlev][mglev1] == m_dmap[amrlev][mglev2]
1591  && BoxArray::SameRefs(m_grids[amrlev][mglev1], m_grids[amrlev][mglev2]);
1592 }
1593 
// setLevelBC (foreign fab type AMF != MF): copy each non-null input (level BC
// data and Robin a/b/f coefficients) into an operator-owned MF held in the
// corresponding *_raii slot (a null input resets the slot), then call the pure
// virtual MF-based setLevelBC with the owned copies. Copying removes any
// lifetime burden on the caller's AMF objects.
1594 template <typename MF>
1595 template <typename AMF, std::enable_if_t<!std::is_same_v<MF,AMF>,int>>
1596 void
1597 MLLinOpT<MF>::setLevelBC (int amrlev, const AMF* levelbcdata,
1598  const AMF* robinbc_a, const AMF* robinbc_b,
1599  const AMF* robinbc_f)
1600 {
1601  const int ncomp = this->getNComp();
1602  if (levelbcdata) {
     // Mirror the input's layout and ghost cells, then deep-copy the data.
1603  levelbc_raii[amrlev] = std::make_unique<MF>(levelbcdata->boxArray(),
1604  levelbcdata->DistributionMap(),
1605  ncomp, levelbcdata->nGrowVect());
1606  levelbc_raii[amrlev]->LocalCopy(*levelbcdata, 0, 0, ncomp,
1607  levelbcdata->nGrowVect());
1608  } else {
1609  levelbc_raii[amrlev].reset();
1610  }
1611 
1612  if (robinbc_a) {
1613  robin_a_raii[amrlev] = std::make_unique<MF>(robinbc_a->boxArray(),
1614  robinbc_a->DistributionMap(),
1615  ncomp, robinbc_a->nGrowVect());
1616  robin_a_raii[amrlev]->LocalCopy(*robinbc_a, 0, 0, ncomp,
1617  robinbc_a->nGrowVect());
1618  } else {
1619  robin_a_raii[amrlev].reset();
1620  }
1621 
1622  if (robinbc_b) {
1623  robin_b_raii[amrlev] = std::make_unique<MF>(robinbc_b->boxArray(),
1624  robinbc_b->DistributionMap(),
1625  ncomp, robinbc_b->nGrowVect());
1626  robin_b_raii[amrlev]->LocalCopy(*robinbc_b, 0, 0, ncomp,
1627  robinbc_b->nGrowVect());
1628  } else {
1629  robin_b_raii[amrlev].reset();
1630  }
1631 
1632  if (robinbc_f) {
1633  robin_f_raii[amrlev] = std::make_unique<MF>(robinbc_f->boxArray(),
1634  robinbc_f->DistributionMap(),
1635  ncomp, robinbc_f->nGrowVect());
1636  robin_f_raii[amrlev]->LocalCopy(*robinbc_f, 0, 0, ncomp,
1637  robinbc_f->nGrowVect());
1638  } else {
1639  robin_f_raii[amrlev].reset();
1640  }
1641 
     // Forward the owned copies (or nullptrs) to the MF-based overload.
1642  this->setLevelBC(amrlev, levelbc_raii[amrlev].get(), robin_a_raii[amrlev].get(),
1643  robin_b_raii[amrlev].get(), robin_f_raii[amrlev].get());
1644 }
1645 
1646 extern template class MLLinOpT<MultiFab>;
1647 
1649 
1650 }
1651 
1652 #endif
#define BL_PROFILE(a)
Definition: AMReX_BLProfiler.H:551
#define BL_ASSERT(EX)
Definition: AMReX_BLassert.H:39
#define AMREX_ASSERT_WITH_MESSAGE(EX, MSG)
Definition: AMReX_BLassert.H:37
#define AMREX_ASSERT(EX)
Definition: AMReX_BLassert.H:38
#define AMREX_ALWAYS_ASSERT(EX)
Definition: AMReX_BLassert.H:50
Array4< Real > fine
Definition: AMReX_InterpFaceRegister.cpp:90
Array4< Real const > crse
Definition: AMReX_InterpFaceRegister.cpp:92
#define AMREX_D_TERM(a, b, c)
Definition: AMReX_SPACE.H:129
#define AMREX_D_PICK(a, b, c)
Definition: AMReX_SPACE.H:151
#define AMREX_D_DECL(a, b, c)
Definition: AMReX_SPACE.H:104
int MPI_Comm
Definition: AMReX_ccse-mpi.H:47
int MPI_Group
Definition: AMReX_ccse-mpi.H:48
static constexpr int MPI_COMM_NULL
Definition: AMReX_ccse-mpi.H:55
A collection of Boxes stored in an Array.
Definition: AMReX_BoxArray.H:550
BoxArray & coarsen(int refinement_ratio)
Coarsen each Box in the BoxArray to the specified ratio.
static bool SameRefs(const BoxArray &lhs, const BoxArray &rhs)
whether two BoxArrays share the same data
Definition: AMReX_BoxArray.H:820
BoxArray & convert(IndexType typ)
Apply Box::convert(IndexType) to each Box in the BoxArray.
AMREX_GPU_HOST_DEVICE double d_numPts() const noexcept
Returns the number of points contained in the BoxND. This is intended for use only in diagnostic mess...
Definition: AMReX_Box.H:366
AMREX_GPU_HOST_DEVICE BoxND & makeSlab(int direction, int slab_index) noexcept
Definition: AMReX_Box.H:779
AMREX_GPU_HOST_DEVICE IntVectND< dim > length() const noexcept
Return the length of the BoxND.
Definition: AMReX_Box.H:146
AMREX_GPU_HOST_DEVICE bool coarsenable(const IntVectND< dim > &refrat, const IntVectND< dim > &min_width) const noexcept
Definition: AMReX_Box.H:745
AMREX_GPU_HOST_DEVICE BoxND & coarsen(int ref_ratio) noexcept
Coarsen BoxND by given (positive) refinement ratio. NOTE: if type(dir) = CELL centered: lo <- lo/rati...
Definition: AMReX_Box.H:708
AMREX_GPU_HOST_DEVICE Long numPts() const noexcept
Returns the number of points contained in the BoxND.
Definition: AMReX_Box.H:346
Definition: AMReX_FabFactory.H:76
Calculates the distribution of FABs to MPI processes.
Definition: AMReX_DistributionMapping.H:41
static DistributionMapping makeSFC(const MultiFab &weight, bool sort=true)
Definition: AMReX_DistributionMapping.cpp:1762
const Vector< int > & ProcessorMap() const noexcept
Returns a constant reference to the mapping of boxes in the underlying BoxArray to the CPU that holds...
Definition: AMReX_DistributionMapping.cpp:47
Definition: AMReX_EBFabFactory.H:24
Definition: AMReX_FabFactory.H:50
Solve using GMRES with multigrid as preconditioner.
Definition: AMReX_GMRES_MLMG.H:20
Rectangular problem domain geometry.
Definition: AMReX_Geometry.H:73
Interface
Definition: AMReX_Hypre.H:21
AMREX_GPU_HOST_DEVICE static constexpr AMREX_FORCE_INLINE std::size_t size() noexcept
Definition: AMReX_IntVect.H:725
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE IntVectND< new_dim > resize(int fill_extra=0) const noexcept
Returns a new IntVectND of size new_dim by either shrinking or expanding this IntVectND.
Definition: AMReX_IntVect.H:768
Definition: AMReX_MLABecLaplacian.H:15
Definition: AMReX_MLCGSolver.H:12
Definition: AMReX_MLLinOp.H:98
const Geometry & Geom(int amr_lev, int mglev=0) const noexcept
Definition: AMReX_MLLinOp.H:559
const MF * m_coarse_data_for_bc
Definition: AMReX_MLLinOp.H:628
virtual void smooth(int amrlev, int mglev, MF &sol, const MF &rhs, bool skip_fillboundary=false) const =0
Smooth.
void setDomainBC(const Array< BCType, AMREX_SPACEDIM > &lobc, const Array< BCType, AMREX_SPACEDIM > &hibc) noexcept
Boundary of the whole domain.
Definition: AMReX_MLLinOp.H:1181
Vector< Vector< std::unique_ptr< FabFactory< FAB > > > > m_factory
Definition: AMReX_MLLinOp.H:601
virtual void avgDownResMG(int clev, MF &cres, MF const &fres) const
Definition: AMReX_MLLinOp.H:1563
int NAMRLevels() const noexcept
Return the number of AMR levels.
Definition: AMReX_MLLinOp.H:554
bool m_do_consolidation
Definition: AMReX_MLLinOp.H:592
virtual void compGrad(int, const Array< MF *, AMREX_SPACEDIM > &, MF &, Location) const
Compute gradients of the solution.
Definition: AMReX_MLLinOp.H:437
bool isCellCentered() const noexcept
Definition: AMReX_MLLinOp.H:673
const Vector< int > & AMRRefRatio() const noexcept
Return AMR refinement ratios.
Definition: AMReX_MLLinOp.H:632
IntVect m_ixtype
Definition: AMReX_MLLinOp.H:589
virtual std::unique_ptr< MLLinOpT< MF > > makeNLinOp(int) const
Definition: AMReX_MLLinOp.H:488
void setVerbose(int v) noexcept
Set verbosity.
Definition: AMReX_MLLinOp.H:244
Array< Real, AMREX_SPACEDIM > m_domain_bloc_hi
Definition: AMReX_MLLinOp.H:622
bool isMFIterSafe(int amrlev, int mglev1, int mglev2) const
Definition: AMReX_MLLinOp.H:1588
RealVect m_coarse_bc_loc
Definition: AMReX_MLLinOp.H:627
virtual bool needsUpdate() const
Does it need update if it's reused?
Definition: AMReX_MLLinOp.H:266
virtual void interpolation(int amrlev, int fmglev, MF &fine, const MF &crse) const =0
Add interpolated coarse MG level data to fine MG level data.
virtual void setLevelBC(int, const MF *, const MF *=nullptr, const MF *=nullptr, const MF *=nullptr)=0
Set boundary conditions for given level. For cell-centered solves only.
virtual MF make(int amrlev, int mglev, IntVect const &ng) const
Definition: AMReX_MLLinOp.H:1483
T get_d0(T const &dx, T const &dy, T const &) const noexcept
Definition: AMReX_MLLinOp.H:714
MPI_Comm BottomCommunicator() const noexcept
Definition: AMReX_MLLinOp.H:664
void setEnforceSingularSolvable(bool o) noexcept
Definition: AMReX_MLLinOp.H:253
MPI_Comm Communicator() const noexcept
Definition: AMReX_MLLinOp.H:665
virtual void getFluxes(const Vector< Array< MF *, AMREX_SPACEDIM > > &, const Vector< MF * > &, Location) const
Definition: AMReX_MLLinOp.H:494
int mg_domain_min_width
Definition: AMReX_MLLinOp.H:573
void setMaxOrder(int o) noexcept
Set order of interpolation at coarse/fine boundary.
Definition: AMReX_MLLinOp.H:247
Vector< Array< BCType, AMREX_SPACEDIM > > m_hibc_orig
Definition: AMReX_MLLinOp.H:567
virtual void interpAssign(int amrlev, int fmglev, MF &fine, MF &crse) const
Overwrite fine MG level data with interpolated coarse data.
Definition: AMReX_MLLinOp.H:299
virtual std::string name() const
Definition: AMReX_MLLinOp.H:131
void setCoarseFineBC(const AMF *crse, IntVect const &crse_ratio, LinOpBCType bc_type=LinOpBCType::Dirichlet) noexcept
Definition: AMReX_MLLinOp.H:1455
bool doAgglomeration() const noexcept
Definition: AMReX_MLLinOp.H:669
Vector< std::unique_ptr< MF > > levelbc_raii
Definition: AMReX_MLLinOp.H:749
MF m_coarse_data_for_bc_raii
Definition: AMReX_MLLinOp.H:629
virtual void setDirichletNodesToZero(int, int, MF &) const
Definition: AMReX_MLLinOp.H:474
FabFactory< FAB > const * Factory(int amr_lev, int mglev=0) const noexcept
Definition: AMReX_MLLinOp.H:637
std::unique_ptr< CommContainer > m_raii_comm
Definition: AMReX_MLLinOp.H:619
bool m_do_semicoarsening
Definition: AMReX_MLLinOp.H:594
Array4< T > compactify(Array4< T > const &a) const noexcept
Definition: AMReX_MLLinOp.H:700
bool hasRobinBC() const noexcept
Definition: AMReX_MLLinOp.H:1265
Array< Real, AMREX_SPACEDIM > m_domain_bloc_lo
Definition: AMReX_MLLinOp.H:621
virtual void resizeMultiGrid(int new_size)
Definition: AMReX_MLLinOp.H:1545
Vector< Vector< BoxArray > > m_grids
Definition: AMReX_MLLinOp.H:599
virtual MF makeCoarseAmr(int famrlev, IntVect const &ng) const
Definition: AMReX_MLLinOp.H:1528
Vector< std::unique_ptr< MF > > robin_f_raii
Definition: AMReX_MLLinOp.H:752
bool m_do_agglomeration
Definition: AMReX_MLLinOp.H:591
void setCoarseFineBC(const AMF *crse, int crse_ratio, LinOpBCType bc_type=LinOpBCType::Dirichlet) noexcept
Definition: AMReX_MLLinOp.H:1446
virtual MF makeAlias(MF const &mf) const
Definition: AMReX_MLLinOp.H:1498
virtual std::unique_ptr< FabFactory< FAB > > makeFactory(int, int) const
Definition: AMReX_MLLinOp.H:689
void setDomainBC(const Vector< Array< BCType, AMREX_SPACEDIM > > &lobc, const Vector< Array< BCType, AMREX_SPACEDIM > > &hibc) noexcept
Boundary of the whole domain.
Definition: AMReX_MLLinOp.H:1191
static constexpr int mg_coarsen_ratio
Definition: AMReX_MLLinOp.H:571
virtual void copyNSolveSolution(MF &, MF const &) const
Definition: AMReX_MLLinOp.H:534
virtual void solutionResidual(int amrlev, MF &resid, MF &x, const MF &b, const MF *crse_bcdata=nullptr)=0
Compute residual for solution.
int getMaxOrder() const noexcept
Get order of interpolation at coarse/fine boundary.
Definition: AMReX_MLLinOp.H:249
virtual int getNComp() const
Return number of components.
Definition: AMReX_MLLinOp.H:261
virtual void getFluxes(const Vector< MF * > &, const Vector< MF * > &) const
Definition: AMReX_MLLinOp.H:499
void defineBC()
Definition: AMReX_MLLinOp.H:1169
void setCoarseFineBCLocation(const RealVect &cloc) noexcept
Definition: AMReX_MLLinOp.H:667
Vector< int > m_amr_ref_ratio
Definition: AMReX_MLLinOp.H:584
MPI_Comm m_default_comm
Definition: AMReX_MLLinOp.H:604
static void makeAgglomeratedDMap(const Vector< BoxArray > &ba, Vector< DistributionMapping > &dm)
Definition: AMReX_MLLinOp.H:1294
bool isBottomActive() const noexcept
Definition: AMReX_MLLinOp.H:662
void defineGrids(const Vector< Geometry > &a_geom, const Vector< BoxArray > &a_grids, const Vector< DistributionMapping > &a_dmap, const Vector< FabFactory< FAB > const * > &a_factory)
Definition: AMReX_MLLinOp.H:795
virtual BottomSolver getDefaultBottomSolver() const
Definition: AMReX_MLLinOp.H:258
GpuArray< BCType, AMREX_SPACEDIM > LoBC(int icomp=0) const noexcept
Definition: AMReX_MLLinOp.H:641
typename FabDataType< MF >::fab_type FAB
Definition: AMReX_MLLinOp.H:108
virtual RT normInf(int amrlev, MF const &mf, bool local) const =0
Vector< int > m_num_mg_levels
Definition: AMReX_MLLinOp.H:586
bool hasBC(BCType bct) const noexcept
Definition: AMReX_MLLinOp.H:1243
static void makeConsolidatedDMap(const Vector< BoxArray > &ba, Vector< DistributionMapping > &dm, int ratio, int strategy)
Definition: AMReX_MLLinOp.H:1323
Vector< Vector< DistributionMapping > > m_dmap
Definition: AMReX_MLLinOp.H:600
int verbose
Definition: AMReX_MLLinOp.H:577
virtual void checkPoint(std::string const &) const
Definition: AMReX_MLLinOp.H:745
IntVect m_coarse_data_crse_ratio
Definition: AMReX_MLLinOp.H:626
LinOpBCType BCType
Definition: AMReX_MLLinOp.H:111
virtual void correctionResidual(int amrlev, int mglev, MF &resid, MF &x, const MF &b, BCMode bc_mode, const MF *crse_bcdata=nullptr)=0
Compute residual for the residual-correction form, resid = b - L(x)
virtual void compFlux(int, const Array< MF *, AMREX_SPACEDIM > &, MF &, Location) const
Compute fluxes.
Definition: AMReX_MLLinOp.H:423
MLLinOpT(MLLinOpT< MF > &&)=delete
Vector< Array< BCType, AMREX_SPACEDIM > > m_lobc_orig
Definition: AMReX_MLLinOp.H:566
virtual void postSolve(Vector< MF > &) const
Definition: AMReX_MLLinOp.H:536
void setCoarseFineBC(const MF *crse, int crse_ratio, LinOpBCType bc_type=LinOpBCType::Dirichlet) noexcept
Set coarse/fine boundary conditions. For cell-centered solves only.
Definition: AMReX_MLLinOp.H:1427
virtual void apply(int amrlev, int mglev, MF &out, MF &in, BCMode bc_mode, StateMode s_mode, const MLMGBndryT< MF > *bndry=nullptr) const =0
Apply the linear operator, out = L(in)
void setDomainBCLoc(const Array< Real, AMREX_SPACEDIM > &lo_bcloc, const Array< Real, AMREX_SPACEDIM > &hi_bcloc) noexcept
Set location of domain boundaries.
Definition: AMReX_MLLinOp.H:1418
bool needsCoarseDataForBC() const noexcept
Needs coarse data for bc?
Definition: AMReX_MLLinOp.H:177
typename FabDataType< MF >::value_type RT
Definition: AMReX_MLLinOp.H:109
virtual void update()
Update for reuse.
Definition: AMReX_MLLinOp.H:268
virtual void applyMetricTerm(int, int, MF &) const
apply metric terms if there are any
Definition: AMReX_MLLinOp.H:444
virtual void unapplyMetricTerm(int, int, MF &) const
unapply metric terms if there are any
Definition: AMReX_MLLinOp.H:446
bool hasHiddenDimension() const noexcept
Definition: AMReX_MLLinOp.H:695
virtual bool isBottomSingular() const =0
Is the bottom of MG singular?
virtual void reflux(int crse_amrlev, MF &res, const MF &crse_sol, const MF &crse_rhs, MF &fine_res, MF &fine_sol, const MF &fine_rhs) const
Reflux at AMR coarse/fine boundary.
Definition: AMReX_MLLinOp.H:406
virtual IntVect getNGrowVectRestriction() const
Definition: AMReX_MLLinOp.H:675
Vector< std::unique_ptr< MF > > robin_b_raii
Definition: AMReX_MLLinOp.H:751
virtual Vector< RT > getSolvabilityOffset(int, int, MF const &) const
get offset for fixing solvability
Definition: AMReX_MLLinOp.H:461
static constexpr int mg_box_min_width
Definition: AMReX_MLLinOp.H:572
virtual MF makeCoarseMG(int amrlev, int mglev, IntVect const &ng) const
Definition: AMReX_MLLinOp.H:1511
virtual void fixSolvabilityByOffset(int, int, MF &, Vector< RT > const &) const
fix solvability by subtracting offset from RHS
Definition: AMReX_MLLinOp.H:465
Vector< Array< BCType, AMREX_SPACEDIM > > m_lobc
Definition: AMReX_MLLinOp.H:562
int hiddenDirection() const noexcept
Definition: AMReX_MLLinOp.H:696
Vector< int > m_domain_covered
Definition: AMReX_MLLinOp.H:602
const MLLinOpT< MF > * m_parent
Definition: AMReX_MLLinOp.H:587
bool doSemicoarsening() const noexcept
Definition: AMReX_MLLinOp.H:671
virtual bool supportNSolve() const
Definition: AMReX_MLLinOp.H:532
virtual bool supportRobinBC() const noexcept
Definition: AMReX_MLLinOp.H:656
virtual void normalize(int amrlev, int mglev, MF &mf) const
Divide mf by the diagonal component of the operator. Used by bicgstab.
Definition: AMReX_MLLinOp.H:363
virtual void applyInhomogNeumannTerm(int, MF &) const
Extra terms introduced when we treat inhomogeneous Neumann BC as homogeneous.
Definition: AMReX_MLLinOp.H:452
virtual void avgDownResAmr(int clev, MF &cres, MF const &fres) const
Definition: AMReX_MLLinOp.H:542
Vector< Vector< Geometry > > m_geom
first Vector is for amr level and second is mg level
Definition: AMReX_MLLinOp.H:598
MLLinOpT(const MLLinOpT< MF > &)=delete
virtual void averageDownAndSync(Vector< MF > &sol) const =0
void setCoarseFineBC(const MF *crse, IntVect const &crse_ratio, LinOpBCType bc_type=LinOpBCType::Dirichlet) noexcept
Definition: AMReX_MLLinOp.H:1435
MLLinOpT()=default
MLLinOpT< MF > & operator=(const MLLinOpT< MF > &)=delete
Box compactify(Box const &b) const noexcept
Definition: AMReX_MLLinOp.H:1272
bool m_needs_coarse_data_for_bc
Definition: AMReX_MLLinOp.H:624
int maxorder
Definition: AMReX_MLLinOp.H:579
void define(const Vector< Geometry > &a_geom, const Vector< BoxArray > &a_grids, const Vector< DistributionMapping > &a_dmap, const LPInfo &a_info, const Vector< FabFactory< FAB > const * > &a_factory, bool eb_limit_coarsening=true)
Definition: AMReX_MLLinOp.H:757
Vector< IntVect > mg_coarsen_ratio_vec
Definition: AMReX_MLLinOp.H:595
MF MFType
Definition: AMReX_MLLinOp.H:107
virtual ~MLLinOpT()=default
virtual void averageDownSolutionRHS(int camrlev, MF &crse_sol, MF &crse_rhs, const MF &fine_sol, const MF &fine_rhs)
Average-down data from fine AMR level to coarse AMR level.
Definition: AMReX_MLLinOp.H:329
LPInfo info
Definition: AMReX_MLLinOp.H:575
MPI_Comm makeSubCommunicator(const DistributionMapping &dm)
Definition: AMReX_MLLinOp.H:1379
Vector< Array< BCType, AMREX_SPACEDIM > > m_hibc
Definition: AMReX_MLLinOp.H:563
int NMGLevels(int amrlev) const noexcept
Return the number of MG levels at given AMR level.
Definition: AMReX_MLLinOp.H:557
virtual void scaleRHS(int, MF &) const
scale RHS to fix solvability
Definition: AMReX_MLLinOp.H:458
T get_d1(T const &, T const &dy, T const &dz) const noexcept
Definition: AMReX_MLLinOp.H:724
bool enforceSingularSolvable
Definition: AMReX_MLLinOp.H:581
void setLevelBC(int amrlev, const AMF *levelbcdata, const AMF *robinbc_a=nullptr, const AMF *robinbc_b=nullptr, const AMF *robinbc_f=nullptr)
Definition: AMReX_MLLinOp.H:1597
virtual RT xdoty(int amrlev, int mglev, const MF &x, const MF &y, bool local) const =0
x dot y, used by the bottom solver
virtual void interpolationAmr(int famrlev, MF &fine, const MF &crse, IntVect const &nghost) const
Interpolation between AMR levels.
Definition: AMReX_MLLinOp.H:313
bool doConsolidation() const noexcept
Definition: AMReX_MLLinOp.H:670
virtual bool supportInhomogNeumannBC() const noexcept
Definition: AMReX_MLLinOp.H:657
virtual void prepareForFluxes(int, const MF *=nullptr)
Definition: AMReX_MLLinOp.H:379
LinOpBCType m_coarse_fine_bc_type
Definition: AMReX_MLLinOp.H:625
virtual bool isSingular(int amrlev) const =0
Is it singular on given AMR level?
GpuArray< BCType, AMREX_SPACEDIM > HiBC(int icomp=0) const noexcept
Definition: AMReX_MLLinOp.H:646
int AMRRefRatio(int amr_lev) const noexcept
Return AMR refinement ratio at given AMR level.
Definition: AMReX_MLLinOp.H:635
MPI_Comm m_bottom_comm
Definition: AMReX_MLLinOp.H:605
virtual void restriction(int amrlev, int cmglev, MF &crse, MF &fine) const =0
Restriction onto coarse MG level.
virtual void applyOverset(int, MF &) const
for overset solver only
Definition: AMReX_MLLinOp.H:455
virtual void unimposeNeumannBC(int, MF &) const
This is needed for our nodal projection solver.
Definition: AMReX_MLLinOp.H:449
virtual void prepareForGMRES()
Definition: AMReX_MLLinOp.H:470
bool getEnforceSingularSolvable() const noexcept
Definition: AMReX_MLLinOp.H:256
virtual void prepareForSolve()=0
virtual void make(Vector< Vector< MF > > &mf, IntVect const &ng) const
Definition: AMReX_MLLinOp.H:1469
bool hasInhomogNeumannBC() const noexcept
Definition: AMReX_MLLinOp.H:1258
Vector< std::unique_ptr< MF > > robin_a_raii
Definition: AMReX_MLLinOp.H:750
virtual int getNGrow(int=0, int=0) const
Definition: AMReX_MLLinOp.H:263
int m_num_amr_levels
Definition: AMReX_MLLinOp.H:583
Definition: AMReX_MLMGBndry.H:12
Definition: AMReX_MLMG.H:12
Definition: AMReX_MLPoisson.H:16
This class provides the user with a few print options.
Definition: AMReX_Print.H:35
A Box with real dimensions. A RealBox is OK iff volume >= 0.
Definition: AMReX_RealBox.H:21
A Real vector in SpaceDim-dimensional space.
Definition: AMReX_RealVect.H:32
This class is a thin wrapper around std::vector. Unlike vector, Vector::operator[] provides bound che...
Definition: AMReX_Vector.H:27
Long size() const noexcept
Definition: AMReX_Vector.H:50
AMREX_EXPORT int max_grid_size
Definition: AMReX_EB2.cpp:23
AMREX_GPU_HOST_DEVICE Long size(T const &b) noexcept
integer version
Definition: AMReX_GpuRange.H:26
bool notInLaunchRegion() noexcept
Definition: AMReX_GpuControl.H:87
MPI_Comm CommunicatorSub() noexcept
sub-communicator for current frame
Definition: AMReX_ParallelContext.H:70
int local_to_global_rank(int rank) noexcept
translate between local rank and global rank
Definition: AMReX_ParallelContext.H:95
int global_to_local_rank(int rank) noexcept
Definition: AMReX_ParallelContext.H:98
int NProcsSub() noexcept
number of ranks in current frame
Definition: AMReX_ParallelContext.H:74
MPI_Comm Communicator() noexcept
Definition: AMReX_ParallelDescriptor.H:210
static int f(amrex::Real t, N_Vector y_data, N_Vector y_rhs, void *user_data)
Definition: AMReX_SundialsIntegrator.H:44
@ min
Definition: AMReX_ParallelReduce.H:18
Definition: AMReX_Amr.cpp:49
@ make_alias
Definition: AMReX_MakeType.H:7
void average_down(const MultiFab &S_fine, MultiFab &S_crse, const Geometry &fgeom, const Geometry &cgeom, int scomp, int ncomp, int rr)
Definition: AMReX_MultiFabUtil.cpp:314
BoxND< AMREX_SPACEDIM > Box
Definition: AMReX_BaseFwd.H:27
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > enclosedCells(const BoxND< dim > &b, int dir) noexcept
Returns a BoxND with CELL based coordinates in direction dir that is enclosed by b....
Definition: AMReX_Box.H:1463
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > convert(const BoxND< dim > &b, const IntVectND< dim > &typ) noexcept
Returns a BoxND with different type.
Definition: AMReX_Box.H:1435
constexpr AMREX_GPU_HOST_DEVICE GpuTupleElement< I, GpuTuple< Ts... > >::type & get(GpuTuple< Ts... > &tup) noexcept
Definition: AMReX_Tuple.H:179
void EB_average_down(const MultiFab &S_fine, MultiFab &S_crse, const MultiFab &vol_fine, const MultiFab &vfrac_fine, int scomp, int ncomp, const IntVect &ratio)
Definition: AMReX_EBMultiFabUtil.cpp:336
IntVectND< AMREX_SPACEDIM > IntVect
Definition: AMReX_BaseFwd.H:30
BottomSolver
Definition: AMReX_MLLinOp.H:30
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void ignore_unused(const Ts &...)
This shuts up the compiler about unused variables.
Definition: AMReX.H:111
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > coarsen(const BoxND< dim > &b, int ref_ratio) noexcept
Coarsen BoxND by given (positive) refinement ratio. NOTE: if type(dir) = CELL centered: lo <- lo/rati...
Definition: AMReX_Box.H:1304
void Warning(const std::string &msg)
Print out warning message to cerr.
Definition: AMReX.cpp:231
void Abort(const std::string &msg)
Print out message to cerr and exit via abort().
Definition: AMReX.cpp:225
const int[]
Definition: AMReX_BLProfiler.cpp:1664
int verbose
Definition: AMReX_DistributionMapping.cpp:36
std::unique_ptr< Hypre > makeHypre(const BoxArray &grids, const DistributionMapping &dmap, const Geometry &geom, MPI_Comm comm_, Hypre::Interface interface, const iMultiFab *overset_mask)
Definition: AMReX_Hypre.cpp:12
std::array< T, N > Array
Definition: AMReX_Array.H:24
Definition: AMReX_Array4.H:61
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE int nComp() const noexcept
Definition: AMReX_Array4.H:248
Definition: AMReX_FabDataType.H:9
Definition: AMReX_Array.H:34
Definition: AMReX_TypeTraits.H:29
Definition: AMReX_MLLinOp.H:35
int con_strategy
Definition: AMReX_MLLinOp.H:42
bool do_semicoarsening
Definition: AMReX_MLLinOp.H:38
bool has_metric_term
Definition: AMReX_MLLinOp.H:43
LPInfo & setAgglomerationGridSize(int x) noexcept
Definition: AMReX_MLLinOp.H:52
int max_semicoarsening_level
Definition: AMReX_MLLinOp.H:45
bool hasHiddenDimension() const noexcept
Definition: AMReX_MLLinOp.H:62
LPInfo & setSemicoarsening(bool x) noexcept
Definition: AMReX_MLLinOp.H:51
LPInfo & setMaxCoarseningLevel(int n) noexcept
Definition: AMReX_MLLinOp.H:57
LPInfo & setConsolidationGridSize(int x) noexcept
Definition: AMReX_MLLinOp.H:53
LPInfo & setAgglomeration(bool x) noexcept
Definition: AMReX_MLLinOp.H:49
int con_ratio
Definition: AMReX_MLLinOp.H:41
bool do_consolidation
Definition: AMReX_MLLinOp.H:37
int con_grid_size
Definition: AMReX_MLLinOp.H:40
bool do_agglomeration
Definition: AMReX_MLLinOp.H:36
LPInfo & setHiddenDirection(int n) noexcept
Definition: AMReX_MLLinOp.H:60
static constexpr int getDefaultConsolidationGridSize()
Definition: AMReX_MLLinOp.H:74
LPInfo & setConsolidation(bool x) noexcept
Definition: AMReX_MLLinOp.H:50
int max_coarsening_level
Definition: AMReX_MLLinOp.H:44
LPInfo & setMaxSemicoarseningLevel(int n) noexcept
Definition: AMReX_MLLinOp.H:58
int agg_grid_size
Definition: AMReX_MLLinOp.H:39
static constexpr int getDefaultAgglomerationGridSize()
Definition: AMReX_MLLinOp.H:66
LPInfo & setConsolidationStrategy(int x) noexcept
Definition: AMReX_MLLinOp.H:55
int hidden_direction
Definition: AMReX_MLLinOp.H:47
LPInfo & setMetricTerm(bool x) noexcept
Definition: AMReX_MLLinOp.H:56
LPInfo & setSemicoarseningDirection(int n) noexcept
Definition: AMReX_MLLinOp.H:59
LPInfo & setConsolidationRatio(int x) noexcept
Definition: AMReX_MLLinOp.H:54
int semicoarsening_direction
Definition: AMReX_MLLinOp.H:46
Definition: AMReX_MLLinOp.H:84
StateMode
Definition: AMReX_MLLinOp.H:86
BCMode
Definition: AMReX_MLLinOp.H:85
Location
Definition: AMReX_MLLinOp.H:87
FabArray memory allocation information.
Definition: AMReX_FabArray.H:66
Definition: AMReX_MLLinOp.H:606
void operator=(const CommContainer &)=delete
~CommContainer()
Definition: AMReX_MLLinOp.H:613
MPI_Comm comm
Definition: AMReX_MLLinOp.H:607
CommContainer(const CommContainer &)=delete
CommContainer(CommContainer &&)=delete
CommContainer(MPI_Comm m) noexcept
Definition: AMReX_MLLinOp.H:608