Block-Structured AMR Software Framework
AMReX_Box.H
Go to the documentation of this file.
1 
2 #ifndef AMREX_BOX_H_
3 #define AMREX_BOX_H_
4 #include <AMReX_Config.H>
5 
6 #include <AMReX_Algorithm.H>
7 #include <AMReX_ArrayLim.H>
8 #include <AMReX_ccse-mpi.H>
9 #include <AMReX_IntVect.H>
10 #include <AMReX_IndexType.H>
11 #include <AMReX_Orientation.H>
12 #include <AMReX_SPACE.H>
13 #include <AMReX_Array.H>
14 #include <AMReX_Array4.H>
15 #include <AMReX_Vector.H>
16 #include <AMReX_GpuQualifiers.H>
17 #include <AMReX_GpuControl.H>
18 #include <AMReX_Math.H>
19 
20 #include <iosfwd>
21 
22 namespace amrex
23 {
24 template<int dim>
25 class BoxND;
26 using Box = BoxND<AMREX_SPACEDIM>;
27 class BoxCommHelper;
28 
/**
 * \brief A rectangular region on a `dim`-dimensional integer lattice.
 *
 * A BoxND is described by a low corner (smallend), a high corner (bigend,
 * inclusive), and an IndexTypeND (btype) giving the centering — CELL or
 * NODE — in each direction.
 */
template<int dim>
class BoxND
{
 friend class BoxCommHelper;

public:
 /*
 * \brief The default constructor. For safety, the constructed BoxND is
 * invalid and may be tested for validity with ok().
 * DO NOT CHANGE THIS BEHAVIOR!
 */
 constexpr BoxND () noexcept
 : smallend(1),
 bigend(0)
 {}

 //! Construct a BoxND with the given low and high (inclusive) corners.
 constexpr BoxND (const IntVectND<dim>& small, const IntVectND<dim>& big) noexcept
 : smallend(small),
 bigend(big)
 {}

 //! Construct a BoxND from a low corner and an array of per-direction lengths.
 BoxND (const IntVectND<dim>& small, const int* vec_len) noexcept
 : smallend(small),
 bigend(small + IntVectND<dim>(vec_len) - 1)
 {}

 //! Construct with centering given as an IntVectND; each entry must be
 //! 0 (CELL) or 1 (NODE), enforced by the assertion.
 BoxND (const IntVectND<dim>& small, const IntVectND<dim>& big, const IntVectND<dim>& typ) noexcept
 : smallend(small),
 bigend(big),
 btype(typ)
 {
 BL_ASSERT(typ.allGE(0) && typ.allLE(1));
 }

 //! Construct with the given IndexTypeND centering.
 BoxND (const IntVectND<dim>& small, const IntVectND<dim>& big, IndexTypeND<dim> t) noexcept
 : smallend(small),
 bigend(big),
 btype(t)
 {}

 //! Construct from an Array4's begin/end; Array4::end is exclusive, hence the -1.
 template <typename T, int Tdim=dim, std::enable_if_t<( 1<=Tdim && Tdim<=3 ), int> = 0>
 AMREX_GPU_HOST_DEVICE
 explicit BoxND (Array4<T> const& a) noexcept
 : smallend(a.begin),
 bigend(IntVectND<dim>(a.end) - 1)
 {}

 // dtor, copy-ctor, copy-op=, move-ctor, and move-op= are compiler generated.

 //! Return the low corner.
 [[nodiscard]] AMREX_GPU_HOST_DEVICE
 const IntVectND<dim>& smallEnd () const& noexcept { return smallend; }

 //! Deleted on rvalues: would return a reference into a temporary.
 [[nodiscard]] const IntVectND<dim>& smallEnd () && = delete;

 //! Return the low-corner coordinate in direction dir.
 [[nodiscard]] AMREX_GPU_HOST_DEVICE
 int smallEnd (int dir) const& noexcept { return smallend[dir]; }

 //! Return the high (inclusive) corner.
 [[nodiscard]] AMREX_GPU_HOST_DEVICE
 const IntVectND<dim>& bigEnd () const& noexcept { return bigend; }

 //! Deleted on rvalues: would return a reference into a temporary.
 [[nodiscard]] const IntVectND<dim>& bigEnd () && = delete;

 //! Return the high-corner coordinate in direction dir.
 [[nodiscard]] AMREX_GPU_HOST_DEVICE
 int bigEnd (int dir) const noexcept { return bigend[dir]; }

 //! Return the index type (centering) of this BoxND.
 [[nodiscard]] AMREX_GPU_HOST_DEVICE
 IndexTypeND<dim> ixType () const noexcept { return btype; }

 //! Return the centering as an IntVectND (0=CELL, 1=NODE per direction).
 [[nodiscard]] AMREX_GPU_HOST_DEVICE
 IntVectND<dim> type () const noexcept { return btype.ixType(); }

 //! Return the centering in direction dir.
 [[nodiscard]] AMREX_GPU_HOST_DEVICE
 IndexType::CellIndex type (int dir) const noexcept { return btype.ixType(dir); }

 //! Number of indices in each direction (bigend - smallend + 1).
 [[nodiscard]] AMREX_GPU_HOST_DEVICE
 IntVectND<dim> size () const noexcept
 {
 return bigend - smallend + 1;
 }

 //! Same as size(): number of indices in each direction.
 [[nodiscard]] AMREX_GPU_HOST_DEVICE
 IntVectND<dim> length () const noexcept
 {
 return bigend - smallend + 1;
 }

 //! Length in direction dir.
 [[nodiscard]] AMREX_GPU_HOST_DEVICE
 int length (int dir) const noexcept { return bigend[dir] - smallend[dir] + 1; }

 //! Lengths padded to three entries; missing dimensions report 1 (dim3(1)).
 template <int N=dim, std::enable_if_t<( 1<=N && N<=3 ), int> = 0>
 [[nodiscard]] AMREX_GPU_HOST_DEVICE
 GpuArray<int,3> length3d () const noexcept {
 Dim3 len3d = length().dim3(1);
 return {{len3d.x, len3d.y, len3d.z}};
 }

 //! Low corner padded to three entries; missing dimensions report 0.
 template <int N=dim, std::enable_if_t<( 1<=N && N<=3 ), int> = 0>
 [[nodiscard]] AMREX_GPU_HOST_DEVICE
 GpuArray<int,3> loVect3d () const noexcept {
 Dim3 lo3d = smallend.dim3(0);
 return {{lo3d.x, lo3d.y, lo3d.z}};
 }

 //! High corner padded to three entries; missing dimensions report 0.
 template <int N=dim, std::enable_if_t<( 1<=N && N<=3 ), int> = 0>
 [[nodiscard]] AMREX_GPU_HOST_DEVICE
 GpuArray<int,3> hiVect3d () const noexcept {
 Dim3 hi3d = bigend.dim3(0);
 return {{hi3d.x, hi3d.y, hi3d.z}};
 }

 //! Raw pointer to the low-corner coordinates (rvalue overload deleted).
 [[nodiscard]] AMREX_GPU_HOST_DEVICE
 const int* loVect () const& noexcept { return smallend.getVect(); }
 AMREX_GPU_HOST_DEVICE
 const int* loVect () && = delete;
 //! Raw pointer to the high-corner coordinates (rvalue overload deleted).
 [[nodiscard]] AMREX_GPU_HOST_DEVICE
 const int* hiVect () const& noexcept { return bigend.getVect(); }
 AMREX_GPU_HOST_DEVICE
 const int* hiVect () && = delete;

 //! Coordinate of the given face: smallend for a low face, bigend for a high face.
 [[nodiscard]] AMREX_GPU_HOST_DEVICE
 int operator[] (Orientation face) const noexcept {
 const int dir = face.coordDir();
 return face.isLow() ? smallend[dir] : bigend[dir];
 }

 //! True if this BoxND is not ok().
 [[nodiscard]] AMREX_GPU_HOST_DEVICE
 bool isEmpty () const noexcept { return !ok(); }

 //! Valid iff bigend >= smallend in every direction and the type is ok.
 [[nodiscard]] AMREX_GPU_HOST_DEVICE
 bool ok () const noexcept { return bigend.allGE(smallend) && btype.ok(); }

 //! True if point p lies within this BoxND (inclusive bounds).
 [[nodiscard]] AMREX_GPU_HOST_DEVICE
 bool contains (const IntVectND<dim>& p) const noexcept {
 return p.allGE(smallend) && p.allLE(bigend);
 }

 //! Dim3 overload of contains().
 template <int N=dim, std::enable_if_t<( 1<=N && N<=3 ), int> = 0>
 [[nodiscard]] AMREX_GPU_HOST_DEVICE
 bool contains (const Dim3& p) const noexcept {
 IntVectND<dim> piv{p};
 return contains(piv);
 }

 //! (i,j,k) overload of contains().
 template <int N=dim, std::enable_if_t<( 1<=N && N<=3 ), int> = 0>
 [[nodiscard]] AMREX_GPU_HOST_DEVICE
 bool contains (int i, int j, int k) const noexcept {
 Dim3 p3d{i, j, k};
 return contains(p3d);
 }

 //! True if b lies entirely within this BoxND; both must have the same type.
 [[nodiscard]] AMREX_GPU_HOST_DEVICE
 bool contains (const BoxND& b) const noexcept
 {
 BL_ASSERT(sameType(b));
 return b.smallend.allGE(smallend) && b.bigend.allLE(bigend);
 }

 //! True if p lies strictly inside this BoxND (exclusive bounds).
 [[nodiscard]] AMREX_GPU_HOST_DEVICE
 bool strictly_contains (const IntVectND<dim>& p) const noexcept {
 return p.allGT(smallend) && p.allLT(bigend);
 }

 //! True if b lies strictly inside this BoxND; both must have the same type.
 [[nodiscard]] AMREX_GPU_HOST_DEVICE
 bool strictly_contains (const BoxND& b) const noexcept
 {
 BL_ASSERT(sameType(b));
 return b.smallend.allGT(smallend) && b.bigend.allLT(bigend);
 }

 //! Dim3 overload of strictly_contains().
 template <int N=dim, std::enable_if_t<( 1<=N && N<=3 ), int> = 0>
 [[nodiscard]] AMREX_GPU_HOST_DEVICE
 bool strictly_contains (const Dim3& p) const noexcept {
 IntVectND<dim> piv{p};
 return strictly_contains(piv);
 }

 //! (i,j,k) overload of strictly_contains().
 template <int N=dim, std::enable_if_t<( 1<=N && N<=3 ), int> = 0>
 [[nodiscard]] AMREX_GPU_HOST_DEVICE
 bool strictly_contains (int i, int j, int k) const noexcept {
 Dim3 p3d{i, j, k};
 return strictly_contains(p3d);
 }

 //! True if this BoxND and b have a non-empty intersection.
 [[nodiscard]] AMREX_GPU_HOST_DEVICE
 bool intersects (const BoxND& b) const noexcept { BoxND isect(*this); isect &= b; return isect.ok(); }

 //! True if this BoxND and b have identical lengths (same type required).
 [[nodiscard]] AMREX_GPU_HOST_DEVICE
 bool sameSize (const BoxND& b) const noexcept {
 BL_ASSERT(sameType(b));
 return length() == b.length();
 }

 //! True if this BoxND and b have the same centering.
 [[nodiscard]] AMREX_GPU_HOST_DEVICE
 bool sameType (const BoxND &b) const noexcept { return btype == b.btype; }

 //! Equal iff corners and centering all match.
 [[nodiscard]] AMREX_GPU_HOST_DEVICE
 bool operator== (const BoxND& b) const noexcept { return smallend == b.smallend && bigend == b.bigend && b.btype == btype; }

 [[nodiscard]] AMREX_GPU_HOST_DEVICE
 bool operator!= (const BoxND& b) const noexcept { return !operator==(b); }

 //! Lexicographic order on (btype, smallend, bigend) — a total order usable for sorting.
 [[nodiscard]] AMREX_GPU_HOST_DEVICE
 bool operator< (const BoxND& rhs) const noexcept
 {
 return btype < rhs.btype ||
 ((btype == rhs.btype) &&
 ( (smallend < rhs.smallend) ||
 ((smallend == rhs.smallend) && (bigend < rhs.bigend)) ));
 }
 [[nodiscard]] AMREX_GPU_HOST_DEVICE
 bool operator <= (const BoxND& rhs) const noexcept {
 return !(rhs < *this);
 }
 [[nodiscard]] AMREX_GPU_HOST_DEVICE
 bool operator> (const BoxND& rhs) const noexcept {
 return rhs < *this;
 }
 [[nodiscard]] AMREX_GPU_HOST_DEVICE
 bool operator>= (const BoxND& rhs) const noexcept {
 return !(*this < rhs);
 }

 //! True if cell-centered in every direction.
 [[nodiscard]] AMREX_GPU_HOST_DEVICE
 bool cellCentered () const noexcept { return !btype.any(); }

 //! Host-side check that no per-direction length overflows int and the
 //! point count does not overflow Long; aborts via AMREX_ALWAYS_ASSERT.
 void checkOverflow () const noexcept {
 if (ok()) {
 for (int i = 0; i < dim; ++i) {
 auto lo = static_cast<Long>(smallend[i]);
 auto hi = static_cast<Long>(bigend[i]);
 Long len = hi - lo + 1;
 AMREX_ALWAYS_ASSERT_WITH_MESSAGE(len>=0 && len<std::numeric_limits<int>::max(),
 "Overflow when computing length of box");
 }
 auto num_pts = static_cast<Long>(length(0));
 for (int i = 1; i < dim; ++i) {
 auto len = static_cast<Long>(length(i));
 AMREX_ALWAYS_ASSERT_WITH_MESSAGE(num_pts == 0 || len == 0 ||
 num_pts <= std::numeric_limits<Long>::max() / len,
 "Overflow when computing numPts of box");
 num_pts *= len;
 }
 }
 }

 //! Number of points (product of lengths); 0 for an invalid BoxND.
 [[nodiscard]] AMREX_GPU_HOST_DEVICE
 Long numPts () const noexcept {
#if defined(AMREX_DEBUG) || defined(AMREX_USE_ASSERTION)
 AMREX_IF_ON_HOST((checkOverflow();))
#endif
 if (ok()) {
 auto num_pts = static_cast<Long>(length(0));
 for (int i = 1; i < dim; ++i) {
 num_pts *= static_cast<Long>(length(i));
 }
 return num_pts;
 } else {
 return Long(0);
 }
 }

 //! Number of points as a double (avoids integer overflow for huge boxes).
 [[nodiscard]] AMREX_GPU_HOST_DEVICE
 double d_numPts () const noexcept {
 if (ok()) {
 auto num_pts = static_cast<double>(length(0));
 for (int i = 1; i < dim; ++i) {
 num_pts *= static_cast<double>(length(i));
 }
 return num_pts;
 } else {
 return 0.0;
 }
 }

 //! Number of cells covered: each NODE-centered direction contributes length-1.
 [[nodiscard]] AMREX_GPU_HOST_DEVICE
 Long volume () const noexcept {
 if (ok()) {
 auto num_pts = static_cast<Long>(length(0)-btype[0]);
 for (int i = 1; i < dim; ++i) {
 num_pts *= static_cast<Long>(length(i)-btype[i]);
 }
 return num_pts;
 } else {
 return Long(0);
 }
 }

 //! Length of the longest side; dir is set to the first such direction.
 [[nodiscard]] AMREX_GPU_HOST_DEVICE
 int longside (int& dir) const noexcept {
 int maxlen = length(0);
 dir = 0;
 for (int i = 1; i < dim; i++)
 {
 if (length(i) > maxlen)
 {
 maxlen = length(i);
 dir = i;
 }
 }
 return maxlen;
 }

 //! Length of the longest side, direction discarded.
 [[nodiscard]] AMREX_GPU_HOST_DEVICE
 int longside () const noexcept {
 int ignore = 0;
 return longside(ignore);
 }

 //! Length of the shortest side; dir is set to the first such direction.
 [[nodiscard]] AMREX_GPU_HOST_DEVICE
 int shortside (int& dir) const noexcept {
 int minlen = length(0);
 dir = 0;
 for (int i = 1; i < dim; i++)
 {
 if (length(i) < minlen)
 {
 minlen = length(i);
 dir = i;
 }
 }
 return minlen;
 }

 //! Length of the shortest side, direction discarded.
 [[nodiscard]] AMREX_GPU_HOST_DEVICE
 int shortside () const noexcept {
 int ignore = 0;
 return shortside(ignore);
 }

 //! Linear offset of point v relative to smallend (x varies fastest).
 [[nodiscard]] AMREX_GPU_HOST_DEVICE
 Long index (const IntVectND<dim>& v) const noexcept;

 //! Inverse of index(): the point at the given linear offset.
 [[nodiscard]] AMREX_GPU_HOST_DEVICE
 IntVectND<dim> atOffset (Long offset) const noexcept;

 //! atOffset() padded to three entries for 1-3 dimensions.
 template <int N=dim, std::enable_if_t<( 1<=N && N<=3 ), int> = 0>
 [[nodiscard]] AMREX_GPU_HOST_DEVICE
 GpuArray<int,3> atOffset3d (Long offset) const noexcept;

 //! Redefine the low corner.
 AMREX_GPU_HOST_DEVICE
 BoxND& setSmall (const IntVectND<dim>& sm) noexcept { smallend = sm; return *this; }

 //! Redefine the low corner in direction dir.
 AMREX_GPU_HOST_DEVICE
 BoxND& setSmall (int dir, int sm_index) noexcept { smallend.setVal(dir,sm_index); return *this; }

 //! Redefine the high corner.
 AMREX_GPU_HOST_DEVICE
 BoxND& setBig (const IntVectND<dim>& bg) noexcept { bigend = bg; return *this; }

 //! Redefine the high corner in direction dir.
 AMREX_GPU_HOST_DEVICE
 BoxND& setBig (int dir, int bg_index) noexcept { bigend.setVal(dir,bg_index); return *this; }

 //! Make direction dir span [sm_index, sm_index+n_cells-1].
 AMREX_GPU_HOST_DEVICE
 BoxND& setRange (int dir,
 int sm_index,
 int n_cells = 1) noexcept;

 //! Set the index type.
 AMREX_GPU_HOST_DEVICE
 BoxND& setType (const IndexTypeND<dim>& t) noexcept { btype = t; return *this; }

 //! Shift both corners by nzones in direction dir.
 AMREX_GPU_HOST_DEVICE
 BoxND& shift (int dir, int nzones) noexcept { smallend.shift(dir,nzones); bigend.shift(dir,nzones); return *this; }

 //! Shift both corners by iv.
 AMREX_GPU_HOST_DEVICE
 BoxND& shift (const IntVectND<dim>& iv) noexcept { smallend.shift(iv); bigend.shift(iv); return *this; }

 //! Shift by num_halfs half-cell widths in direction dir; an odd count
 //! toggles the centering in that direction.
 AMREX_GPU_HOST_DEVICE
 BoxND& shiftHalf (int dir, int num_halfs) noexcept;

 //! Per-direction half-cell shifts.
 AMREX_GPU_HOST_DEVICE
 BoxND& shiftHalf (const IntVectND<dim>& iv) noexcept;

 //! Convert in place to the given IndexTypeND, adjusting bigend accordingly.
 AMREX_GPU_HOST_DEVICE
 BoxND& convert (IndexTypeND<dim> typ) noexcept;

 //! Convert in place; typ entries must be 0 (CELL) or 1 (NODE).
 AMREX_GPU_HOST_DEVICE
 BoxND& convert (const IntVectND<dim>& typ) noexcept;

 //! Convert to NODE centering in every direction.
 AMREX_GPU_HOST_DEVICE
 BoxND& surroundingNodes () noexcept;

 //! Convert to NODE centering in direction dir.
 AMREX_GPU_HOST_DEVICE
 BoxND& surroundingNodes (int dir) noexcept;

 AMREX_GPU_HOST_DEVICE
 BoxND& surroundingNodes (Direction d) noexcept { return surroundingNodes(static_cast<int>(d)); }

 //! Convert to CELL centering in every direction.
 AMREX_GPU_HOST_DEVICE
 BoxND& enclosedCells () noexcept;

 //! Convert to CELL centering in direction dir.
 AMREX_GPU_HOST_DEVICE
 BoxND& enclosedCells (int dir) noexcept;

 AMREX_GPU_HOST_DEVICE
 BoxND& enclosedCells (Direction d) noexcept { return enclosedCells(static_cast<int>(d)); }

 //! Intersection of two BoxNDs (may be invalid if they are disjoint).
 AMREX_GPU_HOST_DEVICE
 BoxND operator& (const BoxND& rhs) const noexcept { BoxND lhs(*this); lhs &= rhs; return lhs; }

 //! Intersect in place; both operands must have the same type.
 AMREX_GPU_HOST_DEVICE
 BoxND& operator&= (const BoxND& rhs) noexcept
 {
 BL_ASSERT(sameType(rhs));
 smallend.max(rhs.smallend);
 bigend.min(rhs.bigend);
 return *this;
 }

 //! Grow to the minimal BoxND containing both this and b.
 AMREX_GPU_HOST_DEVICE
 BoxND& minBox (const BoxND& b) noexcept {
 // BoxArray may call this with not ok boxes. BL_ASSERT(b.ok() && ok());
 BL_ASSERT(sameType(b));
 smallend.min(b.smallend);
 bigend.max(b.bigend);
 return *this;
 }

 //! Translate by +v.
 AMREX_GPU_HOST_DEVICE
 BoxND& operator+= (const IntVectND<dim>& v) noexcept { smallend += v; bigend += v; return *this; }

 AMREX_GPU_HOST_DEVICE
 BoxND operator+ (const IntVectND<dim>& v) const noexcept { BoxND r(*this); r += v; return r; }

 //! Translate by -v.
 AMREX_GPU_HOST_DEVICE
 BoxND& operator-= (const IntVectND<dim>& v) noexcept { smallend -= v; bigend -= v; return *this; }

 AMREX_GPU_HOST_DEVICE
 BoxND operator- (const IntVectND<dim>& v) const noexcept { BoxND r(*this); r -= v; return r; }

 //! Split at chop_pnt in direction dir; *this keeps the low piece, the
 //! high piece is returned.
 AMREX_GPU_HOST_DEVICE
 BoxND chop (int dir, int chop_pnt) noexcept;

 /*
 * \brief Grow BoxND in all directions by given amount.
 * NOTE: n_cell negative shrinks the BoxND by that number of cells.
 */
 AMREX_GPU_HOST_DEVICE
 BoxND& grow (int i) noexcept { smallend.diagShift(-i); bigend.diagShift(i); return *this; }

 //! Grow by v[d] cells on both sides in each direction d.
 AMREX_GPU_HOST_DEVICE
 BoxND& grow (const IntVectND<dim>& v) noexcept { smallend -= v; bigend += v; return *this;}

 //! Grow by n_cell on both sides in direction idir only.
 AMREX_GPU_HOST_DEVICE
 BoxND& grow (int idir, int n_cell) noexcept { smallend.shift(idir, -n_cell); bigend.shift(idir, n_cell); return *this; }

 AMREX_GPU_HOST_DEVICE
 BoxND& grow (Direction d, int n_cell) noexcept { return grow(static_cast<int>(d), n_cell); }

 //! Grow only the low side in direction idir.
 AMREX_GPU_HOST_DEVICE
 BoxND& growLo (int idir, int n_cell = 1) noexcept { smallend.shift(idir, -n_cell); return *this; }

 AMREX_GPU_HOST_DEVICE
 BoxND& growLo (Direction d, int n_cell = 1) noexcept { return growLo(static_cast<int>(d), n_cell); }

 //! Grow only the high side in direction idir.
 AMREX_GPU_HOST_DEVICE
 BoxND& growHi (int idir, int n_cell = 1) noexcept { bigend.shift(idir,n_cell); return *this; }

 AMREX_GPU_HOST_DEVICE
 BoxND& growHi (Direction d, int n_cell = 1) noexcept { return growHi(static_cast<int>(d), n_cell); }

 //! Grow only the side selected by the given face.
 AMREX_GPU_HOST_DEVICE
 BoxND& grow (Orientation face, int n_cell = 1) noexcept {
 int idir = face.coordDir();
 if (face.isLow()) {
 smallend.shift(idir, -n_cell);
 } else {
 bigend.shift(idir,n_cell);
 }
 return *this;
 }

 //! Refine by the same ratio in every direction.
 AMREX_GPU_HOST_DEVICE
 BoxND& refine (int ref_ratio) noexcept {
 return this->refine(IntVectND<dim>(ref_ratio));
 }

 /*
 * \brief Refine BoxND by given (positive) refinement ratio.
 * NOTE: if type(dir) = CELL centered: lo <- lo*ratio and
 * hi <- (hi+1)*ratio - 1.
 * NOTE: if type(dir) = NODE centered: lo <- lo*ratio and
 * hi <- hi*ratio.
 */
 AMREX_GPU_HOST_DEVICE
 BoxND& refine (const IntVectND<dim>& ref_ratio) noexcept;

 //! Coarsen by the same ratio in every direction.
 AMREX_GPU_HOST_DEVICE
 BoxND& coarsen (int ref_ratio) noexcept {
 return this->coarsen(IntVectND<dim>(ref_ratio));
 }

 //! Coarsen by the given (positive) per-direction ratio.
 BoxND& coarsen (const IntVectND<dim>& ref_ratio) noexcept;

 //! Advance the point to the next one in this BoxND (x varies fastest).
 void next (IntVectND<dim> &) const noexcept;

 // NOTE(review): the body of TheUnitBox() appears to have been truncated by
 // the doc extraction that produced this file — upstream presumably returns
 // the unit box; restore the return statement from the original header.
 static BoxND TheUnitBox () noexcept {
 }

 //! True if all side lengths are equal (always false in 1-D).
 [[nodiscard]] AMREX_GPU_HOST_DEVICE
 bool isSquare() const noexcept;

 //! True if coarsening by refrat then refining recovers exactly this BoxND
 //! and the size is at least refrat*min_width in every direction.
 [[nodiscard]] AMREX_GPU_HOST_DEVICE
 bool coarsenable(const IntVectND<dim>& refrat, const IntVectND<dim>& min_width) const noexcept
 {
 if (!size().allGE(refrat*min_width)) {
 return false;
 } else {
 BoxND testBox = *this;
 testBox.coarsen(refrat);
 testBox.refine (refrat);
 return (*this == testBox);
 }
 }

 //! Scalar-ratio convenience overload of coarsenable().
 [[nodiscard]] AMREX_GPU_HOST_DEVICE
 bool coarsenable(int refrat, int min_width=1) const noexcept {
 return coarsenable(IntVectND<dim>(refrat), IntVectND<dim>(min_width));
 }

 //! Scalar-width convenience overload of coarsenable().
 [[nodiscard]] AMREX_GPU_HOST_DEVICE
 bool coarsenable(const IntVectND<dim>& refrat, int min_width=1) const noexcept
 {
 return coarsenable(refrat, IntVectND<dim>(min_width));
 }

 //! Ensure every direction has non-zero length by growing the high side.
 void normalize () noexcept
 {
 for (int idim=0; idim < dim; ++idim) {
 if (this->length(idim) == 0) {
 this->growHi(idim,1);
 }
 }
 }

 //! Collapse the given direction to the single index slab_index.
 BoxND& makeSlab (int direction, int slab_index) noexcept
 {
 smallend[direction] = slab_index;
 bigend[direction] = slab_index;
 return *this;
 }

 //! Dimensionality as std::size_t.
 static constexpr std::size_t ndims () noexcept {
 return static_cast<std::size_t>(dim);
 }

 //! Dimensionality as int.
 static constexpr int indims () noexcept {
 return dim;
 }

 //! Return a lower-dimensional BoxND keeping the first new_dim directions.
 template<int new_dim>
 BoxND<new_dim> shrink () const noexcept {
 static_assert(new_dim <= dim);
 auto lo = smallend.template shrink<new_dim>();
 auto hi = bigend.template shrink<new_dim>();
 auto typ = btype.template shrink<new_dim>();
 return BoxND<new_dim>(lo, hi, typ);
 }

 //! Return a higher-dimensional BoxND; new directions span [0,0], CELL centered.
 template<int new_dim>
 BoxND<new_dim> expand () const noexcept {
 static_assert(new_dim >= dim);
 auto lo = smallend.template expand<new_dim>(0);
 auto hi = bigend.template expand<new_dim>(0);
 auto typ = btype.template expand<new_dim>(IndexType::CellIndex::CELL);
 return BoxND<new_dim>(lo, hi, typ);
 }

 //! shrink() or expand() as appropriate for new_dim.
 template<int new_dim>
 BoxND<new_dim> resize () const noexcept {
 if constexpr (new_dim > dim) {
 return expand<new_dim>();
 } else {
 return shrink<new_dim>();
 }
 }

private:
 // NOTE(review): the data-member declarations (smallend, bigend, btype —
 // used throughout this class) appear to have been elided by the doc
 // extraction — restore them from the original header.
};
844 
//! Refine this BoxND in place by ref_ratio.
//! CELL directions: lo <- lo*ratio, hi <- (hi+1)*ratio - 1;
//! NODE directions: lo <- lo*ratio, hi <- hi*ratio.
template<int dim>
BoxND<dim>&
BoxND<dim>::refine (const IntVectND<dim>& ref_ratio) noexcept
{
 if (ref_ratio != 1) {
 // shft is 1 in CELL directions and 0 in NODE directions: temporarily
 // converting CELL hi to its node bound before scaling produces the
 // (hi+1)*ratio - 1 formula above.
 IntVectND<dim> shft(1);
 shft -= btype.ixType();
 smallend *= ref_ratio;
 bigend += shft;
 bigend *= ref_ratio;
 bigend -= shft;
 }
 return *this;
}
861 
//! Coarsen this BoxND in place by ref_ratio (per-direction, via
//! IntVectND::coarsen on each corner).
template<int dim>
BoxND<dim>&
BoxND<dim>::coarsen (const IntVectND<dim>& ref_ratio) noexcept
{
 if (ref_ratio != 1)
 {
 smallend.coarsen(ref_ratio);

 if (btype.any())
 {
 // For NODE-centered directions: if the fine hi is not a multiple of
 // the ratio, the coarsened hi must be bumped by one so the coarse
 // box still covers the fine nodes (assumes IntVectND::coarsen
 // truncates toward the low side — confirm against AMReX_IntVect.H).
 IntVectND<dim> off(0);
 for (int dir = 0; dir < dim; dir++)
 {
 if (btype[dir]) {
 if (bigend[dir]%ref_ratio[dir]) {
 off.setVal(dir,1);
 }
 }
 }
 bigend.coarsen(ref_ratio);
 bigend += off;
 }
 else
 {
 bigend.coarsen(ref_ratio);
 }
 }

 return *this;
}
894 
895 template<int dim>
898 BoxND<dim>&
899 BoxND<dim>::convert (const IntVectND<dim>& typ) noexcept
900 {
901  BL_ASSERT(typ.allGE(0) && typ.allLE(1));
902  IntVectND<dim> shft(typ - btype.ixType());
903  bigend += shft;
904  btype = IndexTypeND<dim>(typ);
905  return *this;
906 }
907 
//! Per-direction conversion to an IndexTypeND t: shift bigend by the change
//! in centering and record the new type bit.
// NOTE(review): the declarator line (presumably
// "BoxND<dim>::convert (IndexTypeND<dim> t) noexcept") was dropped by the
// doc extraction that produced this file — restore it from the upstream header.
template<int dim>
BoxND<dim>&
{
 for (int dir = 0; dir < dim; dir++)
 {
 const auto typ = t[dir];
 const auto bitval = btype[dir];
 const int off = typ - bitval;
 bigend.shift(dir,off);
 btype.setType(dir, static_cast<IndexType::CellIndex>(typ));
 }
 return *this;
}
924 
//! Convert direction dir to NODE centering: extend bigend by one and set the
//! type bit; no-op if the direction is already NODE centered.
// NOTE(review): the declarator line (presumably
// "BoxND<dim>::surroundingNodes (int dir) noexcept") was dropped by the doc
// extraction — restore it from the upstream header.
template<int dim>
BoxND<dim>&
{
 if (!(btype[dir]))
 {
 bigend.shift(dir,1);
 //
 // Set dir'th bit to 1 = IndexType::NODE.
 //
 btype.set(dir);
 }
 return *this;
}
941 
//! Convert every direction to NODE centering: extend bigend in each
//! direction that is still CELL centered, then set all type bits.
// NOTE(review): the declarator line (presumably
// "BoxND<dim>::surroundingNodes () noexcept") was dropped by the doc
// extraction — restore it from the upstream header.
template<int dim>
BoxND<dim>&
{
 for (int i = 0; i < dim; ++i) {
 if ((btype[i] == 0)) {
 bigend.shift(i,1);
 }
 }
 btype.setall();
 return *this;
}
956 
957 template<int dim>
960 BoxND<dim>&
961 BoxND<dim>::enclosedCells (int dir) noexcept
962 {
963  if (btype[dir])
964  {
965  bigend.shift(dir,-1);
966  //
967  // Set dir'th bit to 0 = IndexType::CELL.
968  //
969  btype.unset(dir);
970  }
971  return *this;
972 }
973 
//! Convert every direction to CELL centering: retract bigend in each
//! NODE-centered direction, then clear all type bits.
// NOTE(review): the declarator line (presumably
// "BoxND<dim>::enclosedCells () noexcept") was dropped by the doc
// extraction — restore it from the upstream header.
template<int dim>
BoxND<dim>&
{
 for (int i = 0 ; i < dim; ++i) {
 if (btype[i]) {
 bigend.shift(i,-1);
 }
 }
 btype.clear();
 return *this;
}
988 
989 template<int dim>
992 Long
993 BoxND<dim>::index (const IntVectND<dim>& v) const noexcept
994 {
995  IntVectND<dim> vz = v - smallend;
996  Long result = vz[0];
997  Long mult = length(0);
998  for (int i = 1 ; i < dim; ++i) {
999  result += mult * vz[i];
1000  mult *= length(i);
1001  }
1002  return result;
1003 }
1004 
//! Inverse of index(): map a row-major linear offset (x fastest) back to the
//! corresponding point of this BoxND.
// NOTE(review): the return-type line (presumably "IntVectND<dim>") was
// dropped by the doc extraction — restore it from the upstream header.
template<int dim>
BoxND<dim>::atOffset (Long offset) const noexcept
{
 IntVectND<dim> result = smallend;

 if constexpr (dim > 1) {
 // mult[i-1] holds the number of points spanned by the first i directions.
 GpuArray<Long, dim-1> mult{};
 mult[0] = length(0);
 for (int i = 1 ; i < dim-1; ++i) {
 mult[i] = mult[i-1] * length(i);
 }
 // Peel off the highest direction first, reducing offset as we go.
 for (int i = dim-1 ; i > 0; --i) {
 Long idx = offset / mult[i-1];
 offset -= idx * mult[i-1];
 result[i] += static_cast<int>(idx);
 }
 }

 // What remains is the x component.
 result[0] += static_cast<int>(offset);

 return result;
}
1030 
1031 template<int dim>
1032 template <int N, std::enable_if_t<( 1<=N && N<=3 ), int>>
1033 AMREX_GPU_HOST_DEVICE
1034 AMREX_FORCE_INLINE
1035 GpuArray<int,3>
1036 BoxND<dim>::atOffset3d (Long offset) const noexcept
1037 {
1038  Dim3 iv3d = atOffset(offset).dim3(0);
1039  return {{iv3d.x, iv3d.y, iv3d.z}};
1040 }
1041 
//! Make direction dir span [sm_index, sm_index+n_cells-1].
// NOTE(review): the first declarator line (presumably
// "BoxND<dim>::setRange (int dir,") was dropped by the doc extraction —
// restore it from the upstream header.
template<int dim>
BoxND<dim>&
 int sm_index,
 int n_cells) noexcept
{
 smallend.setVal(dir,sm_index);
 bigend.setVal(dir,sm_index+n_cells-1);
 return *this;
}
1054 
1055 template<int dim>
1058 void
1059 BoxND<dim>::next (IntVectND<dim>& p) const noexcept // NOLINT(readability-convert-member-functions-to-static)
1060 {
1061  BL_ASSERT(contains(p));
1062 
1063  ++p[0];
1064 
1065  for (int i = 0 ; i < dim-1; ++i) {
1066  if (p[i] > bigend[i]) {
1067  p[i] = smallend[i];
1068  ++p[i+1];
1069  } else {
1070  break;
1071  }
1072  }
1073 }
1074 
1075 template<int dim>
1078 bool
1079 BoxND<dim>::isSquare () const noexcept // NOLINT(readability-convert-member-functions-to-static)
1080 {
1081  if constexpr (dim == 1) {
1082  return false; // can't build a square in 1-D
1083  } else {
1084  bool is_square = true;
1085  const IntVectND<dim>& sz = this->size();
1086  for (int i = 0 ; i < dim-1; ++i) {
1087  is_square = is_square && (sz[i] == sz[i+1]);
1088  }
1089  return is_square;
1090  }
1091 }
1092 
1093 //
1094 // Modified BoxND is low end, returned BoxND is high end.
1095 // If CELL: chop_pnt included in high end.
1096 // If NODE: chop_pnt included in both Boxes.
1097 //
1098 
1099 template<int dim>
1101 inline
1102 BoxND<dim>
1103 BoxND<dim>::chop (int dir, int chop_pnt) noexcept
1104 {
1105  //
1106  // Define new high end BoxND including chop_pnt.
1107  //
1108  IntVectND<dim> sm(smallend);
1109  IntVectND<dim> bg(bigend);
1110  sm.setVal(dir,chop_pnt);
1111  if (btype[dir])
1112  {
1113  //
1114  // NODE centered BoxND.
1115  //
1116  BL_ASSERT(chop_pnt > smallend[dir] && chop_pnt < bigend[dir]);
1117  //
1118  // Shrink original BoxND to just contain chop_pnt.
1119  //
1120  bigend.setVal(dir,chop_pnt);
1121  }
1122  else
1123  {
1124  //
1125  // CELL centered BoxND.
1126  //
1127  BL_ASSERT(chop_pnt > smallend[dir] && chop_pnt <= bigend[dir]);
1128  //
1129  // Shrink original BoxND to one below chop_pnt.
1130  //
1131  bigend.setVal(dir,chop_pnt-1);
1132  }
1133  return BoxND<dim>(sm,bg,btype);
1134 }
1135 
//! Shift direction dir by num_halfs half-cell widths. Two halves make one
//! whole-cell shift; an odd count toggles the centering bit in that
//! direction, with the leftover half folded into the whole-cell shift
//! depending on the sign and the original centering.
template<int dim>
inline
BoxND<dim>&
BoxND<dim>::shiftHalf (int dir, int num_halfs) noexcept
{
 const int nbit = (num_halfs<0 ? -num_halfs : num_halfs)%2;
 int nshift = num_halfs/2;
 //
 // Toggle btyp bit if num_halfs is odd.
 //
 const unsigned int bit_dir = btype[dir];
 if (nbit) {
 btype.flip(dir);
 }
 if (num_halfs < 0) {
 // Shifting down: a NODE-centered direction absorbs the odd half by
 // moving one extra whole cell.
 nshift -= (bit_dir ? nbit : 0);
 } else {
 // Shifting up: a CELL-centered direction absorbs the odd half by
 // moving one extra whole cell.
 nshift += (bit_dir ? 0 : nbit);
 }
 smallend.shift(dir,nshift);
 bigend.shift(dir,nshift);
 return *this;
}
1160 
//! Apply shiftHalf(dir, iv[dir]) in every direction.
// NOTE(review): the declarator line (presumably
// "BoxND<dim>::shiftHalf (const IntVectND<dim>& iv) noexcept") was dropped
// by the doc extraction — restore it from the upstream header.
template<int dim>
inline
BoxND<dim>&
{
 for (int i = 0; i < dim; i++) {
 shiftHalf(i,iv[i]);
 }
 return *this;
}
1172 
// NOTE(review): the "class BoxCommHelper" introducer line was dropped by the
// doc extraction — this brace opens the body of class BoxCommHelper, which
// (de)serializes a Box as 3*AMREX_SPACEDIM ints (lo, hi, type), presumably
// for MPI communication given the AMReX_ccse-mpi.H include — confirm.
{
public:

 //! Serialize bx; uses p_ as external storage when non-null (internal
 //! storage presumably lives in v otherwise — confirm in the .cpp).
 explicit BoxCommHelper (const Box& bx, int* p_ = nullptr);

 //! Raw pointer to the serialized ints (rvalue overload deleted).
 [[nodiscard]] int* data () const& noexcept { return p; }
 int* data () && = delete;

 //! Rebuild the Box from the serialized layout: lo, then hi, then type.
 [[nodiscard]] Box make_box () const noexcept {
 return Box(IntVect(p), IntVect(p+AMREX_SPACEDIM), IntVect(p+2*AMREX_SPACEDIM));
 }

 //! Number of ints in the serialized representation.
 [[nodiscard]] static int size () noexcept { return 3*AMREX_SPACEDIM; }

private:
 int* p;
 std::vector<int> v;
};
1192 
//! Abstract interface: produce a Box derived from a given fine Box.
//! Concrete subclasses implement doit() and must be clonable via clone().
class BoxConverter { // NOLINT
public:
 virtual Box doit (const Box& fine) const = 0; // NOLINT
 virtual BoxConverter* clone () const = 0; // NOLINT
 virtual ~BoxConverter () = default;
};
1199 
1200 void AllGatherBoxes (Vector<Box>& bxs, int n_extra_reserve=0);
1201 
1207 template<int dim>
1208 [[nodiscard]]
1211 BoxND<dim> grow (const BoxND<dim>& b, int i) noexcept
1212 {
1213  BoxND<dim> result = b;
1214  result.grow(i);
1215  return result;
1216 }
1217 
1219 template<int dim>
1220 [[nodiscard]]
1223 BoxND<dim> grow (const BoxND<dim>& b, const IntVectND<dim>& v) noexcept
1224 {
1225  BoxND<dim> result = b;
1226  result.grow(v);
1227  return result;
1228 }
1229 
1231 template<int dim>
1232 [[nodiscard]]
1235 BoxND<dim> grow (const BoxND<dim>& b, int idir, int n_cell) noexcept
1236 {
1237  BoxND<dim> result = b;
1238  result.grow(idir, n_cell);
1239  return result;
1240 }
1241 
1242 template<int dim>
1243 [[nodiscard]]
1246 BoxND<dim> grow (const BoxND<dim>& b, Direction d, int n_cell) noexcept
1247 {
1248  return grow(b, static_cast<int>(d), n_cell);
1249 }
1250 
1251 template<int dim>
1252 [[nodiscard]]
1255 BoxND<dim> growLo (const BoxND<dim>& b, int idir, int n_cell) noexcept
1256 {
1257  BoxND<dim> result = b;
1258  result.growLo(idir, n_cell);
1259  return result;
1260 }
1261 
1262 template<int dim>
1263 [[nodiscard]]
1266 BoxND<dim> growLo (const BoxND<dim>& b, Direction d, int n_cell) noexcept
1267 {
1268  return growLo(b, static_cast<int>(d), n_cell);
1269 }
1270 
1271 template<int dim>
1272 [[nodiscard]]
1275 BoxND<dim> growHi (const BoxND<dim>& b, int idir, int n_cell) noexcept
1276 {
1277  BoxND<dim> result = b;
1278  result.growHi(idir, n_cell);
1279  return result;
1280 }
1281 
1282 template<int dim>
1283 [[nodiscard]]
1286 BoxND<dim> growHi (const BoxND<dim>& b, Direction d, int n_cell) noexcept
1287 {
1288  return growHi(b, static_cast<int>(d), n_cell);
1289 }
1290 
1300 template<int dim>
1301 [[nodiscard]]
1304 BoxND<dim> coarsen (const BoxND<dim>& b, int ref_ratio) noexcept
1305 {
1306  BoxND<dim> result = b;
1307  result.coarsen(IntVectND<dim>(ref_ratio));
1308  return result;
1309 }
1310 
1320 template<int dim>
1321 [[nodiscard]]
1324 BoxND<dim> coarsen (const BoxND<dim>& b, const IntVectND<dim>& ref_ratio) noexcept
1325 {
1326  BoxND<dim> result = b;
1327  result.coarsen(ref_ratio);
1328  return result;
1329 }
1330 
1338 template<int dim>
1339 [[nodiscard]]
1342 BoxND<dim> refine (const BoxND<dim>& b, int ref_ratio) noexcept
1343 {
1344  BoxND<dim> result = b;
1345  result.refine(IntVectND<dim>(ref_ratio));
1346  return result;
1347 }
1348 
1356 template<int dim>
1357 [[nodiscard]]
1360 BoxND<dim> refine (const BoxND<dim>& b, const IntVectND<dim>& ref_ratio) noexcept
1361 {
1362  BoxND<dim> result = b;
1363  result.refine(ref_ratio);
1364  return result;
1365 }
1366 
1368 template<int dim>
1369 [[nodiscard]]
1372 BoxND<dim> shift (const BoxND<dim>& b, int dir, int nzones) noexcept
1373 {
1374  BoxND<dim> result = b;
1375  result.shift(dir, nzones);
1376  return result;
1377 }
1378 
1379 template<int dim>
1380 [[nodiscard]]
1383 BoxND<dim> shift (const BoxND<dim>& b, const IntVectND<dim>& nzones) noexcept
1384 {
1385  BoxND<dim> result = b;
1386  result.shift(nzones);
1387  return result;
1388 }
1389 
1395 template<int dim>
1396 [[nodiscard]]
1399 BoxND<dim> surroundingNodes (const BoxND<dim>& b, int dir) noexcept
1400 {
1401  BoxND<dim> bx(b);
1402  bx.surroundingNodes(dir);
1403  return bx;
1404 }
1405 
//! Direction overload of surroundingNodes(b, dir).
// NOTE(review): the declarator line (presumably
// "BoxND<dim> surroundingNodes (const BoxND<dim>& b, Direction d) noexcept")
// was dropped by the doc extraction — restore it from the upstream header.
template<int dim>
[[nodiscard]]
{
 return surroundingNodes(b, static_cast<int>(d));
}
1414 
//! Return b converted to NODE centering in every direction.
// NOTE(review): the declarator line (presumably
// "BoxND<dim> surroundingNodes (const BoxND<dim>& b) noexcept") was dropped
// by the doc extraction — restore it from the upstream header.
template<int dim>
[[nodiscard]]
{
 BoxND<dim> bx(b);
 bx.surroundingNodes();
 return bx;
}
1429 
1431 template<int dim>
1432 [[nodiscard]]
1435 BoxND<dim> convert (const BoxND<dim>& b, const IntVectND<dim>& typ) noexcept
1436 {
1437  BoxND<dim> bx(b);
1438  bx.convert(typ);
1439  return bx;
1440 }
1441 
1442 template<int dim>
1443 [[nodiscard]]
1446 BoxND<dim> convert (const BoxND<dim>& b, const IndexTypeND<dim>& typ) noexcept
1447 {
1448  BoxND<dim> bx(b);
1449  bx.convert(typ);
1450  return bx;
1451 }
1452 
1459 template<int dim>
1460 [[nodiscard]]
1463 BoxND<dim> enclosedCells (const BoxND<dim>& b, int dir) noexcept
1464 {
1465  BoxND<dim> bx(b);
1466  bx.enclosedCells(dir);
1467  return bx;
1468 }
1469 
//! Direction overload of enclosedCells(b, dir).
// NOTE(review): the declarator line (presumably
// "BoxND<dim> enclosedCells (const BoxND<dim>& b, Direction d) noexcept")
// was dropped by the doc extraction — restore it from the upstream header.
template<int dim>
[[nodiscard]]
{
 return enclosedCells(b, static_cast<int>(d));
}
1478 
//! Return b converted to CELL centering in every direction.
// NOTE(review): the declarator line (presumably
// "BoxND<dim> enclosedCells (const BoxND<dim>& b) noexcept") was dropped by
// the doc extraction — restore it from the upstream header.
template<int dim>
[[nodiscard]]
{
 BoxND<dim> bx(b);
 bx.enclosedCells();
 return bx;
}
1493 
1498 template<int dim>
1499 [[nodiscard]]
1502 BoxND<dim> bdryLo (const BoxND<dim>& b, int dir, int len=1) noexcept
1503 {
1504  IntVectND<dim> low(b.smallEnd());
1505  IntVectND<dim> hi(b.bigEnd());
1506  int sm = low[dir];
1507  low.setVal(dir,sm-len+1);
1508  hi.setVal(dir,sm);
1509  //
1510  // set dir'th bit to 1 = IndexType::NODE.
1511  //
1512  IndexTypeND<dim> typ(b.ixType());
1513  typ.set(dir);
1514  return BoxND<dim>(low,hi,typ);
1515 }
1516 
1521 template<int dim>
1522 [[nodiscard]]
1525 BoxND<dim> bdryHi (const BoxND<dim>& b, int dir, int len=1) noexcept
1526 {
1527  IntVectND<dim> low(b.smallEnd());
1528  IntVectND<dim> hi(b.bigEnd());
1529  auto const bitval = b.type()[dir];
1530  int bg = hi[dir] + 1 - bitval%2;
1531  low.setVal(dir,bg);
1532  hi.setVal(dir,bg+len-1);
1533  //
1534  // Set dir'th bit to 1 = IndexType::NODE.
1535  //
1536  IndexTypeND<dim> typ(b.ixType());
1537  typ.set(dir);
1538  return BoxND<dim>(low,hi,typ);
1539 }
1540 
1545 template<int dim>
1546 [[nodiscard]]
1549 BoxND<dim> bdryNode (const BoxND<dim>& b, Orientation face, int len=1) noexcept
1550 {
1551  int dir = face.coordDir();
1552  IntVectND<dim> low(b.smallEnd());
1553  IntVectND<dim> hi(b.bigEnd());
1554  if (face.isLow())
1555  {
1556  int sm = low[dir];
1557  low.setVal(dir,sm-len+1);
1558  hi.setVal(dir,sm);
1559  }
1560  else
1561  {
1562  int bitval = b.type()[dir];
1563  int bg = hi[dir] + 1 - bitval%2;
1564  low.setVal(dir,bg);
1565  hi.setVal(dir,bg+len-1);
1566  }
1567  //
1568  // Set dir'th bit to 1 = IndexType::NODE.
1569  //
1570  IndexTypeND<dim> typ(b.ixType());
1571  typ.set(dir);
1572  return BoxND<dim>(low,hi,typ);
1573 }
1574 
1587 template<int dim>
1588 [[nodiscard]]
1591 BoxND<dim> adjCellLo (const BoxND<dim>& b, int dir, int len=1) noexcept
1592 {
1593  BL_ASSERT(len > 0);
1594  IntVectND<dim> low(b.smallEnd());
1595  IntVectND<dim> hi(b.bigEnd());
1596  int sm = low[dir];
1597  low.setVal(dir,sm - len);
1598  hi.setVal(dir,sm - 1);
1599  //
1600  // Set dir'th bit to 0 = IndexType::CELL.
1601  //
1602  IndexTypeND<dim> typ(b.ixType());
1603  typ.unset(dir);
1604  return BoxND<dim>(low,hi,typ);
1605 }
1606 
1608 template<int dim>
1609 [[nodiscard]]
1612 BoxND<dim> adjCellHi (const BoxND<dim>& b, int dir, int len=1) noexcept
1613 {
1614  BL_ASSERT(len > 0);
1615  IntVectND<dim> low(b.smallEnd());
1616  IntVectND<dim> hi(b.bigEnd());
1617  int bitval = b.type()[dir];
1618  int bg = hi[dir] + 1 - bitval%2;
1619  low.setVal(dir,bg);
1620  hi.setVal(dir,bg + len - 1);
1621  //
1622  // Set dir'th bit to 0 = IndexType::CELL.
1623  //
1624  IndexTypeND<dim> typ(b.ixType());
1625  typ.unset(dir);
1626  return BoxND<dim>(low,hi,typ);
1627 }
1628 
1630 template<int dim>
1631 [[nodiscard]]
1634 BoxND<dim> adjCell (const BoxND<dim>& b, Orientation face, int len=1) noexcept
1635 {
1636  BL_ASSERT(len > 0);
1637  IntVectND<dim> low(b.smallEnd());
1638  IntVectND<dim> hi(b.bigEnd());
1639  int dir = face.coordDir();
1640  if (face.isLow())
1641  {
1642  int sm = low[dir];
1643  low.setVal(dir,sm - len);
1644  hi.setVal(dir,sm - 1);
1645  }
1646  else
1647  {
1648  int bitval = b.type()[dir];
1649  int bg = hi[dir] + 1 - bitval%2;
1650  low.setVal(dir,bg);
1651  hi.setVal(dir,bg + len - 1);
1652  }
1653  //
1654  // Set dir'th bit to 0 = IndexType::CELL.
1655  //
1656  IndexTypeND<dim> typ(b.ixType());
1657  typ.unset(dir);
1658  return BoxND<dim>(low,hi,typ);
1659 }
1660 
1666 template<int dim>
1667 [[nodiscard]]
1670 BoxND<dim> minBox (const BoxND<dim>& b1, const BoxND<dim>& b2) noexcept
1671 {
1672  BoxND<dim> result = b1;
1673  result.minBox(b2);
1674  return result;
1675 }
1676 
1677 namespace detail {
// Stream the ASCII form of a box given raw pointers to its smallend,
// bigend and type arrays of length `dim` (defined in AMReX_Box.cpp).
1678  std::ostream& box_write (std::ostream& os, const int * smallend, const int * bigend,
1679  const int * type, int dim);
// Inverse of box_write: parse the components back from `is`.
1680  std::istream& box_read (std::istream& is, int * smallend, int * bigend, int * type, int dim);
1681 
// Helper for BoxSplit: expand the index pack Ns to build one BoxND per
// (lo, hi, typ) tuple element and pack them into a GpuTuple.
1682  template<std::size_t...Ns, class T, class U>
1684  auto BoxSplit_imp (std::index_sequence<Ns...>,
1685  const T& lo, const T& hi, const U& typ) noexcept {
1686  return makeTuple(BoxND(get<Ns>(lo), get<Ns>(hi), get<Ns>(typ))...);
1687  }
1688 }
1689 
// Write an ASCII representation of bx to the ostream via detail::box_write.
1691 template<int dim>
1692 std::ostream& operator<< (std::ostream& os, const BoxND<dim>& bx)
1693 {
// type() returns by value; keep a named copy so begin() points at live storage.
1694  IntVectND<dim> type = bx.type();
1695  return detail::box_write(os, bx.smallEnd().begin(), bx.bigEnd().begin(), type.begin(), dim);
1696 }
1697 
// Read a BoxND from the istream: parse the three component arrays with
// detail::box_read, then rebuild bx from them.
1699 template<int dim>
1700 std::istream& operator>> (std::istream& is, BoxND<dim>& bx) {
1701  IntVectND<dim> small;
1702  IntVectND<dim> big;
1703  IntVectND<dim> type;
1704  detail::box_read(is, small.begin(), big.begin(), type.begin(), dim);
1705  bx = BoxND<dim>{small, big, type};
1706  return is;
1707 }
1708 
// Concatenate the input BoxNDs dimension-wise: the result's dimension is the
// sum of the inputs' dimensions, and its ends/type are the concatenation of
// the inputs' ends/types.
1713 template<int d, int...dims>
1716 constexpr BoxND<detail::get_sum<d, dims...>()>
1717 BoxCat (const BoxND<d>& bx, const BoxND<dims>&...boxes) noexcept {
1718  auto lo = IntVectCat(bx.smallEnd(), boxes.smallEnd()...);
1719  auto hi = IntVectCat(bx.bigEnd(), boxes.bigEnd()...);
1720  auto typ = IndexTypeCat(bx.ixType(), boxes.ixType()...);
1721  return BoxND<detail::get_sum<d, dims...>()>{lo, hi, typ};
1722 }
1723 
// Inverse of BoxCat: split bx into a tuple of BoxNDs with dimensions
// <d, dims...> by splitting its ends and index type the same way.
1728 template<int d, int...dims>
1731 constexpr GpuTuple<BoxND<d>, BoxND<dims>...>
1732 BoxSplit (const BoxND<detail::get_sum<d, dims...>()>& bx) noexcept {
1733  auto lo = IntVectSplit<d, dims...>(bx.smallEnd());
1734  auto hi = IntVectSplit<d, dims...>(bx.bigEnd());
1735  auto typ = IndexTypeSplit<d, dims...>(bx.ixType());
// One index per output box: 1 (for d) + sizeof...(dims).
1736  return detail::BoxSplit_imp(std::make_index_sequence<1 + sizeof...(dims)>(), lo, hi, typ);
1737 }
1738 
// Free-function form of BoxND::shrink<new_dim>(): keep only the first
// new_dim dimensions of bx.
1743 template<int new_dim, int old_dim>
1746 constexpr BoxND<new_dim>
1747 BoxShrink (const BoxND<old_dim>& bx) noexcept {
1748  return bx.template shrink<new_dim>();
1749 }
1750 
// Free-function form of BoxND::expand<new_dim>(): copy bx into a
// higher-dimensional BoxND (added dimensions get default extents).
1756 template<int new_dim, int old_dim>
1759 constexpr BoxND<new_dim>
1760 BoxExpand (const BoxND<old_dim>& bx) noexcept {
1761  return bx.template expand<new_dim>();
1762 }
1763 
// Free-function form of BoxND::resize<new_dim>(): shrink or expand bx to
// new_dim dimensions as appropriate.
1768 template<int new_dim, int old_dim>
1771 constexpr BoxND<new_dim>
1772 BoxResize (const BoxND<old_dim>& bx) noexcept {
1773  return bx.template resize<new_dim>();
1774 }
1775 
// Lower bound of box as an IntVectND (alias for smallEnd()).
1776 template<int dim>
1777 [[nodiscard]]
1780 IntVectND<dim> lbound_iv (BoxND<dim> const& box) noexcept
1781 {
1782  return box.smallEnd();
1783 }
1784 
// Upper bound of box as an IntVectND (alias for bigEnd()).
1785 template<int dim>
1786 [[nodiscard]]
1789 IntVectND<dim> ubound_iv (BoxND<dim> const& box) noexcept
1790 {
1791  return box.bigEnd();
1792 }
1793 
// Inclusive iteration start of box (same as its lower bound).
1794 template<int dim>
1795 [[nodiscard]]
1798 IntVectND<dim> begin_iv (BoxND<dim> const& box) noexcept
1799 {
1800  return box.smallEnd();
1801 }
1802 
// Exclusive iteration end of box: one past the upper bound in every
// direction, matching the begin_iv/end_iv half-open convention.
1803 template<int dim>
1804 [[nodiscard]]
1807 IntVectND<dim> end_iv (BoxND<dim> const& box) noexcept
1808 {
1809  return box.bigEnd() + 1;
1810 }
1811 
// Number of indices covered by box in each direction (inclusive bounds,
// hence the +1).
1812 template<int dim>
1813 [[nodiscard]]
1816 IntVectND<dim> length_iv (BoxND<dim> const& box) noexcept
1817 {
1818  return box.bigEnd() - box.smallEnd() + 1;
1819 }
1820 
1821 // Max of lower bound
// Component-wise maximum of the two boxes' lower bounds (the lower corner
// of their intersection region).
1822 template<int dim>
1823 [[nodiscard]]
1826 IntVectND<dim> max_lbound_iv (BoxND<dim> const& b1, BoxND<dim> const& b2) noexcept
1827 {
1828  return max(b1.smallEnd(), b2.smallEnd());
1829 }
1830 
1831 template<int dim>
1832 [[nodiscard]]
// NOTE(review): signature line dropped by this source dump; per the
// cross-reference index it is
//   IntVectND<dim> max_lbound_iv (BoxND<dim> const& b1, IntVectND<dim> const& lo) noexcept
// Component-wise maximum of b1's lower bound and lo.
1836 {
1837  return max(b1.smallEnd(), lo);
1838 }
1839 
1840 // Min of upper bound
// Component-wise minimum of the two boxes' upper bounds (the upper corner
// of their intersection region).
1841 template<int dim>
1842 [[nodiscard]]
1845 IntVectND<dim> min_ubound_iv (BoxND<dim> const& b1, BoxND<dim> const& b2) noexcept
1846 {
1847  return min(b1.bigEnd(), b2.bigEnd());
1848 }
1849 
1850 template<int dim>
1851 [[nodiscard]]
// NOTE(review): signature line dropped by this source dump; per the
// cross-reference index it is
//   IntVectND<dim> min_ubound_iv (BoxND<dim> const& b1, IntVectND<dim> const& hi) noexcept
// Component-wise minimum of b1's upper bound and hi.
1855 {
1856  return min(b1.bigEnd(), hi);
1857 }
1858 
// Dim3 form of the lower bound; only available for dim <= 3 because Dim3
// has exactly three components.
1859 template<int dim, std::enable_if_t<( 1<=dim && dim<=3 ), int> = 0>
1860 [[nodiscard]]
1861 AMREX_GPU_HOST_DEVICE
1862 AMREX_FORCE_INLINE
1863 Dim3 lbound (BoxND<dim> const& box) noexcept
1864 {
1865  return box.smallEnd().dim3();
1866 }
1867 
// Dim3 form of the upper bound (dim <= 3 only).
1868 template<int dim, std::enable_if_t<( 1<=dim && dim<=3 ), int> = 0>
1869 [[nodiscard]]
1870 AMREX_GPU_HOST_DEVICE
1871 AMREX_FORCE_INLINE
1872 Dim3 ubound (BoxND<dim> const& box) noexcept
1873 {
1874  return box.bigEnd().dim3();
1875 }
1876 
// Dim3 form of the inclusive iteration start (dim <= 3 only).
1877 template<int dim, std::enable_if_t<( 1<=dim && dim<=3 ), int> = 0>
1878 [[nodiscard]]
1879 AMREX_GPU_HOST_DEVICE
1880 AMREX_FORCE_INLINE
1881 Dim3 begin (BoxND<dim> const& box) noexcept
1882 {
1883  return box.smallEnd().dim3();
1884 }
1885 
// Dim3 form of the exclusive iteration end (dim <= 3 only).
1886 template<int dim, std::enable_if_t<( 1<=dim && dim<=3 ), int> = 0>
1887 [[nodiscard]]
1888 AMREX_GPU_HOST_DEVICE
1889 AMREX_FORCE_INLINE
1890 Dim3 end (BoxND<dim> const& box) noexcept
1891 {
// The dim3(1) argument presumably fills the components beyond `dim` with 1
// (so loops over unused directions execute once) — confirm against
// IntVectND::dim3's documentation; contrast lbound/ubound, which use dim3().
1892  return (box.bigEnd() + 1).dim3(1);
1893 }
1894 
// Dim3 form of the per-direction length (dim <= 3 only).
1895 template<int dim, std::enable_if_t<( 1<=dim && dim<=3 ), int> = 0>
1896 [[nodiscard]]
1897 AMREX_GPU_HOST_DEVICE
1898 AMREX_FORCE_INLINE
1899 Dim3 length (BoxND<dim> const& box) noexcept
1900 {
// dim3(1): components beyond `dim` presumably report length 1 — confirm
// against IntVectND::dim3's documentation.
1901  return (box.bigEnd() - box.smallEnd() + 1).dim3(1);
1902 }
1903 
1904 // Max of lower bound
// Dim3 form of max_lbound_iv(b1, b2) (dim <= 3 only).
1905 template<int dim, std::enable_if_t<( 1<=dim && dim<=3 ), int> = 0>
1906 [[nodiscard]]
1907 AMREX_GPU_HOST_DEVICE
1908 AMREX_FORCE_INLINE
1909 Dim3 max_lbound (BoxND<dim> const& b1, BoxND<dim> const& b2) noexcept
1910 {
1911  return max(b1.smallEnd(), b2.smallEnd()).dim3();
1912 }
1913 
// Component-wise maximum of b1's lower bound and lo, as a Dim3 (dim <= 3).
1914 template<int dim, std::enable_if_t<( 1<=dim && dim<=3 ), int> = 0>
1915 [[nodiscard]]
1916 AMREX_GPU_HOST_DEVICE
1917 AMREX_FORCE_INLINE
1918 Dim3 max_lbound (BoxND<dim> const& b1, Dim3 const& lo) noexcept
1919 {
1920  return max(b1.smallEnd(), IntVectND<dim>(lo)).dim3();
1921 }
1922 
1923 // Min of upper bound
// Dim3 form of min_ubound_iv(b1, b2) (dim <= 3 only).
1924 template<int dim, std::enable_if_t<( 1<=dim && dim<=3 ), int> = 0>
1925 [[nodiscard]]
1926 AMREX_GPU_HOST_DEVICE
1927 AMREX_FORCE_INLINE
1928 Dim3 min_ubound (BoxND<dim> const& b1, BoxND<dim> const& b2) noexcept
1929 {
1930  return min(b1.bigEnd(), b2.bigEnd()).dim3();
1931 }
1932 
// Component-wise minimum of b1's upper bound and hi, as a Dim3 (dim <= 3).
1933 template<int dim, std::enable_if_t<( 1<=dim && dim<=3 ), int> = 0>
1934 [[nodiscard]]
1935 AMREX_GPU_HOST_DEVICE
1936 AMREX_FORCE_INLINE
1937 Dim3 min_ubound (BoxND<dim> const& b1, Dim3 const& hi) noexcept
1938 {
1939  return min(b1.bigEnd(), IntVectND<dim>(hi)).dim3();
1940 }
1941 
1942 // Returns a BoxND that covers all the argument Boxes in index
1943 // space. The types are ignored. Thus, the arguments can have
1944 // different index types, and the returned BoxND's index type has no
1945 // meaning.
1946 template<int dim>
1947 [[nodiscard]]
// NOTE(review): signature line dropped by this source dump; from the body
// and the overload set below, presumably
//   BoxND<dim> getIndexBounds (BoxND<dim> const& b1) noexcept
// Single-argument base case of the variadic overload: just returns b1.
1950 {
1951  return b1;
1952 }
1953 
1954 template<int dim>
1955 [[nodiscard]]
1957 BoxND<dim> getIndexBounds (BoxND<dim> const& b1, BoxND<dim> const& b2) noexcept
1958 {
1959  BoxND<dim> b = b1;
1960  b.setType(b2.ixType());
1961  b.minBox(b2);
1962  return b;
1963 }
1964 
// Variadic overload: fold the two-argument getIndexBounds left-to-right
// over all arguments.
1965 template <class T, class ... Ts>
1966 [[nodiscard]]
1968 auto getIndexBounds (T const& b1, T const& b2, Ts const& ... b3) noexcept
1969 {
1970  return getIndexBounds(getIndexBounds(b1,b2),b3...);
1971 }
1972 
1973 
// Map a global flattened cell index `icell` over the array of `nboxes`
// boxes to the IntVectND of the cell it lands in.
// Precondition: 0 <= icell < sum of boxes[i].numPts(); if icell is out of
// range the loop finishes with ibox == nboxes and boxes[ibox] reads past
// the end of the array.
1974 template<int dim>
1975 [[nodiscard]]
1978 IntVectND<dim> getCell (BoxND<dim> const* boxes, int nboxes, Long icell) noexcept
1979 {
1980  int ibox;
1981  Long ncells_subtotal = 0;
1982  Long offset = 0;
// Walk boxes accumulating their point counts until the running total
// passes icell; `offset` is then icell's position within that box.
1983  for (ibox = 0; ibox < nboxes; ++ibox) {
1984  const Long n = boxes[ibox].numPts();
1985  ncells_subtotal += n;
1986  if (icell < ncells_subtotal) {
1987  offset = icell - (ncells_subtotal - n);
1988  break;
1989  }
1990  }
1991  return boxes[ibox].atOffset(offset);
1992 }
1993 
1994 template<int dim>
1995 [[nodiscard]]
1998 BoxND<dim> makeSlab (BoxND<dim> const& b, int direction, int slab_index) noexcept
1999 {
2000  BoxND<dim> r = b;
2001  r.makeSlab(direction,slab_index);
2002  return r;
2003 }
2004 
2005 template<int dim=AMREX_SPACEDIM, std::enable_if_t<( 1<=dim && dim<=3 ), int> = 0>
2006 [[nodiscard]]
2007 AMREX_GPU_HOST_DEVICE
2008 AMREX_FORCE_INLINE
2009 BoxND<dim> makeSingleCellBox (int i, int j, int k, IndexTypeND<dim> typ = IndexTypeND<dim>::TheCellType())
2010 {
2011  Dim3 p3d{i, j, k};
2012  IntVectND<dim> vect{p3d};
2013  return BoxND<dim>{vect, vect, typ};
2014 }
2015 
1529 is wrong marker? no — content follows:
2024 
// Precomputed fast divmod machinery for mapping a flattened cell index back
// to an IntVectND within a box (used for 1D iteration over box cells).
2025 template<int dim>
// NOTE(review): the struct header line (`struct BoxIndexerND`) was dropped
// by this source dump, as were two member declarations between npts and the
// constructor — per the cross-reference index one is `IntVectND<dim> lo;`;
// the other, used below as `fdm[i]`, is presumably an array of
// Math::FastDivmodU64 of length dim-1 — confirm against the original header.
2027 {
// Total number of points in the box.
2028  std::uint64_t npts;
2031 
// Precompute the cumulative per-dimension strides and wrap each in a
// FastDivmodU64 so intVect() can divide without runtime division.
2033  : npts(box.numPts()),
2034  lo (box.smallEnd())
2035  {
2036  std::uint64_t mult = 1;
2037  for (int i=0; i<dim-1; ++i) {
2038  mult *= box.length(i);
2039  fdm[i] = Math::FastDivmodU64(mult);
2040  }
2041  }
2042 
// Convert a flattened cell index (0 <= icell < npts) into the cell's
// IntVectND, peeling off the highest dimension first.
2044  IntVectND<dim> intVect (std::uint64_t icell) const
2045  {
2046  IntVectND<dim> retval = lo;
2047 
2048  for (int i=dim-1; i>0; --i) {
2049  std::uint64_t quotient, remainder;
2050  fdm[i-1](quotient, remainder, icell);
2051  retval[i] += quotient;
2052  icell = remainder;
2053  }
2054 
// Whatever remains is the offset in the fastest-varying dimension.
2055  retval[0] += icell;
2056 
2057  return retval;
2058  }
2059 
// Dim3 convenience form of intVect; only for dim <= 3.
2060  template <int N=dim, std::enable_if_t<( 1<=N && N<=3 ), int> = 0>
2061  [[nodiscard]] AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
2062  Dim3 operator() (std::uint64_t icell) const
2063  {
2064  return intVect(icell).dim3();
2065  }
2066 
// Total number of points in the box.
2067  [[nodiscard]] AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
2068  std::uint64_t numPts () const { return npts; }
2069 };
2070 
// 1D specialization: no divmod needed — the flattened index plus the box's
// lower bound is the cell index.
2071 template<>
// NOTE(review): the `struct BoxIndexerND<1>` header line was dropped by
// this source dump (the cross-reference index places the definition at
// the immediately following line).
2073 {
// Total number of points in the box.
2074  std::uint64_t npts;
2075 
// Lower bound of the (single) dimension.
2076  int lo;
2077 
2078  BoxIndexerND (BoxND<1> const& box)
2079  : npts(box.numPts()),
2080  lo(box.smallEnd(0))
2081  {}
2082 
// Convert a flattened cell index into the 1D cell IntVectND.
2084  IntVectND<1> intVect (std::uint64_t icell) const
2085  {
2086  return IntVectND<1>{int(icell)+lo};
2087  }
2088 
// Dim3 convenience form; unused components are 0.
2090  Dim3 operator() (std::uint64_t icell) const
2091  {
2092  return {int(icell)+lo, 0, 0};
2093  }
2094 
// Total number of points in the box.
2096  std::uint64_t numPts () const { return npts; }
2097 };
2098 
2100 
2101 }
2102 
2103 #endif /*AMREX_BOX_H_*/
#define BL_ASSERT(EX)
Definition: AMReX_BLassert.H:39
#define AMREX_FORCE_INLINE
Definition: AMReX_Extension.H:119
#define AMREX_GPU_HOST_DEVICE
Definition: AMReX_GpuQualifiers.H:20
int idir
Definition: AMReX_HypreMLABecLap.cpp:1093
Array4< int const > offset
Definition: AMReX_HypreMLABecLap.cpp:1089
Array4< Real > fine
Definition: AMReX_InterpFaceRegister.cpp:90
int MPI_Datatype
Definition: AMReX_ccse-mpi.H:49
Definition: AMReX_Box.H:1174
int * p
Definition: AMReX_Box.H:1189
int * data() &&=delete
Box make_box() const noexcept
Definition: AMReX_Box.H:1182
int * data() const &noexcept
Definition: AMReX_Box.H:1179
std::vector< int > v
Definition: AMReX_Box.H:1190
static int size() noexcept
Definition: AMReX_Box.H:1186
Definition: AMReX_Box.H:1193
virtual Box doit(const Box &fine) const =0
virtual ~BoxConverter()=default
virtual BoxConverter * clone() const =0
A Rectangular Domain on an Integer Lattice.
Definition: AMReX_Box.H:43
AMREX_GPU_HOST_DEVICE BoxND & surroundingNodes() noexcept
Convert to NODE type in all directions.
Definition: AMReX_Box.H:946
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< new_dim > resize() const noexcept
Returns a new BoxND of size new_dim by either shrinking or expanding this BoxND.
Definition: AMReX_Box.H:831
IntVectND< dim > smallend
Definition: AMReX_Box.H:840
AMREX_GPU_HOST_DEVICE BoxND & minBox(const BoxND &b) noexcept
Modify BoxND to that of the minimum BoxND containing both the original BoxND and the argument....
Definition: AMReX_Box.H:583
AMREX_GPU_HOST_DEVICE IntVectND< dim > atOffset(Long offset) const noexcept
Given the offset, compute IntVectND<dim>
Definition: AMReX_Box.H:1009
AMREX_GPU_HOST_DEVICE const IntVectND< dim > & smallEnd() const &noexcept
Get the smallend of the BoxND.
Definition: AMReX_Box.H:105
static AMREX_GPU_HOST_DEVICE BoxND TheUnitBox() noexcept
This static member function returns a constant reference to an object of type BoxND representing the ...
Definition: AMReX_Box.H:737
AMREX_GPU_HOST_DEVICE BoxND & refine(int ref_ratio) noexcept
Refine BoxND by given (positive) refinement ratio. NOTE: if type(dir) = CELL centered: lo <- lo*ratio...
Definition: AMReX_Box.H:684
IndexTypeND< dim > btype
Definition: AMReX_Box.H:842
AMREX_GPU_HOST_DEVICE bool isSquare() const noexcept
Definition: AMReX_Box.H:1079
AMREX_GPU_HOST_DEVICE BoxND & shift(int dir, int nzones) noexcept
Shift this BoxND nzones indexing positions in coordinate direction dir.
Definition: AMReX_Box.H:496
AMREX_GPU_HOST_DEVICE BoxND(const IntVectND< dim > &small, const int *vec_len) noexcept
Construct BoxND with specified lengths.
Definition: AMReX_Box.H:68
AMREX_GPU_HOST_DEVICE void next(IntVectND< dim > &) const noexcept
Step through the rectangle. It is a runtime error to give a point not inside rectangle....
Definition: AMReX_Box.H:1059
constexpr AMREX_GPU_HOST_DEVICE BoxND(const IntVectND< dim > &small, const IntVectND< dim > &big) noexcept
Construct cell-centered type BoxND.
Definition: AMReX_Box.H:61
AMREX_GPU_HOST_DEVICE BoxND & grow(int i) noexcept
Definition: AMReX_Box.H:627
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< new_dim > shrink() const noexcept
Returns a new BoxND of dimension new_dim and assigns the first new_dim dimension of this BoxND to it.
Definition: AMReX_Box.H:802
AMREX_GPU_HOST_DEVICE IndexTypeND< dim > ixType() const noexcept
Returns the indexing type.
Definition: AMReX_Box.H:127
AMREX_GPU_HOST_DEVICE BoxND & makeSlab(int direction, int slab_index) noexcept
Definition: AMReX_Box.H:779
AMREX_GPU_HOST_DEVICE BoxND & convert(IndexTypeND< dim > typ) noexcept
Convert the BoxND from the current type into the argument type. This may change the BoxND coordinates...
Definition: AMReX_Box.H:912
AMREX_GPU_HOST_DEVICE IntVectND< dim > type() const noexcept
Returns the indexing type.
Definition: AMReX_Box.H:131
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< new_dim > expand() const noexcept
Returns a new BoxND of size new_dim and assigns all values of this BoxND to it and (small=0,...
Definition: AMReX_Box.H:817
AMREX_GPU_HOST_DEVICE static constexpr AMREX_FORCE_INLINE int indims() noexcept
Definition: AMReX_Box.H:792
AMREX_GPU_HOST_DEVICE bool coarsenable(int refrat, int min_width=1) const noexcept
Definition: AMReX_Box.H:758
AMREX_GPU_HOST_DEVICE const IntVectND< dim > & bigEnd() const &noexcept
Get the bigend.
Definition: AMReX_Box.H:116
AMREX_GPU_HOST_DEVICE BoxND & enclosedCells() noexcept
Convert to CELL type in all directions.
Definition: AMReX_Box.H:978
IntVectND< dim > bigend
Definition: AMReX_Box.H:841
AMREX_GPU_HOST_DEVICE void normalize() noexcept
Definition: AMReX_Box.H:769
AMREX_GPU_HOST_DEVICE BoxND & coarsen(const IntVectND< dim > &ref_ratio) noexcept
Coarsen BoxND by given (positive) refinement ratio. NOTE: if type(dir) = CELL centered: lo <- lo/rati...
Definition: AMReX_Box.H:866
AMREX_GPU_HOST_DEVICE IntVectND< dim > length() const noexcept
Return the length of the BoxND.
Definition: AMReX_Box.H:146
AMREX_GPU_HOST_DEVICE bool coarsenable(const IntVectND< dim > &refrat, int min_width=1) const noexcept
Definition: AMReX_Box.H:763
AMREX_GPU_HOST_DEVICE BoxND & growHi(int idir, int n_cell=1) noexcept
Grow the BoxND on the high end by n_cell cells in direction idir. NOTE: n_cell negative shrinks the B...
Definition: AMReX_Box.H:659
AMREX_GPU_HOST_DEVICE BoxND(const IntVectND< dim > &small, const IntVectND< dim > &big, const IntVectND< dim > &typ) noexcept
Construct BoxND with given type. small and big are expected to be consistent with given type.
Definition: AMReX_Box.H:78
AMREX_GPU_HOST_DEVICE static constexpr AMREX_FORCE_INLINE std::size_t ndims() noexcept
Definition: AMReX_Box.H:787
AMREX_GPU_HOST_DEVICE BoxND & growLo(int idir, int n_cell=1) noexcept
Grow the BoxND on the low end by n_cell cells in direction idir. NOTE: n_cell negative shrinks the Bo...
Definition: AMReX_Box.H:648
AMREX_GPU_HOST_DEVICE BoxND(const IntVectND< dim > &small, const IntVectND< dim > &big, IndexTypeND< dim > t) noexcept
Construct dimension specific Boxes.
Definition: AMReX_Box.H:88
constexpr AMREX_GPU_HOST_DEVICE BoxND() noexcept
Definition: AMReX_Box.H:54
AMREX_GPU_HOST_DEVICE BoxND & coarsen(int ref_ratio) noexcept
Coarsen BoxND by given (positive) refinement ratio. NOTE: if type(dir) = CELL centered: lo <- lo/rati...
Definition: AMReX_Box.H:708
AMREX_GPU_HOST_DEVICE Long numPts() const noexcept
Returns the number of points contained in the BoxND.
Definition: AMReX_Box.H:346
Cell-Based or Node-Based Indices.
Definition: AMReX_IndexType.H:33
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void unset(int dir) noexcept
Set IndexTypeND to be CELL based in direction dir.
Definition: AMReX_IndexType.H:72
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void set(int dir) noexcept
Set IndexTypeND to be NODE based in direction dir.
Definition: AMReX_IndexType.H:69
Definition: AMReX_IntVect.H:48
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE IntVectND< dim > & coarsen(const IntVectND< dim > &p) noexcept
Modify IntVectND<dim> by component-wise integer projection.
Definition: AMReX_IntVect.H:844
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE IntVectND & setVal(int i, int val) noexcept
Set i'th coordinate of IntVectND to val.
Definition: AMReX_IntVect.H:274
AMREX_GPU_HOST_DEVICE constexpr AMREX_FORCE_INLINE int * begin() noexcept
Returns a pointer to the first element of the IntVectND.
Definition: AMReX_IntVect.H:258
Encapsulation of the Orientation of the Faces of a Box.
Definition: AMReX_Orientation.H:29
AMREX_GPU_HOST_DEVICE int coordDir() const noexcept
Returns the coordinate direction.
Definition: AMReX_Orientation.H:83
AMREX_GPU_HOST_DEVICE bool isLow() const noexcept
Returns true if Orientation is low.
Definition: AMReX_Orientation.H:89
This class is a thin wrapper around std::vector. Unlike vector, Vector::operator[] provides bound che...
Definition: AMReX_Vector.H:27
AMREX_GPU_HOST_DEVICE Long size(T const &b) noexcept
integer version
Definition: AMReX_GpuRange.H:26
@ min
Definition: AMReX_ParallelReduce.H:18
@ max
Definition: AMReX_ParallelReduce.H:17
std::istream & box_read(std::istream &is, int *smallend, int *bigend, int *type, int dim)
Definition: AMReX_Box.cpp:44
std::ostream & box_write(std::ostream &os, const int *smallend, const int *bigend, const int *type, int dim)
Definition: AMReX_Box.cpp:20
AMREX_GPU_HOST_DEVICE constexpr AMREX_FORCE_INLINE int get_sum()
Definition: AMReX_IntVect.H:1132
AMREX_GPU_HOST_DEVICE constexpr AMREX_FORCE_INLINE auto BoxSplit_imp(std::index_sequence< Ns... >, const T &lo, const T &hi, const U &typ) noexcept
Definition: AMReX_Box.H:1684
Definition: AMReX_Amr.cpp:49
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE IntVectND< dim > min_ubound_iv(BoxND< dim > const &b1, IntVectND< dim > const &hi) noexcept
Definition: AMReX_Box.H:1854
AMREX_GPU_HOST_DEVICE constexpr AMREX_FORCE_INLINE BoxND< new_dim > BoxResize(const BoxND< old_dim > &bx) noexcept
Returns a new BoxND of size new_dim by either shrinking or expanding this BoxND.
Definition: AMReX_Box.H:1772
AMREX_FORCE_INLINE auto getIndexBounds(T const &b1, T const &b2, Ts const &... b3) noexcept
Definition: AMReX_Box.H:1968
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > adjCellLo(const BoxND< dim > &b, int dir, int len=1) noexcept
Returns the cell centered BoxND of length len adjacent to b on the low end along the coordinate direc...
Definition: AMReX_Box.H:1591
AMREX_GPU_HOST_DEVICE constexpr AMREX_FORCE_INLINE BoxND< new_dim > BoxShrink(const BoxND< old_dim > &bx) noexcept
Returns a new BoxND of dimension new_dim and assigns the first new_dim dimension of this BoxND to it.
Definition: AMReX_Box.H:1747
BoxND< AMREX_SPACEDIM > Box
Definition: AMReX_BaseFwd.H:27
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > growHi(const BoxND< dim > &b, Direction d, int n_cell) noexcept
Definition: AMReX_Box.H:1286
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE IntVectND< dim > ubound_iv(BoxND< dim > const &box) noexcept
Definition: AMReX_Box.H:1789
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > adjCellHi(const BoxND< dim > &b, int dir, int len=1) noexcept
Similar to adjCellLo but builds an adjacent BoxND on the high end.
Definition: AMReX_Box.H:1612
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Dim3 length(BoxND< dim > const &box) noexcept
Definition: AMReX_Box.H:1899
AMREX_GPU_HOST_DEVICE constexpr AMREX_FORCE_INLINE BoxND< new_dim > BoxExpand(const BoxND< old_dim > &bx) noexcept
Returns a new BoxND of size new_dim and assigns all values of this BoxND to it and (small=0,...
Definition: AMReX_Box.H:1760
AMREX_GPU_HOST_DEVICE constexpr AMREX_FORCE_INLINE IndexTypeND< detail::get_sum< d, dims... >)> IndexTypeCat(const IndexTypeND< d > &v, const IndexTypeND< dims > &...vects) noexcept
Returns a IndexTypeND obtained by concatenating the input IndexTypeNDs. The dimension of the return v...
Definition: AMReX_IndexType.H:290
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > adjCell(const BoxND< dim > &b, Orientation face, int len=1) noexcept
Similar to adjCellLo and adjCellHi; operates on given face.
Definition: AMReX_Box.H:1634
AMREX_GPU_HOST_DEVICE constexpr AMREX_FORCE_INLINE GpuTuple< IntVectND< d >, IntVectND< dims >... > IntVectSplit(const IntVectND< detail::get_sum< d, dims... >()> &v) noexcept
Returns a tuple of IntVectND obtained by splitting the input IntVectND according to the dimensions sp...
Definition: AMReX_IntVect.H:1175
constexpr AMREX_GPU_HOST_DEVICE GpuTuple< detail::tuple_decay_t< Ts >... > makeTuple(Ts &&... args)
Definition: AMReX_Tuple.H:252
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > bdryNode(const BoxND< dim > &b, Orientation face, int len=1) noexcept
Similar to bdryLo and bdryHi except that it operates on the given face of box b.
Definition: AMReX_Box.H:1549
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE IntVectND< dim > getCell(BoxND< dim > const *boxes, int nboxes, Long icell) noexcept
Definition: AMReX_Box.H:1978
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > bdryLo(const BoxND< dim > &b, int dir, int len=1) noexcept
Returns the edge-centered BoxND (in direction dir) defining the low side of BoxND b.
Definition: AMReX_Box.H:1502
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE IntVectND< dim > begin_iv(BoxND< dim > const &box) noexcept
Definition: AMReX_Box.H:1798
AMREX_GPU_HOST_DEVICE constexpr AMREX_FORCE_INLINE BoxND< detail::get_sum< d, dims... >)> BoxCat(const BoxND< d > &bx, const BoxND< dims > &...boxes) noexcept
Returns a BoxND obtained by concatenating the input BoxNDs. The dimension of the return value equals ...
Definition: AMReX_Box.H:1717
std::ostream & operator<<(std::ostream &os, const BoxND< dim > &bx)
Write an ASCII representation to the ostream.
Definition: AMReX_Box.H:1692
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > growLo(const BoxND< dim > &b, Direction d, int n_cell) noexcept
Definition: AMReX_Box.H:1266
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > refine(const BoxND< dim > &b, const IntVectND< dim > &ref_ratio) noexcept
Refine BoxND by given (positive) refinement ratio. NOTE: if type(dir) = CELL centered: lo <- lo*ratio...
Definition: AMReX_Box.H:1360
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > surroundingNodes(const BoxND< dim > &b) noexcept
Returns a BoxND with NODE based coordinates in all directions that encloses BoxND b.
Definition: AMReX_Box.H:1423
AMREX_GPU_HOST_DEVICE constexpr AMREX_FORCE_INLINE GpuTuple< BoxND< d >, BoxND< dims >... > BoxSplit(const BoxND< detail::get_sum< d, dims... >()> &bx) noexcept
Returns a tuple of BoxNDs obtained by splitting the input BoxND according to the dimensions specified...
Definition: AMReX_Box.H:1732
IntVectND< AMREX_SPACEDIM > IntVect
Definition: AMReX_BaseFwd.H:30
Direction
Definition: AMReX_Orientation.H:14
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE IntVectND< dim > lbound_iv(BoxND< dim > const &box) noexcept
Definition: AMReX_Box.H:1780
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > grow(const BoxND< dim > &b, Direction d, int n_cell) noexcept
Definition: AMReX_Box.H:1246
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE IntVectND< dim > length_iv(BoxND< dim > const &box) noexcept
Definition: AMReX_Box.H:1816
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE IntVectND< dim > end_iv(BoxND< dim > const &box) noexcept
Definition: AMReX_Box.H:1807
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE IntVectND< dim > max_lbound_iv(BoxND< dim > const &b1, IntVectND< dim > const &lo) noexcept
Definition: AMReX_Box.H:1835
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > minBox(const BoxND< dim > &b1, const BoxND< dim > &b2) noexcept
Modify BoxND to that of the minimum BoxND containing both the original BoxND and the argument....
Definition: AMReX_Box.H:1670
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Dim3 length(Array4< T > const &a) noexcept
Definition: AMReX_Array4.H:322
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > shift(const BoxND< dim > &b, const IntVectND< dim > &nzones) noexcept
Definition: AMReX_Box.H:1383
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > coarsen(const BoxND< dim > &b, const IntVectND< dim > &ref_ratio) noexcept
Coarsen BoxND by given (positive) refinement ratio. NOTE: if type(dir) = CELL centered: lo <- lo/rati...
Definition: AMReX_Box.H:1324
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > convert(const BoxND< dim > &b, const IndexTypeND< dim > &typ) noexcept
Definition: AMReX_Box.H:1446
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > growHi(const BoxND< dim > &b, int idir, int n_cell) noexcept
Definition: AMReX_Box.H:1275
const int[]
Definition: AMReX_BLProfiler.cpp:1664
void AllGatherBoxes(Vector< Box > &bxs, int n_extra_reserve)
Definition: AMReX_Box.cpp:120
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > makeSlab(BoxND< dim > const &b, int direction, int slab_index) noexcept
Definition: AMReX_Box.H:1998
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > makeSingleCellBox(IntVectND< dim > const &vect, IndexTypeND< dim > typ=IndexTypeND< dim >::TheCellType())
Definition: AMReX_Box.H:2020
AMREX_GPU_HOST_DEVICE constexpr AMREX_FORCE_INLINE GpuTuple< IndexTypeND< d >, IndexTypeND< dims >... > IndexTypeSplit(const IndexTypeND< detail::get_sum< d, dims... >()> &v) noexcept
Returns a tuple of IndexTypeND obtained by splitting the input IndexTypeND according to the dimension...
Definition: AMReX_IndexType.H:310
AMREX_GPU_HOST_DEVICE constexpr AMREX_FORCE_INLINE IntVectND< detail::get_sum< d, dims... >)> IntVectCat(const IntVectND< d > &v, const IntVectND< dims > &...vects) noexcept
Returns a IntVectND obtained by concatenating the input IntVectNDs. The dimension of the return value...
Definition: AMReX_IntVect.H:1159
std::istream & operator>>(std::istream &is, BoxND< dim > &bx)
Read from istream.
Definition: AMReX_Box.H:1700
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > bdryHi(const BoxND< dim > &b, int dir, int len=1) noexcept
Returns the edge-centered BoxND (in direction dir) defining the high side of BoxND b.
Definition: AMReX_Box.H:1525
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > enclosedCells(const BoxND< dim > &b) noexcept
Returns a BoxND with CELL based coordinates in all directions that is enclosed by b.
Definition: AMReX_Box.H:1487
Definition: AMReX_FabArrayCommI.H:896
Definition: AMReX_Box.H:2073
BoxIndexerND(BoxND< 1 > const &box)
Definition: AMReX_Box.H:2078
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE std::uint64_t numPts() const
Definition: AMReX_Box.H:2096
std::uint64_t npts
Definition: AMReX_Box.H:2074
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE IntVectND< 1 > intVect(std::uint64_t icell) const
Definition: AMReX_Box.H:2084
int lo
Definition: AMReX_Box.H:2076
Definition: AMReX_Box.H:2027
BoxIndexerND(BoxND< dim > const &box)
Definition: AMReX_Box.H:2032
IntVectND< dim > lo
Definition: AMReX_Box.H:2030
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE IntVectND< dim > intVect(std::uint64_t icell) const
Definition: AMReX_Box.H:2044
std::uint64_t npts
Definition: AMReX_Box.H:2028
CellIndex
The cell index type: one of CELL or NODE.
Definition: AMReX_IndexType.H:20
Definition: AMReX_Dim3.H:12
Definition: AMReX_Array.H:34
Definition: AMReX_Math.H:343