Block-Structured AMR Software Framework
AMReX_ParticleLocator.H
Go to the documentation of this file.
1 #ifndef AMREX_PARTICLE_LOCATOR_H_
2 #define AMREX_PARTICLE_LOCATOR_H_
3 #include <AMReX_Config.H>
4 
5 #include <AMReX_ParGDB.H>
6 #include <AMReX_GpuContainers.H>
7 #include <AMReX_Tuple.H>
8 
9 namespace amrex
10 {
11 
// AssignGrid: maps a cell index (or a particle, via an Assignor functor) to the
// index of the grid (Box) that contains it, using a pre-built spatial binning
// of the BoxArray.  Device-callable; all state is POD-like by-value data.
//
// NOTE(review): this is a doc-page scrape.  The member declarations the real
// header has at its lines 17-24 (Dim3 m_lo, m_hi, m_bin_size, m_num_bins;
// Box m_domain; GpuArray<Real,AMREX_SPACEDIM> m_plo, m_dxi) and the
// AMREX_GPU_HOST_DEVICE qualifiers were dropped by the extraction — see the
// cross-reference listing at the bottom of this page.
12 template <class BinIteratorFactory>
13 struct AssignGrid
14 {
15  BinIteratorFactory m_bif;
16 
21 
25 
27  AssignGrid () = default;
28 
// Construct from a bin-iterator factory, the bin-space bounds/size/count, and
// the Geometry (from which the problem lo, inverse cell size, and domain Box
// are cached for the particle -> cell-index conversion).
29  AssignGrid (BinIteratorFactory a_bif,
30  const IntVect& a_bins_lo, const IntVect& a_bins_hi, const IntVect& a_bin_size,
31  const IntVect& a_num_bins, const Geometry& a_geom)
32  : m_bif(a_bif),
33  m_lo(a_bins_lo.dim3()), m_hi(a_bins_hi.dim3()), m_bin_size(a_bin_size.dim3()),
34  m_num_bins(a_num_bins.dim3()), m_domain(a_geom.Domain()),
35  m_plo(a_geom.ProbLoArray()), m_dxi(a_geom.InvCellSizeArray())
36  {
37  // clamp bin size and num_bins to 1 for AMREX_SPACEDIM < 3
// NOTE(review): all six guards test m_bin_size (not m_num_bins) — presumably a
// negative bin size marks an inactive dimension that must stay untouched;
// confirm against the unscraped header.
38  if (m_bin_size.x >= 0) {m_bin_size.x = amrex::max(m_bin_size.x, 1);}
39  if (m_bin_size.y >= 0) {m_bin_size.y = amrex::max(m_bin_size.y, 1);}
40  if (m_bin_size.z >= 0) {m_bin_size.z = amrex::max(m_bin_size.z, 1);}
41 
42  if (m_bin_size.x >= 0) {m_num_bins.x = amrex::max(m_num_bins.x, 1);}
43  if (m_bin_size.y >= 0) {m_num_bins.y = amrex::max(m_num_bins.y, 1);}
44  if (m_bin_size.z >= 0) {m_num_bins.z = amrex::max(m_num_bins.z, 1);}
45  }
46 
// Locate the grid containing particle p: the Assignor converts the particle's
// position to a cell IntVect (using the cached plo/dxi/domain), then the
// IntVect overload below does the bin search.  Returns the grid index, or -1.
47  template <typename P, typename Assignor = DefaultAssignor>
49  int operator() (const P& p, int nGrow=0, Assignor const& assignor = Assignor{}) const noexcept
50  {
51  const auto iv = assignor(p, m_plo, m_dxi, m_domain);
52  return this->operator()(iv, nGrow);
53  }
54 
// Locate the grid whose (optionally nGrow-grown) Box contains cell iv.
// Returns the grid index, or -1 if no candidate contains it.
56  int operator() (const IntVect& iv, int nGrow=0) const noexcept
57  {
// Empty binning (built from an empty BoxArray): nothing can match.
58  if (AMREX_D_TERM((m_num_bins.x == 0), && (m_num_bins.y == 0), && (m_num_bins.z == 0))) {
59  return -1;
60  }
61  const auto lo = iv.dim3();
// Candidate bin range around iv, widened by nGrow.  The extra "- 1" on the
// low side compensates for integer division truncating toward zero when the
// offset is negative; max/min clamp the range into [0, m_num_bins-1].
62  int ix_lo = amrex::max((lo.x - nGrow - m_lo.x) / m_bin_size.x - 1, 0);
63  int iy_lo = amrex::max((lo.y - nGrow - m_lo.y) / m_bin_size.y - 1, 0);
64  int iz_lo = amrex::max((lo.z - nGrow - m_lo.z) / m_bin_size.z - 1, 0);
65 
66  int ix_hi = amrex::min((lo.x + nGrow - m_lo.x) / m_bin_size.x, m_num_bins.x-1);
67  int iy_hi = amrex::min((lo.y + nGrow - m_lo.y) / m_bin_size.y, m_num_bins.y-1);
68  int iz_hi = amrex::min((lo.z + nGrow - m_lo.z) / m_bin_size.z, m_num_bins.z-1);
69  int loc = -1;
70  for (int ii = ix_lo; ii <= ix_hi; ++ii) {
71  for (int jj = iy_lo; jj <= iy_hi; ++jj) {
72  for (int kk = iz_lo; kk <= iz_hi; ++kk) {
// Row-major flattening of the 3D bin index; must match the layout used when
// the bins were built.
73  int index = (ii * m_num_bins.y + jj) * m_num_bins.z + kk;
74  for (const auto& nbor : m_bif.getBinIterator(index)) {
// nbor is a (grid index, Box) pair.  An exact (valid-cell) hit wins
// immediately over any ghost-cell hit.
75  Box bx = nbor.second;
76  if (bx.contains(iv)) {
77  return nbor.first;
78  }
// Otherwise accept containment in the nGrow-grown box, remembering the first
// such grid ...
79  Box gbx = bx;
80  gbx.grow(nGrow);
81  if (gbx.contains(iv)) {
82  if (loc < 0) {
83  loc = nbor.first;
84  }
85  // Prefer particle not in corner ghost cells
// ... but prefer a grid for which iv lies in a face ghost region (box grown in
// a single direction) over one where it only lies in a corner ghost region.
86  for (int dir = 0; dir < AMREX_SPACEDIM; ++dir) {
87  Box gdbx = bx;
88  gdbx.grow(dir, nGrow);
89  if (gdbx.contains(iv)) {
90  loc = nbor.first;
91  }
92  }
93  }
94  }
95  }
96  }
97  }
98  return loc;
99  }
100 };
101 
// ParticleLocator (class name line lost in the doc-page scrape; original line
// 103 reads "class ParticleLocator"): builds a bin-based spatial index over a
// single-level BoxArray so that AssignGrid can map cells/particles to grids.
// NOTE(review): several statements were dropped by the extraction and are
// only implied here — the ReduceOps declaration (orig. lines 132-134), the
// host->device Gpu::copyAsync of the boxes (orig. line 121), the bins_box
// construction (orig. lines 165-167), and parts of getGridAssignor.
102 template <class Bins>
104 {
105 public:
106 
107  using BinIteratorFactory = typename Bins::BinIteratorFactory;
108 
109  ParticleLocator () = default;
110 
// Build the bin structure for the given BoxArray/Geometry.  Copies the boxes
// to host and device storage, reduces over them to find the bin-space bounds
// and bin size, then builds m_bins.
111  void build (const BoxArray& ba, const Geometry& geom)
112  {
113  m_defined = true;
114  m_ba = ba;
115  m_geom = geom;
116  int num_boxes = static_cast<int>(ba.size());
117  m_host_boxes.resize(0);
118  for (int i = 0; i < num_boxes; ++i) { m_host_boxes.push_back(ba[i]); }
119 
120  m_device_boxes.resize(num_boxes);
// (scrape gap: orig. line 121 copies m_host_boxes into m_device_boxes)
122 
// Empty BoxArray: record a sentinel empty binning (num_bins = 0 makes
// AssignGrid::operator() return -1 immediately) and stop.
123  if (num_boxes == 0) {
124  m_bins_lo = IntVect(AMREX_D_DECL( 0, 0, 0));
125  m_bins_hi = IntVect(AMREX_D_DECL(-1, -1, -1));
126  m_bin_size = IntVect(AMREX_D_DECL(-1, -1, -1));
127  m_num_bins = IntVect(AMREX_D_DECL( 0, 0, 0));
128  return;
129  }
130 
131  // compute the lo, hi and the max box size in each direction
// (scrape gap: orig. lines 132-134 declare the ReduceOps object `reduce_op`
// used below — presumably min over smallEnd, max over bigEnd and length;
// confirm against the unscraped header.)
135  ReduceData<AMREX_D_DECL(int, int, int),
136  AMREX_D_DECL(int, int, int),
137  AMREX_D_DECL(int, int, int)> reduce_data(reduce_open);
138  using ReduceTuple = typename decltype(reduce_data)::Type;
139 
140  auto *const boxes_ptr = m_device_boxes.dataPtr();
// Per-box contribution: smallEnd, bigEnd, and length in each direction.
141  reduce_op.eval(num_boxes, reduce_data,
142  [=] AMREX_GPU_DEVICE (int i) -> ReduceTuple
143  {
144  const Box& box = boxes_ptr[i];
145  IntVect lo = box.smallEnd();
146  IntVect hi = box.bigEnd();
147  IntVect si = box.length();
148  return {AMREX_D_DECL(lo[0], lo[1], lo[2]),
149  AMREX_D_DECL(hi[0], hi[1], hi[2]),
150  AMREX_D_DECL(si[0], si[1], si[2])};
151  });
152 
153  ReduceTuple hv = reduce_data.value(reduce_op);
154 
// Unpack the flat reduce tuple: components [0, D) are the lows,
// [D, 2D) the highs, [2D, 3D) the per-direction box sizes (the bin size).
155  m_bins_lo = IntVect(AMREX_D_DECL(amrex::get<0>(hv),
156  amrex::get<1>(hv),
157  amrex::get<2>(hv)));
158  m_bins_hi = IntVect(AMREX_D_DECL(amrex::get< AMREX_SPACEDIM >(hv),
159  amrex::get< AMREX_SPACEDIM+1>(hv),
160  amrex::get< AMREX_SPACEDIM+2>(hv)));
161  m_bin_size = IntVect(AMREX_D_DECL(amrex::get<2*AMREX_SPACEDIM>(hv),
162  amrex::get<2*AMREX_SPACEDIM+1>(hv),
163  amrex::get<2*AMREX_SPACEDIM+2>(hv)));
164 
// (scrape gap: orig. lines 165-167 build `bins_box` and m_num_bins from the
// lo/hi/size just computed.)
166 
// Capture by value for the device lambda: map each box to its bin index.
168  IntVect bin_size = m_bin_size;
169  IntVect bins_lo = m_bins_lo;
170  m_bins.build(num_boxes, boxes_ptr, bins_box,
171  [=] AMREX_GPU_DEVICE (const Box& box) noexcept -> IntVect
172  {
173  return (box.smallEnd() - bins_lo) / bin_size;
174  });
175  }
176 
// Replace the cached Geometry (e.g. after the problem domain moves) without
// rebuilding the bins.  (scrape gap: orig. line 179 is an AMREX_ASSERT.)
177  void setGeometry (const Geometry& a_geom) noexcept
178  {
180  m_geom = a_geom;
181  }
182 
// Return an AssignGrid functor over the built bins.  (scrape gap: orig. lines
// 183-187 carry the signature/assert and the remaining constructor arguments
// — m_bins_lo, m_bins_hi, m_bin_size, m_num_bins, m_geom.)
184  {
186  return AssignGrid<BinIteratorFactory>(m_bins.getBinIteratorFactory(),
188  }
189 
// True iff this locator was built from (a BoxArray sharing refs with) ba.
190  bool isValid (const BoxArray& ba) const noexcept
191  {
192  if (m_defined) { return BoxArray::SameRefs(m_ba, ba); }
193  return false;
194  }
195 
196 protected:
197 
198  bool m_defined{false};
199 
// (scrape gap: orig. lines 200-211 declare m_ba, m_geom, m_bins_lo/hi,
// m_bin_size, m_num_bins, m_bins, m_host_boxes, m_device_boxes — see the
// cross-reference listing at the bottom of this page.)
202 
207 
208  Bins m_bins;
209 
212 };
213 
// AmrAssignGrid (struct name line lost in the doc-page scrape; original line
// 215 reads "struct AmrAssignGrid"): multi-level wrapper over an array of
// per-level AssignGrid functors.  (scrape gap: orig. line 217 declares
// `const AssignGrid<BinIteratorFactory>* m_funcs;` — see cross-reference.)
214 template <class BinIteratorFactory>
216 {
218  std::size_t m_size;
219 
// a_funcs points at m_size per-level AssignGrid functors (device memory in
// GPU builds); this struct does not own the array.
220  AmrAssignGrid(const AssignGrid<BinIteratorFactory>* a_funcs, std::size_t a_size)
221  : m_funcs(a_funcs), m_size(a_size)
222  {}
223 
// Locate the (grid, level) containing particle p, searching levels
// [lev_min, lev_max] (defaulting to all levels) finest-first.  Returns
// (-1, -1) if not found anywhere.
224  template <typename P, typename Assignor = DefaultAssignor>
226  GpuTuple<int, int> operator() (const P& p, int lev_min=-1, int lev_max=-1, int nGrow=0,
227  Assignor const& assignor = {}) const noexcept
228  {
229  lev_min = (lev_min == -1) ? 0 : lev_min;
230  lev_max = (lev_max == -1) ? m_size - 1 : lev_max;
231 
// First pass: valid (non-ghost) regions only, finest level first.
232  for (int lev = lev_max; lev >= lev_min; --lev)
233  {
234  int grid = m_funcs[lev](p, 0, assignor);
235  if (grid >= 0) { return makeTuple(grid, lev); }
236  }
237 
// Second pass: allow ghost regions (nGrow).
// NOTE(review): this loop starts AND bounds at lev_min, so it executes
// exactly once — only level lev_min is searched with ghost cells.  Confirm
// this is intentional (ghost lookup restricted to the coarsest requested
// level) and not a typo for `lev = lev_max`.
238  for (int lev = lev_min; lev >= lev_min; --lev)
239  {
240  int grid = m_funcs[lev](p, nGrow, assignor);
241  if (grid >= 0) { return makeTuple(grid, lev); }
242  }
243 
244  return makeTuple(-1, -1);
245  }
246 };
247 
// AmrParticleLocator (class name line lost in the doc-page scrape; original
// line 249 reads "class AmrParticleLocator"): owns one ParticleLocator per
// AMR level plus a device-resident array of their AssignGrid functors.
// (scrape gap: orig. lines 255-256 declare the members
// Vector<ParticleLocator<Bins>> m_locators and
// Gpu::DeviceVector<AssignGrid<BinIteratorFactory>> m_grid_assignors.)
248 template <class Bins>
250 {
251 public:
252  using BinIteratorFactory = typename Bins::BinIteratorFactory;
253 
254 private:
257  bool m_defined = false;
258 
259 public:
260 
261  AmrParticleLocator() = default;
262 
// Convenience constructor: build from per-level BoxArrays and Geometries.
// (scrape gap: orig. line 263 carries the first line of this signature.)
264  const Vector<Geometry>& a_geom)
265  {
266  build(a_ba, a_geom);
267  }
268 
// Convenience constructor: build from a ParGDBBase.
// (scrape gap: orig. line 269 carries the signature.)
270  {
271  build(a_gdb);
272  }
273 
// Build one locator per level; in GPU builds, stage the per-level AssignGrid
// functors on the host and copy them to device memory in one transfer.
274  void build (const Vector<BoxArray>& a_ba,
275  const Vector<Geometry>& a_geom)
276  {
277  m_defined = true;
278  int num_levels = static_cast<int>(a_ba.size());
279  m_locators.resize(num_levels);
280  m_grid_assignors.resize(num_levels);
281 #ifdef AMREX_USE_GPU
282  Gpu::HostVector<AssignGrid<BinIteratorFactory> > h_grid_assignors(num_levels);
283  for (int lev = 0; lev < num_levels; ++lev)
284  {
285  m_locators[lev].build(a_ba[lev], a_geom[lev]);
286  h_grid_assignors[lev] = m_locators[lev].getGridAssignor();
287  }
// Async copy; the h_grid_assignors staging buffer must outlive the copy —
// (scrape gap: orig. line 290 is the Gpu::streamSynchronize() that ensures
// this before the HostVector goes out of scope.)
288  Gpu::htod_memcpy_async(m_grid_assignors.data(), h_grid_assignors.data(),
289  sizeof(AssignGrid<BinIteratorFactory>)*num_levels);
291 #else
292  for (int lev = 0; lev < num_levels; ++lev)
293  {
294  m_locators[lev].build(a_ba[lev], a_geom[lev]);
295  m_grid_assignors[lev] = m_locators[lev].getGridAssignor();
296  }
297 #endif
298  }
299 
// Build from a ParGDBBase: gather each level's particle BoxArray and
// Geometry, then delegate to the vector overload above.
300  void build (const ParGDBBase* a_gdb)
301  {
302  Vector<BoxArray> ba;
303  Vector<Geometry> geom;
304  int num_levels = a_gdb->finestLevel()+1;
305  for (int lev = 0; lev < num_levels; ++lev)
306  {
307  ba.push_back(a_gdb->ParticleBoxArray(lev));
308  geom.push_back(a_gdb->Geom(lev));
309  }
310  build(ba, geom);
311  }
312 
// True iff built, the level count matches, and every level's locator was
// built from (shares refs with) the corresponding BoxArray.
313  [[nodiscard]] bool isValid (const Vector<BoxArray>& a_ba) const
314  {
315  if ( !m_defined || (m_locators.empty()) ||
316  (m_locators.size() != a_ba.size()) ) { return false; }
317  bool all_valid = true;
318  int num_levels = m_locators.size();
319  for (int lev = 0; lev < num_levels; ++lev) {
320  all_valid = all_valid && m_locators[lev].isValid(a_ba[lev]);
321  }
322  return all_valid;
323  }
324 
// ParGDBBase overload: collect the per-level particle BoxArrays and check.
325  bool isValid (const ParGDBBase* a_gdb) const
326  {
327  Vector<BoxArray> ba;
328  int num_levels = a_gdb->finestLevel()+1;
329  for (int lev = 0; lev < num_levels; ++lev) {
330  ba.push_back(a_gdb->ParticleBoxArray(lev));
331  }
332  return this->isValid(ba);
333  }
334 
// Refresh each level's cached Geometry (no rebuild of the bins) and re-stage
// the AssignGrid functors; mirrors the GPU/CPU split in build().
335  void setGeometry (const ParGDBBase* a_gdb)
336  {
337  int num_levels = a_gdb->finestLevel()+1;
338 #ifdef AMREX_USE_GPU
339  Gpu::HostVector<AssignGrid<BinIteratorFactory> > h_grid_assignors(num_levels);
340  for (int lev = 0; lev < num_levels; ++lev)
341  {
342  m_locators[lev].setGeometry(a_gdb->Geom(lev));
343  h_grid_assignors[lev] = m_locators[lev].getGridAssignor();
344  }
// (scrape gap: orig. line 347 is the matching Gpu::streamSynchronize().)
345  Gpu::htod_memcpy_async(m_grid_assignors.data(), h_grid_assignors.data(),
346  sizeof(AssignGrid<BinIteratorFactory>)*num_levels);
348 #else
349  for (int lev = 0; lev < num_levels; ++lev)
350  {
351  m_locators[lev].setGeometry(a_gdb->Geom(lev));
352  m_grid_assignors[lev] = m_locators[lev].getGridAssignor();
353  }
354 #endif
355  }
356 
// Return the multi-level AmrAssignGrid view over m_grid_assignors.
// (scrape gap: orig. lines 358-361 carry the body — presumably
// `return AmrAssignGrid<BinIteratorFactory>(m_grid_assignors.dataPtr(),
// m_locators.size());` — confirm against the unscraped header.)
357  [[nodiscard]] AmrAssignGrid<BinIteratorFactory> getGridAssignor () const noexcept
358  {
361  }
362 };
363 
364 }
365 
366 #endif
#define AMREX_ASSERT(EX)
Definition: AMReX_BLassert.H:38
#define AMREX_FORCE_INLINE
Definition: AMReX_Extension.H:119
#define AMREX_GPU_DEVICE
Definition: AMReX_GpuQualifiers.H:18
#define AMREX_GPU_HOST_DEVICE
Definition: AMReX_GpuQualifiers.H:20
#define AMREX_D_TERM(a, b, c)
Definition: AMReX_SPACE.H:129
#define AMREX_D_DECL(a, b, c)
Definition: AMReX_SPACE.H:104
Definition: AMReX_ParticleLocator.H:250
void build(const ParGDBBase *a_gdb)
Definition: AMReX_ParticleLocator.H:300
AmrParticleLocator(const ParGDBBase *a_gdb)
Definition: AMReX_ParticleLocator.H:269
Gpu::DeviceVector< AssignGrid< BinIteratorFactory > > m_grid_assignors
Definition: AMReX_ParticleLocator.H:256
bool m_defined
Definition: AMReX_ParticleLocator.H:257
typename Bins::BinIteratorFactory BinIteratorFactory
Definition: AMReX_ParticleLocator.H:252
AmrAssignGrid< BinIteratorFactory > getGridAssignor() const noexcept
Definition: AMReX_ParticleLocator.H:357
bool isValid(const Vector< BoxArray > &a_ba) const
Definition: AMReX_ParticleLocator.H:313
void setGeometry(const ParGDBBase *a_gdb)
Definition: AMReX_ParticleLocator.H:335
AmrParticleLocator(const Vector< BoxArray > &a_ba, const Vector< Geometry > &a_geom)
Definition: AMReX_ParticleLocator.H:263
bool isValid(const ParGDBBase *a_gdb) const
Definition: AMReX_ParticleLocator.H:325
void build(const Vector< BoxArray > &a_ba, const Vector< Geometry > &a_geom)
Definition: AMReX_ParticleLocator.H:274
Vector< ParticleLocator< Bins > > m_locators
Definition: AMReX_ParticleLocator.H:255
A collection of Boxes stored in an Array.
Definition: AMReX_BoxArray.H:530
static bool SameRefs(const BoxArray &lhs, const BoxArray &rhs)
whether two BoxArrays share the same data
Definition: AMReX_BoxArray.H:800
Long size() const noexcept
Return the number of boxes in the BoxArray.
Definition: AMReX_BoxArray.H:577
AMREX_GPU_HOST_DEVICE const IntVectND< dim > & smallEnd() const &noexcept
Get the smallend of the BoxND.
Definition: AMReX_Box.H:105
AMREX_GPU_HOST_DEVICE BoxND & grow(int i) noexcept
Definition: AMReX_Box.H:627
AMREX_GPU_HOST_DEVICE const IntVectND< dim > & bigEnd() const &noexcept
Get the bigend.
Definition: AMReX_Box.H:116
AMREX_GPU_HOST_DEVICE IntVectND< dim > length() const noexcept
Return the length of the BoxND.
Definition: AMReX_Box.H:146
AMREX_GPU_HOST_DEVICE bool contains(const IntVectND< dim > &p) const noexcept
Returns true if argument is contained within BoxND.
Definition: AMReX_Box.H:204
Rectangular problem domain geometry.
Definition: AMReX_Geometry.H:73
Definition: AMReX_Tuple.H:93
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE Dim3 dim3() const noexcept
Definition: AMReX_IntVect.H:163
AMREX_GPU_HOST_DEVICE static constexpr AMREX_FORCE_INLINE IntVectND< dim > TheUnitVector() noexcept
This static member function returns a reference to a constant IntVectND object, all of whose dim arguments are set to 1.
Definition: AMReX_IntVect.H:682
AMREX_GPU_HOST_DEVICE static constexpr AMREX_FORCE_INLINE IntVectND< dim > TheZeroVector() noexcept
This static member function returns a reference to a constant IntVectND object, all of whose dim arguments are set to 0.
Definition: AMReX_IntVect.H:672
Definition: AMReX_PODVector.H:246
T * data() noexcept
Definition: AMReX_PODVector.H:593
Definition: AMReX_ParGDB.H:13
virtual const Geometry & Geom(int level) const =0
virtual int finestLevel() const =0
virtual const BoxArray & ParticleBoxArray(int level) const =0
Definition: AMReX_ParticleLocator.H:104
IntVect m_bin_size
Definition: AMReX_ParticleLocator.H:205
IntVect m_bins_hi
Definition: AMReX_ParticleLocator.H:204
BoxArray m_ba
Definition: AMReX_ParticleLocator.H:200
Geometry m_geom
Definition: AMReX_ParticleLocator.H:201
Gpu::HostVector< Box > m_host_boxes
Definition: AMReX_ParticleLocator.H:210
IntVect m_bins_lo
Definition: AMReX_ParticleLocator.H:203
Bins m_bins
Definition: AMReX_ParticleLocator.H:208
IntVect m_num_bins
Definition: AMReX_ParticleLocator.H:206
typename Bins::BinIteratorFactory BinIteratorFactory
Definition: AMReX_ParticleLocator.H:107
Gpu::DeviceVector< Box > m_device_boxes
Definition: AMReX_ParticleLocator.H:211
AssignGrid< BinIteratorFactory > getGridAssignor() const noexcept
Definition: AMReX_ParticleLocator.H:183
bool isValid(const BoxArray &ba) const noexcept
Definition: AMReX_ParticleLocator.H:190
void setGeometry(const Geometry &a_geom) noexcept
Definition: AMReX_ParticleLocator.H:177
bool m_defined
Definition: AMReX_ParticleLocator.H:198
void build(const BoxArray &ba, const Geometry &geom)
Definition: AMReX_ParticleLocator.H:111
Definition: AMReX_Reduce.H:249
Type value()
Definition: AMReX_Reduce.H:281
Definition: AMReX_Reduce.H:364
std::enable_if_t< IsFabArray< MF >::value > eval(MF const &mf, IntVect const &nghost, D &reduce_data, F &&f)
Definition: AMReX_Reduce.H:441
This class is a thin wrapper around std::vector. Unlike vector, Vector::operator[] provides bound checking when compiled with DEBUG=TRUE.
Definition: AMReX_Vector.H:27
Long size() const noexcept
Definition: AMReX_Vector.H:50
void copyAsync(HostToDevice, InIter begin, InIter end, OutIter result) noexcept
A host-to-device copy routine. Note this is just a wrapper around memcpy, so it assumes contiguous storage.
Definition: AMReX_GpuContainers.H:233
static constexpr HostToDevice hostToDevice
Definition: AMReX_GpuContainers.H:98
void streamSynchronize() noexcept
Definition: AMReX_GpuDevice.H:237
void htod_memcpy_async(void *p_d, const void *p_h, const std::size_t sz) noexcept
Definition: AMReX_GpuDevice.H:251
static constexpr int P
Definition: AMReX_OpenBC.H:14
Definition: AMReX_Amr.cpp:49
AMREX_GPU_HOST_DEVICE constexpr AMREX_FORCE_INLINE const T & max(const T &a, const T &b) noexcept
Definition: AMReX_Algorithm.H:35
AMREX_GPU_HOST_DEVICE constexpr AMREX_FORCE_INLINE const T & min(const T &a, const T &b) noexcept
Definition: AMReX_Algorithm.H:21
constexpr AMREX_GPU_HOST_DEVICE GpuTuple< detail::tuple_decay_t< Ts >... > makeTuple(Ts &&... args)
Definition: AMReX_Tuple.H:252
IntVectND< AMREX_SPACEDIM > IntVect
Definition: AMReX_BaseFwd.H:30
Definition: AMReX_ParticleLocator.H:216
std::size_t m_size
Definition: AMReX_ParticleLocator.H:218
AmrAssignGrid(const AssignGrid< BinIteratorFactory > *a_funcs, std::size_t a_size)
Definition: AMReX_ParticleLocator.H:220
const AssignGrid< BinIteratorFactory > * m_funcs
Definition: AMReX_ParticleLocator.H:217
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE GpuTuple< int, int > operator()(const P &p, int lev_min=-1, int lev_max=-1, int nGrow=0, Assignor const &assignor={}) const noexcept
Definition: AMReX_ParticleLocator.H:226
Definition: AMReX_ParticleLocator.H:14
Dim3 m_hi
Definition: AMReX_ParticleLocator.H:18
AMREX_GPU_HOST_DEVICE AssignGrid()=default
AssignGrid(BinIteratorFactory a_bif, const IntVect &a_bins_lo, const IntVect &a_bins_hi, const IntVect &a_bin_size, const IntVect &a_num_bins, const Geometry &a_geom)
Definition: AMReX_ParticleLocator.H:29
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE int operator()(const P &p, int nGrow=0, Assignor const &assignor=Assignor{}) const noexcept
Definition: AMReX_ParticleLocator.H:49
Dim3 m_bin_size
Definition: AMReX_ParticleLocator.H:19
Box m_domain
Definition: AMReX_ParticleLocator.H:22
Dim3 m_num_bins
Definition: AMReX_ParticleLocator.H:20
BinIteratorFactory m_bif
Definition: AMReX_ParticleLocator.H:15
GpuArray< Real, AMREX_SPACEDIM > m_dxi
Definition: AMReX_ParticleLocator.H:24
Dim3 m_lo
Definition: AMReX_ParticleLocator.H:17
GpuArray< Real, AMREX_SPACEDIM > m_plo
Definition: AMReX_ParticleLocator.H:23
Definition: AMReX_Dim3.H:12
int x
Definition: AMReX_Dim3.H:12
int z
Definition: AMReX_Dim3.H:12
int y
Definition: AMReX_Dim3.H:12
Definition: AMReX_Reduce.H:147
Definition: AMReX_Reduce.H:114