Block-Structured AMR Software Framework
AMReX_MLTensor_2D_K.H
#ifndef AMREX_MLTENSOR_2D_K_H_
#define AMREX_MLTENSOR_2D_K_H_
#include <AMReX_Config.H>

#include <AMReX_MLLinOp_K.H>

namespace amrex {

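// mltensor_fill_corners: fill the ghost cell at one corner of the valid box
// vbox (icorner = 0: xlo/ylo, 1: xhi/ylo, 2: xlo/yhi, 3: xhi/yhi).  The corner
// value is obtained from the mllinop_apply_bc_x/_y boundary operators of
// AMReX_MLLinOp_K.H; when both directions supply a value, the two results are
// averaged.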
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
void mltensor_fill_corners (int icorner, Box const& vbox, // vbox: the valid box
                            Array4<Real> const& vel,
                            Array4<int const> const& mxlo,
                            Array4<int const> const& mylo,
                            Array4<int const> const& mxhi,
                            Array4<int const> const& myhi,
                            Array4<Real const> const& bcvalxlo,
                            Array4<Real const> const& bcvalylo,
                            Array4<Real const> const& bcvalxhi,
                            Array4<Real const> const& bcvalyhi,
                            Array2D<BoundCond,
                                    0,2*AMREX_SPACEDIM,
                                    0,AMREX_SPACEDIM> const& bct,
                            Array2D<Real,
                                    0,2*AMREX_SPACEDIM,
                                    0,AMREX_SPACEDIM> const& bcl,
                            int inhomog, int maxorder,
                            GpuArray<Real,AMREX_SPACEDIM> const& dxinv,
                            Dim3 const& dlo, Dim3 const& dhi) noexcept
{
    constexpr int k = 0;
    const auto blen = amrex::length(vbox);
    const auto vlo = amrex::lbound(vbox);
    const auto vhi = amrex::ubound(vbox);

    if (icorner == 0) { // xlo & ylo
        int const i = vlo.x-1;
        int const j = vlo.y-1;
        if (mxlo(i,j,k) != BndryData::covered && (dlo.x != vlo.x || dlo.y != vlo.y)) {
            bool x_interior = mylo(i+1,j  ,k) == BndryData::covered;     // i+1,j is a valid cell inside domain
            bool x_exterior = mylo(i+1,j  ,k) == BndryData::not_covered; // i+1,j is a ghost cell inside domain
            bool y_interior = mxlo(i  ,j+1,k) == BndryData::covered;
            bool y_exterior = mxlo(i  ,j+1,k) == BndryData::not_covered;
            if ((x_interior && y_interior) || (x_exterior && y_exterior)) {
                for (int icomp = 0; icomp < AMREX_SPACEDIM; ++icomp) {
                    mllinop_apply_bc_x(Orientation::low, i,j,k, blen.x, vel, mxlo,
                                       bct(Orientation::xlo(), icomp),
                                       bcl(Orientation::xlo(), icomp),
                                       bcvalxlo, maxorder, dxinv[0], inhomog, icomp);
                    Real tmp = vel(i,j,k,icomp);
                    mllinop_apply_bc_y(Orientation::low, i,j,k, blen.y, vel, mylo,
                                       bct(Orientation::ylo(), icomp),
                                       bcl(Orientation::ylo(), icomp),
                                       bcvalylo, maxorder, dxinv[1], inhomog, icomp);
                    vel(i,j,k,icomp) = 0.5_rt*(tmp+vel(i,j,k,icomp));
                }
            } else if (x_interior || dlo.x == vlo.x) {
                for (int icomp = 0; icomp < AMREX_SPACEDIM; ++icomp) {
                    mllinop_apply_bc_x(Orientation::low, i,j,k, blen.x, vel, mxlo,
                                       bct(Orientation::xlo(), icomp),
                                       bcl(Orientation::xlo(), icomp),
                                       bcvalxlo, maxorder, dxinv[0], inhomog, icomp);
                }
            } else if (y_interior || dlo.y == vlo.y) {
                for (int icomp = 0; icomp < AMREX_SPACEDIM; ++icomp) {
                    mllinop_apply_bc_y(Orientation::low, i,j,k, blen.y, vel, mylo,
                                       bct(Orientation::ylo(), icomp),
                                       bcl(Orientation::ylo(), icomp),
                                       bcvalylo, maxorder, dxinv[1], inhomog, icomp);
                }
            }
        }
    } else if (icorner == 1) { // xhi & ylo
        int const i = vhi.x+1;
        int const j = vlo.y-1;
        if (mxhi(i,j,k) != BndryData::covered && (dhi.x != vhi.x || dlo.y != vlo.y)) {
            bool x_interior = mylo(i-1,j  ,k) == BndryData::covered;
            bool x_exterior = mylo(i-1,j  ,k) == BndryData::not_covered;
            bool y_interior = mxhi(i  ,j+1,k) == BndryData::covered;
            bool y_exterior = mxhi(i  ,j+1,k) == BndryData::not_covered;
            if ((x_interior && y_interior) || (x_exterior && y_exterior)) {
                for (int icomp = 0; icomp < AMREX_SPACEDIM; ++icomp) {
                    mllinop_apply_bc_x(Orientation::high, i,j,k, blen.x, vel, mxhi,
                                       bct(Orientation::xhi(), icomp),
                                       bcl(Orientation::xhi(), icomp),
                                       bcvalxhi, maxorder, dxinv[0], inhomog, icomp);
                    Real tmp = vel(i,j,k,icomp);
                    mllinop_apply_bc_y(Orientation::low, i,j,k, blen.y, vel, mylo,
                                       bct(Orientation::ylo(), icomp),
                                       bcl(Orientation::ylo(), icomp),
                                       bcvalylo, maxorder, dxinv[1], inhomog, icomp);
                    vel(i,j,k,icomp) = 0.5_rt*(tmp+vel(i,j,k,icomp));
                }
            } else if (x_interior || dhi.x == vhi.x) {
                for (int icomp = 0; icomp < AMREX_SPACEDIM; ++icomp) {
                    mllinop_apply_bc_x(Orientation::high, i,j,k, blen.x, vel, mxhi,
                                       bct(Orientation::xhi(), icomp),
                                       bcl(Orientation::xhi(), icomp),
                                       bcvalxhi, maxorder, dxinv[0], inhomog, icomp);
                }
            } else if (y_interior || dlo.y == vlo.y) {
                for (int icomp = 0; icomp < AMREX_SPACEDIM; ++icomp) {
                    mllinop_apply_bc_y(Orientation::low, i,j,k, blen.y, vel, mylo,
                                       bct(Orientation::ylo(), icomp),
                                       bcl(Orientation::ylo(), icomp),
                                       bcvalylo, maxorder, dxinv[1], inhomog, icomp);
                }
            }
        }
    } else if (icorner == 2) { // xlo & yhi
        int const i = vlo.x-1;
        int const j = vhi.y+1;
        if (mxlo(i,j,k) != BndryData::covered && (dlo.x != vlo.x || dhi.y != vhi.y)) {
            bool x_interior = myhi(i+1,j  ,k) == BndryData::covered;
            bool x_exterior = myhi(i+1,j  ,k) == BndryData::not_covered;
            bool y_interior = mxlo(i  ,j-1,k) == BndryData::covered;
            bool y_exterior = mxlo(i  ,j-1,k) == BndryData::not_covered;
            if ((x_interior && y_interior) || (x_exterior && y_exterior)) {
                for (int icomp = 0; icomp < AMREX_SPACEDIM; ++icomp) {
                    mllinop_apply_bc_x(Orientation::low, i,j,k, blen.x, vel, mxlo,
                                       bct(Orientation::xlo(), icomp),
                                       bcl(Orientation::xlo(), icomp),
                                       bcvalxlo, maxorder, dxinv[0], inhomog, icomp);
                    Real tmp = vel(i,j,k,icomp);
                    mllinop_apply_bc_y(Orientation::high, i,j,k, blen.y, vel, myhi,
                                       bct(Orientation::yhi(), icomp),
                                       bcl(Orientation::yhi(), icomp),
                                       bcvalyhi, maxorder, dxinv[1], inhomog, icomp);
                    vel(i,j,k,icomp) = 0.5_rt*(tmp+vel(i,j,k,icomp));
                }
            } else if (x_interior || dlo.x == vlo.x) {
                for (int icomp = 0; icomp < AMREX_SPACEDIM; ++icomp) {
                    mllinop_apply_bc_x(Orientation::low, i,j,k, blen.x, vel, mxlo,
                                       bct(Orientation::xlo(), icomp),
                                       bcl(Orientation::xlo(), icomp),
                                       bcvalxlo, maxorder, dxinv[0], inhomog, icomp);
                }
            } else if (y_interior || dhi.y == vhi.y) {
                for (int icomp = 0; icomp < AMREX_SPACEDIM; ++icomp) {
                    mllinop_apply_bc_y(Orientation::high, i,j,k, blen.y, vel, myhi,
                                       bct(Orientation::yhi(), icomp),
                                       bcl(Orientation::yhi(), icomp),
                                       bcvalyhi, maxorder, dxinv[1], inhomog, icomp);
                }
            }
        }
    } else if (icorner == 3) { // xhi & yhi
        int const i = vhi.x+1;
        int const j = vhi.y+1;
        if (mxhi(i,j,k) != BndryData::covered && (dhi.x != vhi.x || dhi.y != vhi.y)) {
            bool x_interior = myhi(i-1,j  ,k) == BndryData::covered;
            bool x_exterior = myhi(i-1,j  ,k) == BndryData::not_covered;
            bool y_interior = mxhi(i  ,j-1,k) == BndryData::covered;
            bool y_exterior = mxhi(i  ,j-1,k) == BndryData::not_covered;
            if ((x_interior && y_interior) || (x_exterior && y_exterior)) {
                for (int icomp = 0; icomp < AMREX_SPACEDIM; ++icomp) {
                    mllinop_apply_bc_x(Orientation::high, i,j,k, blen.x, vel, mxhi,
                                       bct(Orientation::xhi(), icomp),
                                       bcl(Orientation::xhi(), icomp),
                                       bcvalxhi, maxorder, dxinv[0], inhomog, icomp);
                    Real tmp = vel(i,j,k,icomp);
                    mllinop_apply_bc_y(Orientation::high, i,j,k, blen.y, vel, myhi,
                                       bct(Orientation::yhi(), icomp),
                                       bcl(Orientation::yhi(), icomp),
                                       bcvalyhi, maxorder, dxinv[1], inhomog, icomp);
                    vel(i,j,k,icomp) = 0.5_rt*(tmp+vel(i,j,k,icomp));
                }
            } else if (x_interior || dhi.x == vhi.x) {
                for (int icomp = 0; icomp < AMREX_SPACEDIM; ++icomp) {
                    mllinop_apply_bc_x(Orientation::high, i,j,k, blen.x, vel, mxhi,
                                       bct(Orientation::xhi(), icomp),
                                       bcl(Orientation::xhi(), icomp),
                                       bcvalxhi, maxorder, dxinv[0], inhomog, icomp);
                }
            } else if (y_interior || dhi.y == vhi.y) {
                for (int icomp = 0; icomp < AMREX_SPACEDIM; ++icomp) {
                    mllinop_apply_bc_y(Orientation::high, i,j,k, blen.y, vel, myhi,
                                       bct(Orientation::yhi(), icomp),
                                       bcl(Orientation::yhi(), icomp),
                                       bcvalyhi, maxorder, dxinv[1], inhomog, icomp);
                }
            }
        }
    }
}

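// mltensor_cross_terms_fx: cross-term (transverse-derivative) contribution to
// the x-face flux.  Only y-derivatives enter here, so divu reduces to dvdy and
// the stored components are
//     fx(i,j,0,0) = -(xif - twoThirds*mun) * dvdy
//     fx(i,j,0,1) = -mut * dudy
// where mun is recovered from the stored coefficient as 0.75*(etax - kapx)
// and mut = etax(i,j,0,1).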
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
void mltensor_cross_terms_fx (Box const& box, Array4<Real> const& fx,
                              Array4<Real const> const& vel,
                              Array4<Real const> const& etax,
                              Array4<Real const> const& kapx,
                              GpuArray<Real,AMREX_SPACEDIM> const& dxinv) noexcept
{
    const Real dyi = dxinv[1];
    const auto lo = amrex::lbound(box);
    const auto hi = amrex::ubound(box);
    constexpr Real twoThirds = Real(2./3.);

    int k = 0;
    for (int j = lo.y; j <= hi.y; ++j) {
        AMREX_PRAGMA_SIMD
        for (int i = lo.x; i <= hi.x; ++i) {
            Real dudy = mltensor_dy_on_xface(i,j,k,0,vel,dyi);
            Real dvdy = mltensor_dy_on_xface(i,j,k,1,vel,dyi);
            Real divu = dvdy;
            Real xif = kapx(i,j,0);
            Real mun = Real(0.75)*(etax(i,j,0,0)-xif); // restore the original eta
            Real mut = etax(i,j,0,1);
            fx(i,j,0,0) = -mun*(-twoThirds*divu) - xif*divu;
            fx(i,j,0,1) = -mut*dudy;
        }
    }
}

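// mltensor_cross_terms_fy: y-face counterpart of mltensor_cross_terms_fx.
// Only x-derivatives enter, so divu reduces to dudx and
//     fy(i,j,0,0) = -mut * dvdx
//     fy(i,j,0,1) = -(xif - twoThirds*mun) * dudx
// where mun = 0.75*(etay(i,j,0,1) - kapy) and mut = etay(i,j,0,0).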
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
void mltensor_cross_terms_fy (Box const& box, Array4<Real> const& fy,
                              Array4<Real const> const& vel,
                              Array4<Real const> const& etay,
                              Array4<Real const> const& kapy,
                              GpuArray<Real,AMREX_SPACEDIM> const& dxinv) noexcept
{
    const Real dxi = dxinv[0];
    const auto lo = amrex::lbound(box);
    const auto hi = amrex::ubound(box);
    constexpr Real twoThirds = Real(2./3.);

    int k = 0;
    for (int j = lo.y; j <= hi.y; ++j) {
        AMREX_PRAGMA_SIMD
        for (int i = lo.x; i <= hi.x; ++i) {
            Real dudx = mltensor_dx_on_yface(i,j,k,0,vel,dxi);
            Real dvdx = mltensor_dx_on_yface(i,j,k,1,vel,dxi);
            Real divu = dudx;
            Real xif = kapy(i,j,0);
            Real mun = Real(0.75)*(etay(i,j,0,1)-xif); // restore the original eta
            Real mut = etay(i,j,0,0);
            fy(i,j,0,0) = -mut*dvdx;
            fy(i,j,0,1) = -mun*(-twoThirds*divu) - xif*divu;
        }
    }
}

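// Boundary-aware overload of mltensor_cross_terms_fx: the transverse
// derivatives are taken with the version of mltensor_dy_on_xface that uses
// the boundary values bvxlo/bvxhi and boundary-condition types bct near the
// domain extents dlo/dhi.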
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
void mltensor_cross_terms_fx (Box const& box, Array4<Real> const& fx,
                              Array4<Real const> const& vel,
                              Array4<Real const> const& etax,
                              Array4<Real const> const& kapx,
                              GpuArray<Real,AMREX_SPACEDIM> const& dxinv,
                              Array4<Real const> const& bvxlo,
                              Array4<Real const> const& bvxhi,
                              Array2D<BoundCond,
                                      0,2*AMREX_SPACEDIM,
                                      0,AMREX_SPACEDIM> const& bct,
                              Dim3 const& dlo, Dim3 const& dhi) noexcept
{
    const Real dyi = dxinv[1];
    const auto lo = amrex::lbound(box);
    const auto hi = amrex::ubound(box);
    constexpr Real twoThirds = Real(2./3.);

    // Three BC types: reflect odd, neumann, and dirichlet

    int k = 0;
    for (int j = lo.y; j <= hi.y; ++j) {
        for (int i = lo.x; i <= hi.x; ++i) {
            Real dudy = mltensor_dy_on_xface(i,j,k,0,vel,dyi,bvxlo,bvxhi,bct,dlo,dhi);
            Real dvdy = mltensor_dy_on_xface(i,j,k,1,vel,dyi,bvxlo,bvxhi,bct,dlo,dhi);
            Real divu = dvdy;
            Real xif = kapx(i,j,0);
            Real mun = Real(0.75)*(etax(i,j,0,0)-xif); // restore the original eta
            Real mut = etax(i,j,0,1);
            fx(i,j,0,0) = -mun*(-twoThirds*divu) - xif*divu;
            fx(i,j,0,1) = -mut*dudy;
        }
    }
}

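// Boundary-aware overload of mltensor_cross_terms_fy, using the version of
// mltensor_dx_on_yface that takes bvylo/bvyhi, bct, dlo, and dhi.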
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
void mltensor_cross_terms_fy (Box const& box, Array4<Real> const& fy,
                              Array4<Real const> const& vel,
                              Array4<Real const> const& etay,
                              Array4<Real const> const& kapy,
                              GpuArray<Real,AMREX_SPACEDIM> const& dxinv,
                              Array4<Real const> const& bvylo,
                              Array4<Real const> const& bvyhi,
                              Array2D<BoundCond,
                                      0,2*AMREX_SPACEDIM,
                                      0,AMREX_SPACEDIM> const& bct,
                              Dim3 const& dlo, Dim3 const& dhi) noexcept
{
    const Real dxi = dxinv[0];
    const auto lo = amrex::lbound(box);
    const auto hi = amrex::ubound(box);
    constexpr Real twoThirds = Real(2./3.);

    int k = 0;
    for (int j = lo.y; j <= hi.y; ++j) {
        for (int i = lo.x; i <= hi.x; ++i) {
            Real dudx = mltensor_dx_on_yface(i,j,k,0,vel,dxi,bvylo,bvyhi,bct,dlo,dhi);
            Real dvdx = mltensor_dx_on_yface(i,j,k,1,vel,dxi,bvylo,bvyhi,bct,dlo,dhi);
            Real divu = dudx;
            Real xif = kapy(i,j,0);
            Real mun = Real(0.75)*(etay(i,j,0,1)-xif); // restore the original eta
            Real mut = etay(i,j,0,0);
            fy(i,j,0,0) = -mut*dvdx;
            fy(i,j,0,1) = -mun*(-twoThirds*divu) - xif*divu;
        }
    }
}

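// mltensor_cross_terms: accumulate the divergence of the cross-term fluxes
// into Ax, i.e. Ax(n) += bscalar * (d(fx(n))/dx + d(fy(n))/dy) for each
// velocity component n, using face differences scaled by dxinv.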
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
void mltensor_cross_terms (Box const& box, Array4<Real> const& Ax,
                           Array4<Real const> const& fx,
                           Array4<Real const> const& fy,
                           GpuArray<Real,AMREX_SPACEDIM> const& dxinv,
                           Real bscalar) noexcept
{
    const Real dxi = bscalar * dxinv[0];
    const Real dyi = bscalar * dxinv[1];
    const auto lo = amrex::lbound(box);
    const auto hi = amrex::ubound(box);

    for (int j = lo.y; j <= hi.y; ++j) {
        AMREX_PRAGMA_SIMD
        for (int i = lo.x; i <= hi.x; ++i) {
            Ax(i,j,0,0) += dxi*(fx(i+1,j  ,0,0) - fx(i,j,0,0))
                         + dyi*(fy(i  ,j+1,0,0) - fy(i,j,0,0));
            Ax(i,j,0,1) += dxi*(fx(i+1,j  ,0,1) - fx(i,j,0,1))
                         + dyi*(fy(i  ,j+1,0,1) - fy(i,j,0,1));
        }
    }
}

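// mltensor_cross_terms_os: same as mltensor_cross_terms, but with an
// additional cell mask osm; cells where osm(i,j,0) == 0 have Ax set to zero
// instead of being updated.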
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
void mltensor_cross_terms_os (Box const& box, Array4<Real> const& Ax,
                              Array4<Real const> const& fx,
                              Array4<Real const> const& fy,
                              Array4<int const> const& osm,
                              GpuArray<Real,AMREX_SPACEDIM> const& dxinv,
                              Real bscalar) noexcept
{
    const Real dxi = bscalar * dxinv[0];
    const Real dyi = bscalar * dxinv[1];
    const auto lo = amrex::lbound(box);
    const auto hi = amrex::ubound(box);

    for (int j = lo.y; j <= hi.y; ++j) {
        AMREX_PRAGMA_SIMD
        for (int i = lo.x; i <= hi.x; ++i) {
            if (osm(i,j,0) == 0) {
                Ax(i,j,0,0) = 0.0;
                Ax(i,j,0,1) = 0.0;
            } else {
                Ax(i,j,0,0) += dxi*(fx(i+1,j  ,0,0) - fx(i,j,0,0))
                             + dyi*(fy(i  ,j+1,0,0) - fy(i,j,0,0));
                Ax(i,j,0,1) += dxi*(fx(i+1,j  ,0,1) - fx(i,j,0,1))
                             + dyi*(fy(i  ,j+1,0,1) - fy(i,j,0,1));
            }
        }
    }
}

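// mltensor_vel_grads_fx: all four velocity gradients on x-faces, stored as
// fx components 0..3 = (dudx, dvdx, dudy, dvdy).  The normal derivatives are
// simple face differences; the transverse ones come from mltensor_dy_on_xface.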
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
void mltensor_vel_grads_fx (Box const& box, Array4<Real> const& fx,
                            Array4<Real const> const& vel,
                            GpuArray<Real,AMREX_SPACEDIM> const& dxinv) noexcept
{
    const Real dxi = dxinv[0];
    const Real dyi = dxinv[1];
    const auto lo = amrex::lbound(box);
    const auto hi = amrex::ubound(box);

    int k = 0;
    for (int j = lo.y; j <= hi.y; ++j) {
        AMREX_PRAGMA_SIMD
        for (int i = lo.x; i <= hi.x; ++i) {
            Real dudx = (vel(i,j,0,0) - vel(i-1,j,0,0))*dxi;
            Real dvdx = (vel(i,j,0,1) - vel(i-1,j,0,1))*dxi;
            Real dudy = mltensor_dy_on_xface(i,j,k,0,vel,dyi);
            Real dvdy = mltensor_dy_on_xface(i,j,k,1,vel,dyi);
            fx(i,j,0,0) = dudx;
            fx(i,j,0,1) = dvdx;
            fx(i,j,0,2) = dudy;
            fx(i,j,0,3) = dvdy;
        }
    }
}

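// mltensor_vel_grads_fy: all four velocity gradients on y-faces, stored as
// fy components 0..3 = (dudx, dvdx, dudy, dvdy).  Here dudy/dvdy are the
// normal face differences and dudx/dvdx come from mltensor_dx_on_yface.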
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
void mltensor_vel_grads_fy (Box const& box, Array4<Real> const& fy,
                            Array4<Real const> const& vel,
                            GpuArray<Real,AMREX_SPACEDIM> const& dxinv) noexcept
{
    const Real dxi = dxinv[0];
    const Real dyi = dxinv[1];
    const auto lo = amrex::lbound(box);
    const auto hi = amrex::ubound(box);

    int k = 0;
    for (int j = lo.y; j <= hi.y; ++j) {
        AMREX_PRAGMA_SIMD
        for (int i = lo.x; i <= hi.x; ++i) {
            Real dudx = mltensor_dx_on_yface(i,j,k,0,vel,dxi);
            Real dvdx = mltensor_dx_on_yface(i,j,k,1,vel,dxi);
            Real dudy = (vel(i,j,0,0) - vel(i,j-1,0,0))*dyi;
            Real dvdy = (vel(i,j,0,1) - vel(i,j-1,0,1))*dyi;
            fy(i,j,0,0) = dudx;
            fy(i,j,0,1) = dvdx;
            fy(i,j,0,2) = dudy;
            fy(i,j,0,3) = dvdy;
        }
    }
}

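// Boundary-aware overload of mltensor_vel_grads_fx: the transverse
// derivatives use the mltensor_dy_on_xface overload that takes bvxlo/bvxhi,
// bct, dlo, and dhi.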
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
void mltensor_vel_grads_fx (Box const& box, Array4<Real> const& fx,
                            Array4<Real const> const& vel,
                            GpuArray<Real,AMREX_SPACEDIM> const& dxinv,
                            Array4<Real const> const& bvxlo,
                            Array4<Real const> const& bvxhi,
                            Array2D<BoundCond,
                                    0,2*AMREX_SPACEDIM,
                                    0,AMREX_SPACEDIM> const& bct,
                            Dim3 const& dlo, Dim3 const& dhi) noexcept
{
    const Real dxi = dxinv[0];
    const Real dyi = dxinv[1];
    const auto lo = amrex::lbound(box);
    const auto hi = amrex::ubound(box);

    int k = 0;
    for (int j = lo.y; j <= hi.y; ++j) {
        for (int i = lo.x; i <= hi.x; ++i) {
            Real dudx = (vel(i,j,0,0) - vel(i-1,j,0,0))*dxi;
            Real dvdx = (vel(i,j,0,1) - vel(i-1,j,0,1))*dxi;
            Real dudy = mltensor_dy_on_xface(i,j,k,0,vel,dyi,bvxlo,bvxhi,bct,dlo,dhi);
            Real dvdy = mltensor_dy_on_xface(i,j,k,1,vel,dyi,bvxlo,bvxhi,bct,dlo,dhi);
            fx(i,j,0,0) = dudx;
            fx(i,j,0,1) = dvdx;
            fx(i,j,0,2) = dudy;
            fx(i,j,0,3) = dvdy;
        }
    }
}

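// Boundary-aware overload of mltensor_vel_grads_fy: the transverse
// derivatives use the mltensor_dx_on_yface overload that takes bvylo/bvyhi,
// bct, dlo, and dhi.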
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
void mltensor_vel_grads_fy (Box const& box, Array4<Real> const& fy,
                            Array4<Real const> const& vel,
                            GpuArray<Real,AMREX_SPACEDIM> const& dxinv,
                            Array4<Real const> const& bvylo,
                            Array4<Real const> const& bvyhi,
                            Array2D<BoundCond,
                                    0,2*AMREX_SPACEDIM,
                                    0,AMREX_SPACEDIM> const& bct,
                            Dim3 const& dlo, Dim3 const& dhi) noexcept
{
    const Real dxi = dxinv[0];
    const Real dyi = dxinv[1];
    const auto lo = amrex::lbound(box);
    const auto hi = amrex::ubound(box);

    int k = 0;
    for (int j = lo.y; j <= hi.y; ++j) {
        for (int i = lo.x; i <= hi.x; ++i) {
            Real dudx = mltensor_dx_on_yface(i,j,k,0,vel,dxi,bvylo,bvyhi,bct,dlo,dhi);
            Real dvdx = mltensor_dx_on_yface(i,j,k,1,vel,dxi,bvylo,bvyhi,bct,dlo,dhi);
            Real dudy = (vel(i,j,0,0) - vel(i,j-1,0,0))*dyi;
            Real dvdy = (vel(i,j,0,1) - vel(i,j-1,0,1))*dyi;
            fy(i,j,0,0) = dudx;
            fy(i,j,0,1) = dvdx;
            fy(i,j,0,2) = dudy;
            fy(i,j,0,3) = dvdy;
        }
    }
}

}

#endif