AMReX: a Block-Structured AMR Software Framework
Source listing of AMReX_GpuLaunch.H
Go to the documentation of this file.
1 #ifndef AMREX_GPU_LAUNCH_H_
2 #define AMREX_GPU_LAUNCH_H_
3 #include <AMReX_Config.H>
4 
5 #include <AMReX_GpuQualifiers.H>
6 #include <AMReX_GpuKernelInfo.H>
7 #include <AMReX_GpuControl.H>
8 #include <AMReX_GpuTypes.H>
9 #include <AMReX_GpuError.H>
10 #include <AMReX_GpuRange.H>
11 #include <AMReX_GpuDevice.H>
12 #include <AMReX_GpuMemory.H>
13 #include <AMReX_GpuReduce.H>
14 #include <AMReX_Tuple.H>
15 #include <AMReX_Box.H>
16 #include <AMReX_Loop.H>
17 #include <AMReX_Extension.H>
18 #include <AMReX_BLassert.H>
19 #include <AMReX_TypeTraits.H>
20 #include <AMReX_GpuLaunchGlobal.H>
21 #include <AMReX_RandomEngine.H>
22 #include <AMReX_Algorithm.H>
23 #include <AMReX_Math.H>
24 #include <cstddef>
25 #include <limits>
26 #include <algorithm>
27 #include <utility>
28 
29 #define AMREX_GPU_NCELLS_PER_THREAD 3
30 #define AMREX_GPU_Y_STRIDE 1
31 #define AMREX_GPU_Z_STRIDE 1
32 
33 #ifdef AMREX_USE_CUDA
34 # define AMREX_LAUNCH_KERNEL(MT, blocks, threads, sharedMem, stream, ... ) \
35  amrex::launch_global<MT><<<blocks, threads, sharedMem, stream>>>(__VA_ARGS__)
36 #elif defined(AMREX_USE_HIP)
37 # define AMREX_LAUNCH_KERNEL(MT, blocks, threads, sharedMem, stream, ... ) \
38  hipLaunchKernelGGL(launch_global<MT>, blocks, threads, sharedMem, stream, __VA_ARGS__)
39 #endif
40 
41 
42 namespace amrex {
43 
44 // We cannot take rvalue lambdas.
45 // ************************************************
46 // Variadic lambda function wrappers for C++ CUDA/HIP Kernel calls.
47 
#if defined(AMREX_USE_CUDA) || defined(AMREX_USE_HIP)
    //! Kernel entry point. The callables are taken by value on purpose:
    //! device code must own copies, not references to host stack objects.
    template<class L, class... Lambdas>
    AMREX_GPU_GLOBAL void launch_global (L f0, Lambdas... fs)
    {
        f0();
        call_device(fs...);
    }

    //! Invoke a single callable in device code.
    template<class L>
    AMREX_GPU_DEVICE void call_device (L&& f0) noexcept { f0(); }

    //! Invoke several callables in device code, strictly in argument order.
    template<class L, class... Lambdas>
    AMREX_GPU_DEVICE void call_device (L&& f0, Lambdas&&... fs) noexcept
    {
        f0();
        (std::forward<Lambdas>(fs)(), ...); // comma fold: left-to-right, same order as the recursive form
    }
#endif
61 
62 // CPU variation
63 
64  template<class L>
65  void launch_host (L&& f0) noexcept { std::forward<L>(f0)(); }
66 
67  template<class L, class... Lambdas>
68  void launch_host (L&& f0, Lambdas&&... fs) noexcept {
69  std::forward<L>(f0)();
70  launch_host(std::forward<Lambdas>(fs)...);
71  }
72 
73 
74  template <class T> class LayoutData;
75  class FabArrayBase;
76 
77 namespace Gpu {
78 
79 #ifdef AMREX_USE_GPU
80  inline constexpr std::size_t numThreadsPerBlockParallelFor () {
81  return AMREX_GPU_MAX_THREADS;
82  }
83 #else
84  inline constexpr std::size_t numThreadsPerBlockParallelFor () { return 0; }
85 #endif
86 
87 // ************************************************
88 
89  struct ComponentBox {
91  int ic;
92  int nc;
93  };
94 
95  struct GridSize {
96  int numBlocks;
99  };
100 
101 // ************************************************
102 
104  inline
105  Box getThreadBox (const Box& bx, Long offset) noexcept
106  {
108  const auto len = bx.length3d();
109  Long k = offset / (len[0]*len[1]);
110  Long j = (offset - k*(len[0]*len[1])) / len[0];
111  Long i = (offset - k*(len[0]*len[1])) - j*len[0];
112  IntVect iv{AMREX_D_DECL(static_cast<int>(i),
113  static_cast<int>(j),
114  static_cast<int>(k))};
115  iv += bx.smallEnd();
116  return (bx & Box(iv,iv,bx.type()));
117  ))
120  return bx;
121  ))
122  }
123 
124 // ************************************************
125 
126 #ifdef AMREX_USE_GPU
128  ExecutionConfig () noexcept {
130  }
131  ExecutionConfig (const Box& box) noexcept {
132  // If we change this, we must make sure it doesn't break FabArrayUtility Reduce*,
133  // which assumes the decomposition is 1D.
135 #if 0
137  b -= box.smallEnd();
140 #endif
141  }
142  ExecutionConfig (const Box& box, int comps) noexcept {
143  const Box& b = amrex::surroundingNodes(box);
144  Gpu::Device::c_comps_threads_and_blocks(b.loVect(), b.hiVect(), comps, numBlocks, numThreads);
145  }
146  ExecutionConfig (Long N) noexcept {
148  }
149  ExecutionConfig (dim3 nb, dim3 nt, std::size_t sm=0) noexcept
150  : numBlocks(nb), numThreads(nt), sharedMem(sm) {}
151 
152  dim3 numBlocks;
154  std::size_t sharedMem = 0;
155  };
156 
157  template <int MT>
159  makeExecutionConfig (Long N) noexcept
160  {
161  ExecutionConfig ec(dim3{}, dim3{});
162  Long numBlocks = (std::max(N,Long(1)) + MT - 1) / MT;
163  // ensure that blockDim.x*gridDim.x does not overflow
164  numBlocks = std::min(numBlocks, Long(std::numeric_limits<unsigned int>::max()/MT));
165  // ensure that the maximum grid size of 2^31-1 won't be exceeded
166  numBlocks = std::min(numBlocks, Long(std::numeric_limits<int>::max()));
167  ec.numBlocks.x = numBlocks;
168  ec.numThreads.x = MT;
170  return ec;
171  }
172 
173  template <int MT>
174  ExecutionConfig
175  makeExecutionConfig (const Box& box) noexcept
176  {
177  return makeExecutionConfig<MT>(box.numPts());
178  }
179 #endif
180 
181 }
182 }
183 
184 
185 #ifdef AMREX_USE_GPU
186 #include <AMReX_GpuLaunchMacrosG.H>
187 #include <AMReX_GpuLaunchFunctsG.H>
188 #else
189 #include <AMReX_GpuLaunchMacrosC.H>
190 #include <AMReX_GpuLaunchFunctsC.H>
191 #endif
192 
193 #include <AMReX_GpuLaunch.nolint.H>
194 
196 
197 #endif
#define AMREX_ASSERT(EX)
Definition: AMReX_BLassert.H:38
#define AMREX_GPU_Z_STRIDE
Definition: AMReX_GpuLaunch.H:31
#define AMREX_GPU_NCELLS_PER_THREAD
Definition: AMReX_GpuLaunch.H:29
#define AMREX_GPU_Y_STRIDE
Definition: AMReX_GpuLaunch.H:30
#define AMREX_IF_ON_DEVICE(CODE)
Definition: AMReX_GpuQualifiers.H:56
#define AMREX_GPU_GLOBAL
Definition: AMReX_GpuQualifiers.H:19
#define AMREX_IF_ON_HOST(CODE)
Definition: AMReX_GpuQualifiers.H:58
#define AMREX_GPU_DEVICE
Definition: AMReX_GpuQualifiers.H:18
#define AMREX_GPU_HOST_DEVICE
Definition: AMReX_GpuQualifiers.H:20
Array4< int const > offset
Definition: AMReX_HypreMLABecLap.cpp:1089
#define AMREX_D_DECL(a, b, c)
Definition: AMReX_SPACE.H:104
static void c_threads_and_blocks(const int *lo, const int *hi, dim3 &numBlocks, dim3 &numThreads) noexcept
Definition: AMReX_GpuDevice.cpp:859
static void n_threads_and_blocks(const Long N, dim3 &numBlocks, dim3 &numThreads) noexcept
Definition: AMReX_GpuDevice.cpp:844
static constexpr AMREX_EXPORT int warp_size
Definition: AMReX_GpuDevice.H:173
static void c_comps_threads_and_blocks(const int *lo, const int *hi, const int comps, dim3 &numBlocks, dim3 &numThreads) noexcept
Definition: AMReX_GpuDevice.cpp:851
static void grid_stride_threads_and_blocks(dim3 &numBlocks, dim3 &numThreads) noexcept
Definition: AMReX_GpuDevice.cpp:917
constexpr std::size_t numThreadsPerBlockParallelFor()
Definition: AMReX_GpuLaunch.H:80
ExecutionConfig makeExecutionConfig(Long N) noexcept
Definition: AMReX_GpuLaunch.H:159
AMREX_GPU_HOST_DEVICE Box getThreadBox(const Box &bx, Long offset) noexcept
Definition: AMReX_GpuLaunch.H:105
@ min
Definition: AMReX_ParallelReduce.H:18
@ max
Definition: AMReX_ParallelReduce.H:17
Definition: AMReX_Amr.cpp:49
void launch_host(L &&f0) noexcept
Definition: AMReX_GpuLaunch.H:65
BoxND< AMREX_SPACEDIM > Box
Definition: AMReX_BaseFwd.H:27
IntVectND< AMREX_SPACEDIM > IntVect
Definition: AMReX_BaseFwd.H:30
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void ignore_unused(const Ts &...)
This shuts up the compiler about unused variables.
Definition: AMReX.H:111
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE BoxND< dim > surroundingNodes(const BoxND< dim > &b, int dir) noexcept
Returns a BoxND with NODE based coordinates in direction dir that encloses BoxND b....
Definition: AMReX_Box.H:1399
AMREX_GPU_DEVICE void call_device(L &&f0) noexcept
Definition: AMReX_GpuLaunch.H:53
AMREX_GPU_GLOBAL void launch_global(L f0, Lambdas... fs)
Definition: AMReX_GpuLaunch.H:50
Definition: AMReX_GpuLaunch.H:89
Box box
Definition: AMReX_GpuLaunch.H:90
int ic
Definition: AMReX_GpuLaunch.H:91
int nc
Definition: AMReX_GpuLaunch.H:92
Definition: AMReX_GpuLaunch.H:127
ExecutionConfig(dim3 nb, dim3 nt, std::size_t sm=0) noexcept
Definition: AMReX_GpuLaunch.H:149
ExecutionConfig(Long N) noexcept
Definition: AMReX_GpuLaunch.H:146
dim3 numBlocks
Definition: AMReX_GpuLaunch.H:152
dim3 numThreads
Definition: AMReX_GpuLaunch.H:153
ExecutionConfig(const Box &box, int comps) noexcept
Definition: AMReX_GpuLaunch.H:142
ExecutionConfig(const Box &box) noexcept
Definition: AMReX_GpuLaunch.H:131
ExecutionConfig() noexcept
Definition: AMReX_GpuLaunch.H:128
std::size_t sharedMem
Definition: AMReX_GpuLaunch.H:154
Definition: AMReX_GpuLaunch.H:95
int globalBlockId
Definition: AMReX_GpuLaunch.H:98
int numBlocks
Definition: AMReX_GpuLaunch.H:96
int numThreads
Definition: AMReX_GpuLaunch.H:97