#ifndef AMREX_GPU_LAUNCH_FUNCTS_G_H_
#define AMREX_GPU_LAUNCH_FUNCTS_G_H_
#include <AMReX_Config.H>
template <typename F, typename N>
AMREX_GPU_DEVICE AMREX_FORCE_INLINE
auto call_f_scalar_handler (F const& f, N i, Gpu::Handler const&)
    noexcept -> decltype(f(0))
{
    f(i);
}

template <typename F, typename N>

template <typename F, std::size_t...Ns, class...Args>
AMREX_GPU_DEVICE AMREX_FORCE_INLINE
auto call_f_intvect_inner (std::index_sequence<Ns...>, F const& f, IntVectND<1> iv, Args...args)
    noexcept -> decltype(f(0, 0, 0, args...))
{
    f(iv[0], 0, 0, args...);
}

template <typename F, std::size_t...Ns, class...Args>
AMREX_GPU_DEVICE AMREX_FORCE_INLINE
auto call_f_intvect_inner (std::index_sequence<Ns...>, F const& f, IntVectND<2> iv, Args...args)
    noexcept -> decltype(f(0, 0, 0, args...))
{
    f(iv[0], iv[1], 0, args...);
}

template <typename F, int dim, std::size_t...Ns, class...Args>
AMREX_GPU_DEVICE AMREX_FORCE_INLINE
auto call_f_intvect_inner (std::index_sequence<Ns...>, F const& f, IntVectND<dim> iv, Args...args)
    noexcept -> decltype(f(iv, args...))
{
    f(iv, args...);
}

template <typename F, int dim, std::size_t...Ns, class...Args>
AMREX_GPU_DEVICE AMREX_FORCE_INLINE
auto call_f_intvect_inner (std::index_sequence<Ns...>, F const& f, IntVectND<dim> iv, Args...args)
    noexcept -> decltype(f(iv[Ns]..., args...))
{
    f(iv[Ns]..., args...);
}

template <typename F, int dim>
AMREX_GPU_DEVICE AMREX_FORCE_INLINE
auto call_f_intvect (F const& f, IntVectND<dim> iv)
    noexcept -> decltype(call_f_intvect_inner(std::make_index_sequence<dim>(), f, iv))
{
    call_f_intvect_inner(std::make_index_sequence<dim>(), f, iv);
}

template <typename F, int dim>

template <typename F, int dim>
AMREX_GPU_DEVICE AMREX_FORCE_INLINE
auto call_f_intvect_handler (F const& f, IntVectND<dim> iv, Gpu::Handler const&)
    noexcept -> decltype(call_f_intvect_inner(std::make_index_sequence<dim>(), f, iv))
{
    call_f_intvect_inner(std::make_index_sequence<dim>(), f, iv);
}

template <typename F, int dim>

template <typename F, typename T, int dim>
AMREX_GPU_DEVICE AMREX_FORCE_INLINE
auto call_f_intvect_ncomp (F const& f, IntVectND<dim> iv, T ncomp)
    noexcept -> decltype(call_f_intvect_inner(std::make_index_sequence<dim>(), f, iv, 0))
{
    for (T n = 0; n < ncomp; ++n) {
        call_f_intvect_inner(std::make_index_sequence<dim>(), f, iv, n);
    }
}

template <typename F, typename T, int dim>
    for (T n = 0; n < ncomp; ++n) {

template <typename F, typename T, int dim>
AMREX_GPU_DEVICE AMREX_FORCE_INLINE
auto call_f_intvect_ncomp_handler (F const& f, IntVectND<dim> iv, T ncomp, Gpu::Handler const&)
    noexcept -> decltype(call_f_intvect_inner(std::make_index_sequence<dim>(), f, iv, 0))
{
    for (T n = 0; n < ncomp; ++n) {
        call_f_intvect_inner(std::make_index_sequence<dim>(), f, iv, n);
    }
}

template <typename F, typename T, int dim>
    for (T n = 0; n < ncomp; ++n) {
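// For illustration (not part of this header): with dim == 3 the inner dispatcher
// above expands an IntVectND into the familiar three-integer call, i.e.
// call_f_intvect_inner(std::make_index_sequence<3>(), f, iv) ends up invoking
// f(iv[0], iv[1], iv[2]).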
#ifdef AMREX_USE_SYCL

template <typename L>
void single_task (gpuStream_t stream, L const& f) noexcept
{
    auto& q = *(stream.queue);
    try {
        q.submit([&] (sycl::handler& h) {
            h.single_task([=] () { f(); });
        });
    } catch (sycl::exception const& ex) {
        amrex::Abort(std::string("single_task: ")+ex.what()+"!!!!!");
    }
}
template <typename L>
void launch (int nblocks, int nthreads_per_block, std::size_t shared_mem_bytes,
             gpuStream_t stream, L const& f) noexcept
{
    const auto nthreads_total = std::size_t(nthreads_per_block) * nblocks;
    const std::size_t shared_mem_numull = (shared_mem_bytes+sizeof(unsigned long long)-1)
        / sizeof(unsigned long long);
    auto& q = *(stream.queue);
    try {
        q.submit([&] (sycl::handler& h) {
            sycl::local_accessor<unsigned long long>
                shared_data(sycl::range<1>(shared_mem_numull), h);
            h.parallel_for(sycl::nd_range<1>(sycl::range<1>(nthreads_total),
                                             sycl::range<1>(nthreads_per_block)),
            [=] (sycl::nd_item<1> item)
            {
                f(Gpu::Handler{&item,shared_data.get_multi_ptr<sycl::access::decorated::yes>().get()});
            });
        });
    } catch (sycl::exception const& ex) {
        amrex::Abort(std::string("launch: ")+ex.what()+"!!!!!");
    }
}
template <typename L>
void launch (int nblocks, int nthreads_per_block, gpuStream_t stream, L const& f) noexcept
{
    const auto nthreads_total = std::size_t(nthreads_per_block) * nblocks;
    auto& q = *(stream.queue);
    try {
        q.submit([&] (sycl::handler& h) {
            h.parallel_for(sycl::nd_range<1>(sycl::range<1>(nthreads_total),
                                             sycl::range<1>(nthreads_per_block)),
            [=] (sycl::nd_item<1> item)
            {
                f(Gpu::Handler{&item});
            });
        });
    } catch (sycl::exception const& ex) {
        amrex::Abort(std::string("launch: ")+ex.what()+"!!!!!");
    }
}
template <int MT, typename L>
void launch (int nblocks, std::size_t shared_mem_bytes, gpuStream_t stream, L const& f) noexcept
{
    const auto nthreads_total = MT * std::size_t(nblocks);
    const std::size_t shared_mem_numull = (shared_mem_bytes+sizeof(unsigned long long)-1)
        / sizeof(unsigned long long);
    auto& q = *(stream.queue);
    try {
        q.submit([&] (sycl::handler& h) {
            sycl::local_accessor<unsigned long long>
                shared_data(sycl::range<1>(shared_mem_numull), h);
            h.parallel_for(sycl::nd_range<1>(sycl::range<1>(nthreads_total),
                                             sycl::range<1>(MT)),
            [=] (sycl::nd_item<1> item)
            [[sycl::reqd_work_group_size(MT)]]
            {
                f(Gpu::Handler{&item,shared_data.get_multi_ptr<sycl::access::decorated::yes>().get()});
            });
        });
    } catch (sycl::exception const& ex) {
        amrex::Abort(std::string("launch: ")+ex.what()+"!!!!!");
    }
}
template <int MT, typename L>
void launch (int nblocks, gpuStream_t stream, L const& f) noexcept
{
    const auto nthreads_total = MT * std::size_t(nblocks);
    auto& q = *(stream.queue);
    try {
        q.submit([&] (sycl::handler& h) {
            h.parallel_for(sycl::nd_range<1>(sycl::range<1>(nthreads_total),
                                             sycl::range<1>(MT)),
            [=] (sycl::nd_item<1> item)
            [[sycl::reqd_work_group_size(MT)]]
            {
                f(Gpu::Handler{&item});
            });
        });
    } catch (sycl::exception const& ex) {
        amrex::Abort(std::string("launch: ")+ex.what()+"!!!!!");
    }
}
template <int MT, typename T, typename L>
void launch (T const& n, L const& f) noexcept
{
    const auto ec = Gpu::makeExecutionConfig<MT>(n);
    const auto nthreads_per_block = ec.numThreads.x;
    const auto nthreads_total = std::size_t(nthreads_per_block) * ec.numBlocks.x;
    auto& q = Gpu::Device::streamQueue();
    try {
        q.submit([&] (sycl::handler& h) {
            h.parallel_for(sycl::nd_range<1>(sycl::range<1>(nthreads_total),
                                             sycl::range<1>(nthreads_per_block)),
            [=] (sycl::nd_item<1> item)
            [[sycl::reqd_work_group_size(MT)]]
            {
                for (auto const i : Gpu::Range(n,item.get_global_id(0),item.get_global_range(0))) {
                    f(i);
                }
            });
        });
    } catch (sycl::exception const& ex) {
        amrex::Abort(std::string("launch: ")+ex.what()+"!!!!!");
    }
}
template <int MT, typename T, typename L, typename M=std::enable_if_t<std::is_integral<T>::value> >
void ParallelFor (Gpu::KernelInfo const& info, T n, L const& f) noexcept
{
    const auto ec = Gpu::makeExecutionConfig<MT>(n);
    const auto nthreads_per_block = ec.numThreads.x;
    const auto nthreads_total = std::size_t(nthreads_per_block) * ec.numBlocks.x;
    auto& q = Gpu::Device::streamQueue();
    try {
        if (info.hasReduction()) {
            q.submit([&] (sycl::handler& h) {
                sycl::local_accessor<unsigned long long>
                    shared_data(sycl::range<1>(Gpu::Device::warp_size), h);
                h.parallel_for(sycl::nd_range<1>(sycl::range<1>(nthreads_total),
                                                 sycl::range<1>(nthreads_per_block)),
                [=] (sycl::nd_item<1> item)
                [[sycl::reqd_work_group_size(MT)]]
                {
                    for (std::size_t i = item.get_global_id(0), stride = item.get_global_range(0);
                         i < std::size_t(n); i += stride) {
                        int n_active_threads = amrex::min(std::size_t(n)-i+item.get_local_id(0),
                                                          item.get_local_range(0));
                        detail::call_f_scalar_handler(f, T(i),
                            Gpu::Handler{&item, shared_data.get_multi_ptr<sycl::access::decorated::yes>().get(),
                                         n_active_threads});
                    }
                });
            });
        } else {
            q.submit([&] (sycl::handler& h) {
                h.parallel_for(sycl::nd_range<1>(sycl::range<1>(nthreads_total),
                                                 sycl::range<1>(nthreads_per_block)),
                [=] (sycl::nd_item<1> item)
                [[sycl::reqd_work_group_size(MT)]]
                {
                    for (std::size_t i = item.get_global_id(0), stride = item.get_global_range(0);
                         i < std::size_t(n); i += stride) {
                        detail::call_f_scalar_handler(f, T(i), Gpu::Handler{&item});
                    }
                });
            });
        }
    } catch (sycl::exception const& ex) {
        amrex::Abort(std::string("ParallelFor: ")+ex.what()+"!!!!!");
    }
}
template <int MT, typename L, int dim>
void ParallelFor (Gpu::KernelInfo const& info, BoxND<dim> const& box, L const& f) noexcept
{
    const BoxIndexerND<dim> indexer(box);
    const auto ec = Gpu::makeExecutionConfig<MT>(box.numPts());
    const auto nthreads_per_block = ec.numThreads.x;
    const auto nthreads_total = std::size_t(nthreads_per_block) * ec.numBlocks.x;
    auto& q = Gpu::Device::streamQueue();
    try {
        if (info.hasReduction()) {
            q.submit([&] (sycl::handler& h) {
                sycl::local_accessor<unsigned long long>
                    shared_data(sycl::range<1>(Gpu::Device::warp_size), h);
                h.parallel_for(sycl::nd_range<1>(sycl::range<1>(nthreads_total),
                                                 sycl::range<1>(nthreads_per_block)),
                [=] (sycl::nd_item<1> item)
                [[sycl::reqd_work_group_size(MT)]]
                [[sycl::reqd_sub_group_size(Gpu::Device::warp_size)]]
                {
                    for (std::uint64_t icell = item.get_global_id(0), stride = item.get_global_range(0);
                         icell < indexer.numPts(); icell += stride) {
                        auto iv = indexer.intVect(icell);
                        int n_active_threads = amrex::min(indexer.numPts()-icell+std::uint64_t(item.get_local_id(0)),
                                                          std::uint64_t(item.get_local_range(0)));
                        detail::call_f_intvect_handler(f, iv,
                            Gpu::Handler{&item, shared_data.get_multi_ptr<sycl::access::decorated::yes>().get(),
                                         n_active_threads});
                    }
                });
            });
        } else {
            q.submit([&] (sycl::handler& h) {
                h.parallel_for(sycl::nd_range<1>(sycl::range<1>(nthreads_total),
                                                 sycl::range<1>(nthreads_per_block)),
                [=] (sycl::nd_item<1> item)
                [[sycl::reqd_work_group_size(MT)]]
                [[sycl::reqd_sub_group_size(Gpu::Device::warp_size)]]
                {
                    for (std::uint64_t icell = item.get_global_id(0), stride = item.get_global_range(0);
                         icell < indexer.numPts(); icell += stride) {
                        auto iv = indexer.intVect(icell);
                        detail::call_f_intvect(f, iv);
                    }
                });
            });
        }
    } catch (sycl::exception const& ex) {
        amrex::Abort(std::string("ParallelFor: ")+ex.what()+"!!!!!");
    }
}
template <int MT, typename T, typename L, int dim, typename M=std::enable_if_t<std::is_integral<T>::value> >
void ParallelFor (Gpu::KernelInfo const& info, BoxND<dim> const& box, T ncomp, L const& f) noexcept
{
    const BoxIndexerND<dim> indexer(box);
    const auto ec = Gpu::makeExecutionConfig<MT>(box.numPts());
    const auto nthreads_per_block = ec.numThreads.x;
    const auto nthreads_total = std::size_t(nthreads_per_block) * ec.numBlocks.x;
    auto& q = Gpu::Device::streamQueue();
    try {
        if (info.hasReduction()) {
            q.submit([&] (sycl::handler& h) {
                sycl::local_accessor<unsigned long long>
                    shared_data(sycl::range<1>(Gpu::Device::warp_size), h);
                h.parallel_for(sycl::nd_range<1>(sycl::range<1>(nthreads_total),
                                                 sycl::range<1>(nthreads_per_block)),
                [=] (sycl::nd_item<1> item)
                [[sycl::reqd_work_group_size(MT)]]
                [[sycl::reqd_sub_group_size(Gpu::Device::warp_size)]]
                {
                    for (std::uint64_t icell = item.get_global_id(0), stride = item.get_global_range(0);
                         icell < indexer.numPts(); icell += stride) {
                        auto iv = indexer.intVect(icell);
                        int n_active_threads = amrex::min(indexer.numPts()-icell+std::uint64_t(item.get_local_id(0)),
                                                          std::uint64_t(item.get_local_range(0)));
                        detail::call_f_intvect_ncomp_handler(f, iv, ncomp,
                            Gpu::Handler{&item, shared_data.get_multi_ptr<sycl::access::decorated::yes>().get(),
                                         n_active_threads});
                    }
                });
            });
        } else {
            q.submit([&] (sycl::handler& h) {
                h.parallel_for(sycl::nd_range<1>(sycl::range<1>(nthreads_total),
                                                 sycl::range<1>(nthreads_per_block)),
                [=] (sycl::nd_item<1> item)
                [[sycl::reqd_work_group_size(MT)]]
                [[sycl::reqd_sub_group_size(Gpu::Device::warp_size)]]
                {
                    for (std::uint64_t icell = item.get_global_id(0), stride = item.get_global_range(0);
                         icell < indexer.numPts(); icell += stride) {
                        auto iv = indexer.intVect(icell);
                        detail::call_f_intvect_ncomp(f, iv, ncomp);
                    }
                });
            });
        }
    } catch (sycl::exception const& ex) {
        amrex::Abort(std::string("ParallelFor: ")+ex.what()+"!!!!!");
    }
}
template <typename T, typename L, typename M=std::enable_if_t<std::is_integral<T>::value> >
void ParallelForRNG (T n, L const& f) noexcept
{
    const auto ec = Gpu::ExecutionConfig(n);
    const auto nthreads_per_block = ec.numThreads.x;
    const auto nthreads_total = std::size_t(nthreads_per_block) *
        amrex::min(ec.numBlocks.x,Gpu::Device::maxBlocksPerLaunch());
    auto& q = Gpu::Device::streamQueue();
    auto& engdescr = *(getRandEngineDescriptor());
    try {
        q.submit([&] (sycl::handler& h) {
            auto engine_acc = engdescr.get_access(h);
            h.parallel_for(sycl::nd_range<1>(sycl::range<1>(nthreads_total),
                                             sycl::range<1>(nthreads_per_block)),
            [=] (sycl::nd_item<1> item)
            [[sycl::reqd_work_group_size(AMREX_GPU_MAX_THREADS)]]
            [[sycl::reqd_sub_group_size(Gpu::Device::warp_size)]]
            {
                auto const tid = item.get_global_id(0);
                auto engine = engine_acc.load(tid);
                RandomEngine rand_eng{&engine};
                for (std::size_t i = tid, stride = item.get_global_range(0); i < std::size_t(n); i += stride) {
                    f(T(i), rand_eng);
                }
                engine_acc.store(engine, tid);
            });
        });
    } catch (sycl::exception const& ex) {
        amrex::Abort(std::string("ParallelFor: ")+ex.what()+"!!!!!");
    }
}
template <typename L, int dim>
void ParallelForRNG (BoxND<dim> const& box, L const& f) noexcept
{
    const BoxIndexerND<dim> indexer(box);
    const auto ec = Gpu::ExecutionConfig(box.numPts());
    const auto nthreads_per_block = ec.numThreads.x;
    const auto nthreads_total = std::size_t(nthreads_per_block) *
        amrex::min(ec.numBlocks.x,Gpu::Device::maxBlocksPerLaunch());
    auto& q = Gpu::Device::streamQueue();
    auto& engdescr = *(getRandEngineDescriptor());
    try {
        q.submit([&] (sycl::handler& h) {
            auto engine_acc = engdescr.get_access(h);
            h.parallel_for(sycl::nd_range<1>(sycl::range<1>(nthreads_total),
                                             sycl::range<1>(nthreads_per_block)),
            [=] (sycl::nd_item<1> item)
            [[sycl::reqd_work_group_size(AMREX_GPU_MAX_THREADS)]]
            [[sycl::reqd_sub_group_size(Gpu::Device::warp_size)]]
            {
                auto const tid = item.get_global_id(0);
                auto engine = engine_acc.load(tid);
                RandomEngine rand_eng{&engine};
                for (std::uint64_t icell = tid, stride = item.get_global_range(0);
                     icell < indexer.numPts(); icell += stride) {
                    auto iv = indexer.intVect(icell);
                    detail::call_f_intvect_engine(f,iv,rand_eng);
                }
                engine_acc.store(engine, tid);
            });
        });
    } catch (sycl::exception const& ex) {
        amrex::Abort(std::string("ParallelFor: ")+ex.what()+"!!!!!");
    }
}
template <typename T, typename L, int dim, typename M=std::enable_if_t<std::is_integral<T>::value> >
void ParallelForRNG (BoxND<dim> const& box, T ncomp, L const& f) noexcept
{
    const BoxIndexerND<dim> indexer(box);
    const auto ec = Gpu::ExecutionConfig(box.numPts());
    const auto nthreads_per_block = ec.numThreads.x;
    const auto nthreads_total = std::size_t(nthreads_per_block) *
        amrex::min(ec.numBlocks.x,Gpu::Device::maxBlocksPerLaunch());
    auto& q = Gpu::Device::streamQueue();
    auto& engdescr = *(getRandEngineDescriptor());
    try {
        q.submit([&] (sycl::handler& h) {
            auto engine_acc = engdescr.get_access(h);
            h.parallel_for(sycl::nd_range<1>(sycl::range<1>(nthreads_total),
                                             sycl::range<1>(nthreads_per_block)),
            [=] (sycl::nd_item<1> item)
            [[sycl::reqd_work_group_size(AMREX_GPU_MAX_THREADS)]]
            [[sycl::reqd_sub_group_size(Gpu::Device::warp_size)]]
            {
                auto const tid = item.get_global_id(0);
                auto engine = engine_acc.load(tid);
                RandomEngine rand_eng{&engine};
                for (std::uint64_t icell = tid, stride = item.get_global_range(0);
                     icell < indexer.numPts(); icell += stride) {
                    auto iv = indexer.intVect(icell);
                    detail::call_f_intvect_ncomp_engine(f,iv,ncomp,rand_eng);
                }
                engine_acc.store(engine, tid);
            });
        });
    } catch (sycl::exception const& ex) {
        amrex::Abort(std::string("ParallelFor: ")+ex.what()+"!!!!!");
    }
}
template <int MT, typename L1, typename L2, int dim>
void ParallelFor (Gpu::KernelInfo const& , BoxND<dim> const& box1, BoxND<dim> const& box2, L1&& f1, L2&& f2) noexcept
{
    const BoxIndexerND<dim> indexer1(box1);
    const BoxIndexerND<dim> indexer2(box2);
    const auto ec = Gpu::makeExecutionConfig<MT>(std::max(box1.numPts(), box2.numPts()));
    const auto nthreads_per_block = ec.numThreads.x;
    const auto nthreads_total = std::size_t(nthreads_per_block) * ec.numBlocks.x;
    auto& q = Gpu::Device::streamQueue();
    try {
        q.submit([&] (sycl::handler& h) {
            h.parallel_for(sycl::nd_range<1>(sycl::range<1>(nthreads_total),
                                             sycl::range<1>(nthreads_per_block)),
            [=] (sycl::nd_item<1> item)
            [[sycl::reqd_work_group_size(MT)]]
            [[sycl::reqd_sub_group_size(Gpu::Device::warp_size)]]
            {
                auto const ncells = std::max(indexer1.numPts(), indexer2.numPts());
                for (std::uint64_t icell = item.get_global_id(0), stride = item.get_global_range(0);
                     icell < ncells; icell += stride) {
                    if (icell < indexer1.numPts()) {
                        auto iv = indexer1.intVect(icell);
                        detail::call_f_intvect(f1,iv);
                    }
                    if (icell < indexer2.numPts()) {
                        auto iv = indexer2.intVect(icell);
                        detail::call_f_intvect(f2,iv);
                    }
                }
            });
        });
    } catch (sycl::exception const& ex) {
        amrex::Abort(std::string("ParallelFor: ")+ex.what()+"!!!!!");
    }
}
template <int MT, typename L1, typename L2, typename L3, int dim>
void ParallelFor (Gpu::KernelInfo const& ,
                  BoxND<dim> const& box1, BoxND<dim> const& box2, BoxND<dim> const& box3,
                  L1&& f1, L2&& f2, L3&& f3) noexcept
{
    const BoxIndexerND<dim> indexer1(box1);
    const BoxIndexerND<dim> indexer2(box2);
    const BoxIndexerND<dim> indexer3(box3);
    const auto ec = Gpu::makeExecutionConfig<MT>(std::max({box1.numPts(),box2.numPts(),box3.numPts()}));
    const auto nthreads_per_block = ec.numThreads.x;
    const auto nthreads_total = std::size_t(nthreads_per_block) * ec.numBlocks.x;
    auto& q = Gpu::Device::streamQueue();
    try {
        q.submit([&] (sycl::handler& h) {
            h.parallel_for(sycl::nd_range<1>(sycl::range<1>(nthreads_total),
                                             sycl::range<1>(nthreads_per_block)),
            [=] (sycl::nd_item<1> item)
            [[sycl::reqd_work_group_size(MT)]]
            [[sycl::reqd_sub_group_size(Gpu::Device::warp_size)]]
            {
                auto const ncells = std::max({indexer1.numPts(), indexer2.numPts(), indexer3.numPts()});
                for (std::uint64_t icell = item.get_global_id(0), stride = item.get_global_range(0);
                     icell < ncells; icell += stride) {
                    if (icell < indexer1.numPts()) {
                        auto iv = indexer1.intVect(icell);
                        detail::call_f_intvect(f1,iv);
                    }
                    if (icell < indexer2.numPts()) {
                        auto iv = indexer2.intVect(icell);
                        detail::call_f_intvect(f2,iv);
                    }
                    if (icell < indexer3.numPts()) {
                        auto iv = indexer3.intVect(icell);
                        detail::call_f_intvect(f3,iv);
                    }
                }
            });
        });
    } catch (sycl::exception const& ex) {
        amrex::Abort(std::string("ParallelFor: ")+ex.what()+"!!!!!");
    }
}
template <int MT, typename T1, typename T2, typename L1, typename L2, int dim,
          typename M1=std::enable_if_t<std::is_integral<T1>::value>,
          typename M2=std::enable_if_t<std::is_integral<T2>::value> >
void ParallelFor (Gpu::KernelInfo const& ,
                  BoxND<dim> const& box1, T1 ncomp1, L1&& f1,
                  BoxND<dim> const& box2, T2 ncomp2, L2&& f2) noexcept
{
    const BoxIndexerND<dim> indexer1(box1);
    const BoxIndexerND<dim> indexer2(box2);
    const auto ec = Gpu::makeExecutionConfig<MT>(std::max(box1.numPts(),box2.numPts()));
    const auto nthreads_per_block = ec.numThreads.x;
    const auto nthreads_total = std::size_t(nthreads_per_block) * ec.numBlocks.x;
    auto& q = Gpu::Device::streamQueue();
    try {
        q.submit([&] (sycl::handler& h) {
            h.parallel_for(sycl::nd_range<1>(sycl::range<1>(nthreads_total),
                                             sycl::range<1>(nthreads_per_block)),
            [=] (sycl::nd_item<1> item)
            [[sycl::reqd_work_group_size(MT)]]
            [[sycl::reqd_sub_group_size(Gpu::Device::warp_size)]]
            {
                auto const ncells = std::max(indexer1.numPts(), indexer2.numPts());
                for (std::uint64_t icell = item.get_global_id(0), stride = item.get_global_range(0);
                     icell < ncells; icell += stride) {
                    if (icell < indexer1.numPts()) {
                        auto iv = indexer1.intVect(icell);
                        detail::call_f_intvect_ncomp(f1,iv,ncomp1);
                    }
                    if (icell < indexer2.numPts()) {
                        auto iv = indexer2.intVect(icell);
                        detail::call_f_intvect_ncomp(f2,iv,ncomp2);
                    }
                }
            });
        });
    } catch (sycl::exception const& ex) {
        amrex::Abort(std::string("ParallelFor: ")+ex.what()+"!!!!!");
    }
}
template <int MT, typename T1, typename T2, typename T3, typename L1, typename L2, typename L3, int dim,
          typename M1=std::enable_if_t<std::is_integral<T1>::value>,
          typename M2=std::enable_if_t<std::is_integral<T2>::value>,
          typename M3=std::enable_if_t<std::is_integral<T3>::value> >
void ParallelFor (Gpu::KernelInfo const& ,
                  BoxND<dim> const& box1, T1 ncomp1, L1&& f1,
                  BoxND<dim> const& box2, T2 ncomp2, L2&& f2,
                  BoxND<dim> const& box3, T3 ncomp3, L3&& f3) noexcept
{
    const BoxIndexerND<dim> indexer1(box1);
    const BoxIndexerND<dim> indexer2(box2);
    const BoxIndexerND<dim> indexer3(box3);
    const auto ec = Gpu::makeExecutionConfig<MT>(std::max({box1.numPts(),box2.numPts(),box3.numPts()}));
    const auto nthreads_per_block = ec.numThreads.x;
    const auto nthreads_total = std::size_t(nthreads_per_block) * ec.numBlocks.x;
    auto& q = Gpu::Device::streamQueue();
    try {
        q.submit([&] (sycl::handler& h) {
            h.parallel_for(sycl::nd_range<1>(sycl::range<1>(nthreads_total),
                                             sycl::range<1>(nthreads_per_block)),
            [=] (sycl::nd_item<1> item)
            [[sycl::reqd_work_group_size(MT)]]
            [[sycl::reqd_sub_group_size(Gpu::Device::warp_size)]]
            {
                auto const ncells = std::max({indexer1.numPts(), indexer2.numPts(), indexer3.numPts()});
                for (std::uint64_t icell = item.get_global_id(0), stride = item.get_global_range(0);
                     icell < ncells; icell += stride) {
                    if (icell < indexer1.numPts()) {
                        auto iv = indexer1.intVect(icell);
                        detail::call_f_intvect_ncomp(f1,iv,ncomp1);
                    }
                    if (icell < indexer2.numPts()) {
                        auto iv = indexer2.intVect(icell);
                        detail::call_f_intvect_ncomp(f2,iv,ncomp2);
                    }
                    if (icell < indexer3.numPts()) {
                        auto iv = indexer3.intVect(icell);
                        detail::call_f_intvect_ncomp(f3,iv,ncomp3);
                    }
                }
            });
        });
    } catch (sycl::exception const& ex) {
        amrex::Abort(std::string("ParallelFor: ")+ex.what()+"!!!!!");
    }
}
template <typename L>

template <int MT, typename L>

template <int MT, typename L>

template <typename L>
void launch (int nblocks, int nthreads_per_block, std::size_t shared_mem_bytes,
             gpuStream_t stream, L&& f) noexcept
{
    AMREX_ASSERT(nthreads_per_block <= AMREX_GPU_MAX_THREADS);

template <typename L>
void launch (int nblocks, int nthreads_per_block, gpuStream_t stream, L&& f) noexcept
{
    launch(nblocks, nthreads_per_block, 0, stream, std::forward<L>(f));
}
template <int MT, typename T, typename L, std::enable_if_t<std::is_integral_v<T>, int> FOO = 0>
    static_assert(sizeof(T) >= 2);
    const auto& nec = Gpu::makeNExecutionConfigs<MT>(n);
    for (auto const& ec : nec) {
        const T start_idx = T(ec.start_idx);
        const T nleft = n - start_idx;
        auto tid = T(MT)*T(blockIdx.x)+T(threadIdx.x);
template <int MT, int dim, typename L>
    if (box.isEmpty()) { return; }
    const auto& nec = Gpu::makeNExecutionConfigs<MT>(box);
    const auto type = box.ixType();
    for (auto const& ec : nec) {
        const auto start_idx = std::uint64_t(ec.start_idx);
        auto icell = std::uint64_t(MT)*blockIdx.x+threadIdx.x + start_idx;
        if (icell < indexer.numPts()) {
            auto iv = indexer.intVect(icell);
template <int MT, typename T, typename L, typename M=std::enable_if_t<std::is_integral<T>::value> >
std::enable_if_t<MaybeDeviceRunnable<L>::value>
    static_assert(sizeof(T) >= 2);
    const auto& nec = Gpu::makeNExecutionConfigs<MT>(n);
    for (auto const& ec : nec) {
        const T start_idx = T(ec.start_idx);
        const T nleft = n - start_idx;
        auto tid = T(MT)*T(blockIdx.x)+T(threadIdx.x);
        (std::uint64_t)MT)));
template <int MT, typename L, int dim>
std::enable_if_t<MaybeDeviceRunnable<L>::value>
    const auto& nec = Gpu::makeNExecutionConfigs<MT>(box);
    for (auto const& ec : nec) {
        const auto start_idx = std::uint64_t(ec.start_idx);
        auto icell = std::uint64_t(MT)*blockIdx.x+threadIdx.x + start_idx;
        if (icell < indexer.numPts()) {
            auto iv = indexer.intVect(icell);
        (std::uint64_t)MT)));
template <int MT, typename T, typename L, int dim, typename M=std::enable_if_t<std::is_integral<T>::value> >
std::enable_if_t<MaybeDeviceRunnable<L>::value>
    const auto& nec = Gpu::makeNExecutionConfigs<MT>(box);
    for (auto const& ec : nec) {
        const auto start_idx = std::uint64_t(ec.start_idx);
        auto icell = std::uint64_t(MT)*blockIdx.x+threadIdx.x + start_idx;
        if (icell < indexer.numPts()) {
            auto iv = indexer.intVect(icell);
        (std::uint64_t)MT)));
template <typename T, typename L, typename M=std::enable_if_t<std::is_integral<T>::value> >
std::enable_if_t<MaybeDeviceRunnable<L>::value>
        amrex::min(ec.numBlocks.x, Gpu::Device::maxBlocksPerLaunch()),
    Long tid = Long(AMREX_GPU_MAX_THREADS)*blockIdx.x+threadIdx.x;
    for (Long i = tid, stride = Long(AMREX_GPU_MAX_THREADS)*gridDim.x; i < Long(n); i += stride) {
template <typename L, int dim>
std::enable_if_t<MaybeDeviceRunnable<L>::value>
        amrex::min(ec.numBlocks.x, Gpu::Device::maxBlocksPerLaunch()),
    auto const tid = std::uint64_t(AMREX_GPU_MAX_THREADS)*blockIdx.x+threadIdx.x;
    for (std::uint64_t icell = tid, stride = std::uint64_t(AMREX_GPU_MAX_THREADS)*gridDim.x;
         icell < indexer.numPts(); icell += stride) {
        auto iv = indexer.intVect(icell);
        detail::call_f_intvect_engine(f,iv,rand_eng);
        engine_acc.store(engine, tid);
template <typename T, typename L, int dim, typename M=std::enable_if_t<std::is_integral<T>::value> >
std::enable_if_t<MaybeDeviceRunnable<L>::value>
        amrex::min(ec.numBlocks.x, Gpu::Device::maxBlocksPerLaunch()),
    auto const tid = std::uint64_t(AMREX_GPU_MAX_THREADS)*blockIdx.x+threadIdx.x;
    for (std::uint64_t icell = tid, stride = std::uint64_t(AMREX_GPU_MAX_THREADS)*gridDim.x;
         icell < indexer.numPts(); icell += stride) {
        auto iv = indexer.intVect(icell);
template <int MT, typename L1, typename L2, int dim>
std::enable_if_t<MaybeDeviceRunnable<L1>::value && MaybeDeviceRunnable<L2>::value>
    const auto ec = Gpu::makeExecutionConfig<MT>(std::max(box1.numPts(),box2.numPts()));
    for (std::uint64_t icell = std::uint64_t(MT)*blockIdx.x+threadIdx.x, stride = std::uint64_t(MT)*gridDim.x;
         icell < ncells; icell += stride) {
        if (icell < indexer1.numPts()) {
            auto iv = indexer1.intVect(icell);
        if (icell < indexer2.numPts()) {
            auto iv = indexer2.intVect(icell);
template <int MT, typename L1, typename L2, typename L3, int dim>
std::enable_if_t<MaybeDeviceRunnable<L1>::value && MaybeDeviceRunnable<L2>::value && MaybeDeviceRunnable<L3>::value>
    L1&& f1, L2&& f2, L3&& f3) noexcept
    const auto ec = Gpu::makeExecutionConfig<MT>(std::max({box1.numPts(),box2.numPts(),box3.numPts()}));
    for (std::uint64_t icell = std::uint64_t(MT)*blockIdx.x+threadIdx.x, stride = std::uint64_t(MT)*gridDim.x;
         icell < ncells; icell += stride) {
        if (icell < indexer1.numPts()) {
            auto iv = indexer1.intVect(icell);
        if (icell < indexer2.numPts()) {
            auto iv = indexer2.intVect(icell);
        if (icell < indexer3.numPts()) {
            auto iv = indexer3.intVect(icell);
template <int MT, typename T1, typename T2, typename L1, typename L2, int dim,
          typename M1=std::enable_if_t<std::is_integral<T1>::value>,
          typename M2=std::enable_if_t<std::is_integral<T2>::value> >
std::enable_if_t<MaybeDeviceRunnable<L1>::value && MaybeDeviceRunnable<L2>::value>
    BoxND<dim> const& box2, T2 ncomp2, L2&& f2) noexcept
    const auto ec = Gpu::makeExecutionConfig<MT>(std::max(box1.numPts(),box2.numPts()));
    for (std::uint64_t icell = std::uint64_t(MT)*blockIdx.x+threadIdx.x, stride = std::uint64_t(MT)*gridDim.x;
         icell < ncells; icell += stride) {
        if (icell < indexer1.numPts()) {
            auto iv = indexer1.intVect(icell);
        if (icell < indexer2.numPts()) {
            auto iv = indexer2.intVect(icell);
template <int MT, typename T1, typename T2, typename T3, typename L1, typename L2, typename L3, int dim,
          typename M1=std::enable_if_t<std::is_integral<T1>::value>,
          typename M2=std::enable_if_t<std::is_integral<T2>::value>,
          typename M3=std::enable_if_t<std::is_integral<T3>::value> >
std::enable_if_t<MaybeDeviceRunnable<L1>::value && MaybeDeviceRunnable<L2>::value && MaybeDeviceRunnable<L3>::value>
    BoxND<dim> const& box3, T3 ncomp3, L3&& f3) noexcept
    const auto ec = Gpu::makeExecutionConfig<MT>(std::max({box1.numPts(),box2.numPts(),box3.numPts()}));
    for (std::uint64_t icell = std::uint64_t(MT)*blockIdx.x+threadIdx.x, stride = std::uint64_t(MT)*gridDim.x;
         icell < ncells; icell += stride) {
        if (icell < indexer1.numPts()) {
            auto iv = indexer1.intVect(icell);
        if (icell < indexer2.numPts()) {
            auto iv = indexer2.intVect(icell);
        if (icell < indexer3.numPts()) {
            auto iv = indexer3.intVect(icell);
template <typename L>

template <typename T, typename L>
void launch (T const& n, L&& f) noexcept
{
    launch<AMREX_GPU_MAX_THREADS>(n, std::forward<L>(f));
}
template <typename T, typename L, typename M=std::enable_if_t<std::is_integral<T>::value> >
std::enable_if_t<MaybeDeviceRunnable<L>::value>
ParallelFor (Gpu::KernelInfo const& info, T n, L&& f) noexcept
{ ParallelFor<AMREX_GPU_MAX_THREADS>(info, n, std::forward<L>(f)); }

template <typename L, int dim>
std::enable_if_t<MaybeDeviceRunnable<L>::value>
ParallelFor (Gpu::KernelInfo const& info, BoxND<dim> const& box, L&& f) noexcept
{ ParallelFor<AMREX_GPU_MAX_THREADS>(info, box, std::forward<L>(f)); }

template <typename T, typename L, int dim, typename M=std::enable_if_t<std::is_integral<T>::value> >
std::enable_if_t<MaybeDeviceRunnable<L>::value>
ParallelFor (Gpu::KernelInfo const& info, BoxND<dim> const& box, T ncomp, L&& f) noexcept
{ ParallelFor<AMREX_GPU_MAX_THREADS>(info, box, ncomp, std::forward<L>(f)); }

template <typename L1, typename L2, int dim>
std::enable_if_t<MaybeDeviceRunnable<L1>::value && MaybeDeviceRunnable<L2>::value>
ParallelFor (Gpu::KernelInfo const& info, BoxND<dim> const& box1, BoxND<dim> const& box2, L1&& f1, L2&& f2) noexcept
{ ParallelFor<AMREX_GPU_MAX_THREADS>(info, box1, box2, std::forward<L1>(f1), std::forward<L2>(f2)); }

template <typename L1, typename L2, typename L3, int dim>
std::enable_if_t<MaybeDeviceRunnable<L1>::value && MaybeDeviceRunnable<L2>::value && MaybeDeviceRunnable<L3>::value>
ParallelFor (Gpu::KernelInfo const& info, BoxND<dim> const& box1, BoxND<dim> const& box2, BoxND<dim> const& box3,
             L1&& f1, L2&& f2, L3&& f3) noexcept
{
    ParallelFor<AMREX_GPU_MAX_THREADS>(info, box1, box2, box3, std::forward<L1>(f1),
                                       std::forward<L2>(f2), std::forward<L3>(f3));
}

template <typename T1, typename T2, typename L1, typename L2, int dim, typename M1=std::enable_if_t<std::is_integral<T1>::value>, typename M2=std::enable_if_t<std::is_integral<T2>::value> >
std::enable_if_t<MaybeDeviceRunnable<L1>::value && MaybeDeviceRunnable<L2>::value>
ParallelFor (Gpu::KernelInfo const& info, BoxND<dim> const& box1, T1 ncomp1, L1&& f1,
             BoxND<dim> const& box2, T2 ncomp2, L2&& f2) noexcept
{
    ParallelFor<AMREX_GPU_MAX_THREADS>(info, box1, ncomp1, std::forward<L1>(f1),
                                       box2, ncomp2, std::forward<L2>(f2));
}

template <typename T1, typename T2, typename T3, typename L1, typename L2, typename L3, int dim, typename M1=std::enable_if_t<std::is_integral<T1>::value>, typename M2=std::enable_if_t<std::is_integral<T2>::value>, typename M3=std::enable_if_t<std::is_integral<T3>::value> >
std::enable_if_t<MaybeDeviceRunnable<L1>::value && MaybeDeviceRunnable<L2>::value && MaybeDeviceRunnable<L3>::value>
ParallelFor (Gpu::KernelInfo const& info, BoxND<dim> const& box1, T1 ncomp1, L1&& f1,
             BoxND<dim> const& box2, T2 ncomp2, L2&& f2,
             BoxND<dim> const& box3, T3 ncomp3, L3&& f3) noexcept
{
    ParallelFor<AMREX_GPU_MAX_THREADS>(info, box1, ncomp1, std::forward<L1>(f1),
                                       box2, ncomp2, std::forward<L2>(f2),
                                       box3, ncomp3, std::forward<L3>(f3));
}
template <typename T, typename L, typename M=std::enable_if_t<std::is_integral<T>::value> >
void For (Gpu::KernelInfo const& info, T n, L&& f) noexcept
{ ParallelFor<AMREX_GPU_MAX_THREADS>(info, n, std::forward<L>(f)); }

template <int MT, typename T, typename L, typename M=std::enable_if_t<std::is_integral<T>::value> >
void For (Gpu::KernelInfo const& info, T n, L&& f) noexcept
{ ParallelFor<MT>(info, n, std::forward<L>(f)); }

template <typename L, int dim>
void For (Gpu::KernelInfo const& info, BoxND<dim> const& box, L&& f) noexcept
{ ParallelFor<AMREX_GPU_MAX_THREADS>(info, box, std::forward<L>(f)); }

template <int MT, typename L, int dim>
void For (Gpu::KernelInfo const& info, BoxND<dim> const& box, L&& f) noexcept
{ ParallelFor<MT>(info, box, std::forward<L>(f)); }

template <typename T, typename L, int dim, typename M=std::enable_if_t<std::is_integral<T>::value> >
void For (Gpu::KernelInfo const& info, BoxND<dim> const& box, T ncomp, L&& f) noexcept
{ ParallelFor<AMREX_GPU_MAX_THREADS>(info, box, ncomp, std::forward<L>(f)); }

template <int MT, typename T, typename L, int dim, typename M=std::enable_if_t<std::is_integral<T>::value> >
void For (Gpu::KernelInfo const& info, BoxND<dim> const& box, T ncomp, L&& f) noexcept
{ ParallelFor<MT>(info, box, ncomp, std::forward<L>(f)); }

template <typename L1, typename L2, int dim>
void For (Gpu::KernelInfo const& info,
          BoxND<dim> const& box1, BoxND<dim> const& box2, L1&& f1, L2&& f2) noexcept
{ ParallelFor<AMREX_GPU_MAX_THREADS>(info, box1, box2, std::forward<L1>(f1), std::forward<L2>(f2)); }

template <int MT, typename L1, typename L2, int dim>
void For (Gpu::KernelInfo const& info,
          BoxND<dim> const& box1, BoxND<dim> const& box2, L1&& f1, L2&& f2) noexcept
{ ParallelFor<MT>(info, box1, box2, std::forward<L1>(f1), std::forward<L2>(f2)); }

template <typename L1, typename L2, typename L3, int dim>
void For (Gpu::KernelInfo const& info,
          BoxND<dim> const& box1, BoxND<dim> const& box2, BoxND<dim> const& box3,
          L1&& f1, L2&& f2, L3&& f3) noexcept
{ ParallelFor<AMREX_GPU_MAX_THREADS>(info, box1, box2, box3, std::forward<L1>(f1), std::forward<L2>(f2), std::forward<L3>(f3)); }

template <int MT, typename L1, typename L2, typename L3, int dim>
void For (Gpu::KernelInfo const& info,
          BoxND<dim> const& box1, BoxND<dim> const& box2, BoxND<dim> const& box3,
          L1&& f1, L2&& f2, L3&& f3) noexcept
{ ParallelFor<MT>(info, box1, box2, box3, std::forward<L1>(f1), std::forward<L2>(f2), std::forward<L3>(f3)); }

template <typename T1, typename T2, typename L1, typename L2, int dim, typename M1=std::enable_if_t<std::is_integral<T1>::value>, typename M2=std::enable_if_t<std::is_integral<T2>::value> >
void For (Gpu::KernelInfo const& info,
          BoxND<dim> const& box1, T1 ncomp1, L1&& f1,
          BoxND<dim> const& box2, T2 ncomp2, L2&& f2) noexcept
{ ParallelFor<AMREX_GPU_MAX_THREADS>(info, box1, ncomp1, std::forward<L1>(f1), box2, ncomp2, std::forward<L2>(f2)); }

template <int MT, typename T1, typename T2, typename L1, typename L2, int dim, typename M1=std::enable_if_t<std::is_integral<T1>::value>, typename M2=std::enable_if_t<std::is_integral<T2>::value> >
void For (Gpu::KernelInfo const& info,
          BoxND<dim> const& box1, T1 ncomp1, L1&& f1,
          BoxND<dim> const& box2, T2 ncomp2, L2&& f2) noexcept
{ ParallelFor<MT>(info, box1, ncomp1, std::forward<L1>(f1), box2, ncomp2, std::forward<L2>(f2)); }

template <typename T1, typename T2, typename T3, typename L1, typename L2, typename L3, int dim, typename M1=std::enable_if_t<std::is_integral<T1>::value>, typename M2=std::enable_if_t<std::is_integral<T2>::value>, typename M3=std::enable_if_t<std::is_integral<T3>::value> >
void For (Gpu::KernelInfo const& info,
          BoxND<dim> const& box1, T1 ncomp1, L1&& f1,
          BoxND<dim> const& box2, T2 ncomp2, L2&& f2,
          BoxND<dim> const& box3, T3 ncomp3, L3&& f3) noexcept
{
    ParallelFor<AMREX_GPU_MAX_THREADS>(info,
                                       box1, ncomp1, std::forward<L1>(f1),
                                       box2, ncomp2, std::forward<L2>(f2),
                                       box3, ncomp3, std::forward<L3>(f3));
}

template <int MT, typename T1, typename T2, typename T3, typename L1, typename L2, typename L3, int dim, typename M1=std::enable_if_t<std::is_integral<T1>::value>, typename M2=std::enable_if_t<std::is_integral<T2>::value>, typename M3=std::enable_if_t<std::is_integral<T3>::value> >
void For (Gpu::KernelInfo const& info,
          BoxND<dim> const& box1, T1 ncomp1, L1&& f1,
          BoxND<dim> const& box2, T2 ncomp2, L2&& f2,
          BoxND<dim> const& box3, T3 ncomp3, L3&& f3) noexcept
{
    ParallelFor<MT>(info,
                    box1, ncomp1, std::forward<L1>(f1),
                    box2, ncomp2, std::forward<L2>(f2),
                    box3, ncomp3, std::forward<L3>(f3));
}
template <typename T, typename L, typename M=std::enable_if_t<std::is_integral<T>::value> >
void ParallelFor (T n, L&& f) noexcept
{ ParallelFor<AMREX_GPU_MAX_THREADS>(Gpu::KernelInfo{}, n, std::forward<L>(f)); }

template <int MT, typename T, typename L, typename M=std::enable_if_t<std::is_integral<T>::value> >
void ParallelFor (T n, L&& f) noexcept
{ ParallelFor<MT>(Gpu::KernelInfo{}, n, std::forward<L>(f)); }
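// A minimal usage sketch (not part of this header), assuming a device pointer 'p'
// to amrex::Real and an element count 'N':
//
//     amrex::ParallelFor(N, [=] AMREX_GPU_DEVICE (int i) noexcept
//     {
//         p[i] = 0.0;
//     });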
template <typename L, int dim>
void ParallelFor (BoxND<dim> const& box, L&& f) noexcept
{ ParallelFor<AMREX_GPU_MAX_THREADS>(Gpu::KernelInfo{}, box, std::forward<L>(f)); }

template <int MT, typename L, int dim>
void ParallelFor (BoxND<dim> const& box, L&& f) noexcept
{ ParallelFor<MT>(Gpu::KernelInfo{}, box, std::forward<L>(f)); }

template <typename T, typename L, int dim, typename M=std::enable_if_t<std::is_integral<T>::value> >
void ParallelFor (BoxND<dim> const& box, T ncomp, L&& f) noexcept
{ ParallelFor<AMREX_GPU_MAX_THREADS>(Gpu::KernelInfo{}, box, ncomp, std::forward<L>(f)); }

template <int MT, typename T, typename L, int dim, typename M=std::enable_if_t<std::is_integral<T>::value> >
void ParallelFor (BoxND<dim> const& box, T ncomp, L&& f) noexcept
{ ParallelFor<MT>(Gpu::KernelInfo{}, box, ncomp, std::forward<L>(f)); }
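// A minimal usage sketch (not part of this header), assuming an
// amrex::Array4<amrex::Real> 'a' (e.g. obtained from a FArrayBox) and a
// component count 'ncomp':
//
//     amrex::ParallelFor(bx, ncomp, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept
//     {
//         a(i,j,k,n) = 0.0;
//     });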
template <typename L1, typename L2, int dim>
void ParallelFor (BoxND<dim> const& box1, BoxND<dim> const& box2, L1&& f1, L2&& f2) noexcept
{ ParallelFor<AMREX_GPU_MAX_THREADS>(Gpu::KernelInfo{}, box1, box2, std::forward<L1>(f1), std::forward<L2>(f2)); }

template <int MT, typename L1, typename L2, int dim>
void ParallelFor (BoxND<dim> const& box1, BoxND<dim> const& box2, L1&& f1, L2&& f2) noexcept
{ ParallelFor<MT>(Gpu::KernelInfo{}, box1, box2, std::forward<L1>(f1), std::forward<L2>(f2)); }

template <typename L1, typename L2, typename L3, int dim>
void ParallelFor (BoxND<dim> const& box1, BoxND<dim> const& box2, BoxND<dim> const& box3,
                  L1&& f1, L2&& f2, L3&& f3) noexcept
{ ParallelFor<AMREX_GPU_MAX_THREADS>(Gpu::KernelInfo{}, box1, box2, box3, std::forward<L1>(f1), std::forward<L2>(f2), std::forward<L3>(f3)); }

template <int MT, typename L1, typename L2, typename L3, int dim>
void ParallelFor (BoxND<dim> const& box1, BoxND<dim> const& box2, BoxND<dim> const& box3,
                  L1&& f1, L2&& f2, L3&& f3) noexcept
{ ParallelFor<MT>(Gpu::KernelInfo{}, box1, box2, box3, std::forward<L1>(f1), std::forward<L2>(f2), std::forward<L3>(f3)); }

template <typename T1, typename T2, typename L1, typename L2, int dim, typename M1=std::enable_if_t<std::is_integral<T1>::value>, typename M2=std::enable_if_t<std::is_integral<T2>::value> >
void ParallelFor (BoxND<dim> const& box1, T1 ncomp1, L1&& f1,
                  BoxND<dim> const& box2, T2 ncomp2, L2&& f2) noexcept
{ ParallelFor<AMREX_GPU_MAX_THREADS>(Gpu::KernelInfo{}, box1, ncomp1, std::forward<L1>(f1), box2, ncomp2, std::forward<L2>(f2)); }

template <int MT, typename T1, typename T2, typename L1, typename L2, int dim, typename M1=std::enable_if_t<std::is_integral<T1>::value>, typename M2=std::enable_if_t<std::is_integral<T2>::value> >
void ParallelFor (BoxND<dim> const& box1, T1 ncomp1, L1&& f1,
                  BoxND<dim> const& box2, T2 ncomp2, L2&& f2) noexcept
{ ParallelFor<MT>(Gpu::KernelInfo{}, box1, ncomp1, std::forward<L1>(f1), box2, ncomp2, std::forward<L2>(f2)); }

template <typename T1, typename T2, typename T3, typename L1, typename L2, typename L3, int dim, typename M1=std::enable_if_t<std::is_integral<T1>::value>, typename M2=std::enable_if_t<std::is_integral<T2>::value>, typename M3=std::enable_if_t<std::is_integral<T3>::value> >
void ParallelFor (BoxND<dim> const& box1, T1 ncomp1, L1&& f1,
                  BoxND<dim> const& box2, T2 ncomp2, L2&& f2,
                  BoxND<dim> const& box3, T3 ncomp3, L3&& f3) noexcept
{
    ParallelFor<AMREX_GPU_MAX_THREADS>(Gpu::KernelInfo{},
                                       box1, ncomp1, std::forward<L1>(f1),
                                       box2, ncomp2, std::forward<L2>(f2),
                                       box3, ncomp3, std::forward<L3>(f3));
}

template <int MT, typename T1, typename T2, typename T3, typename L1, typename L2, typename L3, int dim, typename M1=std::enable_if_t<std::is_integral<T1>::value>, typename M2=std::enable_if_t<std::is_integral<T2>::value>, typename M3=std::enable_if_t<std::is_integral<T3>::value> >
void ParallelFor (BoxND<dim> const& box1, T1 ncomp1, L1&& f1,
                  BoxND<dim> const& box2, T2 ncomp2, L2&& f2,
                  BoxND<dim> const& box3, T3 ncomp3, L3&& f3) noexcept
{
    ParallelFor<MT>(Gpu::KernelInfo{},
                    box1, ncomp1, std::forward<L1>(f1),
                    box2, ncomp2, std::forward<L2>(f2),
                    box3, ncomp3, std::forward<L3>(f3));
}
template <typename T, typename L, typename M=std::enable_if_t<std::is_integral<T>::value> >
void For (T n, L&& f) noexcept
{ ParallelFor<AMREX_GPU_MAX_THREADS>(Gpu::KernelInfo{}, n, std::forward<L>(f)); }

template <int MT, typename T, typename L, typename M=std::enable_if_t<std::is_integral<T>::value> >
void For (T n, L&& f) noexcept
{ ParallelFor<MT>(Gpu::KernelInfo{}, n, std::forward<L>(f)); }

template <typename L, int dim>
void For (BoxND<dim> const& box, L&& f) noexcept
{ ParallelFor<AMREX_GPU_MAX_THREADS>(Gpu::KernelInfo{}, box, std::forward<L>(f)); }

template <int MT, typename L, int dim>
void For (BoxND<dim> const& box, L&& f) noexcept
{ ParallelFor<MT>(Gpu::KernelInfo{}, box, std::forward<L>(f)); }

template <typename T, typename L, int dim, typename M=std::enable_if_t<std::is_integral<T>::value> >
void For (BoxND<dim> const& box, T ncomp, L&& f) noexcept
{ ParallelFor<AMREX_GPU_MAX_THREADS>(Gpu::KernelInfo{}, box, ncomp, std::forward<L>(f)); }

template <int MT, typename T, typename L, int dim, typename M=std::enable_if_t<std::is_integral<T>::value> >
void For (BoxND<dim> const& box, T ncomp, L&& f) noexcept
{ ParallelFor<MT>(Gpu::KernelInfo{}, box, ncomp, std::forward<L>(f)); }

template <typename L1, typename L2, int dim>
void For (BoxND<dim> const& box1, BoxND<dim> const& box2, L1&& f1, L2&& f2) noexcept
{ ParallelFor<AMREX_GPU_MAX_THREADS>(Gpu::KernelInfo{}, box1, box2, std::forward<L1>(f1), std::forward<L2>(f2)); }

template <int MT, typename L1, typename L2, int dim>
void For (BoxND<dim> const& box1, BoxND<dim> const& box2, L1&& f1, L2&& f2) noexcept
{ ParallelFor<MT>(Gpu::KernelInfo{}, box1, box2, std::forward<L1>(f1), std::forward<L2>(f2)); }

template <typename L1, typename L2, typename L3, int dim>
void For (BoxND<dim> const& box1, BoxND<dim> const& box2, BoxND<dim> const& box3,
          L1&& f1, L2&& f2, L3&& f3) noexcept
{ ParallelFor<AMREX_GPU_MAX_THREADS>(Gpu::KernelInfo{}, box1, box2, box3, std::forward<L1>(f1), std::forward<L2>(f2), std::forward<L3>(f3)); }

template <int MT, typename L1, typename L2, typename L3, int dim>
void For (BoxND<dim> const& box1, BoxND<dim> const& box2, BoxND<dim> const& box3,
          L1&& f1, L2&& f2, L3&& f3) noexcept
{ ParallelFor<MT>(Gpu::KernelInfo{}, box1, box2, box3, std::forward<L1>(f1), std::forward<L2>(f2), std::forward<L3>(f3)); }

template <typename T1, typename T2, typename L1, typename L2, int dim, typename M1=std::enable_if_t<std::is_integral<T1>::value>, typename M2=std::enable_if_t<std::is_integral<T2>::value> >
void For (BoxND<dim> const& box1, T1 ncomp1, L1&& f1,
          BoxND<dim> const& box2, T2 ncomp2, L2&& f2) noexcept
{ ParallelFor<AMREX_GPU_MAX_THREADS>(Gpu::KernelInfo{}, box1, ncomp1, std::forward<L1>(f1), box2, ncomp2, std::forward<L2>(f2)); }

template <int MT, typename T1, typename T2, typename L1, typename L2, int dim, typename M1=std::enable_if_t<std::is_integral<T1>::value>, typename M2=std::enable_if_t<std::is_integral<T2>::value> >
void For (BoxND<dim> const& box1, T1 ncomp1, L1&& f1,
          BoxND<dim> const& box2, T2 ncomp2, L2&& f2) noexcept
{ ParallelFor<MT>(Gpu::KernelInfo{}, box1, ncomp1, std::forward<L1>(f1), box2, ncomp2, std::forward<L2>(f2)); }

template <typename T1, typename T2, typename T3, typename L1, typename L2, typename L3, int dim, typename M1=std::enable_if_t<std::is_integral<T1>::value>, typename M2=std::enable_if_t<std::is_integral<T2>::value>, typename M3=std::enable_if_t<std::is_integral<T3>::value> >
void For (BoxND<dim> const& box1, T1 ncomp1, L1&& f1,
          BoxND<dim> const& box2, T2 ncomp2, L2&& f2,
          BoxND<dim> const& box3, T3 ncomp3, L3&& f3) noexcept
{
    ParallelFor<AMREX_GPU_MAX_THREADS>(Gpu::KernelInfo{},
                                       box1, ncomp1, std::forward<L1>(f1),
                                       box2, ncomp2, std::forward<L2>(f2),
                                       box3, ncomp3, std::forward<L3>(f3));
}

template <int MT, typename T1, typename T2, typename T3, typename L1, typename L2, typename L3, int dim, typename M1=std::enable_if_t<std::is_integral<T1>::value>, typename M2=std::enable_if_t<std::is_integral<T2>::value>, typename M3=std::enable_if_t<std::is_integral<T3>::value> >
void For (BoxND<dim> const& box1, T1 ncomp1, L1&& f1,
          BoxND<dim> const& box2, T2 ncomp2, L2&& f2,
          BoxND<dim> const& box3, T3 ncomp3, L3&& f3) noexcept
{
    ParallelFor<MT>(Gpu::KernelInfo{},
                    box1, ncomp1, std::forward<L1>(f1),
                    box2, ncomp2, std::forward<L2>(f2),
                    box3, ncomp3, std::forward<L3>(f3));
}
template <typename T, typename L, typename M=std::enable_if_t<std::is_integral<T>::value> >
std::enable_if_t<MaybeHostDeviceRunnable<L>::value>
HostDeviceParallelFor (Gpu::KernelInfo const& info, T n, L&& f) noexcept
{
    if (Gpu::inLaunchRegion()) {
        ParallelFor<AMREX_GPU_MAX_THREADS>(info, n, std::forward<L>(f));
    } else {
#ifdef AMREX_USE_SYCL
        amrex::Abort("amrex:: HOST_DEVICE disabled for Intel. It takes too long to compile");
#else
        for (T i = 0; i < n; ++i) { f(i); }
#endif
    }
}

template <int MT, typename T, typename L, typename M=std::enable_if_t<std::is_integral<T>::value> >
std::enable_if_t<MaybeHostDeviceRunnable<L>::value>
HostDeviceParallelFor (Gpu::KernelInfo const& info, T n, L&& f) noexcept
{
    if (Gpu::inLaunchRegion()) {
        ParallelFor<MT>(info, n, std::forward<L>(f));
    } else {
#ifdef AMREX_USE_SYCL
        amrex::Abort("amrex:: HOST_DEVICE disabled for Intel. It takes too long to compile");
#else
        for (T i = 0; i < n; ++i) { f(i); }
#endif
    }
}

template <typename T, typename L, typename M=std::enable_if_t<std::is_integral<T>::value> >
std::enable_if_t<MaybeHostDeviceRunnable<L>::value>
HostDeviceParallelFor (T n, L&& f) noexcept
{ HostDeviceParallelFor<AMREX_GPU_MAX_THREADS>(Gpu::KernelInfo{}, n, std::forward<L>(f)); }

template <int MT, typename T, typename L, typename M=std::enable_if_t<std::is_integral<T>::value> >
std::enable_if_t<MaybeHostDeviceRunnable<L>::value>
template <typename L, int dim>
std::enable_if_t<MaybeHostDeviceRunnable<L>::value>
HostDeviceParallelFor (Gpu::KernelInfo const& info, BoxND<dim> const& box, L&& f) noexcept
{
    if (Gpu::inLaunchRegion()) {
        ParallelFor<AMREX_GPU_MAX_THREADS>(info, box, std::forward<L>(f));
    } else {
#ifdef AMREX_USE_SYCL
        amrex::Abort("amrex:: HOST_DEVICE disabled for Intel. It takes too long to compile");
#endif
    }
}

template <int MT, typename L, int dim>
std::enable_if_t<MaybeHostDeviceRunnable<L>::value>
HostDeviceParallelFor (Gpu::KernelInfo const& info, BoxND<dim> const& box, L&& f) noexcept
{
    if (Gpu::inLaunchRegion()) {
        ParallelFor<MT>(info, box, std::forward<L>(f));
    } else {
#ifdef AMREX_USE_SYCL
        amrex::Abort("amrex:: HOST_DEVICE disabled for Intel. It takes too long to compile");
#endif
    }
}

template <typename T, typename L, int dim, typename M=std::enable_if_t<std::is_integral<T>::value> >
std::enable_if_t<MaybeHostDeviceRunnable<L>::value>
HostDeviceParallelFor (Gpu::KernelInfo const& info, BoxND<dim> const& box, T ncomp, L&& f) noexcept
{
    if (Gpu::inLaunchRegion()) {
        ParallelFor<AMREX_GPU_MAX_THREADS>(info, box, ncomp, std::forward<L>(f));
    } else {
#ifdef AMREX_USE_SYCL
        amrex::Abort("amrex:: HOST_DEVICE disabled for Intel. It takes too long to compile");
#endif
    }
}

template <int MT, typename T, typename L, int dim, typename M=std::enable_if_t<std::is_integral<T>::value> >
std::enable_if_t<MaybeHostDeviceRunnable<L>::value>
HostDeviceParallelFor (Gpu::KernelInfo const& info, BoxND<dim> const& box, T ncomp, L&& f) noexcept
{
    if (Gpu::inLaunchRegion()) {
        ParallelFor<MT>(info, box, ncomp, std::forward<L>(f));
    } else {
#ifdef AMREX_USE_SYCL
        amrex::Abort("amrex:: HOST_DEVICE disabled for Intel. It takes too long to compile");
#endif
    }
}
template <typename L1, typename L2, int dim>
std::enable_if_t<MaybeHostDeviceRunnable<L1>::value && MaybeHostDeviceRunnable<L2>::value>
HostDeviceParallelFor (Gpu::KernelInfo const& info,
                       BoxND<dim> const& box1, BoxND<dim> const& box2, L1&& f1, L2&& f2) noexcept
{
    if (Gpu::inLaunchRegion()) {
        ParallelFor<AMREX_GPU_MAX_THREADS>(info, box1, box2, std::forward<L1>(f1), std::forward<L2>(f2));
    } else {
#ifdef AMREX_USE_SYCL
        amrex::Abort("amrex:: HOST_DEVICE disabled for Intel. It takes too long to compile");
#endif
    }
}

template <int MT, typename L1, typename L2, int dim>
std::enable_if_t<MaybeHostDeviceRunnable<L1>::value && MaybeHostDeviceRunnable<L2>::value>
HostDeviceParallelFor (Gpu::KernelInfo const& info,
                       BoxND<dim> const& box1, BoxND<dim> const& box2, L1&& f1, L2&& f2) noexcept
{
    if (Gpu::inLaunchRegion()) {
        ParallelFor<MT>(info, box1, box2, std::forward<L1>(f1), std::forward<L2>(f2));
    } else {
#ifdef AMREX_USE_SYCL
        amrex::Abort("amrex:: HOST_DEVICE disabled for Intel. It takes too long to compile");
#endif
    }
}

template <int MT, typename L1, typename L2, typename L3, int dim>
std::enable_if_t<MaybeHostDeviceRunnable<L1>::value && MaybeHostDeviceRunnable<L2>::value && MaybeHostDeviceRunnable<L3>::value>
HostDeviceParallelFor (Gpu::KernelInfo const& info,
                       BoxND<dim> const& box1, BoxND<dim> const& box2, BoxND<dim> const& box3,
                       L1&& f1, L2&& f2, L3&& f3) noexcept
{
    if (Gpu::inLaunchRegion()) {
        ParallelFor<MT>(info, box1, box2, box3,
                        std::forward<L1>(f1), std::forward<L2>(f2), std::forward<L3>(f3));
    } else {
#ifdef AMREX_USE_SYCL
        amrex::Abort("amrex:: HOST_DEVICE disabled for Intel. It takes too long to compile");
#endif
    }
}
template <typename T1, typename T2, typename L1, typename L2, int dim,
          typename M1=std::enable_if_t<std::is_integral<T1>::value>,
          typename M2=std::enable_if_t<std::is_integral<T2>::value> >
std::enable_if_t<MaybeHostDeviceRunnable<L1>::value && MaybeHostDeviceRunnable<L2>::value>
HostDeviceParallelFor (Gpu::KernelInfo const& info,
                       BoxND<dim> const& box1, T1 ncomp1, L1&& f1,
                       BoxND<dim> const& box2, T2 ncomp2, L2&& f2) noexcept
{
    if (Gpu::inLaunchRegion()) {
        ParallelFor<AMREX_GPU_MAX_THREADS>(info, box1, ncomp1, std::forward<L1>(f1), box2, ncomp2, std::forward<L2>(f2));
    } else {
#ifdef AMREX_USE_SYCL
        amrex::Abort("amrex:: HOST_DEVICE disabled for Intel. It takes too long to compile");
#endif
    }
}

template <int MT, typename T1, typename T2, typename L1, typename L2, int dim,
          typename M1=std::enable_if_t<std::is_integral<T1>::value>,
          typename M2=std::enable_if_t<std::is_integral<T2>::value> >
std::enable_if_t<MaybeHostDeviceRunnable<L1>::value && MaybeHostDeviceRunnable<L2>::value>
HostDeviceParallelFor (Gpu::KernelInfo const& info,
                       BoxND<dim> const& box1, T1 ncomp1, L1&& f1,
                       BoxND<dim> const& box2, T2 ncomp2, L2&& f2) noexcept
{
    if (Gpu::inLaunchRegion()) {
        ParallelFor<MT>(info, box1, ncomp1, std::forward<L1>(f1), box2, ncomp2, std::forward<L2>(f2));
    } else {
#ifdef AMREX_USE_SYCL
        amrex::Abort("amrex:: HOST_DEVICE disabled for Intel. It takes too long to compile");
#endif
    }
}

template <typename T1, typename T2, typename T3, typename L1, typename L2, typename L3, int dim,
          typename M1=std::enable_if_t<std::is_integral<T1>::value>,
          typename M2=std::enable_if_t<std::is_integral<T2>::value>,
          typename M3=std::enable_if_t<std::is_integral<T3>::value> >
std::enable_if_t<MaybeHostDeviceRunnable<L1>::value && MaybeHostDeviceRunnable<L2>::value && MaybeHostDeviceRunnable<L3>::value>
HostDeviceParallelFor (Gpu::KernelInfo const& info,
                       BoxND<dim> const& box1, T1 ncomp1, L1&& f1,
                       BoxND<dim> const& box2, T2 ncomp2, L2&& f2,
                       BoxND<dim> const& box3, T3 ncomp3, L3&& f3) noexcept
{
    if (Gpu::inLaunchRegion()) {
        ParallelFor<AMREX_GPU_MAX_THREADS>(info,
                                           box1, ncomp1, std::forward<L1>(f1),
                                           box2, ncomp2, std::forward<L2>(f2),
                                           box3, ncomp3, std::forward<L3>(f3));
    } else {
#ifdef AMREX_USE_SYCL
        amrex::Abort("amrex:: HOST_DEVICE disabled for Intel. It takes too long to compile");
#endif
    }
}

template <int MT, typename T1, typename T2, typename T3, typename L1, typename L2, typename L3, int dim,
          typename M1=std::enable_if_t<std::is_integral<T1>::value>,
          typename M2=std::enable_if_t<std::is_integral<T2>::value>,
          typename M3=std::enable_if_t<std::is_integral<T3>::value> >
std::enable_if_t<MaybeHostDeviceRunnable<L1>::value && MaybeHostDeviceRunnable<L2>::value && MaybeHostDeviceRunnable<L3>::value>
HostDeviceParallelFor (Gpu::KernelInfo const& info,
                       BoxND<dim> const& box1, T1 ncomp1, L1&& f1,
                       BoxND<dim> const& box2, T2 ncomp2, L2&& f2,
                       BoxND<dim> const& box3, T3 ncomp3, L3&& f3) noexcept
{
    if (Gpu::inLaunchRegion()) {
        ParallelFor<MT>(info,
                        box1, ncomp1, std::forward<L1>(f1),
                        box2, ncomp2, std::forward<L2>(f2),
                        box3, ncomp3, std::forward<L3>(f3));
    } else {
#ifdef AMREX_USE_SYCL
        amrex::Abort("amrex:: HOST_DEVICE disabled for Intel. It takes too long to compile");
#endif
    }
}
template <typename T, typename L, typename M=std::enable_if_t<std::is_integral<T>::value> >
void HostDeviceFor (Gpu::KernelInfo const& info, T n, L&& f) noexcept
{ HostDeviceParallelFor<AMREX_GPU_MAX_THREADS>(info, n, std::forward<L>(f)); }

template <int MT, typename T, typename L, typename M=std::enable_if_t<std::is_integral<T>::value> >
void HostDeviceFor (Gpu::KernelInfo const& info, T n, L&& f) noexcept
{ HostDeviceParallelFor<MT>(info, n, std::forward<L>(f)); }

template <typename L, int dim>
void HostDeviceFor (Gpu::KernelInfo const& info, BoxND<dim> const& box, L&& f) noexcept
{ HostDeviceParallelFor<AMREX_GPU_MAX_THREADS>(info, box, std::forward<L>(f)); }

template <int MT, typename L, int dim>
void HostDeviceFor (Gpu::KernelInfo const& info, BoxND<dim> const& box, L&& f) noexcept
{ HostDeviceParallelFor<MT>(info, box, std::forward<L>(f)); }

template <typename T, typename L, int dim, typename M=std::enable_if_t<std::is_integral<T>::value> >
void HostDeviceFor (Gpu::KernelInfo const& info, BoxND<dim> const& box, T ncomp, L&& f) noexcept
{ HostDeviceParallelFor<AMREX_GPU_MAX_THREADS>(info, box, ncomp, std::forward<L>(f)); }

template <int MT, typename T, typename L, int dim, typename M=std::enable_if_t<std::is_integral<T>::value> >
void HostDeviceFor (Gpu::KernelInfo const& info, BoxND<dim> const& box, T ncomp, L&& f) noexcept
{ HostDeviceParallelFor<MT>(info, box, ncomp, std::forward<L>(f)); }
template <typename L1, typename L2, int dim>
void HostDeviceFor (Gpu::KernelInfo const& info,
                    BoxND<dim> const& box1, BoxND<dim> const& box2, L1&& f1, L2&& f2) noexcept
{ HostDeviceParallelFor<AMREX_GPU_MAX_THREADS>(info, box1, box2, std::forward<L1>(f1), std::forward<L2>(f2)); }

template <int MT, typename L1, typename L2, int dim>
void HostDeviceFor (Gpu::KernelInfo const& info,
                    BoxND<dim> const& box1, BoxND<dim> const& box2, L1&& f1, L2&& f2) noexcept
{ HostDeviceParallelFor<MT>(info, box1, box2, std::forward<L1>(f1), std::forward<L2>(f2)); }

template <typename L1, typename L2, typename L3, int dim>
void HostDeviceFor (Gpu::KernelInfo const& info,
                    BoxND<dim> const& box1, BoxND<dim> const& box2, BoxND<dim> const& box3,
                    L1&& f1, L2&& f2, L3&& f3) noexcept
{
    HostDeviceParallelFor<AMREX_GPU_MAX_THREADS>(info, box1, box2, box3,
                                                 std::forward<L1>(f1), std::forward<L2>(f2), std::forward<L3>(f3));
}

template <int MT, typename L1, typename L2, typename L3, int dim>
void HostDeviceFor (Gpu::KernelInfo const& info,
                    BoxND<dim> const& box1, BoxND<dim> const& box2, BoxND<dim> const& box3,
                    L1&& f1, L2&& f2, L3&& f3) noexcept
{
    HostDeviceParallelFor<MT>(info, box1, box2, box3,
                              std::forward<L1>(f1), std::forward<L2>(f2), std::forward<L3>(f3));
}
template <typename T1, typename T2, typename L1, typename L2, int dim,
          typename M1=std::enable_if_t<std::is_integral<T1>::value>,
          typename M2=std::enable_if_t<std::is_integral<T2>::value> >
void HostDeviceFor (Gpu::KernelInfo const& info,
                    BoxND<dim> const& box1, T1 ncomp1, L1&& f1,
                    BoxND<dim> const& box2, T2 ncomp2, L2&& f2) noexcept
{ HostDeviceParallelFor<AMREX_GPU_MAX_THREADS>(info, box1, ncomp1, std::forward<L1>(f1), box2, ncomp2, std::forward<L2>(f2)); }

template <int MT, typename T1, typename T2, typename L1, typename L2, int dim,
          typename M1=std::enable_if_t<std::is_integral<T1>::value>,
          typename M2=std::enable_if_t<std::is_integral<T2>::value> >
void HostDeviceFor (Gpu::KernelInfo const& info,
                    BoxND<dim> const& box1, T1 ncomp1, L1&& f1,
                    BoxND<dim> const& box2, T2 ncomp2, L2&& f2) noexcept
{ HostDeviceParallelFor<MT>(info, box1, ncomp1, std::forward<L1>(f1), box2, ncomp2, std::forward<L2>(f2)); }

template <typename T1, typename T2, typename T3, typename L1, typename L2, typename L3, int dim,
          typename M1=std::enable_if_t<std::is_integral<T1>::value>,
          typename M2=std::enable_if_t<std::is_integral<T2>::value>,
          typename M3=std::enable_if_t<std::is_integral<T3>::value> >
void HostDeviceFor (Gpu::KernelInfo const& info,
                    BoxND<dim> const& box1, T1 ncomp1, L1&& f1,
                    BoxND<dim> const& box2, T2 ncomp2, L2&& f2,
                    BoxND<dim> const& box3, T3 ncomp3, L3&& f3) noexcept
{
    HostDeviceParallelFor<AMREX_GPU_MAX_THREADS>(info,
                                                 box1, ncomp1, std::forward<L1>(f1),
                                                 box2, ncomp2, std::forward<L2>(f2),
                                                 box3, ncomp3, std::forward<L3>(f3));
}

template <int MT, typename T1, typename T2, typename T3, typename L1, typename L2, typename L3, int dim,
          typename M1=std::enable_if_t<std::is_integral<T1>::value>,
          typename M2=std::enable_if_t<std::is_integral<T2>::value>,
          typename M3=std::enable_if_t<std::is_integral<T3>::value> >
void HostDeviceFor (Gpu::KernelInfo const& info,
                    BoxND<dim> const& box1, T1 ncomp1, L1&& f1,
                    BoxND<dim> const& box2, T2 ncomp2, L2&& f2,
                    BoxND<dim> const& box3, T3 ncomp3, L3&& f3) noexcept
{
    HostDeviceParallelFor<MT>(info,
                              box1, ncomp1, std::forward<L1>(f1),
                              box2, ncomp2, std::forward<L2>(f2),
                              box3, ncomp3, std::forward<L3>(f3));
}
template <typename T, typename L, typename M=std::enable_if_t<std::is_integral<T>::value> >
void HostDeviceParallelFor (T n, L&& f) noexcept
{ HostDeviceParallelFor<AMREX_GPU_MAX_THREADS>(Gpu::KernelInfo{}, n, std::forward<L>(f)); }

template <int MT, typename T, typename L, typename M=std::enable_if_t<std::is_integral<T>::value> >
void HostDeviceParallelFor (T n, L&& f) noexcept
{ HostDeviceParallelFor<MT>(Gpu::KernelInfo{}, n, std::forward<L>(f)); }

template <typename L, int dim>
void HostDeviceParallelFor (BoxND<dim> const& box, L&& f) noexcept
{ HostDeviceParallelFor<AMREX_GPU_MAX_THREADS>(Gpu::KernelInfo{}, box, std::forward<L>(f)); }

template <int MT, typename L, int dim>
void HostDeviceParallelFor (BoxND<dim> const& box, L&& f) noexcept
{ HostDeviceParallelFor<MT>(Gpu::KernelInfo{}, box, std::forward<L>(f)); }

template <typename T, typename L, int dim, typename M=std::enable_if_t<std::is_integral<T>::value> >
void HostDeviceParallelFor (BoxND<dim> const& box, T ncomp, L&& f) noexcept
{ HostDeviceParallelFor<AMREX_GPU_MAX_THREADS>(Gpu::KernelInfo{}, box, ncomp, std::forward<L>(f)); }

template <int MT, typename T, typename L, int dim, typename M=std::enable_if_t<std::is_integral<T>::value> >
void HostDeviceParallelFor (BoxND<dim> const& box, T ncomp, L&& f) noexcept
{ HostDeviceParallelFor<MT>(Gpu::KernelInfo{}, box, ncomp, std::forward<L>(f)); }

template <typename L1, typename L2, int dim>
void HostDeviceParallelFor (BoxND<dim> const& box1, BoxND<dim> const& box2, L1&& f1, L2&& f2) noexcept
{ HostDeviceParallelFor<AMREX_GPU_MAX_THREADS>(Gpu::KernelInfo{}, box1, box2, std::forward<L1>(f1), std::forward<L2>(f2)); }

template <int MT, typename L1, typename L2, int dim>
void HostDeviceParallelFor (BoxND<dim> const& box1, BoxND<dim> const& box2, L1&& f1, L2&& f2) noexcept
{ HostDeviceParallelFor<MT>(Gpu::KernelInfo{}, box1, box2, std::forward<L1>(f1), std::forward<L2>(f2)); }

template <typename L1, typename L2, typename L3, int dim>
void HostDeviceParallelFor (BoxND<dim> const& box1, BoxND<dim> const& box2, BoxND<dim> const& box3,
                            L1&& f1, L2&& f2, L3&& f3) noexcept
{
    HostDeviceParallelFor<AMREX_GPU_MAX_THREADS>(Gpu::KernelInfo{}, box1, box2, box3,
                                                 std::forward<L1>(f1), std::forward<L2>(f2), std::forward<L3>(f3));
}

template <int MT, typename L1, typename L2, typename L3, int dim>
void HostDeviceParallelFor (BoxND<dim> const& box1, BoxND<dim> const& box2, BoxND<dim> const& box3,
                            L1&& f1, L2&& f2, L3&& f3) noexcept
{
    HostDeviceParallelFor<MT>(Gpu::KernelInfo{}, box1, box2, box3,
                              std::forward<L1>(f1), std::forward<L2>(f2), std::forward<L3>(f3));
}
1903 template <
typename T1,
typename T2,
typename L1,
typename L2,
int dim,
1904 typename M1=std::enable_if_t<std::is_integral<T1>::value>,
1905 typename M2=std::enable_if_t<std::is_integral<T2>::value> >
1907 BoxND<dim>
const& box2, T2 ncomp2, L2&& f2) noexcept
1909 HostDeviceParallelFor<AMREX_GPU_MAX_THREADS>(Gpu::KernelInfo{},box1,ncomp1,std::forward<L1>(f1),box2,ncomp2,std::forward<L2>(f2));
1912 template <
int MT,
typename T1,
typename T2,
typename L1,
typename L2,
int dim,
1913 typename M1=std::enable_if_t<std::is_integral<T1>::value>,
1914 typename M2=std::enable_if_t<std::is_integral<T2>::value> >
1916 BoxND<dim>
const& box2, T2 ncomp2, L2&& f2) noexcept
1918 HostDeviceParallelFor<MT>(Gpu::KernelInfo{},box1,ncomp1,std::forward<L1>(f1),box2,ncomp2,std::forward<L2>(f2));
1921 template <
typename T1,
typename T2,
typename T3,
typename L1,
typename L2,
typename L3,
int dim,
1922 typename M1=std::enable_if_t<std::is_integral<T1>::value>,
1923 typename M2=std::enable_if_t<std::is_integral<T2>::value>,
1924 typename M3=std::enable_if_t<std::is_integral<T3>::value> >
1926 BoxND<dim>
const& box2, T2 ncomp2, L2&& f2,
1927 BoxND<dim>
const& box3, T3 ncomp3, L3&& f3) noexcept
1929 HostDeviceParallelFor<AMREX_GPU_MAX_THREADS>(Gpu::KernelInfo{},
1930 box1,ncomp1,std::forward<L1>(f1),
1931 box2,ncomp2,std::forward<L2>(f2),
1932 box3,ncomp3,std::forward<L3>(f3));
template <int MT, typename T1, typename T2, typename T3, typename L1, typename L2, typename L3, int dim,
          typename M1=std::enable_if_t<std::is_integral<T1>::value>,
          typename M2=std::enable_if_t<std::is_integral<T2>::value>,
          typename M3=std::enable_if_t<std::is_integral<T3>::value> >
void HostDeviceParallelFor (BoxND<dim> const& box1, T1 ncomp1, L1&& f1,
                            BoxND<dim> const& box2, T2 ncomp2, L2&& f2,
                            BoxND<dim> const& box3, T3 ncomp3, L3&& f3) noexcept
{
    HostDeviceParallelFor<MT>(Gpu::KernelInfo{},
                              box1,ncomp1,std::forward<L1>(f1),
                              box2,ncomp2,std::forward<L2>(f2),
                              box3,ncomp3,std::forward<L3>(f3));
}
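// Usage sketch (illustrative only; boxes xbx/ybx/zbx and arrays fx/fy/fz are
// hypothetical).  The multi-box overloads fuse several independent loops into a
// single host/device dispatch, one lambda per box:
//
//   amrex::HostDeviceParallelFor(xbx, ybx, zbx,
//       [=] AMREX_GPU_HOST_DEVICE (int i, int j, int k) noexcept { fx(i,j,k) = 0.; },
//       [=] AMREX_GPU_HOST_DEVICE (int i, int j, int k) noexcept { fy(i,j,k) = 0.; },
//       [=] AMREX_GPU_HOST_DEVICE (int i, int j, int k) noexcept { fz(i,j,k) = 0.; });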