Block-Structured AMR Software Framework
Loading...
Searching...
No Matches
AMReX_FFT_R2C.H
Go to the documentation of this file.
1#ifndef AMREX_FFT_R2C_H_
2#define AMREX_FFT_R2C_H_
3#include <AMReX_Config.H>
4
5#include <AMReX_MultiFab.H>
6#include <AMReX_FFT_Helper.H>
7#include <algorithm>
8#include <numeric>
9#include <tuple>
10
11namespace amrex::FFT
12{
13
20template <typename T> class OpenBCSolver;
21template <typename T> class Poisson;
22template <typename T> class PoissonHybrid;
23
45template <typename T = Real, FFT::Direction D = FFT::Direction::both, bool C = false>
46class R2C
47{
48public:
50 using MF = std::conditional_t
51 <C, cMF, std::conditional_t<std::is_same_v<T,Real>,
53
54 template <typename U> friend class OpenBCSolver;
55 template <typename U> friend class Poisson;
56 template <typename U> friend class PoissonHybrid;
57
64 explicit R2C (Box const& domain, Info const& info = Info{});
65
75 explicit R2C (std::array<int,AMREX_SPACEDIM> const& domain_size,
76 Info const& info = Info{});
77
78 ~R2C ();
79
80 R2C (R2C const&) = delete;
81 R2C (R2C &&) = delete;
82 R2C& operator= (R2C const&) = delete;
83 R2C& operator= (R2C &&) = delete;
84
108 void setLocalDomain (std::array<int,AMREX_SPACEDIM> const& local_start,
109 std::array<int,AMREX_SPACEDIM> const& local_size);
110
124 std::pair<std::array<int,AMREX_SPACEDIM>,std::array<int,AMREX_SPACEDIM>>
126
155 void setLocalSpectralDomain (std::array<int,AMREX_SPACEDIM> const& local_start,
156 std::array<int,AMREX_SPACEDIM> const& local_size);
157
176 std::pair<std::array<int,AMREX_SPACEDIM>,std::array<int,AMREX_SPACEDIM>>
178
200 template <typename F, Direction DIR=D,
201 std::enable_if_t<DIR == Direction::both, int> = 0>
202 void forwardThenBackward (MF const& inmf, MF& outmf, F const& post_forward,
203 int incomp = 0, int outcomp = 0)
204 {
206 !m_info.twod_mode && m_info.batch_size == 1,
207 "FFT::R2C::forwardThenBackward(post_forward) currently supports only !twod_mode and batch_size==1");
208 BL_PROFILE("FFT::R2C::forwardbackward");
209 this->forward(inmf, incomp);
210 this->post_forward_doit_0(post_forward);
211 this->backward(outmf, outcomp);
212 }
213
224 template <Direction DIR=D, std::enable_if_t<DIR == Direction::forward ||
225 DIR == Direction::both, int> = 0>
226 void forward (MF const& inmf, int incomp = 0);
227
239 template <Direction DIR=D, std::enable_if_t<DIR == Direction::forward ||
240 DIR == Direction::both, int> = 0>
241 void forward (MF const& inmf, cMF& outmf, int incomp = 0, int outcomp = 0);
242
259 template <typename RT, typename CT, Direction DIR=D, bool CP=C,
260 std::enable_if_t<(DIR == Direction::forward ||
261 DIR == Direction::both)
262 && ((sizeof(RT)*2 == sizeof(CT) && !CP) ||
263 (sizeof(RT) == sizeof(CT) && CP)), int> = 0>
264 void forward (RT const* in, CT* out);
265
275 template <Direction DIR=D, std::enable_if_t<DIR == Direction::both, int> = 0>
276 void backward (MF& outmf, int outcomp = 0);
277
289 template <Direction DIR=D, std::enable_if_t<DIR == Direction::backward ||
290 DIR == Direction::both, int> = 0>
291 void backward (cMF const& inmf, MF& outmf, int incomp = 0, int outcomp = 0);
292
309 template <typename CT, typename RT, Direction DIR=D, bool CP=C,
310 std::enable_if_t<(DIR == Direction::backward ||
311 DIR == Direction::both)
312 && ((sizeof(RT)*2 == sizeof(CT) && !CP) ||
313 (sizeof(RT) == sizeof(CT) && CP)), int> = 0>
314 void backward (CT const* in, RT* out);
315
323 [[nodiscard]] T scalingFactor () const;
324
336 template <Direction DIR=D, std::enable_if_t<DIR == Direction::forward ||
337 DIR == Direction::both, int> = 0>
338 std::pair<cMF*,IntVect> getSpectralData () const;
339
349 [[nodiscard]] std::pair<BoxArray,DistributionMapping> getSpectralDataLayout () const;
350
356 template <typename F>
357 void post_forward_doit_0 (F const& post_forward);
358
359 template <typename F>
365 void post_forward_doit_1 (F const& post_forward);
366
367private:
368
369 void prepare_openbc ();
370
371 void backward_doit (MF& outmf, IntVect const& ngout = IntVect(0),
372 Periodicity const& period = Periodicity::NonPeriodic(),
373 int outcomp = 0);
374
375 void backward_doit (cMF const& inmf, MF& outmf,
376 IntVect const& ngout = IntVect(0),
377 Periodicity const& period = Periodicity::NonPeriodic(),
378 int incomp = 0, int outcomp = 0);
379
380 std::pair<Plan<T>,Plan<T>> make_c2c_plans (cMF& inout, int ndims) const;
381
382 static Box make_domain_x (Box const& domain)
383 {
384 if constexpr (C) {
385 return Box(IntVect(0), IntVect(AMREX_D_DECL(domain.length(0)-1,
386 domain.length(1)-1,
387 domain.length(2)-1)),
388 domain.ixType());
389 } else {
390 return Box(IntVect(0), IntVect(AMREX_D_DECL(domain.length(0)/2,
391 domain.length(1)-1,
392 domain.length(2)-1)),
393 domain.ixType());
394 }
395 }
396
397 static Box make_domain_y (Box const& domain)
398 {
399 if constexpr (C) {
400 return Box(IntVect(0), IntVect(AMREX_D_DECL(domain.length(1)-1,
401 domain.length(0)-1,
402 domain.length(2)-1)),
403 domain.ixType());
404 } else {
405 return Box(IntVect(0), IntVect(AMREX_D_DECL(domain.length(1)-1,
406 domain.length(0)/2,
407 domain.length(2)-1)),
408 domain.ixType());
409 }
410 }
411
412 static Box make_domain_z (Box const& domain)
413 {
414 if constexpr (C) {
415 return Box(IntVect(0), IntVect(AMREX_D_DECL(domain.length(2)-1,
416 domain.length(0)-1,
417 domain.length(1)-1)),
418 domain.ixType());
419 } else {
420 return Box(IntVect(0), IntVect(AMREX_D_DECL(domain.length(2)-1,
421 domain.length(0)/2,
422 domain.length(1)-1)),
423 domain.ixType());
424 }
425 }
426
427 static std::pair<BoxArray,DistributionMapping>
428 make_layout_from_local_domain (std::array<int,AMREX_SPACEDIM> const& local_start,
429 std::array<int,AMREX_SPACEDIM> const& local_size);
430
431 template <typename FA, typename RT>
432 std::pair<std::unique_ptr<char,DataDeleter>,std::size_t>
433 install_raw_ptr (FA& fa, RT const* p);
434
435 Plan<T> m_fft_fwd_x{};
436 Plan<T> m_fft_bwd_x{};
437 Plan<T> m_fft_fwd_y{};
438 Plan<T> m_fft_bwd_y{};
439 Plan<T> m_fft_fwd_z{};
440 Plan<T> m_fft_bwd_z{};
441 Plan<T> m_fft_fwd_x_half{};
442 Plan<T> m_fft_bwd_x_half{};
443
444 // Comm meta-data. In the forward phase, we start with (x,y,z),
445 // transpose to (y,x,z) and then (z,x,y). In the backward phase, we
446 // perform inverse transpose.
447 std::unique_ptr<MultiBlockCommMetaData> m_cmd_x2y; // (x,y,z) -> (y,x,z)
448 std::unique_ptr<MultiBlockCommMetaData> m_cmd_y2x; // (y,x,z) -> (x,y,z)
449 std::unique_ptr<MultiBlockCommMetaData> m_cmd_y2z; // (y,x,z) -> (z,x,y)
450 std::unique_ptr<MultiBlockCommMetaData> m_cmd_z2y; // (z,x,y) -> (y,x,z)
451 std::unique_ptr<MultiBlockCommMetaData> m_cmd_x2z; // (x,y,z) -> (z,x,y)
452 std::unique_ptr<MultiBlockCommMetaData> m_cmd_z2x; // (z,x,y) -> (x,y,z)
453 std::unique_ptr<MultiBlockCommMetaData> m_cmd_x2z_half; // for openbc
454 std::unique_ptr<MultiBlockCommMetaData> m_cmd_z2x_half; // for openbc
455 Swap01 m_dtos_x2y{};
456 Swap01 m_dtos_y2x{};
457 Swap02 m_dtos_y2z{};
458 Swap02 m_dtos_z2y{};
459 RotateFwd m_dtos_x2z{};
460 RotateBwd m_dtos_z2x{};
461
462 MF m_rx;
463 cMF m_cx;
464 cMF m_cy;
465 cMF m_cz;
466
467 mutable MF m_raw_mf;
468 mutable cMF m_raw_cmf;
469
470 std::unique_ptr<char,DataDeleter> m_data_1;
471 std::unique_ptr<char,DataDeleter> m_data_2;
472
473 Box m_real_domain;
474 Box m_spectral_domain_x;
475 Box m_spectral_domain_y;
476 Box m_spectral_domain_z;
477
478 std::unique_ptr<R2C<T,D,C>> m_r2c_sub;
479 detail::SubHelper m_sub_helper;
480
481 Info m_info;
482
483 bool m_do_alld_fft = false;
484 bool m_slab_decomp = false;
485 bool m_openbc_half = false;
486};
487
488template <typename T, Direction D, bool C>
489R2C<T,D,C>::R2C (Box const& domain, Info const& info)
490 : m_real_domain(domain),
491 m_spectral_domain_x(make_domain_x(domain)),
492#if (AMREX_SPACEDIM >= 2)
493 m_spectral_domain_y(make_domain_y(domain)),
494#if (AMREX_SPACEDIM == 3)
495 m_spectral_domain_z(make_domain_z(domain)),
496#endif
497#endif
498 m_sub_helper(domain),
499 m_info(info)
500{
501 BL_PROFILE("FFT::R2C");
502
503 static_assert(std::is_same_v<float,T> || std::is_same_v<double,T>);
504
505 AMREX_ALWAYS_ASSERT(m_real_domain.numPts() > 1);
506#if (AMREX_SPACEDIM == 2)
508#else
509 if (m_info.twod_mode) {
510 AMREX_ALWAYS_ASSERT((int(domain.length(0) > 1) +
511 int(domain.length(1) > 1) +
512 int(domain.length(2) > 1)) >= 2);
513 }
514#endif
515
516 {
517 Box subbox = m_sub_helper.make_box(m_real_domain);
518 if (subbox.size() != m_real_domain.size()) {
519 m_r2c_sub = std::make_unique<R2C<T,D,C>>(subbox, m_info);
520 return;
521 }
522 }
523
524 int myproc = ParallelContext::MyProcSub();
525 int nprocs = std::min(ParallelContext::NProcsSub(), m_info.nprocs);
526
527#if (AMREX_SPACEDIM == 3)
529 if (m_info.twod_mode) {
531 } else {
532 int shortside = m_real_domain.shortside();
533 if (shortside < m_info.pencil_threshold*nprocs) {
535 } else {
537 }
538 }
539 }
540
541 if (!m_info.oned_mode) {
542 if (m_info.twod_mode) {
543 m_slab_decomp = true;
544 } else if (m_info.domain_strategy == DomainStrategy::slab && (m_real_domain.length(1) > 1)) {
545 m_slab_decomp = true;
546 }
547 }
548#endif
549
550 auto const ncomp = m_info.batch_size;
551
552 auto bax = amrex::decompose(m_real_domain, nprocs,
553 {AMREX_D_DECL(false,!m_slab_decomp,m_real_domain.length(2)>1)}, true);
554
555 DistributionMapping dmx = detail::make_iota_distromap(bax.size());
556 m_rx.define(bax, dmx, ncomp, 0, MFInfo().SetAlloc(false));
557
558 {
559 BoxList bl = bax.boxList();
560 for (auto & b : bl) {
561 b.shift(-m_real_domain.smallEnd());
562 b.setBig(0, m_spectral_domain_x.bigEnd(0));
563 }
564 BoxArray cbax(std::move(bl));
565 m_cx.define(cbax, dmx, ncomp, 0, MFInfo().SetAlloc(false));
566 }
567
568 m_do_alld_fft = (ParallelDescriptor::NProcs() == 1) &&
569 (! m_info.twod_mode) && (! m_info.oned_mode);
570
571 if (!m_do_alld_fft) // do a series of 1d or 2d ffts
572 {
573 //
574 // make data containers
575 //
576
577#if (AMREX_SPACEDIM >= 2)
579 if ((m_real_domain.length(1) > 1) && !m_slab_decomp && !m_info.oned_mode)
580 {
581 auto cbay = amrex::decompose(m_spectral_domain_y, nprocs,
582 {AMREX_D_DECL(false,true,true)}, true);
583 if (cbay.size() == dmx.size()) {
584 cdmy = dmx;
585 } else {
586 cdmy = detail::make_iota_distromap(cbay.size());
587 }
588 m_cy.define(cbay, cdmy, ncomp, 0, MFInfo().SetAlloc(false));
589 }
590#endif
591
592#if (AMREX_SPACEDIM == 3)
593 if (!m_info.oned_mode && !m_info.twod_mode &&
594 m_real_domain.length(1) > 1 &&
595 m_real_domain.length(2) > 1)
596 {
597 auto cbaz = amrex::decompose(m_spectral_domain_z, nprocs,
598 {false,true,true}, true);
600 if (cbaz.size() == dmx.size()) {
601 cdmz = dmx;
602 } else if (cbaz.size() == cdmy.size()) {
603 cdmz = cdmy;
604 } else {
605 cdmz = detail::make_iota_distromap(cbaz.size());
606 }
607 m_cz.define(cbaz, cdmz, ncomp, 0, MFInfo().SetAlloc(false));
608 }
609#endif
610
611 if constexpr (C) {
612 if (m_slab_decomp) {
613 m_data_1 = detail::make_mfs_share(m_rx, m_cx);
614 m_data_2 = detail::make_mfs_share(m_cz, m_cz);
615 } else {
616 m_data_1 = detail::make_mfs_share(m_rx, m_cz);
617 m_data_2 = detail::make_mfs_share(m_cy, m_cy);
618 // make m_cx an alias to m_rx
619 if (myproc < m_cx.size()) {
620 Box const& box = m_cx.fabbox(myproc);
621 using FAB = typename cMF::FABType::value_type;
622 m_cx.setFab(myproc, FAB(box, ncomp, m_rx[myproc].dataPtr()));
623 }
624 }
625 } else {
626 if (m_slab_decomp) {
627 m_data_1 = detail::make_mfs_share(m_rx, m_cz);
628 m_data_2 = detail::make_mfs_share(m_cx, m_cx);
629 } else {
630 m_data_1 = detail::make_mfs_share(m_rx, m_cy);
631 m_data_2 = detail::make_mfs_share(m_cx, m_cz);
632 }
633 }
634
635 //
636 // make copiers
637 //
638
639#if (AMREX_SPACEDIM >= 2)
640 if (! m_cy.empty()) {
641 // comm meta-data between x and y phases
642 m_cmd_x2y = std::make_unique<MultiBlockCommMetaData>
643 (m_cy, m_spectral_domain_y, m_cx, IntVect(0), m_dtos_x2y);
644 m_cmd_y2x = std::make_unique<MultiBlockCommMetaData>
645 (m_cx, m_spectral_domain_x, m_cy, IntVect(0), m_dtos_y2x);
646 }
647#endif
648#if (AMREX_SPACEDIM == 3)
649 if (! m_cz.empty() ) {
650 if (m_slab_decomp) {
651 // comm meta-data between xy and z phases
652 m_cmd_x2z = std::make_unique<MultiBlockCommMetaData>
653 (m_cz, m_spectral_domain_z, m_cx, IntVect(0), m_dtos_x2z);
654 m_cmd_z2x = std::make_unique<MultiBlockCommMetaData>
655 (m_cx, m_spectral_domain_x, m_cz, IntVect(0), m_dtos_z2x);
656 } else {
657 // comm meta-data between y and z phases
658 m_cmd_y2z = std::make_unique<MultiBlockCommMetaData>
659 (m_cz, m_spectral_domain_z, m_cy, IntVect(0), m_dtos_y2z);
660 m_cmd_z2y = std::make_unique<MultiBlockCommMetaData>
661 (m_cy, m_spectral_domain_y, m_cz, IntVect(0), m_dtos_z2y);
662 }
663 }
664#endif
665
666 //
667 // make plans
668 //
669
670 if (myproc < m_rx.size())
671 {
672 if constexpr (C) {
673 int ndims = m_slab_decomp ? 2 : 1;
674 std::tie(m_fft_fwd_x, m_fft_bwd_x) = make_c2c_plans(m_cx, ndims);
675 } else {
676 Box const& box = m_rx.box(myproc);
677 auto* pr = m_rx[myproc].dataPtr();
678 auto* pc = (typename Plan<T>::VendorComplex *)m_cx[myproc].dataPtr();
679#ifdef AMREX_USE_SYCL
680 m_fft_fwd_x.template init_r2c<Direction::forward>(box, pr, pc, m_slab_decomp, ncomp);
681 m_fft_bwd_x = m_fft_fwd_x;
682#else
683 if constexpr (D == Direction::both || D == Direction::forward) {
684 m_fft_fwd_x.template init_r2c<Direction::forward>(box, pr, pc, m_slab_decomp, ncomp);
685 }
686 if constexpr (D == Direction::both || D == Direction::backward) {
687 m_fft_bwd_x.template init_r2c<Direction::backward>(box, pr, pc, m_slab_decomp, ncomp);
688 }
689#endif
690 }
691 }
692
693#if (AMREX_SPACEDIM >= 2)
694 if (! m_cy.empty()) {
695 std::tie(m_fft_fwd_y, m_fft_bwd_y) = make_c2c_plans(m_cy,1);
696 }
697#endif
698#if (AMREX_SPACEDIM == 3)
699 if (! m_cz.empty()) {
700 std::tie(m_fft_fwd_z, m_fft_bwd_z) = make_c2c_plans(m_cz,1);
701 }
702#endif
703 }
704 else // do fft in all dimensions at the same time
705 {
706 if constexpr (C) {
707 m_data_1 = detail::make_mfs_share(m_rx, m_cx);
708 std::tie(m_fft_fwd_x, m_fft_bwd_x) = make_c2c_plans(m_cx,AMREX_SPACEDIM);
709 } else {
710 m_data_1 = detail::make_mfs_share(m_rx, m_rx);
711 m_data_2 = detail::make_mfs_share(m_cx, m_cx);
712
713 auto const& len = m_real_domain.length();
714 auto* pr = (void*)m_rx[0].dataPtr();
715 auto* pc = (void*)m_cx[0].dataPtr();
716#ifdef AMREX_USE_SYCL
717 m_fft_fwd_x.template init_r2c<Direction::forward>(len, pr, pc, false, ncomp);
718 m_fft_bwd_x = m_fft_fwd_x;
719#else
720 if constexpr (D == Direction::both || D == Direction::forward) {
721 m_fft_fwd_x.template init_r2c<Direction::forward>(len, pr, pc, false, ncomp);
722 }
723 if constexpr (D == Direction::both || D == Direction::backward) {
724 m_fft_bwd_x.template init_r2c<Direction::backward>(len, pr, pc, false, ncomp);
725 }
726#endif
727 }
728 }
729}
730
731template <typename T, Direction D, bool C>
732R2C<T,D,C>::R2C (std::array<int,AMREX_SPACEDIM> const& domain_size, Info const& info)
733 : R2C<T,D,C>(Box(IntVect(0),IntVect(domain_size)-1), info)
734{}
735
736template <typename T, Direction D, bool C>
738{
739 if (m_fft_bwd_x.plan != m_fft_fwd_x.plan) {
740 m_fft_bwd_x.destroy();
741 }
742 if (m_fft_bwd_y.plan != m_fft_fwd_y.plan) {
743 m_fft_bwd_y.destroy();
744 }
745 if (m_fft_bwd_z.plan != m_fft_fwd_z.plan) {
746 m_fft_bwd_z.destroy();
747 }
748 m_fft_fwd_x.destroy();
749 m_fft_fwd_y.destroy();
750 m_fft_fwd_z.destroy();
751 if (m_fft_bwd_x_half.plan != m_fft_fwd_x_half.plan) {
752 m_fft_bwd_x_half.destroy();
753 }
754 m_fft_fwd_x_half.destroy();
755}
756
757template <typename T, Direction D, bool C>
758std::pair<BoxArray,DistributionMapping>
759R2C<T,D,C>::make_layout_from_local_domain (std::array<int,AMREX_SPACEDIM> const& local_start,
760 std::array<int,AMREX_SPACEDIM> const& local_size)
761{
762 IntVect lo(local_start);
763 IntVect len(local_size);
764 Box bx(lo, lo+len-1);
765#ifdef AMREX_USE_MPI
767 MPI_Allgather(&bx, 1, ParallelDescriptor::Mpi_typemap<Box>::type(),
768 allboxes.data(), 1, ParallelDescriptor::Mpi_typemap<Box>::type(),
770 Vector<int> pmap;
771 pmap.reserve(allboxes.size());
772 for (int i = 0; i < allboxes.size(); ++i) {
773 if (allboxes[i].ok()) {
774 pmap.push_back(ParallelContext::local_to_global_rank(i));
775 }
776 }
777 allboxes.erase(std::remove_if(allboxes.begin(), allboxes.end(),
778 [=] (Box const& b) { return b.isEmpty(); }),
779 allboxes.end());
780 BoxList bl(std::move(allboxes));
781 return std::make_pair(BoxArray(std::move(bl)), DistributionMapping(std::move(pmap)));
782#else
783 return std::make_pair(BoxArray(bx), DistributionMapping(Vector<int>({0})));
784#endif
785}
786
787template <typename T, Direction D, bool C>
788void R2C<T,D,C>::setLocalDomain (std::array<int,AMREX_SPACEDIM> const& local_start,
789 std::array<int,AMREX_SPACEDIM> const& local_size)
790{
791 auto const& [ba, dm] = make_layout_from_local_domain(local_start, local_size);
792 m_raw_mf = MF(ba, dm, m_rx.nComp(), 0, MFInfo().SetAlloc(false));
793}
794
795template <typename T, Direction D, bool C>
796std::pair<std::array<int,AMREX_SPACEDIM>,std::array<int,AMREX_SPACEDIM>>
798{
799 m_raw_mf = MF(m_rx.boxArray(), m_rx.DistributionMap(), m_rx.nComp(), 0,
800 MFInfo{}.SetAlloc(false));
801
802 auto const myproc = ParallelContext::MyProcSub();
803 if (myproc < m_rx.size()) {
804 Box const& box = m_rx.box(myproc);
805 return std::make_pair(box.smallEnd().toArray(),
806 box.length().toArray());
807 } else {
808 return std::make_pair(std::array<int,AMREX_SPACEDIM>{AMREX_D_DECL(0,0,0)},
809 std::array<int,AMREX_SPACEDIM>{AMREX_D_DECL(0,0,0)});
810 }
811}
812
813template <typename T, Direction D, bool C>
814void R2C<T,D,C>::setLocalSpectralDomain (std::array<int,AMREX_SPACEDIM> const& local_start,
815 std::array<int,AMREX_SPACEDIM> const& local_size)
816{
817 auto const& [ba, dm] = make_layout_from_local_domain(local_start, local_size);
818 m_raw_cmf = cMF(ba, dm, m_rx.nComp(), 0, MFInfo().SetAlloc(false));
819}
820
821template <typename T, Direction D, bool C>
822std::pair<std::array<int,AMREX_SPACEDIM>,std::array<int,AMREX_SPACEDIM>>
824{
825 auto const ncomp = m_info.batch_size;
826 auto const& [ba, dm] = getSpectralDataLayout();
827
828 m_raw_cmf = cMF(ba, dm, ncomp, 0, MFInfo{}.SetAlloc(false));
829
830 auto const myproc = ParallelContext::MyProcSub();
831 if (myproc < m_raw_cmf.size()) {
832 Box const& box = m_raw_cmf.box(myproc);
833 return std::make_pair(box.smallEnd().toArray(), box.length().toArray());
834 } else {
835 return std::make_pair(std::array<int,AMREX_SPACEDIM>{AMREX_D_DECL(0,0,0)},
836 std::array<int,AMREX_SPACEDIM>{AMREX_D_DECL(0,0,0)});
837 }
838}
839
840template <typename T, Direction D, bool C>
842{
843 if (C || m_r2c_sub) { amrex::Abort("R2C: OpenBC not supported with reduced dimensions or complex inputs"); }
844
845#if (AMREX_SPACEDIM == 3)
846 if (m_do_alld_fft) { return; }
847
848 auto const ncomp = m_info.batch_size;
849
850 if (m_slab_decomp && ! m_fft_fwd_x_half.defined) {
851 auto* fab = detail::get_fab(m_rx);
852 if (fab) {
853 Box bottom_half = m_real_domain;
854 bottom_half.growHi(2,-m_real_domain.length(2)/2);
855 Box box = fab->box() & bottom_half;
856 if (box.ok()) {
857 auto* pr = fab->dataPtr();
858 auto* pc = (typename Plan<T>::VendorComplex *)
859 detail::get_fab(m_cx)->dataPtr();
860#ifdef AMREX_USE_SYCL
861 m_fft_fwd_x_half.template init_r2c<Direction::forward>
862 (box, pr, pc, m_slab_decomp, ncomp);
863 m_fft_bwd_x_half = m_fft_fwd_x_half;
864#else
865 if constexpr (D == Direction::both || D == Direction::forward) {
866 m_fft_fwd_x_half.template init_r2c<Direction::forward>
867 (box, pr, pc, m_slab_decomp, ncomp);
868 }
869 if constexpr (D == Direction::both || D == Direction::backward) {
870 m_fft_bwd_x_half.template init_r2c<Direction::backward>
871 (box, pr, pc, m_slab_decomp, ncomp);
872 }
873#endif
874 }
875 }
876 } // else todo
877
878 if (m_cmd_x2z && ! m_cmd_x2z_half) {
879 Box bottom_half = m_spectral_domain_z;
880 // Note that z-direction's index is 0 because we z is the
881 // unit-stride direction here.
882 bottom_half.growHi(0,-m_spectral_domain_z.length(0)/2);
883 m_cmd_x2z_half = std::make_unique<MultiBlockCommMetaData>
884 (m_cz, bottom_half, m_cx, IntVect(0), m_dtos_x2z);
885 }
886
887 if (m_cmd_z2x && ! m_cmd_z2x_half) {
888 Box bottom_half = m_spectral_domain_x;
889 bottom_half.growHi(2,-m_spectral_domain_x.length(2)/2);
890 m_cmd_z2x_half = std::make_unique<MultiBlockCommMetaData>
891 (m_cx, bottom_half, m_cz, IntVect(0), m_dtos_z2x);
892 }
893#endif
894}
895
896template <typename T, Direction D, bool C>
897template <Direction DIR, std::enable_if_t<DIR == Direction::forward ||
898 DIR == Direction::both, int> >
899void R2C<T,D,C>::forward (MF const& inmf, int incomp)
900{
901 BL_PROFILE("FFT::R2C::forward(in)");
902
903 auto const ncomp = m_info.batch_size;
904
905 if (m_r2c_sub) {
906 if (m_sub_helper.ghost_safe(inmf.nGrowVect())) {
907 m_r2c_sub->forward(m_sub_helper.make_alias_mf(inmf), incomp);
908 } else {
909 MF tmp(inmf.boxArray(), inmf.DistributionMap(), ncomp, 0);
910 tmp.LocalCopy(inmf, incomp, 0, ncomp, IntVect(0));
911 m_r2c_sub->forward(m_sub_helper.make_alias_mf(tmp),0);
912 }
913 return;
914 }
915
916 if (&m_rx != &inmf) {
917 m_rx.ParallelCopy(inmf, incomp, 0, ncomp);
918 }
919
920 if (m_do_alld_fft) {
921 if constexpr (C) {
922 m_fft_fwd_x.template compute_c2c<Direction::forward>();
923 } else {
924 m_fft_fwd_x.template compute_r2c<Direction::forward>();
925 }
926 return;
927 }
928
929 auto& fft_x = m_openbc_half ? m_fft_fwd_x_half : m_fft_fwd_x;
930 if constexpr (C) {
931 fft_x.template compute_c2c<Direction::forward>();
932 } else {
933 fft_x.template compute_r2c<Direction::forward>();
934 }
935
936 if ( m_cmd_x2y) {
937 ParallelCopy(m_cy, m_cx, *m_cmd_x2y, 0, 0, ncomp, m_dtos_x2y);
938 }
939 m_fft_fwd_y.template compute_c2c<Direction::forward>();
940
941 if ( m_cmd_y2z) {
942 ParallelCopy(m_cz, m_cy, *m_cmd_y2z, 0, 0, ncomp, m_dtos_y2z);
943 }
944#if (AMREX_SPACEDIM == 3)
945 else if ( m_cmd_x2z) {
946 if (m_openbc_half) {
947 NonLocalBC::PackComponents components{};
948 components.n_components = ncomp;
950 {components, m_dtos_x2z};
951 auto handler = ParallelCopy_nowait(m_cz, m_cx, *m_cmd_x2z_half, packing);
952
953 Box upper_half = m_spectral_domain_z;
954 // Note that z-direction's index is 0 because we z is the
955 // unit-stride direction here.
956 upper_half.growLo (0,-m_spectral_domain_z.length(0)/2);
957 m_cz.setVal(0, upper_half, 0, ncomp);
958
959 ParallelCopy_finish(m_cz, std::move(handler), *m_cmd_x2z_half, packing);
960 } else {
961 ParallelCopy(m_cz, m_cx, *m_cmd_x2z, 0, 0, ncomp, m_dtos_x2z);
962 }
963 }
964#endif
965 m_fft_fwd_z.template compute_c2c<Direction::forward>();
966}
967
968template <typename T, Direction D, bool C>
969template <typename FA, typename RT>
970std::pair<std::unique_ptr<char,DataDeleter>,std::size_t>
971R2C<T,D,C>::install_raw_ptr (FA& fa, RT const* p)
972{
973 AMREX_ALWAYS_ASSERT(!fa.empty());
974
975 using FAB = typename FA::FABType::value_type;
976 using T_FAB = typename FAB::value_type;
977 static_assert(sizeof(T_FAB) == sizeof(RT));
978
979 auto const ncomp = m_info.batch_size;
980 auto const& ia = fa.IndexArray();
981
982 T_FAB* pp = nullptr;
983 std::size_t sz = 0;
984
985 if ( ! ia.empty() ) {
986 int K = ia[0];
987 Box const& box = fa.fabbox(K);
988 if ((alignof(T_FAB) == alignof(RT)) || amrex::is_aligned(p,alignof(T_FAB))) {
989 pp = (T_FAB*)p;
990 } else {
991 sz = sizeof(T_FAB) * box.numPts() * ncomp;
992 pp = (T_FAB*) The_Arena()->alloc(sz);
993 }
994 fa.setFab(K, FAB(box,ncomp,pp));
995 }
996
997 if (sz == 0) {
998 return std::make_pair(std::unique_ptr<char,DataDeleter>{},std::size_t(0));
999 } else {
1000 return std::make_pair(std::unique_ptr<char,DataDeleter>
1001 {(char*)pp,DataDeleter{The_Arena()}}, sz);
1002 }
1003}
1004
1005
1006template <typename T, Direction D, bool C>
1007template <typename RT, typename CT, Direction DIR, bool CP,
1008 std::enable_if_t<(DIR == Direction::forward ||
1009 DIR == Direction::both)
1010 && ((sizeof(RT)*2 == sizeof(CT) && !CP) ||
1011 (sizeof(RT) == sizeof(CT) && CP)), int> >
1012void R2C<T,D,C>::forward (RT const* in, CT* out)
1013{
1014 auto [rdata, rsz] = install_raw_ptr(m_raw_mf, in);
1015 auto [cdata, csz] = install_raw_ptr(m_raw_cmf, out);
1016
1017 if (rsz > 0) {
1018 Gpu::dtod_memcpy_async(rdata.get(),in,rsz);
1020 }
1021
1022 forward(m_raw_mf, m_raw_cmf);
1023
1024 if (csz) {
1025 Gpu::dtod_memcpy_async(out,cdata.get(),csz);
1027 }
1028}
1029
1030template <typename T, Direction D, bool C>
1031template <Direction DIR, std::enable_if_t<DIR == Direction::both, int> >
1032void R2C<T,D,C>::backward (MF& outmf, int outcomp)
1033{
1034 backward_doit(outmf, IntVect(0), Periodicity::NonPeriodic(), outcomp);
1035}
1036
// Inverse-transform the spectral data held internally back into outmf.
// ngout selects how many ghost cells of outmf the final ParallelCopy may
// fill; period supplies periodic-wrap semantics for that copy.  No 1/N
// scaling is applied here (see scalingFactor()).
1037template <typename T, Direction D, bool C>
1038void R2C<T,D,C>::backward_doit (MF& outmf, IntVect const& ngout,
1039 Periodicity const& period, int outcomp)
1040{
1041 BL_PROFILE("FFT::R2C::backward(out)");
1042
1043 auto const ncomp = m_info.batch_size;
1044
 // Reduced-dimension case: delegate to the lower-dimensional sub-solver,
 // aliasing outmf when its ghost layout permits, otherwise staging through
 // a temporary and copying back.
1045 if (m_r2c_sub) {
1046 if (m_sub_helper.ghost_safe(outmf.nGrowVect())) {
1047 MF submf = m_sub_helper.make_alias_mf(outmf);
1048 IntVect const& subngout = m_sub_helper.make_iv(ngout);
1049 Periodicity const& subperiod = m_sub_helper.make_periodicity(period);
1050 m_r2c_sub->backward_doit(submf, subngout, subperiod, outcomp);
1051 } else {
1052 MF tmp(outmf.boxArray(), outmf.DistributionMap(), ncomp,
1053 m_sub_helper.make_safe_ghost(outmf.nGrowVect()));
1054 this->backward_doit(tmp, ngout, period, 0);
1055 outmf.LocalCopy(tmp, 0, outcomp, ncomp, tmp.nGrowVect());
1056 }
1057 return;
1058 }
1059
 // Single-process, full-dimensional case: one vendor plan does everything.
1060 if (m_do_alld_fft) {
1061 if constexpr (C) {
1062 m_fft_bwd_x.template compute_c2c<Direction::backward>();
1063 } else {
1064 m_fft_bwd_x.template compute_r2c<Direction::backward>();
1065 }
1066 outmf.ParallelCopy(m_rx, 0, outcomp, ncomp, IntVect(0),
1067 amrex::elemwiseMin(ngout,outmf.nGrowVect()), period);
1068 return;
1069 }
1070
 // General case: undo the forward pipeline in reverse order,
 // z-FFT -> transpose -> y-FFT -> transpose -> x-FFT.
1071 m_fft_bwd_z.template compute_c2c<Direction::backward>();
1072 if ( m_cmd_z2y) {
1073 ParallelCopy(m_cy, m_cz, *m_cmd_z2y, 0, 0, ncomp, m_dtos_z2y);
1074 }
1075#if (AMREX_SPACEDIM == 3)
 // Slab decomposition skips the y stage: go straight from (z,x,y) back to
 // (x,y,z).  With open BCs only the bottom half of the domain is restored.
1076 else if ( m_cmd_z2x) {
1077 auto const& cmd = m_openbc_half ? m_cmd_z2x_half : m_cmd_z2x;
1078 ParallelCopy(m_cx, m_cz, *cmd, 0, 0, ncomp, m_dtos_z2x);
1079 }
1080#endif
1081
1082 m_fft_bwd_y.template compute_c2c<Direction::backward>();
1083 if ( m_cmd_y2x) {
1084 ParallelCopy(m_cx, m_cy, *m_cmd_y2x, 0, 0, ncomp, m_dtos_y2x);
1085 }
1086
 // Final x-direction transform; the half-domain plan is used in open-BC
 // mode.  c2c for complex input (C == true), otherwise c2r.
1087 auto& fft_x = m_openbc_half ? m_fft_bwd_x_half : m_fft_bwd_x;
1088 if constexpr (C) {
1089 fft_x.template compute_c2c<Direction::backward>();
1090 } else {
1091 fft_x.template compute_r2c<Direction::backward>();
1092 }
1093 outmf.ParallelCopy(m_rx, 0, outcomp, ncomp, IntVect(0),
1094 amrex::elemwiseMin(ngout,outmf.nGrowVect()), period);
1095}
1096
1097template <typename T, Direction D, bool C>
1098template <typename CT, typename RT, Direction DIR, bool CP,
1099 std::enable_if_t<(DIR == Direction::backward ||
1100 DIR == Direction::both)
1101 && ((sizeof(RT)*2 == sizeof(CT) && !CP) ||
1102 (sizeof(RT) == sizeof(CT) && CP)), int> >
1103void R2C<T,D,C>::backward (CT const* in, RT* out)
1104{
1105 auto [rdata, rsz] = install_raw_ptr(m_raw_mf, out);
1106 auto [cdata, csz] = install_raw_ptr(m_raw_cmf, in);
1107
1108 if (csz) {
1109 Gpu::dtod_memcpy_async(cdata.get(),in,csz);
1111 }
1112
1113 backward(m_raw_cmf, m_raw_mf);
1114
1115 if (rsz > 0) {
1116 Gpu::dtod_memcpy_async(out,rdata.get(),rsz);
1118 }
1119}
1120
1121template <typename T, Direction D, bool C>
1122std::pair<Plan<T>, Plan<T>>
1123R2C<T,D,C>::make_c2c_plans (cMF& inout, int ndims) const
1124{
1125 Plan<T> fwd;
1126 Plan<T> bwd;
1127
1128 auto* fab = detail::get_fab(inout);
1129 if (!fab) { return {fwd, bwd};}
1130
1131 Box const& box = fab->box();
1132 auto* pio = (typename Plan<T>::VendorComplex *)fab->dataPtr();
1133
1134 auto const ncomp = m_info.batch_size;
1135
1136#ifdef AMREX_USE_SYCL
1137 fwd.template init_c2c<Direction::forward>(box, pio, ncomp, ndims);
1138 bwd = fwd;
1139#else
1140 if constexpr (D == Direction::both || D == Direction::forward) {
1141 fwd.template init_c2c<Direction::forward>(box, pio, ncomp, ndims);
1142 }
1143 if constexpr (D == Direction::both || D == Direction::backward) {
1144 bwd.template init_c2c<Direction::backward>(box, pio, ncomp, ndims);
1145 }
1146#endif
1147
1148 return {fwd, bwd};
1149}
1150
// Apply the user's post_forward callback to every spectral coefficient.
// This outer layer handles the reduced-dimension sub-solver case by
// remapping the sub-solver's permuted indices back to the original
// (i,j,k) ordering before invoking the callback; the actual iteration is
// done by post_forward_doit_1.
1151template <typename T, Direction D, bool C>
1152template <typename F>
1153void R2C<T,D,C>::post_forward_doit_0 (F const& post_forward)
1154{
 // Not implemented for batched or 2D-mode transforms.
1155 if (m_info.twod_mode || m_info.batch_size > 1) {
1156 amrex::Abort("xxxxx todo: post_forward");
1157#if (AMREX_SPACEDIM > 1)
1158 } else if (m_r2c_sub) {
1159 // We need to pass the originally ordered indices to post_forward.
1160#if (AMREX_SPACEDIM == 2)
1161 // The original domain is (1,ny). The sub domain is (ny,1).
1162 m_r2c_sub->post_forward_doit_1
1163 ([=] AMREX_GPU_DEVICE (int i, int, int, auto& sp)
1164 {
1165 post_forward(0, i, 0, sp);
1166 });
1167#else
 // 3D: pick the index permutation matching which original directions
 // are degenerate (length 1).
1168 if (m_real_domain.length(0) == 1 && m_real_domain.length(1) == 1) {
1169 // Original domain: (1, 1, nz). Sub domain: (nz, 1, 1)
1170 m_r2c_sub->post_forward_doit_1
1171 ([=] AMREX_GPU_DEVICE (int i, int, int, auto& sp)
1172 {
1173 post_forward(0, 0, i, sp);
1174 });
1175 } else if (m_real_domain.length(0) == 1 && m_real_domain.length(2) == 1) {
1176 // Original domain: (1, ny, 1). Sub domain: (ny, 1, 1)
1177 m_r2c_sub->post_forward_doit_1
1178 ([=] AMREX_GPU_DEVICE (int i, int, int, auto& sp)
1179 {
1180 post_forward(0, i, 0, sp);
1181 });
1182 } else if (m_real_domain.length(0) == 1) {
1183 // Original domain: (1, ny, nz). Sub domain: (ny, nz, 1)
1184 m_r2c_sub->post_forward_doit_1
1185 ([=] AMREX_GPU_DEVICE (int i, int j, int, auto& sp)
1186 {
1187 post_forward(0, i, j, sp);
1188 });
1189 } else if (m_real_domain.length(1) == 1) {
1190 // Original domain: (nx, 1, nz). Sub domain: (nx, nz, 1)
1191 m_r2c_sub->post_forward_doit_1
1192 ([=] AMREX_GPU_DEVICE (int i, int j, int, auto& sp)
1193 {
1194 post_forward(i, 0, j, sp);
1195 });
1196 } else {
 // A sub-solver exists only when some direction is degenerate,
 // so this branch should be unreachable.
1197 amrex::Abort("R2c::post_forward_doit_0: how did this happen?");
1198 }
1199#endif
1200#endif
1201 } else {
 // No sub-solver: indices are already in the original order.
1202 this->post_forward_doit_1(post_forward);
1203 }
1204}
1205
// Iterate over this rank's local spectral FAB and invoke post_forward on
// each coefficient.  The spectral data live in whichever container holds
// the final transposed stage (m_cz, else m_cy, else m_cx); since those
// containers store transposed index orderings, the loop indices are
// permuted back to (i,j,k) of the original domain before the callback.
1206template <typename T, Direction D, bool C>
1207template <typename F>
1208void R2C<T,D,C>::post_forward_doit_1 (F const& post_forward)
1209{
 // Not implemented for batched or 2D-mode transforms.
1210 if (m_info.twod_mode || m_info.batch_size > 1) {
1211 amrex::Abort("xxxxx todo: post_forward");
1212 } else if (m_r2c_sub) {
 // The sub-solver case must be unwrapped by post_forward_doit_0 first.
1213 amrex::Abort("R2C::post_forward_doit_1: How did this happen?");
1214 } else {
1215 if ( ! m_cz.empty()) {
1216 auto* spectral_fab = detail::get_fab(m_cz);
1217 if (spectral_fab) {
1218 auto const& a = spectral_fab->array(); // m_cz's ordering is z,x,y
1219 ParallelForOMP(spectral_fab->box(),
1220 [=] AMREX_GPU_DEVICE (int iz, int jx, int ky)
1221 {
1222 post_forward(jx,ky,iz,a(iz,jx,ky));
1223 });
1224 }
1225 } else if ( ! m_cy.empty()) {
1226 auto* spectral_fab = detail::get_fab(m_cy);
1227 if (spectral_fab) {
1228 auto const& a = spectral_fab->array(); // m_cy's ordering is y,x,z
1229 ParallelForOMP(spectral_fab->box(),
1230 [=] AMREX_GPU_DEVICE (int iy, int jx, int k)
1231 {
1232 post_forward(jx,iy,k,a(iy,jx,k));
1233 });
1234 }
1235 } else {
 // x-pencil (or 1D) case: m_cx already uses the natural ordering.
1236 auto* spectral_fab = detail::get_fab(m_cx);
1237 if (spectral_fab) {
1238 auto const& a = spectral_fab->array();
1239 ParallelForOMP(spectral_fab->box(),
1240 [=] AMREX_GPU_DEVICE (int i, int j, int k)
1241 {
1242 post_forward(i,j,k,a(i,j,k));
1243 });
1244 }
1245 }
1246 }
1247}
1248
1249template <typename T, Direction D, bool C>
1251{
1252#if (AMREX_SPACEDIM == 3)
1253 if (m_info.oned_mode && !m_info.twod_mode) {
1254 return T(1)/T(Long(m_real_domain.length(0)));
1255 } else if (m_info.twod_mode) {
1256 return T(1)/T(Long(m_real_domain.length(0)) *
1257 Long(m_real_domain.length(1)));
1258 } else
1259#elif (AMREX_SPACEDIM == 2)
1260 if (m_info.oned_mode) {
1261 return T(1)/T(Long(m_real_domain.length(0)));
1262 } else
1263#endif
1264 {
1265 return T(1)/T(m_real_domain.numPts());
1266 }
1267}
1268
1269template <typename T, Direction D, bool C>
1270template <Direction DIR, std::enable_if_t<DIR == Direction::forward ||
1271 DIR == Direction::both, int> >
1272std::pair<typename R2C<T,D,C>::cMF *, IntVect>
1274{
1275#if (AMREX_SPACEDIM > 1)
1276 if (m_r2c_sub) {
1277 auto [cmf, order] = m_r2c_sub->getSpectralData();
1278 return std::make_pair(cmf, m_sub_helper.inverse_order(order));
1279 } else
1280#endif
1281 if (!m_cz.empty()) {
1282 return std::make_pair(const_cast<cMF*>(&m_cz), IntVect{AMREX_D_DECL(2,0,1)});
1283 } else if (!m_cy.empty()) {
1284 return std::make_pair(const_cast<cMF*>(&m_cy), IntVect{AMREX_D_DECL(1,0,2)});
1285 } else {
1286 return std::make_pair(const_cast<cMF*>(&m_cx), IntVect{AMREX_D_DECL(0,1,2)});
1287 }
1288}
1289
1290template <typename T, Direction D, bool C>
1291template <Direction DIR, std::enable_if_t<DIR == Direction::forward ||
1292 DIR == Direction::both, int> >
1293void R2C<T,D,C>::forward (MF const& inmf, cMF& outmf, int incomp, int outcomp)
1294{
1295 BL_PROFILE("FFT::R2C::forward(inout)");
1296
1297 auto const ncomp = m_info.batch_size;
1298
1299 if (m_r2c_sub)
1300 {
1301 bool inmf_safe = m_sub_helper.ghost_safe(inmf.nGrowVect());
1302 MF inmf_sub, inmf_tmp;
1303 int incomp_sub;
1304 if (inmf_safe) {
1305 inmf_sub = m_sub_helper.make_alias_mf(inmf);
1306 incomp_sub = incomp;
1307 } else {
1308 inmf_tmp.define(inmf.boxArray(), inmf.DistributionMap(), ncomp, 0);
1309 inmf_tmp.LocalCopy(inmf, incomp, 0, ncomp, IntVect(0));
1310 inmf_sub = m_sub_helper.make_alias_mf(inmf_tmp);
1311 incomp_sub = 0;
1312 }
1313
1314 bool outmf_safe = m_sub_helper.ghost_safe(outmf.nGrowVect());
1315 cMF outmf_sub, outmf_tmp;
1316 int outcomp_sub;
1317 if (outmf_safe) {
1318 outmf_sub = m_sub_helper.make_alias_mf(outmf);
1319 outcomp_sub = outcomp;
1320 } else {
1321 outmf_tmp.define(outmf.boxArray(), outmf.DistributionMap(), ncomp, 0);
1322 outmf_sub = m_sub_helper.make_alias_mf(outmf_tmp);
1323 outcomp_sub = 0;
1324 }
1325
1326 m_r2c_sub->forward(inmf_sub, outmf_sub, incomp_sub, outcomp_sub);
1327
1328 if (!outmf_safe) {
1329 outmf.LocalCopy(outmf_tmp, 0, outcomp, ncomp, IntVect(0));
1330 }
1331 }
1332 else
1333 {
1334 forward(inmf, incomp);
1335 if (!m_cz.empty()) { // m_cz's order (z,x,y) -> (x,y,z)
1336 RotateBwd dtos{};
1338 (outmf, m_spectral_domain_x, m_cz, IntVect(0), dtos);
1339 ParallelCopy(outmf, m_cz, cmd, 0, outcomp, ncomp, dtos);
1340 } else if (!m_cy.empty()) { // m_cy's order (y,x,z) -> (x,y,z)
1342 (outmf, m_spectral_domain_x, m_cy, IntVect(0), m_dtos_y2x);
1343 ParallelCopy(outmf, m_cy, cmd, 0, outcomp, ncomp, m_dtos_y2x);
1344 } else {
1345 outmf.ParallelCopy(m_cx, 0, outcomp, ncomp);
1346 }
1347 }
1348}
1349
1350template <typename T, Direction D, bool C>
1351template <Direction DIR, std::enable_if_t<DIR == Direction::backward ||
1352 DIR == Direction::both, int> >
1353void R2C<T,D,C>::backward (cMF const& inmf, MF& outmf, int incomp, int outcomp)
1354{
1355 backward_doit(inmf, outmf, IntVect(0), Periodicity::NonPeriodic(), incomp, outcomp);
1356}
1357
1358template <typename T, Direction D, bool C>
1359void R2C<T,D,C>::backward_doit (cMF const& inmf, MF& outmf, IntVect const& ngout,
1360 Periodicity const& period, int incomp, int outcomp)
1361{
1362 BL_PROFILE("FFT::R2C::backward(inout)");
1363
1364 auto const ncomp = m_info.batch_size;
1365
1366 if (m_r2c_sub)
1367 {
1368 bool inmf_safe = m_sub_helper.ghost_safe(inmf.nGrowVect());
1369 cMF inmf_sub, inmf_tmp;
1370 int incomp_sub;
1371 if (inmf_safe) {
1372 inmf_sub = m_sub_helper.make_alias_mf(inmf);
1373 incomp_sub = incomp;
1374 } else {
1375 inmf_tmp.define(inmf.boxArray(), inmf.DistributionMap(), ncomp, 0);
1376 inmf_tmp.LocalCopy(inmf, incomp, 0, ncomp, IntVect(0));
1377 inmf_sub = m_sub_helper.make_alias_mf(inmf_tmp);
1378 incomp_sub = 0;
1379 }
1380
1381 bool outmf_safe = m_sub_helper.ghost_safe(outmf.nGrowVect());
1382 MF outmf_sub, outmf_tmp;
1383 int outcomp_sub;
1384 if (outmf_safe) {
1385 outmf_sub = m_sub_helper.make_alias_mf(outmf);
1386 outcomp_sub = outcomp;
1387 } else {
1388 IntVect const& ngtmp = m_sub_helper.make_safe_ghost(outmf.nGrowVect());
1389 outmf_tmp.define(outmf.boxArray(), outmf.DistributionMap(), ncomp, ngtmp);
1390 outmf_sub = m_sub_helper.make_alias_mf(outmf_tmp);
1391 outcomp_sub = 0;
1392 }
1393
1394 IntVect const& subngout = m_sub_helper.make_iv(ngout);
1395 Periodicity const& subperiod = m_sub_helper.make_periodicity(period);
1396 m_r2c_sub->backward_doit(inmf_sub, outmf_sub, subngout, subperiod, incomp_sub, outcomp_sub);
1397
1398 if (!outmf_safe) {
1399 outmf.LocalCopy(outmf_tmp, 0, outcomp, ncomp, outmf_tmp.nGrowVect());
1400 }
1401 }
1402 else
1403 {
1404 if (!m_cz.empty()) { // (x,y,z) -> m_cz's order (z,x,y)
1405 RotateFwd dtos{};
1406 MultiBlockCommMetaData cmd
1407 (m_cz, m_spectral_domain_z, inmf, IntVect(0), dtos);
1408 ParallelCopy(m_cz, inmf, cmd, incomp, 0, ncomp, dtos);
1409 } else if (!m_cy.empty()) { // (x,y,z) -> m_cy's ordering (y,x,z)
1410 MultiBlockCommMetaData cmd
1411 (m_cy, m_spectral_domain_y, inmf, IntVect(0), m_dtos_x2y);
1412 ParallelCopy(m_cy, inmf, cmd, incomp, 0, ncomp, m_dtos_x2y);
1413 } else {
1414 m_cx.ParallelCopy(inmf, incomp, 0, ncomp);
1415 }
1416 backward_doit(outmf, ngout, period, outcomp);
1417 }
1418}
1419
1420template <typename T, Direction D, bool C>
1421std::pair<BoxArray,DistributionMapping>
1423{
1424#if (AMREX_SPACEDIM > 1)
1425 if (m_r2c_sub) {
1426 auto const& [ba, dm] = m_r2c_sub->getSpectralDataLayout();
1427 return std::make_pair(m_sub_helper.inverse_boxarray(ba), dm);
1428 }
1429#endif
1430
1431#if (AMREX_SPACEDIM == 3)
1432 if (!m_cz.empty()) {
1433 BoxList bl = m_cz.boxArray().boxList();
1434 for (auto& b : bl) {
1435 auto lo = b.smallEnd();
1436 auto hi = b.bigEnd();
1437 std::swap(lo[0], lo[1]);
1438 std::swap(lo[1], lo[2]);
1439 std::swap(hi[0], hi[1]);
1440 std::swap(hi[1], hi[2]);
1441 b.setSmall(lo);
1442 b.setBig(hi);
1443 }
1444 return std::make_pair(BoxArray(std::move(bl)), m_cz.DistributionMap());
1445 } else
1446#endif
1447#if (AMREX_SPACEDIM >= 2)
1448 if (!m_cy.empty()) {
1449 BoxList bl = m_cy.boxArray().boxList();
1450 for (auto& b : bl) {
1451 auto lo = b.smallEnd();
1452 auto hi = b.bigEnd();
1453 std::swap(lo[0], lo[1]);
1454 std::swap(hi[0], hi[1]);
1455 b.setSmall(lo);
1456 b.setBig(hi);
1457 }
1458 return std::make_pair(BoxArray(std::move(bl)), m_cy.DistributionMap());
1459 } else
1460#endif
1461 {
1462 return std::make_pair(m_cx.boxArray(), m_cx.DistributionMap());
1463 }
1464}
1465
1467template <typename T = Real, FFT::Direction D = FFT::Direction::both>
1469
1470}
1471
1472#endif
#define BL_PROFILE(a)
Definition AMReX_BLProfiler.H:551
#define AMREX_ALWAYS_ASSERT_WITH_MESSAGE(EX, MSG)
Definition AMReX_BLassert.H:49
#define AMREX_ALWAYS_ASSERT(EX)
Definition AMReX_BLassert.H:50
#define AMREX_GPU_DEVICE
Definition AMReX_GpuQualifiers.H:18
amrex::ParmParse pp
Input file parser instance for the given namespace.
Definition AMReX_HypreIJIface.cpp:15
#define AMREX_D_DECL(a, b, c)
Definition AMReX_SPACE.H:171
virtual void * alloc(std::size_t sz)=0
A collection of Boxes stored in an Array.
Definition AMReX_BoxArray.H:568
A class for managing a List of Boxes that share a common IndexType. This class implements operations ...
Definition AMReX_BoxList.H:52
BoxList & shift(int dir, int nzones)
Applies Box::shift(int,int) to each Box in the BoxList.
Definition AMReX_BoxList.cpp:565
__host__ __device__ const IntVectND< dim > & bigEnd() const &noexcept
Return the inclusive upper bound of the box.
Definition AMReX_Box.H:123
__host__ __device__ Long numPts() const noexcept
Return the number of points contained in the BoxND.
Definition AMReX_Box.H:356
__host__ __device__ IntVectND< dim > length() const noexcept
Return the length of the BoxND.
Definition AMReX_Box.H:154
__host__ __device__ int shortside(int &dir) const noexcept
Return length of shortest side. dir is modified to give direction with shortest side: 0....
Definition AMReX_Box.H:437
__host__ __device__ IntVectND< dim > size() const noexcept
Return the length of the BoxND.
Definition AMReX_Box.H:147
__host__ __device__ BoxND & growLo(int idir, int n_cell=1) noexcept
Grow the BoxND on the low end by n_cell cells in direction idir. NOTE: n_cell negative shrinks the Bo...
Definition AMReX_Box.H:662
__host__ __device__ IndexTypeND< dim > ixType() const noexcept
Return the indexing type.
Definition AMReX_Box.H:135
__host__ __device__ BoxND & growHi(int idir, int n_cell=1) noexcept
Grow the BoxND on the high end by n_cell cells in direction idir. NOTE: n_cell negative shrinks the B...
Definition AMReX_Box.H:673
__host__ __device__ const IntVectND< dim > & smallEnd() const &noexcept
Return the inclusive lower bound of the box.
Definition AMReX_Box.H:111
Calculates the distribution of FABs to MPI processes.
Definition AMReX_DistributionMapping.H:43
Long size() const noexcept
Length of the underlying processor map.
Definition AMReX_DistributionMapping.H:129
Convolution-based solver for open boundary conditions using Green's functions.
Definition AMReX_FFT_OpenBCSolver.H:24
3D Poisson solver for periodic, Dirichlet & Neumann boundaries in the first two dimensions,...
Definition AMReX_FFT_Poisson.H:187
Poisson solver for periodic, Dirichlet & Neumann boundaries using FFT.
Definition AMReX_FFT_Poisson.H:67
Parallel Discrete Fourier Transform.
Definition AMReX_FFT_R2C.H:47
std::conditional_t< C, cMF, std::conditional_t< std::is_same_v< T, Real >, MultiFab, FabArray< BaseFab< T > > > > MF
Definition AMReX_FFT_R2C.H:52
R2C & operator=(R2C const &)=delete
void forward(MF const &inmf, cMF &outmf, int incomp=0, int outcomp=0)
Forward transform.
Definition AMReX_FFT_R2C.H:1293
~R2C()
Definition AMReX_FFT_R2C.H:737
void setLocalDomain(std::array< int, 3 > const &local_start, std::array< int, 3 > const &local_size)
Set local domain.
Definition AMReX_FFT_R2C.H:788
R2C(Box const &domain, Info const &info=Info{})
Constructor.
Definition AMReX_FFT_R2C.H:489
void backward(cMF const &inmf, MF &outmf, int incomp=0, int outcomp=0)
Backward transform.
Definition AMReX_FFT_R2C.H:1353
R2C(R2C &&)=delete
void backward(MF &outmf, int outcomp=0)
Backward transform.
Definition AMReX_FFT_R2C.H:1032
void post_forward_doit_1(F const &post_forward)
CUDA-visible helper that redistributes and applies post_forward for the batched layout.
Definition AMReX_FFT_R2C.H:1208
T scalingFactor() const
Scaling factor. If the data goes through forward and then backward, the result multiplied by the scal...
Definition AMReX_FFT_R2C.H:1250
void post_forward_doit_0(F const &post_forward)
CUDA-visible hook that walks internal spectral data and applies post_forward.
Definition AMReX_FFT_R2C.H:1153
std::pair< std::array< int, 3 >, std::array< int, 3 > > getLocalDomain() const
Get local domain.
Definition AMReX_FFT_R2C.H:797
void forward(RT const *in, CT *out)
Forward transform.
Definition AMReX_FFT_R2C.H:1012
R2C(R2C const &)=delete
std::pair< BoxArray, DistributionMapping > getSpectralDataLayout() const
Get BoxArray and DistributionMapping for spectral data.
Definition AMReX_FFT_R2C.H:1422
std::pair< cMF *, IntVect > getSpectralData() const
Get the internal spectral data.
R2C(std::array< int, 3 > const &domain_size, Info const &info=Info{})
Constructor.
Definition AMReX_FFT_R2C.H:732
FabArray< BaseFab< GpuComplex< T > > > cMF
Definition AMReX_FFT_R2C.H:49
std::pair< std::array< int, 3 >, std::array< int, 3 > > getLocalSpectralDomain() const
Get local spectral domain.
Definition AMReX_FFT_R2C.H:823
void forwardThenBackward(MF const &inmf, MF &outmf, F const &post_forward, int incomp=0, int outcomp=0)
Forward and then backward transform.
Definition AMReX_FFT_R2C.H:202
void setLocalSpectralDomain(std::array< int, 3 > const &local_start, std::array< int, 3 > const &local_size)
Set local spectral domain.
Definition AMReX_FFT_R2C.H:814
void backward(CT const *in, RT *out)
Backward transform.
Definition AMReX_FFT_R2C.H:1103
void forward(MF const &inmf, int incomp=0)
Forward transform.
Definition AMReX_FFT_R2C.H:899
IntVect nGrowVect() const noexcept
Definition AMReX_FabArrayBase.H:80
int size() const noexcept
Return the number of FABs in the FabArray.
Definition AMReX_FabArrayBase.H:110
const DistributionMapping & DistributionMap() const noexcept
Return constant reference to associated DistributionMapping.
Definition AMReX_FabArrayBase.H:131
bool empty() const noexcept
Definition AMReX_FabArrayBase.H:89
Box fabbox(int K) const noexcept
Return the Kth FABs Box in the FabArray. That is, the region the Kth fab is actually defined on.
Definition AMReX_FabArrayBase.cpp:217
const BoxArray & boxArray() const noexcept
Return a constant reference to the BoxArray that defines the valid region associated with this FabArr...
Definition AMReX_FabArrayBase.H:95
void setFab(int boxno, std::unique_ptr< FAB > elem)
Explicitly set the Kth FAB in the FabArray to point to elem.
Definition AMReX_FabArray.H:2358
void ParallelCopy(const FabArray< FAB > &src, const Periodicity &period=Periodicity::NonPeriodic(), CpOp op=FabArrayBase::COPY)
Definition AMReX_FabArray.H:850
typename std::conditional_t< IsBaseFab< BaseFab< GpuComplex< T > > >::value, BaseFab< GpuComplex< T > >, FABType >::value_type value_type
Definition AMReX_FabArray.H:361
void define(const BoxArray &bxs, const DistributionMapping &dm, int nvar, int ngrow, const MFInfo &info=MFInfo(), const FabFactory< FAB > &factory=DefaultFabFactory< FAB >())
Define this FabArray identically to that performed by the constructor having an analogous function si...
Definition AMReX_FabArray.H:2174
void LocalCopy(FabArray< SFAB > const &src, int scomp, int dcomp, int ncomp, IntVect const &nghost)
Perform local copy of FabArray data.
Definition AMReX_FabArray.H:1955
A collection (stored as an array) of FArrayBox objects.
Definition AMReX_MultiFab.H:40
This provides length of period for periodic domains. 0 means it is not periodic in that direction....
Definition AMReX_Periodicity.H:17
static const Periodicity & NonPeriodic() noexcept
Definition AMReX_Periodicity.cpp:52
This class is a thin wrapper around std::vector. Unlike vector, Vector::operator[] provides bound che...
Definition AMReX_Vector.H:28
amrex_long Long
Definition AMReX_INT.H:30
void ParallelForOMP(T n, L const &f) noexcept
Performance-portable kernel launch function with optional OpenMP threading.
Definition AMReX_GpuLaunch.H:319
Arena * The_Arena()
Definition AMReX_Arena.cpp:805
int NProcs() noexcept
Definition AMReX_ParallelDescriptor.H:255
Definition AMReX_FFT_Helper.H:52
Direction
Definition AMReX_FFT_Helper.H:54
void dtod_memcpy_async(void *p_d_dst, const void *p_d_src, const std::size_t sz) noexcept
Definition AMReX_GpuDevice.H:449
void streamSynchronize() noexcept
Definition AMReX_GpuDevice.H:310
MPI_Comm CommunicatorSub() noexcept
sub-communicator for current frame
Definition AMReX_ParallelContext.H:70
int MyProcSub() noexcept
my sub-rank in current frame
Definition AMReX_ParallelContext.H:76
int local_to_global_rank(int rank) noexcept
translate between local rank and global rank
Definition AMReX_ParallelContext.H:95
int NProcsSub() noexcept
number of ranks in current frame
Definition AMReX_ParallelContext.H:74
BoxND< 3 > Box
Box is an alias for amrex::BoxND instantiated with AMREX_SPACEDIM.
Definition AMReX_BaseFwd.H:30
bool is_aligned(const void *p, std::size_t alignment) noexcept
Return whether the address p is aligned to alignment bytes.
Definition AMReX_Arena.H:39
BoxArray decompose(Box const &domain, int nboxes, Array< bool, 3 > const &decomp, bool no_overlap)
Decompose domain box into BoxArray.
Definition AMReX_BoxArray.cpp:1947
IntVectND< 3 > IntVect
IntVect is an alias for amrex::IntVectND instantiated with AMREX_SPACEDIM.
Definition AMReX_BaseFwd.H:33
void Abort(const std::string &msg)
Print out message to cerr and exit via abort().
Definition AMReX.cpp:240
void ParallelCopy(MF &dst, MF const &src, int scomp, int dcomp, int ncomp, IntVect const &ng_src=IntVect(0), IntVect const &ng_dst=IntVect(0), Periodicity const &period=Periodicity::NonPeriodic())
dst = src w/ MPI communication
Definition AMReX_FabArrayUtility.H:2019
__host__ __device__ constexpr T elemwiseMin(T const &a, T const &b) noexcept
Return the element-wise minimum of the given values for types like XDim3.
Definition AMReX_Algorithm.H:62
Definition AMReX_FFT_Helper.H:64
bool twod_mode
Definition AMReX_FFT_Helper.H:75
bool oned_mode
Definition AMReX_FFT_Helper.H:84
int batch_size
Batched FFT size. Only supported in R2C, not R2X.
Definition AMReX_FFT_Helper.H:87
DomainStrategy domain_strategy
Domain composition strategy.
Definition AMReX_FFT_Helper.H:66
int nprocs
Max number of processes to use.
Definition AMReX_FFT_Helper.H:90
int pencil_threshold
Definition AMReX_FFT_Helper.H:70
Definition AMReX_FFT_Helper.H:194
std::conditional_t< std::is_same_v< float, T >, cuComplex, cuDoubleComplex > VendorComplex
Definition AMReX_FFT_Helper.H:198
FabArray memory allocation information.
Definition AMReX_FabArray.H:66
MFInfo & SetAlloc(bool a) noexcept
Definition AMReX_FabArray.H:73
This class specializes behaviour on local copies and unpacking receive buffers.
Definition AMReX_NonLocalBC.H:626
This is the index mapping based on the DTOS MultiBlockDestToSrc.
Definition AMReX_NonLocalBC.H:216
Contains information about which components take part of the data transaction.
Definition AMReX_NonLocalBC.H:539
int n_components
Definition AMReX_NonLocalBC.H:542
Communication datatype (note: this structure also works without MPI)
Definition AMReX_ccse-mpi.H:78