// AMReX: Block-Structured AMR Software Framework
// AMReX_FFT_R2C.H -- source listing recovered from Doxygen (navigation chrome removed)
1#ifndef AMREX_FFT_R2C_H_
2#define AMREX_FFT_R2C_H_
3#include <AMReX_Config.H>
4
5#include <AMReX_MultiFab.H>
6#include <AMReX_FFT_Helper.H>
7#include <algorithm>
8#include <numeric>
9#include <tuple>
10
11namespace amrex::FFT
12{
13
20template <typename T> class OpenBCSolver;
21template <typename T> class Poisson;
22template <typename T> class PoissonHybrid;
23
45template <typename T = Real, FFT::Direction D = FFT::Direction::both, bool C = false>
46class R2C
47{
48public:
50 using MF = std::conditional_t
51 <C, cMF, std::conditional_t<std::is_same_v<T,Real>,
53
54 template <typename U> friend class OpenBCSolver;
55 template <typename U> friend class Poisson;
56 template <typename U> friend class PoissonHybrid;
57
64 explicit R2C (Box const& domain, Info const& info = Info{});
65
75 explicit R2C (std::array<int,AMREX_SPACEDIM> const& domain_size,
76 Info const& info = Info{});
77
78 ~R2C ();
79
80 R2C (R2C const&) = delete;
81 R2C (R2C &&) = delete;
82 R2C& operator= (R2C const&) = delete;
83 R2C& operator= (R2C &&) = delete;
84
108 void setLocalDomain (std::array<int,AMREX_SPACEDIM> const& local_start,
109 std::array<int,AMREX_SPACEDIM> const& local_size);
110
124 std::pair<std::array<int,AMREX_SPACEDIM>,std::array<int,AMREX_SPACEDIM>>
126
155 void setLocalSpectralDomain (std::array<int,AMREX_SPACEDIM> const& local_start,
156 std::array<int,AMREX_SPACEDIM> const& local_size);
157
176 std::pair<std::array<int,AMREX_SPACEDIM>,std::array<int,AMREX_SPACEDIM>>
178
200 template <typename F, Direction DIR=D,
201 std::enable_if_t<DIR == Direction::both, int> = 0>
202 void forwardThenBackward (MF const& inmf, MF& outmf, F const& post_forward,
203 int incomp = 0, int outcomp = 0)
204 {
206 !m_info.twod_mode && m_info.batch_size == 1,
207 "FFT::R2C::forwardThenBackward(post_forward) currently supports only !twod_mode and batch_size==1");
208 BL_PROFILE("FFT::R2C::forwardbackward");
209 this->forward(inmf, incomp);
210 this->post_forward_doit_0(post_forward);
211 this->backward(outmf, outcomp);
212 }
213
224 template <Direction DIR=D, std::enable_if_t<DIR == Direction::forward ||
225 DIR == Direction::both, int> = 0>
226 void forward (MF const& inmf, int incomp = 0);
227
239 template <Direction DIR=D, std::enable_if_t<DIR == Direction::forward ||
240 DIR == Direction::both, int> = 0>
241 void forward (MF const& inmf, cMF& outmf, int incomp = 0, int outcomp = 0);
242
259 template <typename RT, typename CT, Direction DIR=D, bool CP=C,
260 std::enable_if_t<(DIR == Direction::forward ||
261 DIR == Direction::both)
262 && ((sizeof(RT)*2 == sizeof(CT) && !CP) ||
263 (sizeof(RT) == sizeof(CT) && CP)), int> = 0>
264 void forward (RT const* in, CT* out);
265
275 template <Direction DIR=D, std::enable_if_t<DIR == Direction::both, int> = 0>
276 void backward (MF& outmf, int outcomp = 0);
277
289 template <Direction DIR=D, std::enable_if_t<DIR == Direction::backward ||
290 DIR == Direction::both, int> = 0>
291 void backward (cMF const& inmf, MF& outmf, int incomp = 0, int outcomp = 0);
292
309 template <typename CT, typename RT, Direction DIR=D, bool CP=C,
310 std::enable_if_t<(DIR == Direction::backward ||
311 DIR == Direction::both)
312 && ((sizeof(RT)*2 == sizeof(CT) && !CP) ||
313 (sizeof(RT) == sizeof(CT) && CP)), int> = 0>
314 void backward (CT const* in, RT* out);
315
323 [[nodiscard]] T scalingFactor () const;
324
336 template <Direction DIR=D, std::enable_if_t<DIR == Direction::forward ||
337 DIR == Direction::both, int> = 0>
338 std::pair<cMF*,IntVect> getSpectralData () const;
339
349 [[nodiscard]] std::pair<BoxArray,DistributionMapping> getSpectralDataLayout () const;
350
356 template <typename F>
357 void post_forward_doit_0 (F const& post_forward);
358
359 template <typename F>
365 void post_forward_doit_1 (F const& post_forward);
366
367private:
368
369 void prepare_openbc ();
370
371 void backward_doit (MF& outmf, IntVect const& ngout = IntVect(0),
372 Periodicity const& period = Periodicity::NonPeriodic(),
373 int outcomp = 0);
374
375 void backward_doit (cMF const& inmf, MF& outmf,
376 IntVect const& ngout = IntVect(0),
377 Periodicity const& period = Periodicity::NonPeriodic(),
378 int incomp = 0, int outcomp = 0);
379
380 std::pair<Plan<T>,Plan<T>> make_c2c_plans (cMF& inout, int ndims) const;
381
382 static Box make_domain_x (Box const& domain)
383 {
384 if constexpr (C) {
385 return Box(IntVect(0), IntVect(AMREX_D_DECL(domain.length(0)-1,
386 domain.length(1)-1,
387 domain.length(2)-1)),
388 domain.ixType());
389 } else {
390 return Box(IntVect(0), IntVect(AMREX_D_DECL(domain.length(0)/2,
391 domain.length(1)-1,
392 domain.length(2)-1)),
393 domain.ixType());
394 }
395 }
396
397 static Box make_domain_y (Box const& domain)
398 {
399 if constexpr (C) {
400 return Box(IntVect(0), IntVect(AMREX_D_DECL(domain.length(1)-1,
401 domain.length(0)-1,
402 domain.length(2)-1)),
403 domain.ixType());
404 } else {
405 return Box(IntVect(0), IntVect(AMREX_D_DECL(domain.length(1)-1,
406 domain.length(0)/2,
407 domain.length(2)-1)),
408 domain.ixType());
409 }
410 }
411
412 static Box make_domain_z (Box const& domain)
413 {
414 if constexpr (C) {
415 return Box(IntVect(0), IntVect(AMREX_D_DECL(domain.length(2)-1,
416 domain.length(0)-1,
417 domain.length(1)-1)),
418 domain.ixType());
419 } else {
420 return Box(IntVect(0), IntVect(AMREX_D_DECL(domain.length(2)-1,
421 domain.length(0)/2,
422 domain.length(1)-1)),
423 domain.ixType());
424 }
425 }
426
427 static std::pair<BoxArray,DistributionMapping>
428 make_layout_from_local_domain (std::array<int,AMREX_SPACEDIM> const& local_start,
429 std::array<int,AMREX_SPACEDIM> const& local_size);
430
431 template <typename FA, typename RT>
432 std::pair<std::unique_ptr<char,DataDeleter>,std::size_t>
433 install_raw_ptr (FA& fa, RT const* p);
434
435 Plan<T> m_fft_fwd_x{};
436 Plan<T> m_fft_bwd_x{};
437 Plan<T> m_fft_fwd_y{};
438 Plan<T> m_fft_bwd_y{};
439 Plan<T> m_fft_fwd_z{};
440 Plan<T> m_fft_bwd_z{};
441 Plan<T> m_fft_fwd_x_half{};
442 Plan<T> m_fft_bwd_x_half{};
443
444 // Comm meta-data. In the forward phase, we start with (x,y,z),
445 // transpose to (y,x,z) and then (z,x,y). In the backward phase, we
446 // perform inverse transpose.
447 std::unique_ptr<MultiBlockCommMetaData> m_cmd_x2y; // (x,y,z) -> (y,x,z)
448 std::unique_ptr<MultiBlockCommMetaData> m_cmd_y2x; // (y,x,z) -> (x,y,z)
449 std::unique_ptr<MultiBlockCommMetaData> m_cmd_y2z; // (y,x,z) -> (z,x,y)
450 std::unique_ptr<MultiBlockCommMetaData> m_cmd_z2y; // (z,x,y) -> (y,x,z)
451 std::unique_ptr<MultiBlockCommMetaData> m_cmd_x2z; // (x,y,z) -> (z,x,y)
452 std::unique_ptr<MultiBlockCommMetaData> m_cmd_z2x; // (z,x,y) -> (x,y,z)
453 std::unique_ptr<MultiBlockCommMetaData> m_cmd_x2z_half; // for openbc
454 std::unique_ptr<MultiBlockCommMetaData> m_cmd_z2x_half; // for openbc
455 Swap01 m_dtos_x2y{};
456 Swap01 m_dtos_y2x{};
457 Swap02 m_dtos_y2z{};
458 Swap02 m_dtos_z2y{};
459 RotateFwd m_dtos_x2z{};
460 RotateBwd m_dtos_z2x{};
461
462 MF m_rx;
463 cMF m_cx;
464 cMF m_cy;
465 cMF m_cz;
466
467 mutable MF m_raw_mf;
468 mutable cMF m_raw_cmf;
469
470 std::unique_ptr<char,DataDeleter> m_data_1;
471 std::unique_ptr<char,DataDeleter> m_data_2;
472
473 Box m_real_domain;
474 Box m_spectral_domain_x;
475 Box m_spectral_domain_y;
476 Box m_spectral_domain_z;
477
478 std::unique_ptr<R2C<T,D,C>> m_r2c_sub;
479 detail::SubHelper m_sub_helper;
480
481 Info m_info;
482
483 bool m_do_alld_fft = false;
484 bool m_slab_decomp = false;
485 bool m_openbc_half = false;
486};
487
488template <typename T, Direction D, bool C>
489R2C<T,D,C>::R2C (Box const& domain, Info const& info)
490 : m_real_domain(domain),
491 m_spectral_domain_x(make_domain_x(domain)),
492#if (AMREX_SPACEDIM >= 2)
493 m_spectral_domain_y(make_domain_y(domain)),
494#if (AMREX_SPACEDIM == 3)
495 m_spectral_domain_z(make_domain_z(domain)),
496#endif
497#endif
498 m_sub_helper(domain),
499 m_info(info)
500{
501 BL_PROFILE("FFT::R2C");
502
503 static_assert(std::is_same_v<float,T> || std::is_same_v<double,T>);
504
505 AMREX_ALWAYS_ASSERT(m_real_domain.numPts() > 1);
506#if (AMREX_SPACEDIM == 2)
508#else
509 if (m_info.twod_mode) {
510 AMREX_ALWAYS_ASSERT((int(domain.length(0) > 1) +
511 int(domain.length(1) > 1) +
512 int(domain.length(2) > 1)) >= 2);
513 }
514#endif
515
516 {
517 Box subbox = m_sub_helper.make_box(m_real_domain);
518 if (subbox.size() != m_real_domain.size()) {
519 m_r2c_sub = std::make_unique<R2C<T,D,C>>(subbox, m_info);
520 return;
521 }
522 }
523
524 int myproc = ParallelContext::MyProcSub();
525 int nprocs = std::min(ParallelContext::NProcsSub(), m_info.nprocs);
526
527#if (AMREX_SPACEDIM == 3)
529 if (m_info.twod_mode) {
531 } else {
532 int shortside = m_real_domain.shortside();
533 if (shortside < m_info.pencil_threshold*nprocs) {
535 } else {
537 }
538 }
539 }
540
541 if (!m_info.oned_mode) {
542 if (m_info.twod_mode) {
543 m_slab_decomp = true;
544 } else if (m_info.domain_strategy == DomainStrategy::slab && (m_real_domain.length(1) > 1)) {
545 m_slab_decomp = true;
546 }
547 }
548
549#endif
550
551 auto const ncomp = m_info.batch_size;
552
553 auto bax = amrex::decompose(m_real_domain, nprocs,
554 {AMREX_D_DECL(false,!m_slab_decomp,m_real_domain.length(2)>1)}, true);
555
556 DistributionMapping dmx = detail::make_iota_distromap(bax.size());
557 m_rx.define(bax, dmx, ncomp, 0, MFInfo().SetAlloc(false));
558
559 {
560 BoxList bl = bax.boxList();
561 for (auto & b : bl) {
562 b.shift(-m_real_domain.smallEnd());
563 b.setBig(0, m_spectral_domain_x.bigEnd(0));
564 }
565 BoxArray cbax(std::move(bl));
566 m_cx.define(cbax, dmx, ncomp, 0, MFInfo().SetAlloc(false));
567 }
568
569 m_do_alld_fft = (ParallelDescriptor::NProcs() == 1) && (! m_info.twod_mode);
570
571 if (!m_do_alld_fft) // do a series of 1d or 2d ffts
572 {
573 //
574 // make data containers
575 //
576
577#if (AMREX_SPACEDIM >= 2)
579 if ((m_real_domain.length(1) > 1) && !m_slab_decomp && !m_info.oned_mode)
580 {
581 auto cbay = amrex::decompose(m_spectral_domain_y, nprocs,
582 {AMREX_D_DECL(false,true,true)}, true);
583 if (cbay.size() == dmx.size()) {
584 cdmy = dmx;
585 } else {
586 cdmy = detail::make_iota_distromap(cbay.size());
587 }
588 m_cy.define(cbay, cdmy, ncomp, 0, MFInfo().SetAlloc(false));
589 }
590#endif
591
592#if (AMREX_SPACEDIM == 3)
593 if (m_real_domain.length(1) > 1 &&
594 (! m_info.twod_mode && m_real_domain.length(2) > 1))
595 {
596 auto cbaz = amrex::decompose(m_spectral_domain_z, nprocs,
597 {false,true,true}, true);
599 if (cbaz.size() == dmx.size()) {
600 cdmz = dmx;
601 } else if (cbaz.size() == cdmy.size()) {
602 cdmz = cdmy;
603 } else {
604 cdmz = detail::make_iota_distromap(cbaz.size());
605 }
606 m_cz.define(cbaz, cdmz, ncomp, 0, MFInfo().SetAlloc(false));
607 }
608#endif
609
610 if constexpr (C) {
611 if (m_slab_decomp) {
612 m_data_1 = detail::make_mfs_share(m_rx, m_cx);
613 m_data_2 = detail::make_mfs_share(m_cz, m_cz);
614 } else {
615 m_data_1 = detail::make_mfs_share(m_rx, m_cz);
616 m_data_2 = detail::make_mfs_share(m_cy, m_cy);
617 // make m_cx an alias to m_rx
618 if (myproc < m_cx.size()) {
619 Box const& box = m_cx.fabbox(myproc);
620 using FAB = typename cMF::FABType::value_type;
621 m_cx.setFab(myproc, FAB(box, ncomp, m_rx[myproc].dataPtr()));
622 }
623 }
624 } else {
625 if (m_slab_decomp) {
626 m_data_1 = detail::make_mfs_share(m_rx, m_cz);
627 m_data_2 = detail::make_mfs_share(m_cx, m_cx);
628 } else {
629 m_data_1 = detail::make_mfs_share(m_rx, m_cy);
630 m_data_2 = detail::make_mfs_share(m_cx, m_cz);
631 }
632 }
633
634 //
635 // make copiers
636 //
637
638#if (AMREX_SPACEDIM >= 2)
639 if (! m_cy.empty()) {
640 // comm meta-data between x and y phases
641 m_cmd_x2y = std::make_unique<MultiBlockCommMetaData>
642 (m_cy, m_spectral_domain_y, m_cx, IntVect(0), m_dtos_x2y);
643 m_cmd_y2x = std::make_unique<MultiBlockCommMetaData>
644 (m_cx, m_spectral_domain_x, m_cy, IntVect(0), m_dtos_y2x);
645 }
646#endif
647#if (AMREX_SPACEDIM == 3)
648 if (! m_cz.empty() ) {
649 if (m_slab_decomp) {
650 // comm meta-data between xy and z phases
651 m_cmd_x2z = std::make_unique<MultiBlockCommMetaData>
652 (m_cz, m_spectral_domain_z, m_cx, IntVect(0), m_dtos_x2z);
653 m_cmd_z2x = std::make_unique<MultiBlockCommMetaData>
654 (m_cx, m_spectral_domain_x, m_cz, IntVect(0), m_dtos_z2x);
655 } else {
656 // comm meta-data between y and z phases
657 m_cmd_y2z = std::make_unique<MultiBlockCommMetaData>
658 (m_cz, m_spectral_domain_z, m_cy, IntVect(0), m_dtos_y2z);
659 m_cmd_z2y = std::make_unique<MultiBlockCommMetaData>
660 (m_cy, m_spectral_domain_y, m_cz, IntVect(0), m_dtos_z2y);
661 }
662 }
663#endif
664
665 //
666 // make plans
667 //
668
669 if (myproc < m_rx.size())
670 {
671 if constexpr (C) {
672 int ndims = m_slab_decomp ? 2 : 1;
673 std::tie(m_fft_fwd_x, m_fft_bwd_x) = make_c2c_plans(m_cx, ndims);
674 } else {
675 Box const& box = m_rx.box(myproc);
676 auto* pr = m_rx[myproc].dataPtr();
677 auto* pc = (typename Plan<T>::VendorComplex *)m_cx[myproc].dataPtr();
678#ifdef AMREX_USE_SYCL
679 m_fft_fwd_x.template init_r2c<Direction::forward>(box, pr, pc, m_slab_decomp, ncomp);
680 m_fft_bwd_x = m_fft_fwd_x;
681#else
682 if constexpr (D == Direction::both || D == Direction::forward) {
683 m_fft_fwd_x.template init_r2c<Direction::forward>(box, pr, pc, m_slab_decomp, ncomp);
684 }
685 if constexpr (D == Direction::both || D == Direction::backward) {
686 m_fft_bwd_x.template init_r2c<Direction::backward>(box, pr, pc, m_slab_decomp, ncomp);
687 }
688#endif
689 }
690 }
691
692#if (AMREX_SPACEDIM >= 2)
693 if (! m_cy.empty()) {
694 std::tie(m_fft_fwd_y, m_fft_bwd_y) = make_c2c_plans(m_cy,1);
695 }
696#endif
697#if (AMREX_SPACEDIM == 3)
698 if (! m_cz.empty()) {
699 std::tie(m_fft_fwd_z, m_fft_bwd_z) = make_c2c_plans(m_cz,1);
700 }
701#endif
702 }
703 else // do fft in all dimensions at the same time
704 {
705 if constexpr (C) {
706 m_data_1 = detail::make_mfs_share(m_rx, m_cx);
707 std::tie(m_fft_fwd_x, m_fft_bwd_x) = make_c2c_plans(m_cx,AMREX_SPACEDIM);
708 } else {
709 m_data_1 = detail::make_mfs_share(m_rx, m_rx);
710 m_data_2 = detail::make_mfs_share(m_cx, m_cx);
711
712 auto const& len = m_real_domain.length();
713 auto* pr = (void*)m_rx[0].dataPtr();
714 auto* pc = (void*)m_cx[0].dataPtr();
715#ifdef AMREX_USE_SYCL
716 m_fft_fwd_x.template init_r2c<Direction::forward>(len, pr, pc, false, ncomp);
717 m_fft_bwd_x = m_fft_fwd_x;
718#else
719 if constexpr (D == Direction::both || D == Direction::forward) {
720 m_fft_fwd_x.template init_r2c<Direction::forward>(len, pr, pc, false, ncomp);
721 }
722 if constexpr (D == Direction::both || D == Direction::backward) {
723 m_fft_bwd_x.template init_r2c<Direction::backward>(len, pr, pc, false, ncomp);
724 }
725#endif
726 }
727 }
728}
729
730template <typename T, Direction D, bool C>
// Convenience constructor: builds a zero-based Box of the given size and
// delegates to the Box-based constructor.
731R2C<T,D,C>::R2C (std::array<int,AMREX_SPACEDIM> const& domain_size, Info const& info)
732 : R2C<T,D,C>(Box(IntVect(0),IntVect(domain_size)-1), info)
733{}
734
735template <typename T, Direction D, bool C>
737{
738 if (m_fft_bwd_x.plan != m_fft_fwd_x.plan) {
739 m_fft_bwd_x.destroy();
740 }
741 if (m_fft_bwd_y.plan != m_fft_fwd_y.plan) {
742 m_fft_bwd_y.destroy();
743 }
744 if (m_fft_bwd_z.plan != m_fft_fwd_z.plan) {
745 m_fft_bwd_z.destroy();
746 }
747 m_fft_fwd_x.destroy();
748 m_fft_fwd_y.destroy();
749 m_fft_fwd_z.destroy();
750 if (m_fft_bwd_x_half.plan != m_fft_fwd_x_half.plan) {
751 m_fft_bwd_x_half.destroy();
752 }
753 m_fft_fwd_x_half.destroy();
754}
755
756template <typename T, Direction D, bool C>
757std::pair<BoxArray,DistributionMapping>
758R2C<T,D,C>::make_layout_from_local_domain (std::array<int,AMREX_SPACEDIM> const& local_start,
759 std::array<int,AMREX_SPACEDIM> const& local_size)
760{
761 IntVect lo(local_start);
762 IntVect len(local_size);
763 Box bx(lo, lo+len-1);
764#ifdef AMREX_USE_MPI
766 MPI_Allgather(&bx, 1, ParallelDescriptor::Mpi_typemap<Box>::type(),
767 allboxes.data(), 1, ParallelDescriptor::Mpi_typemap<Box>::type(),
769 Vector<int> pmap;
770 pmap.reserve(allboxes.size());
771 for (int i = 0; i < allboxes.size(); ++i) {
772 if (allboxes[i].ok()) {
773 pmap.push_back(ParallelContext::local_to_global_rank(i));
774 }
775 }
776 allboxes.erase(std::remove_if(allboxes.begin(), allboxes.end(),
777 [=] (Box const& b) { return b.isEmpty(); }),
778 allboxes.end());
779 BoxList bl(std::move(allboxes));
780 return std::make_pair(BoxArray(std::move(bl)), DistributionMapping(std::move(pmap)));
781#else
782 return std::make_pair(BoxArray(bx), DistributionMapping(Vector<int>({0})));
783#endif
784}
785
786template <typename T, Direction D, bool C>
787void R2C<T,D,C>::setLocalDomain (std::array<int,AMREX_SPACEDIM> const& local_start,
788 std::array<int,AMREX_SPACEDIM> const& local_size)
789{
790 auto const& [ba, dm] = make_layout_from_local_domain(local_start, local_size);
791 m_raw_mf = MF(ba, dm, m_rx.nComp(), 0, MFInfo().SetAlloc(false));
792}
793
794template <typename T, Direction D, bool C>
795std::pair<std::array<int,AMREX_SPACEDIM>,std::array<int,AMREX_SPACEDIM>>
797{
798 m_raw_mf = MF(m_rx.boxArray(), m_rx.DistributionMap(), m_rx.nComp(), 0,
799 MFInfo{}.SetAlloc(false));
800
801 auto const myproc = ParallelContext::MyProcSub();
802 if (myproc < m_rx.size()) {
803 Box const& box = m_rx.box(myproc);
804 return std::make_pair(box.smallEnd().toArray(),
805 box.length().toArray());
806 } else {
807 return std::make_pair(std::array<int,AMREX_SPACEDIM>{AMREX_D_DECL(0,0,0)},
808 std::array<int,AMREX_SPACEDIM>{AMREX_D_DECL(0,0,0)});
809 }
810}
811
812template <typename T, Direction D, bool C>
813void R2C<T,D,C>::setLocalSpectralDomain (std::array<int,AMREX_SPACEDIM> const& local_start,
814 std::array<int,AMREX_SPACEDIM> const& local_size)
815{
816 auto const& [ba, dm] = make_layout_from_local_domain(local_start, local_size);
817 m_raw_cmf = cMF(ba, dm, m_rx.nComp(), 0, MFInfo().SetAlloc(false));
818}
819
820template <typename T, Direction D, bool C>
821std::pair<std::array<int,AMREX_SPACEDIM>,std::array<int,AMREX_SPACEDIM>>
823{
824 auto const ncomp = m_info.batch_size;
825 auto const& [ba, dm] = getSpectralDataLayout();
826
827 m_raw_cmf = cMF(ba, dm, ncomp, 0, MFInfo{}.SetAlloc(false));
828
829 auto const myproc = ParallelContext::MyProcSub();
830 if (myproc < m_raw_cmf.size()) {
831 Box const& box = m_raw_cmf.box(myproc);
832 return std::make_pair(box.smallEnd().toArray(), box.length().toArray());
833 } else {
834 return std::make_pair(std::array<int,AMREX_SPACEDIM>{AMREX_D_DECL(0,0,0)},
835 std::array<int,AMREX_SPACEDIM>{AMREX_D_DECL(0,0,0)});
836 }
837}
838
839template <typename T, Direction D, bool C>
841{
842 if (C || m_r2c_sub) { amrex::Abort("R2C: OpenBC not supported with reduced dimensions or complex inputs"); }
843
844#if (AMREX_SPACEDIM == 3)
845 if (m_do_alld_fft) { return; }
846
847 auto const ncomp = m_info.batch_size;
848
849 if (m_slab_decomp && ! m_fft_fwd_x_half.defined) {
850 auto* fab = detail::get_fab(m_rx);
851 if (fab) {
852 Box bottom_half = m_real_domain;
853 bottom_half.growHi(2,-m_real_domain.length(2)/2);
854 Box box = fab->box() & bottom_half;
855 if (box.ok()) {
856 auto* pr = fab->dataPtr();
857 auto* pc = (typename Plan<T>::VendorComplex *)
858 detail::get_fab(m_cx)->dataPtr();
859#ifdef AMREX_USE_SYCL
860 m_fft_fwd_x_half.template init_r2c<Direction::forward>
861 (box, pr, pc, m_slab_decomp, ncomp);
862 m_fft_bwd_x_half = m_fft_fwd_x_half;
863#else
864 if constexpr (D == Direction::both || D == Direction::forward) {
865 m_fft_fwd_x_half.template init_r2c<Direction::forward>
866 (box, pr, pc, m_slab_decomp, ncomp);
867 }
868 if constexpr (D == Direction::both || D == Direction::backward) {
869 m_fft_bwd_x_half.template init_r2c<Direction::backward>
870 (box, pr, pc, m_slab_decomp, ncomp);
871 }
872#endif
873 }
874 }
875 } // else todo
876
877 if (m_cmd_x2z && ! m_cmd_x2z_half) {
878 Box bottom_half = m_spectral_domain_z;
879 // Note that z-direction's index is 0 because we z is the
880 // unit-stride direction here.
881 bottom_half.growHi(0,-m_spectral_domain_z.length(0)/2);
882 m_cmd_x2z_half = std::make_unique<MultiBlockCommMetaData>
883 (m_cz, bottom_half, m_cx, IntVect(0), m_dtos_x2z);
884 }
885
886 if (m_cmd_z2x && ! m_cmd_z2x_half) {
887 Box bottom_half = m_spectral_domain_x;
888 bottom_half.growHi(2,-m_spectral_domain_x.length(2)/2);
889 m_cmd_z2x_half = std::make_unique<MultiBlockCommMetaData>
890 (m_cx, bottom_half, m_cz, IntVect(0), m_dtos_z2x);
891 }
892#endif
893}
894
895template <typename T, Direction D, bool C>
896template <Direction DIR, std::enable_if_t<DIR == Direction::forward ||
897 DIR == Direction::both, int> >
// Forward transform of inmf into the internal spectral containers.
// Pipeline: copy input into m_rx, run the x-direction FFT, then
// transpose-and-FFT through y and z as dictated by the decomposition.
898void R2C<T,D,C>::forward (MF const& inmf, int incomp)
899{
900 BL_PROFILE("FFT::R2C::forward(in)");
901
902 auto const ncomp = m_info.batch_size;
903
// Dimension-reduced case: delegate to the sub-solver, staging through a
// temporary when inmf's ghost cells make a direct alias unsafe.
904 if (m_r2c_sub) {
905 if (m_sub_helper.ghost_safe(inmf.nGrowVect())) {
906 m_r2c_sub->forward(m_sub_helper.make_alias_mf(inmf), incomp);
907 } else {
908 MF tmp(inmf.boxArray(), inmf.DistributionMap(), ncomp, 0);
909 tmp.LocalCopy(inmf, incomp, 0, ncomp, IntVect(0));
910 m_r2c_sub->forward(m_sub_helper.make_alias_mf(tmp),0);
911 }
912 return;
913 }
914
// Redistribute the input into the solver's layout unless it is m_rx itself.
915 if (&m_rx != &inmf) {
916 m_rx.ParallelCopy(inmf, incomp, 0, ncomp);
917 }
918
// Single-rank case: one multi-dimensional FFT does everything.
919 if (m_do_alld_fft) {
920 if constexpr (C) {
921 m_fft_fwd_x.template compute_c2c<Direction::forward>();
922 } else {
923 m_fft_fwd_x.template compute_r2c<Direction::forward>();
924 }
925 return;
926 }
927
// x-direction pass; the half-domain plan is used in open-boundary mode.
928 auto& fft_x = m_openbc_half ? m_fft_fwd_x_half : m_fft_fwd_x;
929 if constexpr (C) {
930 fft_x.template compute_c2c<Direction::forward>();
931 } else {
932 fft_x.template compute_r2c<Direction::forward>();
933 }
934
935 if ( m_cmd_x2y) {
936 ParallelCopy(m_cy, m_cx, *m_cmd_x2y, 0, 0, ncomp, m_dtos_x2y);
937 }
938 m_fft_fwd_y.template compute_c2c<Direction::forward>();
939
940 if ( m_cmd_y2z) {
941 ParallelCopy(m_cz, m_cy, *m_cmd_y2z, 0, 0, ncomp, m_dtos_y2z);
942 }
943#if (AMREX_SPACEDIM == 3)
// Slab decomposition goes directly x->z, skipping the y transpose.
944 else if ( m_cmd_x2z) {
945 if (m_openbc_half) {
946 NonLocalBC::PackComponents components{};
947 components.n_components = ncomp;
// NOTE(review): the listing this file was recovered from dropped a line
// here (presumably the declaration of `packing` used below); do not edit
// this region without the original source.
949 {components, m_dtos_x2z};
950 auto handler = ParallelCopy_nowait(m_cz, m_cx, *m_cmd_x2z_half, packing);
951
952 Box upper_half = m_spectral_domain_z;
953 // Note that z-direction's index is 0 because z is the
954 // unit-stride direction here.
955 upper_half.growLo (0,-m_spectral_domain_z.length(0)/2);
956 m_cz.setVal(0, upper_half, 0, ncomp);
957
958 ParallelCopy_finish(m_cz, std::move(handler), *m_cmd_x2z_half, packing);
959 } else {
960 ParallelCopy(m_cz, m_cx, *m_cmd_x2z, 0, 0, ncomp, m_dtos_x2z);
961 }
962 }
963#endif
964 m_fft_fwd_z.template compute_c2c<Direction::forward>();
965}
966
967template <typename T, Direction D, bool C>
968template <typename FA, typename RT>
969std::pair<std::unique_ptr<char,DataDeleter>,std::size_t>
970R2C<T,D,C>::install_raw_ptr (FA& fa, RT const* p)
971{
972 AMREX_ALWAYS_ASSERT(!fa.empty());
973
974 using FAB = typename FA::FABType::value_type;
975 using T_FAB = typename FAB::value_type;
976 static_assert(sizeof(T_FAB) == sizeof(RT));
977
978 auto const ncomp = m_info.batch_size;
979 auto const& ia = fa.IndexArray();
980
981 T_FAB* pp = nullptr;
982 std::size_t sz = 0;
983
984 if ( ! ia.empty() ) {
985 int K = ia[0];
986 Box const& box = fa.fabbox(K);
987 if ((alignof(T_FAB) == alignof(RT)) || amrex::is_aligned(p,alignof(T_FAB))) {
988 pp = (T_FAB*)p;
989 } else {
990 sz = sizeof(T_FAB) * box.numPts() * ncomp;
991 pp = (T_FAB*) The_Arena()->alloc(sz);
992 }
993 fa.setFab(K, FAB(box,ncomp,pp));
994 }
995
996 if (sz == 0) {
997 return std::make_pair(std::unique_ptr<char,DataDeleter>{},std::size_t(0));
998 } else {
999 return std::make_pair(std::unique_ptr<char,DataDeleter>
1000 {(char*)pp,DataDeleter{The_Arena()}}, sz);
1001 }
1002}
1003
1004
1005template <typename T, Direction D, bool C>
1006template <typename RT, typename CT, Direction DIR, bool CP,
1007 std::enable_if_t<(DIR == Direction::forward ||
1008 DIR == Direction::both)
1009 && ((sizeof(RT)*2 == sizeof(CT) && !CP) ||
1010 (sizeof(RT) == sizeof(CT) && CP)), int> >
1011void R2C<T,D,C>::forward (RT const* in, CT* out)
1012{
1013 auto [rdata, rsz] = install_raw_ptr(m_raw_mf, in);
1014 auto [cdata, csz] = install_raw_ptr(m_raw_cmf, out);
1015
1016 if (rsz > 0) {
1017 Gpu::dtod_memcpy_async(rdata.get(),in,rsz);
1019 }
1020
1021 forward(m_raw_mf, m_raw_cmf);
1022
1023 if (csz) {
1024 Gpu::dtod_memcpy_async(out,cdata.get(),csz);
1026 }
1027}
1028
1029template <typename T, Direction D, bool C>
1030template <Direction DIR, std::enable_if_t<DIR == Direction::both, int> >
// Backward transform into outmf from the stored spectral data, with no
// ghost cells and non-periodic boundary handling.
1031void R2C<T,D,C>::backward (MF& outmf, int outcomp)
1032{
1033 backward_doit(outmf, IntVect(0), Periodicity::NonPeriodic(), outcomp);
1034}
1035
1036template <typename T, Direction D, bool C>
// Reverse of forward(): z-, y-, then x-direction inverse FFTs with the
// corresponding inverse transposes, finishing with a ParallelCopy of the
// real-space result into outmf (ghost fill clipped to outmf's ghosts).
1037void R2C<T,D,C>::backward_doit (MF& outmf, IntVect const& ngout,
1038 Periodicity const& period, int outcomp)
1039{
1040 BL_PROFILE("FFT::R2C::backward(out)");
1041
1042 auto const ncomp = m_info.batch_size;
1043
// Dimension-reduced case: delegate to the sub-solver, staging through a
// temporary when outmf's ghost cells make a direct alias unsafe.
1044 if (m_r2c_sub) {
1045 if (m_sub_helper.ghost_safe(outmf.nGrowVect())) {
1046 MF submf = m_sub_helper.make_alias_mf(outmf);
1047 IntVect const& subngout = m_sub_helper.make_iv(ngout);
1048 Periodicity const& subperiod = m_sub_helper.make_periodicity(period);
1049 m_r2c_sub->backward_doit(submf, subngout, subperiod, outcomp);
1050 } else {
1051 MF tmp(outmf.boxArray(), outmf.DistributionMap(), ncomp,
1052 m_sub_helper.make_safe_ghost(outmf.nGrowVect()));
1053 this->backward_doit(tmp, ngout, period, 0);
1054 outmf.LocalCopy(tmp, 0, outcomp, ncomp, tmp.nGrowVect());
1055 }
1056 return;
1057 }
1058
// Single-rank case: one multi-dimensional inverse FFT does everything.
1059 if (m_do_alld_fft) {
1060 if constexpr (C) {
1061 m_fft_bwd_x.template compute_c2c<Direction::backward>();
1062 } else {
1063 m_fft_bwd_x.template compute_r2c<Direction::backward>();
1064 }
1065 outmf.ParallelCopy(m_rx, 0, outcomp, ncomp, IntVect(0),
1066 amrex::elemwiseMin(ngout,outmf.nGrowVect()), period);
1067 return;
1068 }
1069
1070 m_fft_bwd_z.template compute_c2c<Direction::backward>();
1071 if ( m_cmd_z2y) {
1072 ParallelCopy(m_cy, m_cz, *m_cmd_z2y, 0, 0, ncomp, m_dtos_z2y);
1073 }
1074#if (AMREX_SPACEDIM == 3)
// Slab decomposition goes directly z->x; the half-domain copier is used
// in open-boundary mode.
1075 else if ( m_cmd_z2x) {
1076 auto const& cmd = m_openbc_half ? m_cmd_z2x_half : m_cmd_z2x;
1077 ParallelCopy(m_cx, m_cz, *cmd, 0, 0, ncomp, m_dtos_z2x);
1078 }
1079#endif
1080
1081 m_fft_bwd_y.template compute_c2c<Direction::backward>();
1082 if ( m_cmd_y2x) {
1083 ParallelCopy(m_cx, m_cy, *m_cmd_y2x, 0, 0, ncomp, m_dtos_y2x);
1084 }
1085
// Final x-direction inverse transform (complex-to-real unless C).
1086 auto& fft_x = m_openbc_half ? m_fft_bwd_x_half : m_fft_bwd_x;
1087 if constexpr (C) {
1088 fft_x.template compute_c2c<Direction::backward>();
1089 } else {
1090 fft_x.template compute_r2c<Direction::backward>();
1091 }
1092 outmf.ParallelCopy(m_rx, 0, outcomp, ncomp, IntVect(0),
1093 amrex::elemwiseMin(ngout,outmf.nGrowVect()), period);
1094}
1095
1096template <typename T, Direction D, bool C>
1097template <typename CT, typename RT, Direction DIR, bool CP,
1098 std::enable_if_t<(DIR == Direction::backward ||
1099 DIR == Direction::both)
1100 && ((sizeof(RT)*2 == sizeof(CT) && !CP) ||
1101 (sizeof(RT) == sizeof(CT) && CP)), int> >
// Raw-pointer backward transform: mirror image of the raw-pointer forward.
1102void R2C<T,D,C>::backward (CT const* in, RT* out)
1103{
1104 auto [rdata, rsz] = install_raw_ptr(m_raw_mf, out);
1105 auto [cdata, csz] = install_raw_ptr(m_raw_cmf, in);
1106
// csz > 0 means `in` was misaligned; copy it into the staging buffer.
// NOTE(review): the recovered listing dropped a line inside this block --
// confirm against the original source.
1107 if (csz) {
1108 Gpu::dtod_memcpy_async(cdata.get(),in,csz);
1110 }
1111
1112 backward(m_raw_cmf, m_raw_mf);
1113
// Copy the real-space result out of the staging buffer if one was used.
// NOTE(review): a line is missing inside this block as well.
1114 if (rsz > 0) {
1115 Gpu::dtod_memcpy_async(out,rdata.get(),rsz);
1117 }
1118}
1119
1120template <typename T, Direction D, bool C>
1121std::pair<Plan<T>, Plan<T>>
1122R2C<T,D,C>::make_c2c_plans (cMF& inout, int ndims) const
1123{
1124 Plan<T> fwd;
1125 Plan<T> bwd;
1126
1127 auto* fab = detail::get_fab(inout);
1128 if (!fab) { return {fwd, bwd};}
1129
1130 Box const& box = fab->box();
1131 auto* pio = (typename Plan<T>::VendorComplex *)fab->dataPtr();
1132
1133 auto const ncomp = m_info.batch_size;
1134
1135#ifdef AMREX_USE_SYCL
1136 fwd.template init_c2c<Direction::forward>(box, pio, ncomp, ndims);
1137 bwd = fwd;
1138#else
1139 if constexpr (D == Direction::both || D == Direction::forward) {
1140 fwd.template init_c2c<Direction::forward>(box, pio, ncomp, ndims);
1141 }
1142 if constexpr (D == Direction::both || D == Direction::backward) {
1143 bwd.template init_c2c<Direction::backward>(box, pio, ncomp, ndims);
1144 }
1145#endif
1146
1147 return {fwd, bwd};
1148}
1149
1150template <typename T, Direction D, bool C>
1151template <typename F>
1152void R2C<T,D,C>::post_forward_doit_0 (F const& post_forward)
1153{
1154 if (m_info.twod_mode || m_info.batch_size > 1) {
1155 amrex::Abort("xxxxx todo: post_forward");
1156#if (AMREX_SPACEDIM > 1)
1157 } else if (m_r2c_sub) {
1158 // We need to pass the originally ordered indices to post_forward.
1159#if (AMREX_SPACEDIM == 2)
1160 // The original domain is (1,ny). The sub domain is (ny,1).
1161 m_r2c_sub->post_forward_doit_1
1162 ([=] AMREX_GPU_DEVICE (int i, int, int, auto& sp)
1163 {
1164 post_forward(0, i, 0, sp);
1165 });
1166#else
1167 if (m_real_domain.length(0) == 1 && m_real_domain.length(1) == 1) {
1168 // Original domain: (1, 1, nz). Sub domain: (nz, 1, 1)
1169 m_r2c_sub->post_forward_doit_1
1170 ([=] AMREX_GPU_DEVICE (int i, int, int, auto& sp)
1171 {
1172 post_forward(0, 0, i, sp);
1173 });
1174 } else if (m_real_domain.length(0) == 1 && m_real_domain.length(2) == 1) {
1175 // Original domain: (1, ny, 1). Sub domain: (ny, 1, 1)
1176 m_r2c_sub->post_forward_doit_1
1177 ([=] AMREX_GPU_DEVICE (int i, int, int, auto& sp)
1178 {
1179 post_forward(0, i, 0, sp);
1180 });
1181 } else if (m_real_domain.length(0) == 1) {
1182 // Original domain: (1, ny, nz). Sub domain: (ny, nz, 1)
1183 m_r2c_sub->post_forward_doit_1
1184 ([=] AMREX_GPU_DEVICE (int i, int j, int, auto& sp)
1185 {
1186 post_forward(0, i, j, sp);
1187 });
1188 } else if (m_real_domain.length(1) == 1) {
1189 // Original domain: (nx, 1, nz). Sub domain: (nx, nz, 1)
1190 m_r2c_sub->post_forward_doit_1
1191 ([=] AMREX_GPU_DEVICE (int i, int j, int, auto& sp)
1192 {
1193 post_forward(i, 0, j, sp);
1194 });
1195 } else {
1196 amrex::Abort("R2c::post_forward_doit_0: how did this happen?");
1197 }
1198#endif
1199#endif
1200 } else {
1201 this->post_forward_doit_1(post_forward);
1202 }
1203}
1204
1205template <typename T, Direction D, bool C>
1206template <typename F>
// Apply the user's callback to every stored spectral coefficient. The data
// may live in m_cz (ordering z,x,y), m_cy (y,x,z), or m_cx (x,y,z); the
// lambda arguments are swizzled so post_forward always receives indices in
// the original (x,y,z) order.
1207void R2C<T,D,C>::post_forward_doit_1 (F const& post_forward)
1208{
1209 if (m_info.twod_mode || m_info.batch_size > 1) {
1210 amrex::Abort("xxxxx todo: post_forward");
1211 } else if (m_r2c_sub) {
// post_forward_doit_0 handles dimension-reduced solvers; this function
// must only be reached on the innermost solver.
1212 amrex::Abort("R2C::post_forward_doit_1: How did this happen?");
1213 } else {
1214 if ( ! m_cz.empty()) {
1215 auto* spectral_fab = detail::get_fab(m_cz);
1216 if (spectral_fab) {
1217 auto const& a = spectral_fab->array(); // m_cz's ordering is z,x,y
1218 ParallelForOMP(spectral_fab->box(),
1219 [=] AMREX_GPU_DEVICE (int iz, int jx, int ky)
1220 {
1221 post_forward(jx,ky,iz,a(iz,jx,ky));
1222 });
1223 }
1224 } else if ( ! m_cy.empty()) {
1225 auto* spectral_fab = detail::get_fab(m_cy);
1226 if (spectral_fab) {
1227 auto const& a = spectral_fab->array(); // m_cy's ordering is y,x,z
1228 ParallelForOMP(spectral_fab->box(),
1229 [=] AMREX_GPU_DEVICE (int iy, int jx, int k)
1230 {
1231 post_forward(jx,iy,k,a(iy,jx,k));
1232 });
1233 }
1234 } else {
// No transposes were performed; m_cx is already in (x,y,z) order.
1235 auto* spectral_fab = detail::get_fab(m_cx);
1236 if (spectral_fab) {
1237 auto const& a = spectral_fab->array();
1238 ParallelForOMP(spectral_fab->box(),
1239 [=] AMREX_GPU_DEVICE (int i, int j, int k)
1240 {
1241 post_forward(i,j,k,a(i,j,k));
1242 });
1243 }
1244 }
1245 }
1246}
1247
1248template <typename T, Direction D, bool C>
1250{
1251#if (AMREX_SPACEDIM == 3)
1252 if (m_info.twod_mode) {
1253 return T(1)/T(Long(m_real_domain.length(0)) *
1254 Long(m_real_domain.length(1)));
1255 } else
1256#endif
1257 {
1258 return T(1)/T(m_real_domain.numPts());
1259 }
1260}
1261
1262template <typename T, Direction D, bool C>
1263template <Direction DIR, std::enable_if_t<DIR == Direction::forward ||
1264 DIR == Direction::both, int> >
1265std::pair<typename R2C<T,D,C>::cMF *, IntVect>
1267{
1268#if (AMREX_SPACEDIM > 1)
1269 if (m_r2c_sub) {
1270 auto [cmf, order] = m_r2c_sub->getSpectralData();
1271 return std::make_pair(cmf, m_sub_helper.inverse_order(order));
1272 } else
1273#endif
1274 if (!m_cz.empty()) {
1275 return std::make_pair(const_cast<cMF*>(&m_cz), IntVect{AMREX_D_DECL(2,0,1)});
1276 } else if (!m_cy.empty()) {
1277 return std::make_pair(const_cast<cMF*>(&m_cy), IntVect{AMREX_D_DECL(1,0,2)});
1278 } else {
1279 return std::make_pair(const_cast<cMF*>(&m_cx), IntVect{AMREX_D_DECL(0,1,2)});
1280 }
1281}
1282
1283template <typename T, Direction D, bool C>
1284template <Direction DIR, std::enable_if_t<DIR == Direction::forward ||
1285 DIR == Direction::both, int> >
1286void R2C<T,D,C>::forward (MF const& inmf, cMF& outmf, int incomp, int outcomp)
1287{
1288 BL_PROFILE("FFT::R2C::forward(inout)");
1289
1290 auto const ncomp = m_info.batch_size;
1291
1292 if (m_r2c_sub)
1293 {
1294 bool inmf_safe = m_sub_helper.ghost_safe(inmf.nGrowVect());
1295 MF inmf_sub, inmf_tmp;
1296 int incomp_sub;
1297 if (inmf_safe) {
1298 inmf_sub = m_sub_helper.make_alias_mf(inmf);
1299 incomp_sub = incomp;
1300 } else {
1301 inmf_tmp.define(inmf.boxArray(), inmf.DistributionMap(), ncomp, 0);
1302 inmf_tmp.LocalCopy(inmf, incomp, 0, ncomp, IntVect(0));
1303 inmf_sub = m_sub_helper.make_alias_mf(inmf_tmp);
1304 incomp_sub = 0;
1305 }
1306
1307 bool outmf_safe = m_sub_helper.ghost_safe(outmf.nGrowVect());
1308 cMF outmf_sub, outmf_tmp;
1309 int outcomp_sub;
1310 if (outmf_safe) {
1311 outmf_sub = m_sub_helper.make_alias_mf(outmf);
1312 outcomp_sub = outcomp;
1313 } else {
1314 outmf_tmp.define(outmf.boxArray(), outmf.DistributionMap(), ncomp, 0);
1315 outmf_sub = m_sub_helper.make_alias_mf(outmf_tmp);
1316 outcomp_sub = 0;
1317 }
1318
1319 m_r2c_sub->forward(inmf_sub, outmf_sub, incomp_sub, outcomp_sub);
1320
1321 if (!outmf_safe) {
1322 outmf.LocalCopy(outmf_tmp, 0, outcomp, ncomp, IntVect(0));
1323 }
1324 }
1325 else
1326 {
1327 forward(inmf, incomp);
1328 if (!m_cz.empty()) { // m_cz's order (z,x,y) -> (x,y,z)
1329 RotateBwd dtos{};
1331 (outmf, m_spectral_domain_x, m_cz, IntVect(0), dtos);
1332 ParallelCopy(outmf, m_cz, cmd, 0, outcomp, ncomp, dtos);
1333 } else if (!m_cy.empty()) { // m_cy's order (y,x,z) -> (x,y,z)
1335 (outmf, m_spectral_domain_x, m_cy, IntVect(0), m_dtos_y2x);
1336 ParallelCopy(outmf, m_cy, cmd, 0, outcomp, ncomp, m_dtos_y2x);
1337 } else {
1338 outmf.ParallelCopy(m_cx, 0, outcomp, ncomp);
1339 }
1340 }
1341}
1342
1343template <typename T, Direction D, bool C>
1344template <Direction DIR, std::enable_if_t<DIR == Direction::backward ||
1345 DIR == Direction::both, int> >
1346void R2C<T,D,C>::backward (cMF const& inmf, MF& outmf, int incomp, int outcomp)
1347{
1348 backward_doit(inmf, outmf, IntVect(0), Periodicity::NonPeriodic(), incomp, outcomp);
1349}
1350
1351template <typename T, Direction D, bool C>
1352void R2C<T,D,C>::backward_doit (cMF const& inmf, MF& outmf, IntVect const& ngout,
1353 Periodicity const& period, int incomp, int outcomp)
1354{
1355 BL_PROFILE("FFT::R2C::backward(inout)");
1356
1357 auto const ncomp = m_info.batch_size;
1358
1359 if (m_r2c_sub)
1360 {
1361 bool inmf_safe = m_sub_helper.ghost_safe(inmf.nGrowVect());
1362 cMF inmf_sub, inmf_tmp;
1363 int incomp_sub;
1364 if (inmf_safe) {
1365 inmf_sub = m_sub_helper.make_alias_mf(inmf);
1366 incomp_sub = incomp;
1367 } else {
1368 inmf_tmp.define(inmf.boxArray(), inmf.DistributionMap(), ncomp, 0);
1369 inmf_tmp.LocalCopy(inmf, incomp, 0, ncomp, IntVect(0));
1370 inmf_sub = m_sub_helper.make_alias_mf(inmf_tmp);
1371 incomp_sub = 0;
1372 }
1373
1374 bool outmf_safe = m_sub_helper.ghost_safe(outmf.nGrowVect());
1375 MF outmf_sub, outmf_tmp;
1376 int outcomp_sub;
1377 if (outmf_safe) {
1378 outmf_sub = m_sub_helper.make_alias_mf(outmf);
1379 outcomp_sub = outcomp;
1380 } else {
1381 IntVect const& ngtmp = m_sub_helper.make_safe_ghost(outmf.nGrowVect());
1382 outmf_tmp.define(outmf.boxArray(), outmf.DistributionMap(), ncomp, ngtmp);
1383 outmf_sub = m_sub_helper.make_alias_mf(outmf_tmp);
1384 outcomp_sub = 0;
1385 }
1386
1387 IntVect const& subngout = m_sub_helper.make_iv(ngout);
1388 Periodicity const& subperiod = m_sub_helper.make_periodicity(period);
1389 m_r2c_sub->backward_doit(inmf_sub, outmf_sub, subngout, subperiod, incomp_sub, outcomp_sub);
1390
1391 if (!outmf_safe) {
1392 outmf.LocalCopy(outmf_tmp, 0, outcomp, ncomp, outmf_tmp.nGrowVect());
1393 }
1394 }
1395 else
1396 {
1397 if (!m_cz.empty()) { // (x,y,z) -> m_cz's order (z,x,y)
1398 RotateFwd dtos{};
1399 MultiBlockCommMetaData cmd
1400 (m_cz, m_spectral_domain_z, inmf, IntVect(0), dtos);
1401 ParallelCopy(m_cz, inmf, cmd, incomp, 0, ncomp, dtos);
1402 } else if (!m_cy.empty()) { // (x,y,z) -> m_cy's ordering (y,x,z)
1403 MultiBlockCommMetaData cmd
1404 (m_cy, m_spectral_domain_y, inmf, IntVect(0), m_dtos_x2y);
1405 ParallelCopy(m_cy, inmf, cmd, incomp, 0, ncomp, m_dtos_x2y);
1406 } else {
1407 m_cx.ParallelCopy(inmf, incomp, 0, ncomp);
1408 }
1409 backward_doit(outmf, ngout, period, outcomp);
1410 }
1411}
1412
1413template <typename T, Direction D, bool C>
1414std::pair<BoxArray,DistributionMapping>
1416{
1417#if (AMREX_SPACEDIM > 1)
1418 if (m_r2c_sub) {
1419 auto const& [ba, dm] = m_r2c_sub->getSpectralDataLayout();
1420 return std::make_pair(m_sub_helper.inverse_boxarray(ba), dm);
1421 }
1422#endif
1423
1424#if (AMREX_SPACEDIM == 3)
1425 if (!m_cz.empty()) {
1426 BoxList bl = m_cz.boxArray().boxList();
1427 for (auto& b : bl) {
1428 auto lo = b.smallEnd();
1429 auto hi = b.bigEnd();
1430 std::swap(lo[0], lo[1]);
1431 std::swap(lo[1], lo[2]);
1432 std::swap(hi[0], hi[1]);
1433 std::swap(hi[1], hi[2]);
1434 b.setSmall(lo);
1435 b.setBig(hi);
1436 }
1437 return std::make_pair(BoxArray(std::move(bl)), m_cz.DistributionMap());
1438 } else
1439#endif
1440#if (AMREX_SPACEDIM >= 2)
1441 if (!m_cy.empty()) {
1442 BoxList bl = m_cy.boxArray().boxList();
1443 for (auto& b : bl) {
1444 auto lo = b.smallEnd();
1445 auto hi = b.bigEnd();
1446 std::swap(lo[0], lo[1]);
1447 std::swap(hi[0], hi[1]);
1448 b.setSmall(lo);
1449 b.setBig(hi);
1450 }
1451 return std::make_pair(BoxArray(std::move(bl)), m_cy.DistributionMap());
1452 } else
1453#endif
1454 {
1455 return std::make_pair(m_cx.boxArray(), m_cx.DistributionMap());
1456 }
1457}
1458
1460template <typename T = Real, FFT::Direction D = FFT::Direction::both>
1462
1463}
1464
1465#endif
#define BL_PROFILE(a)
Definition AMReX_BLProfiler.H:551
#define AMREX_ALWAYS_ASSERT_WITH_MESSAGE(EX, MSG)
Definition AMReX_BLassert.H:49
#define AMREX_ALWAYS_ASSERT(EX)
Definition AMReX_BLassert.H:50
#define AMREX_GPU_DEVICE
Definition AMReX_GpuQualifiers.H:18
amrex::ParmParse pp
Input file parser instance for the given namespace.
Definition AMReX_HypreIJIface.cpp:15
#define AMREX_D_DECL(a, b, c)
Definition AMReX_SPACE.H:171
virtual void * alloc(std::size_t sz)=0
A collection of Boxes stored in an Array.
Definition AMReX_BoxArray.H:568
A class for managing a List of Boxes that share a common IndexType. This class implements operations ...
Definition AMReX_BoxList.H:52
BoxList & shift(int dir, int nzones)
Applies Box::shift(int,int) to each Box in the BoxList.
Definition AMReX_BoxList.cpp:565
__host__ __device__ const IntVectND< dim > & bigEnd() const &noexcept
Return the inclusive upper bound of the box.
Definition AMReX_Box.H:123
__host__ __device__ Long numPts() const noexcept
Return the number of points contained in the BoxND.
Definition AMReX_Box.H:356
__host__ __device__ IntVectND< dim > length() const noexcept
Return the length of the BoxND.
Definition AMReX_Box.H:154
__host__ __device__ int shortside(int &dir) const noexcept
Return length of shortest side. dir is modified to give direction with shortest side: 0....
Definition AMReX_Box.H:437
__host__ __device__ IntVectND< dim > size() const noexcept
Return the length of the BoxND.
Definition AMReX_Box.H:147
__host__ __device__ BoxND & growLo(int idir, int n_cell=1) noexcept
Grow the BoxND on the low end by n_cell cells in direction idir. NOTE: n_cell negative shrinks the Bo...
Definition AMReX_Box.H:662
__host__ __device__ IndexTypeND< dim > ixType() const noexcept
Return the indexing type.
Definition AMReX_Box.H:135
__host__ __device__ BoxND & growHi(int idir, int n_cell=1) noexcept
Grow the BoxND on the high end by n_cell cells in direction idir. NOTE: n_cell negative shrinks the B...
Definition AMReX_Box.H:673
__host__ __device__ const IntVectND< dim > & smallEnd() const &noexcept
Return the inclusive lower bound of the box.
Definition AMReX_Box.H:111
Calculates the distribution of FABs to MPI processes.
Definition AMReX_DistributionMapping.H:43
Long size() const noexcept
Length of the underlying processor map.
Definition AMReX_DistributionMapping.H:129
Convolution-based solver for open boundary conditions using Green's functions.
Definition AMReX_FFT_OpenBCSolver.H:24
3D Poisson solver for periodic, Dirichlet & Neumann boundaries in the first two dimensions,...
Definition AMReX_FFT_Poisson.H:187
Poisson solver for periodic, Dirichlet & Neumann boundaries using FFT.
Definition AMReX_FFT_Poisson.H:67
Parallel Discrete Fourier Transform.
Definition AMReX_FFT_R2C.H:47
std::conditional_t< C, cMF, std::conditional_t< std::is_same_v< T, Real >, MultiFab, FabArray< BaseFab< T > > > > MF
Definition AMReX_FFT_R2C.H:52
R2C & operator=(R2C const &)=delete
void forward(MF const &inmf, cMF &outmf, int incomp=0, int outcomp=0)
Forward transform.
Definition AMReX_FFT_R2C.H:1286
~R2C()
Definition AMReX_FFT_R2C.H:736
void setLocalDomain(std::array< int, 3 > const &local_start, std::array< int, 3 > const &local_size)
Set local domain.
Definition AMReX_FFT_R2C.H:787
R2C(Box const &domain, Info const &info=Info{})
Constructor.
Definition AMReX_FFT_R2C.H:489
void backward(cMF const &inmf, MF &outmf, int incomp=0, int outcomp=0)
Backward transform.
Definition AMReX_FFT_R2C.H:1346
R2C(R2C &&)=delete
void backward(MF &outmf, int outcomp=0)
Backward transform.
Definition AMReX_FFT_R2C.H:1031
void post_forward_doit_1(F const &post_forward)
CUDA-visible helper that redistributes and applies post_forward for the batched layout.
Definition AMReX_FFT_R2C.H:1207
T scalingFactor() const
Scaling factor. If the data goes through forward and then backward, the result multiplied by the scal...
Definition AMReX_FFT_R2C.H:1249
void post_forward_doit_0(F const &post_forward)
CUDA-visible hook that walks internal spectral data and applies post_forward.
Definition AMReX_FFT_R2C.H:1152
std::pair< std::array< int, 3 >, std::array< int, 3 > > getLocalDomain() const
Get local domain.
Definition AMReX_FFT_R2C.H:796
void forward(RT const *in, CT *out)
Forward transform.
Definition AMReX_FFT_R2C.H:1011
R2C(R2C const &)=delete
std::pair< BoxArray, DistributionMapping > getSpectralDataLayout() const
Get BoxArray and DistributionMapping for spectral data.
Definition AMReX_FFT_R2C.H:1415
std::pair< cMF *, IntVect > getSpectralData() const
Get the internal spectral data.
R2C(std::array< int, 3 > const &domain_size, Info const &info=Info{})
Constructor.
Definition AMReX_FFT_R2C.H:731
FabArray< BaseFab< GpuComplex< T > > > cMF
Definition AMReX_FFT_R2C.H:49
std::pair< std::array< int, 3 >, std::array< int, 3 > > getLocalSpectralDomain() const
Get local spectral domain.
Definition AMReX_FFT_R2C.H:822
void forwardThenBackward(MF const &inmf, MF &outmf, F const &post_forward, int incomp=0, int outcomp=0)
Forward and then backward transform.
Definition AMReX_FFT_R2C.H:202
void setLocalSpectralDomain(std::array< int, 3 > const &local_start, std::array< int, 3 > const &local_size)
Set local spectral domain.
Definition AMReX_FFT_R2C.H:813
void backward(CT const *in, RT *out)
Backward transform.
Definition AMReX_FFT_R2C.H:1102
void forward(MF const &inmf, int incomp=0)
Forward transform.
Definition AMReX_FFT_R2C.H:898
IntVect nGrowVect() const noexcept
Definition AMReX_FabArrayBase.H:80
int size() const noexcept
Return the number of FABs in the FabArray.
Definition AMReX_FabArrayBase.H:110
const DistributionMapping & DistributionMap() const noexcept
Return constant reference to associated DistributionMapping.
Definition AMReX_FabArrayBase.H:131
bool empty() const noexcept
Definition AMReX_FabArrayBase.H:89
Box fabbox(int K) const noexcept
Return the Kth FABs Box in the FabArray. That is, the region the Kth fab is actually defined on.
Definition AMReX_FabArrayBase.cpp:217
const BoxArray & boxArray() const noexcept
Return a constant reference to the BoxArray that defines the valid region associated with this FabArr...
Definition AMReX_FabArrayBase.H:95
void setFab(int boxno, std::unique_ptr< FAB > elem)
Explicitly set the Kth FAB in the FabArray to point to elem.
Definition AMReX_FabArray.H:2357
void ParallelCopy(const FabArray< FAB > &src, const Periodicity &period=Periodicity::NonPeriodic(), CpOp op=FabArrayBase::COPY)
Definition AMReX_FabArray.H:849
typename std::conditional_t< IsBaseFab< BaseFab< GpuComplex< T > > >::value, BaseFab< GpuComplex< T > >, FABType >::value_type value_type
Definition AMReX_FabArray.H:360
void define(const BoxArray &bxs, const DistributionMapping &dm, int nvar, int ngrow, const MFInfo &info=MFInfo(), const FabFactory< FAB > &factory=DefaultFabFactory< FAB >())
Define this FabArray identically to that performed by the constructor having an analogous function si...
Definition AMReX_FabArray.H:2173
void LocalCopy(FabArray< SFAB > const &src, int scomp, int dcomp, int ncomp, IntVect const &nghost)
Perform local copy of FabArray data.
Definition AMReX_FabArray.H:1954
A collection (stored as an array) of FArrayBox objects.
Definition AMReX_MultiFab.H:40
This provides length of period for periodic domains. 0 means it is not periodic in that direction....
Definition AMReX_Periodicity.H:17
static const Periodicity & NonPeriodic() noexcept
Definition AMReX_Periodicity.cpp:52
This class is a thin wrapper around std::vector. Unlike vector, Vector::operator[] provides bound che...
Definition AMReX_Vector.H:28
amrex_long Long
Definition AMReX_INT.H:30
void ParallelForOMP(T n, L const &f) noexcept
Performance-portable kernel launch function with optional OpenMP threading.
Definition AMReX_GpuLaunch.H:319
Arena * The_Arena()
Definition AMReX_Arena.cpp:805
int NProcs() noexcept
Definition AMReX_ParallelDescriptor.H:255
Definition AMReX_FFT_Helper.H:52
Direction
Definition AMReX_FFT_Helper.H:54
void dtod_memcpy_async(void *p_d_dst, const void *p_d_src, const std::size_t sz) noexcept
Definition AMReX_GpuDevice.H:449
void streamSynchronize() noexcept
Definition AMReX_GpuDevice.H:310
MPI_Comm CommunicatorSub() noexcept
sub-communicator for current frame
Definition AMReX_ParallelContext.H:70
int MyProcSub() noexcept
my sub-rank in current frame
Definition AMReX_ParallelContext.H:76
int local_to_global_rank(int rank) noexcept
translate between local rank and global rank
Definition AMReX_ParallelContext.H:95
int NProcsSub() noexcept
number of ranks in current frame
Definition AMReX_ParallelContext.H:74
BoxND< 3 > Box
Box is an alias for amrex::BoxND instantiated with AMREX_SPACEDIM.
Definition AMReX_BaseFwd.H:30
bool is_aligned(const void *p, std::size_t alignment) noexcept
Return whether the address p is aligned to alignment bytes.
Definition AMReX_Arena.H:39
BoxArray decompose(Box const &domain, int nboxes, Array< bool, 3 > const &decomp, bool no_overlap)
Decompose domain box into BoxArray.
Definition AMReX_BoxArray.cpp:1947
IntVectND< 3 > IntVect
IntVect is an alias for amrex::IntVectND instantiated with AMREX_SPACEDIM.
Definition AMReX_BaseFwd.H:33
void Abort(const std::string &msg)
Print out message to cerr and exit via abort().
Definition AMReX.cpp:240
void ParallelCopy(MF &dst, MF const &src, int scomp, int dcomp, int ncomp, IntVect const &ng_src=IntVect(0), IntVect const &ng_dst=IntVect(0), Periodicity const &period=Periodicity::NonPeriodic())
dst = src w/ MPI communication
Definition AMReX_FabArrayUtility.H:2019
__host__ __device__ constexpr T elemwiseMin(T const &a, T const &b) noexcept
Return the element-wise minimum of the given values for types like XDim3.
Definition AMReX_Algorithm.H:62
Definition AMReX_FFT_Helper.H:64
bool twod_mode
Definition AMReX_FFT_Helper.H:75
bool oned_mode
We might have a special twod_mode: nx or ny == 1 && nz > 1.
Definition AMReX_FFT_Helper.H:78
int batch_size
Batched FFT size. Only support in R2C, not R2X.
Definition AMReX_FFT_Helper.H:81
DomainStrategy domain_strategy
Domain composition strategy.
Definition AMReX_FFT_Helper.H:66
int nprocs
Max number of processes to use.
Definition AMReX_FFT_Helper.H:84
int pencil_threshold
Definition AMReX_FFT_Helper.H:70
Definition AMReX_FFT_Helper.H:188
std::conditional_t< std::is_same_v< float, T >, cuComplex, cuDoubleComplex > VendorComplex
Definition AMReX_FFT_Helper.H:192
FabArray memory allocation information.
Definition AMReX_FabArray.H:66
MFInfo & SetAlloc(bool a) noexcept
Definition AMReX_FabArray.H:73
This class specializes behaviour on local copies and unpacking receive buffers.
Definition AMReX_NonLocalBC.H:626
This is the index mapping based on the DTOS MultiBlockDestToSrc.
Definition AMReX_NonLocalBC.H:216
Contains information about which components take part of the data transaction.
Definition AMReX_NonLocalBC.H:539
int n_components
Definition AMReX_NonLocalBC.H:542
Communication datatype (note: this structure also works without MPI)
Definition AMReX_ccse-mpi.H:78