3#include <AMReX_Config.H>
9#if defined(AMREX_USE_CUDA)
34 using U = std::conditional_t<std::is_const_v<T>,
Long const,
Long>;
48template <
typename T,
template <
typename>
class V>
70 mat.resize(num_non_zeros);
105template <
typename C,
typename T,
template<
typename>
class AD,
template<
typename>
class AS,
106 std::enable_if_t<std::is_same_v<C,Gpu::HostToDevice> ||
107 std::is_same_v<C,Gpu::DeviceToHost> ||
108 std::is_same_v<C,Gpu::DeviceToDevice>,
int> = 0>
118 dst.
mat.resize(src.
mat.size());
136template <
typename T,
template <
typename>
class V>
139 if (nnz <= 0) {
return; }
143#if defined(AMREX_USE_CUDA) || defined(AMREX_USE_HIP)
148 constexpr int nthreads = 256;
153 auto nr =
int(nrows());
154 int nblocks = (nr + nwarps_per_block-1) / nwarps_per_block;
157 auto* pmat = mat.data();
158 auto* pcol = col_index.data();
159 auto* prow = row_offset.data();
162 auto* d_needs_fallback = needs_fallback.
data();
164 amrex::launch_global<nthreads><<<nblocks, nthreads, 0, stream>>>
168 int r =
int(blockIdx.x)*nwarps_per_block + wid;
171 Long const b = prow[r];
172 Long const e = prow[r+1];
173 auto const len =
int(e - b);
175 if (len <= 1)
return;
181 sorted = sorted && (pcol[b+i-1] <= pcol[b+i]);
183#if defined(AMREX_USE_CUDA)
184 if (__all_sync(0xffffffff, sorted)) {
return; }
186 if (__all(sorted)) {
return; }
192 if (len <= ITEMS_PER_WARP)
194#if defined(AMREX_USE_CUDA)
195 using WarpSort = cub::WarpMergeSort<Long, ITEMS_PER_THREAD, Gpu::Device::warp_size, T>;
196 __shared__
typename WarpSort::TempStorage temp_storage[nwarps_per_block];
197#elif defined(AMREX_USE_HIP)
198 using WarpSort = rocprim::warp_sort<Long, Gpu::Device::warp_size, T>;
199 __shared__
typename WarpSort::storage_type temp_storage[nwarps_per_block];
202 Long keys[ITEMS_PER_THREAD];
203 T values[ITEMS_PER_THREAD];
206 for (
int i = 0; i < ITEMS_PER_THREAD; ++i) {
207 int idx = lane * ITEMS_PER_THREAD + i;
209 keys[i] = pcol[b + idx];
210 values[i] = pmat[b + idx];
212 keys[i] = std::numeric_limits<Long>::max();
218 WarpSort{}.sort(keys, values, temp_storage[wid]),
219 WarpSort(temp_storage[wid]).Sort(
223 for (
int i = 0; i < ITEMS_PER_THREAD; ++i) {
224 int idx = lane * ITEMS_PER_THREAD + i;
226 pcol[b + idx] = keys[i];
227 pmat[b + idx] = values[i];
237 auto* h_needs_fallback = needs_fallback.copyToHost();
239 if (*h_needs_fallback)
241 V<Long> col_index_out(col_index.size());
242 V<T> mat_out(mat.size());
243 auto* d_col_out = col_index_out.data();
244 auto* d_val_out = mat_out.data();
246 std::size_t temp_bytes = 0;
249 rocprim::segmented_radix_sort_pairs,
250 cub::DeviceSegmentedRadixSort::SortPairs)
251 (
nullptr, temp_bytes, pcol, d_col_out, pmat, d_val_out,
252 nnz, nr, prow, prow+1, 0,
int(
sizeof(
Long)*CHAR_BIT),
258 rocprim::segmented_radix_sort_pairs,
259 cub::DeviceSegmentedRadixSort::SortPairs)
260 (d_temp, temp_bytes, pcol, d_col_out, pmat, d_val_out,
261 nnz, nr, prow, prow+1, 0,
int(
sizeof(
Long)*CHAR_BIT),
264 std::swap(col_index, col_index_out);
265 std::swap(mat, mat_out);
275#elif defined(AMREX_USE_SYCL)
294template <
typename T,
template <
typename>
class V>
297 if (nnz <= 0) {
return; }
299 constexpr int SMALL = 128;
317 for (
Long r = 0; r < nr; ++r) {
318 Long const b = row_offset[r ];
319 Long const e = row_offset[r+1];
320 auto const len =
int(e - b);
322 if (len <= 1) {
continue; }
325 for (
int i = 1; i < len; ++i) {
326 if (col_index[b+i-1] > col_index[b+i]) {
331 if (sorted) {
continue; }
335 for (
int i = 0; i < len; ++i) {
336 scols[i] = col_index[b+i];
337 svals[i] = mat [b+i];
339 for (
int i = 1; i < len; ++i) {
343 while (j > 0 && scols[j-1] > c) {
344 scols[j] = scols[j-1];
345 svals[j] = svals[j-1];
351 for (
int i = 0; i < len; ++i) {
352 col_index[b+i] = scols[i];
353 mat [b+i] = svals[i];
360 for (
int i = 0; i < len; ++i) {
361 lcols[i] = col_index[b+i];
362 lvals[i] = mat [b+i];
366 std::sort(perm.begin(), perm.end(),
367 [&] (
int i0,
int i1) {
368 return lcols[i0] < lcols[i1];
371 for (
int out = 0; out < len; ++out) {
372 auto const in = perm[out];
373 col_index[b+out] = lcols[in];
374 mat [b+out] = lvals[in];
#define AMREX_ALWAYS_ASSERT(EX)
Definition AMReX_BLassert.H:50
#define AMREX_RESTRICT
Definition AMReX_Extension.H:32
#define AMREX_HIP_OR_CUDA(a, b)
Definition AMReX_GpuControl.H:21
#define AMREX_GPU_SAFE_CALL(call)
Definition AMReX_GpuError.H:63
#define AMREX_GPU_ERROR_CHECK()
Definition AMReX_GpuError.H:151
#define AMREX_GPU_DEVICE
Definition AMReX_GpuQualifiers.H:18
virtual void free(void *pt)=0
A pure virtual function for deleting the arena pointed to by pt.
virtual void * alloc(std::size_t sz)=0
Definition AMReX_GpuBuffer.H:18
T const * data() const noexcept
Definition AMReX_GpuBuffer.H:45
static constexpr int warp_size
Definition AMReX_GpuDevice.H:236
amrex_long Long
Definition AMReX_INT.H:30
Arena * The_Arena()
Definition AMReX_Arena.cpp:805
__host__ __device__ AMREX_FORCE_INLINE void AddNoRet(T *sum, T value) noexcept
Definition AMReX_GpuAtomic.H:284
void copyAsync(HostToDevice, InIter begin, InIter end, OutIter result) noexcept
A host-to-device copy routine. Note this is just a wrapper around memcpy, so it assumes contiguous storage.
Definition AMReX_GpuContainers.H:228
static constexpr DeviceToHost deviceToHost
Definition AMReX_GpuContainers.H:106
static constexpr HostToDevice hostToDevice
Definition AMReX_GpuContainers.H:105
void streamSynchronize() noexcept
Definition AMReX_GpuDevice.H:310
gpuStream_t gpuStream() noexcept
Definition AMReX_GpuDevice.H:291
Definition AMReX_Amr.cpp:49
void duplicateCSR(C c, CSR< T, AD > &dst, CSR< T, AS > const &src)
Copy CSR buffers between memory spaces asynchronously.
Definition AMReX_CSR.H:116
const int[]
Definition AMReX_BLProfiler.cpp:1664
Owning CSR container backed by AMReX resizable vectors.
Definition AMReX_CSR.H:49
V< Long > row_offset
Definition AMReX_CSR.H:52
Long nrows() const
Number of logical rows represented by the CSR offset array.
Definition AMReX_CSR.H:56
Long nnz
Definition AMReX_CSR.H:53
void sort()
Sort each row by column index. Uses GPU acceleration when possible.
Definition AMReX_CSR.H:137
CsrView< T > view()
Mutable view of the underlying buffers.
Definition AMReX_CSR.H:77
void sort_on_host()
Host-only fallback that sorts column indices row by row.
Definition AMReX_CSR.H:295
CsrView< T const > view() const
Const view of the underlying buffers.
Definition AMReX_CSR.H:83
CsrView< T const > const_view() const
Convenience alias for view() const.
Definition AMReX_CSR.H:89
void resize(Long num_rows, Long num_non_zeros)
Resize the storage to accommodate num_rows and num_non_zeros entries.
Definition AMReX_CSR.H:69
V< Long > col_index
Definition AMReX_CSR.H:51
V< T > mat
Definition AMReX_CSR.H:50
Lightweight non-owning CSR view that can point to host or device buffers.
Definition AMReX_CSR.H:33
std::conditional_t< std::is_const_v< T >, Long const, Long > U
Definition AMReX_CSR.H:34
T *__restrict__ mat
Definition AMReX_CSR.H:35
Long nrows
Definition AMReX_CSR.H:39
Long nnz
Definition AMReX_CSR.H:38
U *__restrict__ row_offset
Definition AMReX_CSR.H:37
U *__restrict__ col_index
Definition AMReX_CSR.H:36