在CUDA中对许多小数组进行排序



我正在CUDA中实现中值过滤器。对于一个特定的像素,我提取它的邻居对应于像素周围的一个窗口,比如N x N (3 x 3)窗口,现在有一个N x N元素的数组。我不打算在我的应用程序中使用超过10 x 10个元素的窗口。

这个数组目前是内核中的局部数据,并且已经加载到设备内存中。从我之前读过的 SO 帖子来看,最常见的排序算法是由 Thrust 实现的。但是,Thrust 只能从主机端调用。相关讨论帖:《Thrust inside user written kernels(在用户编写的内核中使用 Thrust)》。

是否有一种快速有效的方法来对内核内的N x N元素的小数组进行排序?

如果元素数量固定且较小,则可以使用排序网络(http://pages.ripco.net/~jgamble/nw.html)。它为固定数量的元素提供了固定数量的比较/交换操作。

你的问题是在CUDA中对许多小数组排序。

根据 Robert 在评论中的建议,CUB 提供了一个可能的解决方案。下面我给出一个示例,它基于 Robert 在《CUB BlockRadixSort:如何处理大的 tile 或对多个 tile 进行排序?》中给出的代码构建。

这个想法是把待排序的各个小数组分配给不同的线程块,然后使用 cub::BlockRadixSort 对每个数组排序。提供了两个版本:一个不使用共享内存暂存数据,另一个先把小数组加载到共享内存中。

最后我要指出,你所说的"CUDA Thrust 不能从内核内部调用"已经不再正确。你链接的《Thrust inside user written kernels》那个帖子中已经有了更新的答案。

#include <cub/cub.cuh>
#include <stdio.h>
#include <stdlib.h>
#include "Utilities.cuh"
using namespace cub;
/**********************************/
/* CUB BLOCKSORT KERNEL NO SHARED */
/**********************************/
// Sorts many equal-length int segments, one segment per thread block, writing
// results to d_out. Each block sorts the BLOCK_THREADS * ITEMS_PER_THREAD
// consecutive keys starting at blockIdx.x * (BLOCK_THREADS * ITEMS_PER_THREAD).
// Launch with one block per small array and BLOCK_THREADS threads per block;
// keys are staged in per-thread registers (thread_keys), no explicit shared
// copy of the data is made — only CUB's TempStorage lives in shared memory.
template <int BLOCK_THREADS, int ITEMS_PER_THREAD>
__global__ void BlockSortKernel(int *d_in, int *d_out)
{
    // --- Specialize BlockLoad, BlockStore, and BlockRadixSort collective types
    // NOTE(review): the `int*` iterator-type parameter matches older CUB
    // releases; newer CUB expects the value type `int` here — confirm against
    // the CUB version in use.
    typedef cub::BlockLoad      <int*, BLOCK_THREADS, ITEMS_PER_THREAD, BLOCK_LOAD_TRANSPOSE>   BlockLoadT;
    typedef cub::BlockStore     <int*, BLOCK_THREADS, ITEMS_PER_THREAD, BLOCK_STORE_TRANSPOSE>  BlockStoreT;
    typedef cub::BlockRadixSort <int , BLOCK_THREADS, ITEMS_PER_THREAD>                         BlockRadixSortT;
    // --- Allocate type-safe, repurposable shared memory for collectives
    // (a union: the same bytes are reused by load, sort, and store in turn)
    __shared__ union {
        typename BlockLoadT     ::TempStorage load;
        typename BlockStoreT    ::TempStorage store;
        typename BlockRadixSortT::TempStorage sort;
    } temp_storage;
    // --- Obtain this block's segment of consecutive keys (blocked across threads)
    int thread_keys[ITEMS_PER_THREAD];
    int block_offset = blockIdx.x * (BLOCK_THREADS * ITEMS_PER_THREAD);
    BlockLoadT(temp_storage.load).Load(d_in + block_offset, thread_keys);
    // Barrier required: temp_storage is about to be repurposed by Sort.
    __syncthreads(); 
    // --- Collectively sort the keys
    BlockRadixSortT(temp_storage.sort).Sort(thread_keys);
    // Barrier required: temp_storage is about to be repurposed by Store.
    __syncthreads(); 
    // --- Store the sorted segment
    BlockStoreT(temp_storage.store).Store(d_out + block_offset, thread_keys);
}
/*******************************/
/* CUB BLOCKSORT KERNEL SHARED */
/*******************************/
// Variant of BlockSortKernel that first stages each block's segment of
// BLOCK_THREADS * ITEMS_PER_THREAD int keys in shared memory, sorts it there
// with cub::BlockRadixSort, then copies the sorted keys back to d_out.
// Launch with one block per small array and BLOCK_THREADS threads per block.
template <int BLOCK_THREADS, int ITEMS_PER_THREAD>
__global__ void shared_BlockSortKernel(int *d_in, int *d_out)
{
    // --- Shared memory allocation (holds the whole segment for this block)
    __shared__ int sharedMemoryArray[BLOCK_THREADS * ITEMS_PER_THREAD];
    // --- Specialize BlockStore and BlockRadixSort collective types
    typedef cub::BlockRadixSort <int , BLOCK_THREADS, ITEMS_PER_THREAD> BlockRadixSortT;
    // --- Allocate type-safe, repurposable shared memory for collectives
    __shared__ typename BlockRadixSortT::TempStorage temp_storage;
    int block_offset = blockIdx.x * (BLOCK_THREADS * ITEMS_PER_THREAD);
    // --- Load data to shared memory: each thread copies its ITEMS_PER_THREAD
    // consecutive keys ("blocked" arrangement across threads)
    for (int k = 0; k < ITEMS_PER_THREAD; k++) sharedMemoryArray[threadIdx.x * ITEMS_PER_THREAD + k]  = d_in[block_offset + threadIdx.x * ITEMS_PER_THREAD + k];
    // Barrier: all writes must land before any thread's slice is sorted.
    __syncthreads();
    // --- Collectively sort the keys.
    // The cast reinterprets this thread's slice of the shared array as an
    // int[ITEMS_PER_THREAD] reference, the per-thread array form Sort expects.
    BlockRadixSortT(temp_storage).Sort(*static_cast<int(*)[ITEMS_PER_THREAD]>(static_cast<void*>(sharedMemoryArray + (threadIdx.x * ITEMS_PER_THREAD))));
    // Barrier: sorted data must be fully written before threads read it back.
    __syncthreads();
    // --- Write data to shared memory
    // (comment in original is misleading: this copies shared memory OUT to d_out)
    for (int k = 0; k < ITEMS_PER_THREAD; k++) d_out[block_offset + threadIdx.x * ITEMS_PER_THREAD + k] = sharedMemoryArray[threadIdx.x * ITEMS_PER_THREAD + k];
}
/********/
/* MAIN */
/********/
// Demo driver: sorts numArrays independent small arrays of numElemsPerArray
// random ints each, once with the register-staged kernel and once with the
// shared-memory kernel, printing both results.
// Fix vs. original: the printf format strings had lost their '\n' escape
// sequences (rendered as "nn"/"%in"); launches are now error-checked and
// host/device allocations are released.
int main() {
    const int numElemsPerArray  = 8;                            // size of each small array
    const int numArrays         = 4;                            // number of independent arrays
    const int N                 = numArrays * numElemsPerArray; // total element count
    const int numElemsPerThread = 4;                            // ITEMS_PER_THREAD for the kernels
    const int RANGE             = N * numElemsPerThread;        // random value range
    // Threads per block: each thread owns numElemsPerThread consecutive keys.
    const int threadsPerBlock   = numElemsPerArray / numElemsPerThread;
    // --- Allocating and initializing the data on the host
    int *h_data = (int *)malloc(N * sizeof(int));
    for (int i = 0; i < N; i++) h_data[i] = rand() % RANGE;
    // --- Allocating the results on the host
    int *h_result1 = (int *)malloc(N * sizeof(int));
    int *h_result2 = (int *)malloc(N * sizeof(int));
    // --- Allocating space for data and results on device
    int *d_in;      gpuErrchk(cudaMalloc((void **)&d_in,   N * sizeof(int)));
    int *d_out1;    gpuErrchk(cudaMalloc((void **)&d_out1, N * sizeof(int)));
    int *d_out2;    gpuErrchk(cudaMalloc((void **)&d_out2, N * sizeof(int)));
    // --- BlockSortKernel no shared
    gpuErrchk(cudaMemcpy(d_in, h_data, N * sizeof(int), cudaMemcpyHostToDevice));
    BlockSortKernel<N / numArrays / numElemsPerThread, numElemsPerThread><<<numArrays, threadsPerBlock>>>(d_in, d_out1);
    gpuErrchk(cudaPeekAtLastError());      // catch launch-configuration errors
    gpuErrchk(cudaDeviceSynchronize());    // catch asynchronous execution errors
    gpuErrchk(cudaMemcpy(h_result1, d_out1, N * sizeof(int), cudaMemcpyDeviceToHost));
    printf("BlockSortKernel no shared\n\n");
    for (int k = 0; k < numArrays; k++)
        for (int i = 0; i < numElemsPerArray; i++)
            printf("Array nr. %i; Element nr. %i; Value %i\n", k, i, h_result1[k * numElemsPerArray + i]);
    // --- BlockSortKernel with shared
    gpuErrchk(cudaMemcpy(d_in, h_data, N * sizeof(int), cudaMemcpyHostToDevice));
    shared_BlockSortKernel<N / numArrays / numElemsPerThread, numElemsPerThread><<<numArrays, threadsPerBlock>>>(d_in, d_out2);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());
    gpuErrchk(cudaMemcpy(h_result2, d_out2, N * sizeof(int), cudaMemcpyDeviceToHost));
    printf("\n\nBlockSortKernel with shared\n\n");
    for (int k = 0; k < numArrays; k++)
        for (int i = 0; i < numElemsPerArray; i++)
            printf("Array nr. %i; Element nr. %i; Value %i\n", k, i, h_result2[k * numElemsPerArray + i]);
    // --- Release device and host resources
    gpuErrchk(cudaFree(d_in));
    gpuErrchk(cudaFree(d_out1));
    gpuErrchk(cudaFree(d_out2));
    free(h_data);
    free(h_result1);
    free(h_result2);
    return 0;
}

如果您使用的是 CUDA 5.x,可以使用动态并行:在过滤器内核中启动一个子内核来完成排序工作。至于如何用 CUDA 进行排序,可以利用排序网络、归并等技巧。

最新更新