CUDA Cooperative Groups Examples
- 1. Reproduction Steps
- 2. Output
CUDA Cooperative Groups is a set of higher-level features introduced into the CUDA programming model to provide more flexible thread organization and synchronization. With Cooperative Groups, developers can group threads at several granularities and perform parallel operations more efficiently. The main levels are (see the sketch after this list):
- Grid group (grid_group): all threads of the entire grid.
- Thread block group (thread_block): all threads of a single thread block.
- Tile group (thread_block_tile): a thread block partitioned into smaller sub-groups called tiles.
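A minimal sketch of how these three levels are typically obtained inside a kernel (the kernel name group_levels_sketch is illustrative only and is not part of the test program below):
#include <cooperative_groups.h>
namespace cg = cooperative_groups;

__global__ void group_levels_sketch()
{
    cg::grid_group grid = cg::this_grid();                            // all threads in the grid
    cg::thread_block block = cg::this_thread_block();                 // all threads in this block
    cg::thread_block_tile<32> tile = cg::tiled_partition<32>(block);  // 32-thread tile of the block
    grid.sync();   // grid-wide barrier (requires a cooperative launch)
    block.sync();  // block-wide barrier
    tile.sync();   // tile-wide barrier
}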
The test cases below cover:
- Test 1: use grid_group synchronization to copy the data of tid=0 to all other threads
- Test 2: use thread_block synchronization to reverse the data within each thread block
- Test 3: sum within each tile
- Test 4: exchange data within each tile
- Test 5: broadcast within each tile
1. Reproduction Steps
tee cooperative_groups.cu<<-'EOF'
#include <cstdio>
#include <cstdlib>
#include <cassert>
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cooperative_groups.h>
#include <cooperative_groups/reduce.h>
namespace cg = cooperative_groups;
#define CHECK_CUDA(call) \
    do { \
        cudaError_t err = call; \
        if (err != cudaSuccess) { \
            std::cerr << "CUDA error at " << __FILE__ << ":" << __LINE__; \
            std::cerr << " code=" << err << " (" << cudaGetErrorString(err) << ")" << std::endl; \
            exit(EXIT_FAILURE); \
        } \
    } while (0)
__device__ float gdata = 0;
/*
Test 1: use grid_group synchronization to copy the data of tid=0 to all other threads
*/
__global__ void case_0(float *iodata)
{
    unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;
    cg::grid_group grid = cg::this_grid();
    if (tid == 0) gdata = iodata[tid];
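    // Grid-wide barrier: makes thread 0's write to gdata visible to every thread
    // in the grid; this is why the kernel is launched with cudaLaunchCooperativeKernel.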
    grid.sync();
    iodata[tid] = gdata;
}
/*
Test 2: use thread_block synchronization to reverse the data within each thread block
*/
__global__ void case_1(float *iodata)
{
    __shared__ float sharedData[256];
    unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;
    cg::thread_block block = cg::this_thread_block();
    sharedData[threadIdx.x] = iodata[tid];
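    // Block-wide barrier: every thread must finish writing sharedData before
    // any thread reads it back in reverse order.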
    block.sync();
    iodata[tid] = sharedData[blockDim.x - 1 - threadIdx.x];
}
/*
Test 3: sum within each tile
*/
__global__ void case_2(float *iodata)
{
    unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;
    cg::thread_block block = cg::this_thread_block();
    cg::thread_block_tile<2> tile2 = cg::tiled_partition<2>(block);
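    // cg::reduce sums iodata[tid] across the 2-thread tile; every thread in the
    // tile receives the same sum.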
    float sum = cg::reduce(tile2, iodata[tid], cg::plus<float>());
    tile2.sync();
    iodata[tid] = sum;
}
/*
Test 4: exchange data within each tile
*/
__global__ void case_3(float *iodata)
{
    unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;
    cg::thread_block block = cg::this_thread_block();
    cg::thread_block_tile<2> tile2 = cg::tiled_partition<2>(block);
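    // tile2.shfl reads the value held by lane (rank + 1) % size, i.e. a cyclic
    // shift within each 2-thread tile, which here swaps neighboring values.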
    float nextValue = tile2.shfl(iodata[tid], (tile2.thread_rank() + 1) % tile2.size());
    tile2.sync();
    iodata[tid] = nextValue;
}
/*
Test 5: broadcast within each tile
*/
__global__ void case_4(float *iodata)
{
    unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;
    cg::thread_block block = cg::this_thread_block();
    cg::thread_block_tile<4> tile4 = cg::tiled_partition<4>(block);
    float value = 0.0f;
    // lane 1 broadcasts its value to the other lanes of the tile
    if (tile4.thread_rank() == 1) {
        value = iodata[tid];
    }
    value = tile4.shfl(value, 1);
    tile4.sync();
    iodata[tid] = value;
}
int main(int argc, char *argv[])
{
    int deviceid = 0;
    CHECK_CUDA(cudaSetDevice(deviceid));
    {
        printf(" ----------------- case 0 ----------------- \n");
        int block_count = 4;
        int block_size = 4;
        int thread_size = block_count * block_size;
        float *iodata;
        CHECK_CUDA(cudaHostAlloc(&iodata, thread_size * sizeof(float), cudaHostAllocDefault));
        for (int i = 0; i < thread_size; i++) iodata[i] = i + 100;
        void *kernelArgs[] = {&iodata};
        CHECK_CUDA(cudaLaunchCooperativeKernel((void*)case_0, block_count, block_size, kernelArgs));
        CHECK_CUDA(cudaDeviceSynchronize());
        for (int i = 0; i < thread_size; i++)
        {
            printf("tid:%02d %6.2f\n", i, iodata[i]);
        }
        CHECK_CUDA(cudaFreeHost(iodata));
    }
    {
        printf(" ----------------- case 1 ----------------- \n");
        int block_count = 2;
        int block_size = 4;
        int thread_size = block_count * block_size;
        float *iodata;
        CHECK_CUDA(cudaHostAlloc(&iodata, thread_size * sizeof(float), cudaHostAllocDefault));
        for (int i = 0; i < thread_size; i++) iodata[i] = i + 100;
        void *kernelArgs[] = {&iodata};
        CHECK_CUDA(cudaLaunchCooperativeKernel((void*)case_1, block_count, block_size, kernelArgs));
        CHECK_CUDA(cudaDeviceSynchronize());
        for (int i = 0; i < thread_size; i++)
        {
            printf("tid:%02d %6.2f\n", i, iodata[i]);
        }
        CHECK_CUDA(cudaFreeHost(iodata));
    }
    {
        printf(" ----------------- case 2 ----------------- \n");
        int block_count = 2;
        int block_size = 8;
        int thread_size = block_count * block_size;
        float *iodata;
        CHECK_CUDA(cudaHostAlloc(&iodata, thread_size * sizeof(float), cudaHostAllocDefault));
        for (int i = 0; i < thread_size; i++) iodata[i] = i;
        void *kernelArgs[] = {&iodata};
        CHECK_CUDA(cudaLaunchCooperativeKernel((void*)case_2, block_count, block_size, kernelArgs));
        CHECK_CUDA(cudaDeviceSynchronize());
        for (int i = 0; i < thread_size; i++)
        {
            printf("tid:%02d %6.2f\n", i, iodata[i]);
        }
        CHECK_CUDA(cudaFreeHost(iodata));
    }
    {
        printf(" ----------------- case 3 ----------------- \n");
        int block_count = 2;
        int block_size = 8;
        int thread_size = block_count * block_size;
        float *iodata;
        CHECK_CUDA(cudaHostAlloc(&iodata, thread_size * sizeof(float), cudaHostAllocDefault));
        for (int i = 0; i < thread_size; i++) iodata[i] = i;
        void *kernelArgs[] = {&iodata};
        CHECK_CUDA(cudaLaunchCooperativeKernel((void*)case_3, block_count, block_size, kernelArgs));
        CHECK_CUDA(cudaDeviceSynchronize());
        for (int i = 0; i < thread_size; i++)
        {
            printf("tid:%02d %6.2f\n", i, iodata[i]);
        }
        CHECK_CUDA(cudaFreeHost(iodata));
    }
    {
        printf(" ----------------- case 4 ----------------- \n");
        int block_count = 2;
        int block_size = 8;
        int thread_size = block_count * block_size;
        float *iodata;
        CHECK_CUDA(cudaHostAlloc(&iodata, thread_size * sizeof(float), cudaHostAllocDefault));
        for (int i = 0; i < thread_size; i++) iodata[i] = i;
        void *kernelArgs[] = {&iodata};
        CHECK_CUDA(cudaLaunchCooperativeKernel((void*)case_4, block_count, block_size, kernelArgs));
        CHECK_CUDA(cudaDeviceSynchronize());
        for (int i = 0; i < thread_size; i++)
        {
            printf("tid:%02d %6.2f\n", i, iodata[i]);
        }
        CHECK_CUDA(cudaFreeHost(iodata));
    }
}
EOF
/usr/local/cuda/bin/nvcc -std=c++17 -arch=sm_86 -lineinfo -o cooperative_groups cooperative_groups.cu \
-I /usr/local/cuda/include -L /usr/local/cuda/lib64 -lcuda
./cooperative_groups
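cudaLaunchCooperativeKernel requires the device to support cooperative launches. If the launches fail, a quick attribute query such as the following sketch (a separate, hypothetical check_coop.cu, not part of the program above, assuming device 0) can confirm support:
#include <cstdio>
#include <cuda_runtime.h>

int main()
{
    int supported = 0;
    // cudaDevAttrCooperativeLaunch is 1 when cudaLaunchCooperativeKernel is supported
    cudaDeviceGetAttribute(&supported, cudaDevAttrCooperativeLaunch, /*device=*/0);
    printf("cooperativeLaunch supported: %d\n", supported);
    return 0;
}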
2. Output
----------------- case 0 -----------------
tid:00 100.00
tid:01 100.00
tid:02 100.00
tid:03 100.00
tid:04 100.00
tid:05 100.00
tid:06 100.00
tid:07 100.00
tid:08 100.00
tid:09 100.00
tid:10 100.00
tid:11 100.00
tid:12 100.00
tid:13 100.00
tid:14 100.00
tid:15 100.00
----------------- case 1 -----------------
tid:00 103.00
tid:01 102.00
tid:02 101.00
tid:03 100.00
tid:04 107.00
tid:05 106.00
tid:06 105.00
tid:07 104.00
----------------- case 2 -----------------
tid:00 1.00
tid:01 1.00
tid:02 5.00
tid:03 5.00
tid:04 9.00
tid:05 9.00
tid:06 13.00
tid:07 13.00
tid:08 17.00
tid:09 17.00
tid:10 21.00
tid:11 21.00
tid:12 25.00
tid:13 25.00
tid:14 29.00
tid:15 29.00
----------------- case 3 -----------------
tid:00 1.00
tid:01 0.00
tid:02 3.00
tid:03 2.00
tid:04 5.00
tid:05 4.00
tid:06 7.00
tid:07 6.00
tid:08 9.00
tid:09 8.00
tid:10 11.00
tid:11 10.00
tid:12 13.00
tid:13 12.00
tid:14 15.00
tid:15 14.00
----------------- case 4 -----------------
tid:00 1.00
tid:01 1.00
tid:02 1.00
tid:03 1.00
tid:04 5.00
tid:05 5.00
tid:06 5.00
tid:07 5.00
tid:08 9.00
tid:09 9.00
tid:10 9.00
tid:11 9.00
tid:12 13.00
tid:13 13.00
tid:14 13.00
tid:15 13.00