Evenly distributing array data with MPI



I want to solve a load-balancing problem with MPI in C. Each MPI task holds an array of integers, and the arrays have different sizes.

Initial situation: the data is distributed unevenly across the MPI processes. After redistributing the data, we want every process to hold an array of the same length.

Assume each process starts with an array whose size is chosen like this:

int nbTask;
int myRank;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &nbTask);
MPI_Comm_rank(MPI_COMM_WORLD, &myRank);
/* seed each rank differently so the sizes differ */
srand(time(NULL) + myRank);
int size = (rand() % (60 - 40 + 1)) + 40;   /* random size in [40, 60] */

Then compute the size each array should have after balancing:

int global_sum;
int new_size;
MPI_Allreduce(&size, &global_sum, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);

/* spread the remainder across ranks so the new sizes sum exactly to global_sum */
new_size = (global_sum + myRank) / nbTask;

int exScan;
/* exclusive prefix sum of the sizes; the result is undefined on rank 0,
   where it should be treated as 0 */
MPI_Exscan(&size, &exScan, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
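
To make these quantities concrete, here is a made-up example with 3 ranks and sizes 50, 41 and 59: global_sum = 150 and new_size = 50 on every rank. The exclusive prefix sums are 0, 50, 91 for the old sizes and 0, 50, 100 for the new ones. Rank 2 currently holds global elements 91..149 but should hold 100..149, so it has to hand its first 9 elements over to rank 1.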

Can you think of an algorithm that sends data from one processor to another so that every processor ends up with an array of the same size?

Obviously, the solution uses MPI_Scan or MPI_Exscan to work out what each processor has to send to processors p-1 and p+1.
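
In the code below, each rank computes offset_left = exScan_size - exScan_newSize and offset_right = offset_left + size - new_size. One observation that makes the pairwise exchange work: by construction, rank p's offset_left equals rank p-1's offset_right, so whenever rank p expects k elements from the left, rank p-1 has decided to send exactly k elements to the right (and vice versa); the sign of the offset tells both sides which way the data flows.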

My solution is:


#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <mpi.h>

int main(int argc, char* argv[])
{
    int nbTask;
    int myRank;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &nbTask);
    MPI_Comm_rank(MPI_COMM_WORLD, &myRank);
    MPI_Status status;
    MPI_Request request[2];
    int nbRequest = 0;

    int offset_left;
    int offset_right;
    srand(time(NULL) + myRank);
    int size = (rand() % (60 - 40 + 1)) + 40;   /* random size in [40, 60] */
    printf("%d  %d\n", myRank, size);

    int global_sum;
    int new_size;
    MPI_Allreduce(&size, &global_sum, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
    printf("------- global_sum : %d\n", global_sum);

    /* spread the remainder across ranks so the new sizes sum to global_sum */
    new_size = (global_sum + myRank) / nbTask;
    printf("task %d - new_size : %d\n", myRank, new_size);
    int new_array[new_size];

    int exScan_size;
    int exScan_newSize;
    /* exclusive prefix sums of the old and new sizes;
       MPI_Exscan leaves the receive buffer undefined on rank 0 */
    MPI_Exscan(&size, &exScan_size, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
    MPI_Exscan(&new_size, &exScan_newSize, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
    if (myRank == 0) {
        exScan_size = 0;
        exScan_newSize = 0;
    }
    printf("task %d - exScan_size : %d\n", myRank, exScan_size);

    /* fill the local array with globally consecutive values */
    int array[size];
    for (int i = 0; i < size; i++) {
        array[i] = exScan_size + i;
    }

    /* offset_left > 0: receive that many elements from rank p-1;
       offset_left < 0: send that many elements to rank p-1;
       offset_right plays the same role for the exchange with rank p+1 */
    offset_left = exScan_size - exScan_newSize;
    offset_right = offset_left + size - new_size;
    printf("task %d - offset_left : %d\n", myRank, offset_left);
    printf("task %d - offset_right : %d\n", myRank, offset_right);

    int data_left_size = abs(offset_left);
    int data_right_size = abs(offset_right);
    /* avoid zero-length VLAs, which are undefined behaviour in C */
    int data_left[data_left_size > 0 ? data_left_size : 1];
    int data_right[data_right_size > 0 ? data_right_size : 1];

    /* copy the elements that stay on this rank to their new positions */
    for (int i = 0; i < size; i++) {
        if ((i + offset_left) >= 0 && (i + offset_left) < new_size)
            new_array[i + offset_left] = array[i];
    }

    if (offset_left < 0) {
        for (int i = 0; i < data_left_size; i++) {
            data_left[i] = array[i];
        }
        if (myRank != 0)
            MPI_Isend(data_left, data_left_size, MPI_INT, myRank - 1, 0,
                      MPI_COMM_WORLD, &request[nbRequest++]);
    }
    else if (offset_left > 0) {
        MPI_Recv(data_left, data_left_size, MPI_INT, myRank - 1, 0,
                 MPI_COMM_WORLD, &status);
        for (int i = 0; i < data_left_size; i++) {
            new_array[i] = data_left[i];
        }
    }

    if (offset_right > 0) {
        for (int i = 0; i < data_right_size; i++) {
            data_right[i] = array[size - data_right_size + i];
        }
        if (myRank != nbTask - 1)
            MPI_Isend(data_right, data_right_size, MPI_INT, myRank + 1, 0,
                      MPI_COMM_WORLD, &request[nbRequest++]);
    }
    else if (offset_right < 0) {
        MPI_Recv(data_right, data_right_size, MPI_INT, myRank + 1, 0,
                 MPI_COMM_WORLD, &status);
        for (int i = 0; i < data_right_size; i++) {
            new_array[new_size - data_right_size + i] = data_right[i];
        }
    }

    /* complete the nonblocking sends before the buffers go out of scope */
    MPI_Waitall(nbRequest, request, MPI_STATUSES_IGNORE);

    printf("-------------------------------\n");
    printf("task %d - new_array\n", myRank);
    for (int loop = 0; loop < new_size; loop++)
        printf("%d ", new_array[loop]);
    printf("\n-------------------------------\n");

    MPI_Finalize();
    return 0;
}
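
To try this out, compile and launch it with any MPI implementation, e.g. using the usual wrapper and launcher (the file name balance.c is just an example):

mpicc -o balance balance.c
mpirun -np 4 ./balance

Note that this scheme only ever exchanges data with the two direct neighbours, which implicitly assumes every imbalance can be absorbed in a single hop; with sizes drawn from [40, 60] that holds. For arbitrary imbalances, where elements may have to cross several ranks, the same prefix sums could instead be turned into per-rank send counts and displacements for a single MPI_Alltoallv call.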

Latest update: