Example 1: MPI_Scatterv and MPI_Gather

Purpose: use a worked example to illustrate what collective communication functions do.

Example 1: This program shows how to use MPI_Scatterv and MPI_Gather. Suppose there are np data elements to be distributed among nprocs processes; the last process receives the extra elements left over after an even split. Each process modifies the data it receives and computes a local sum, and finally the master process gathers the partial sums and prints them.
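Before the full program, a minimal serial sketch (an addition, not part of the original post) may help illustrate the partitioning that the program sets up for MPI_Scatterv: every process gets np/nprocs elements, the last process also takes the remainder, and each displacement is the running sum of the preceding counts.

#include <iostream>
#include <vector>

//Serial sketch of the block partition used by the MPI program below:
//sendcounts[i] is how many elements process i receives and displs[i] is
//where its block starts inside the send buffer.
int main()
{
    const int np = 105, nprocs = 4;        //same sizes as in the example run
    std::vector<int> sendcounts(nprocs, np / nprocs);
    sendcounts[nprocs - 1] += np % nprocs; //the last process absorbs the leftover
    std::vector<int> displs(nprocs, 0);
    for (int i = 1; i < nprocs; i++)
        displs[i] = displs[i - 1] + sendcounts[i - 1];
    for (int i = 0; i < nprocs; i++)
        std::cout << "process " << i << ": count = " << sendcounts[i]
                  << ", displ = " << displs[i] << std::endl;
    return 0;
}

For np=105 and nprocs=4 this prints counts 26, 26, 26, 27 and displacements 0, 26, 52, 78.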

#include "mpi.h"
#include <cstdio>
#include <math.h>
#include <iostream>

#define np 105  //number of elements to be scattered from the root process
const int MASTER = 0; //rank of the master process

int main(int argc, char* argv[])
{
    double PosX[np];    //elements to be scattered (initialized only on the master)
    double *RecPosX;    //receive buffer for this process's block
    int nProcs, Rank;   //number of processes and rank of this process
    int i;
    int remainder;      //leftover elements when np is not divisible by nProcs
    double StartTime, EndTime; //timing


    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &nProcs);
    MPI_Comm_rank(MPI_COMM_WORLD, &Rank);

    StartTime = MPI_Wtime(); //start timing

    //arrays describing how the data are partitioned among the processes
    int* sendcounts = NULL;
    int* displs = NULL;

    if ( Rank == MASTER )
    {
        for(i=0;i<np;i++)
        {
            PosX[i] = i*1.0; //initialize elements in master process
        }
    }

    sendcounts = new int[nProcs]; //number of elements to be sent to each process
    remainder = np%nProcs;        //leftover when np is not divisible by nProcs

    //number of elements to be scattered to each process
    for(i=0;i<nProcs;i++)
    {
        sendcounts[i] = np/nProcs;
    }
    //the last process also receives the remainder
    sendcounts[nProcs-1] = sendcounts[nProcs-1] + remainder;
    //calculate the corresponding displacement of each block (prefix sum of sendcounts)
    displs = new int[nProcs];
    displs[0] = 0;
    for(i=1;i<nProcs;i++)
    {
        displs[i] = displs[i-1] + sendcounts[i-1];
    }
    //allocate the receive buffer for this process's block
    RecPosX = (double*)malloc(sendcounts[Rank]*sizeof(double));

    //now everything is ready and we can start the MPI_Scatterv operation
    MPI_Scatterv(PosX, sendcounts, displs, MPI_DOUBLE,
                 RecPosX, sendcounts[Rank], MPI_DOUBLE, MASTER, MPI_COMM_WORLD);

    //modify the received data and compute a local partial sum
    double sum = 0;
    double *rbuf; //gather buffer for the partial sums (one entry per process)

    rbuf = (double*)malloc(nProcs*sizeof(double));

    for(i=0;i<sendcounts[Rank];i++)
    {
        RecPosX[i] = RecPosX[i] + Rank/10.0; //shift every element by Rank/10
        sum += RecPosX[i];
    }

    //gather one partial sum from every process into rbuf on the master
    MPI_Gather(&sum, 1, MPI_DOUBLE, rbuf, 1, MPI_DOUBLE, MASTER, MPI_COMM_WORLD);

    EndTime = MPI_Wtime();
    if(Rank == MASTER)
    {
        for(i=0;i<nProcs;i++)
        {
            std::cout<<"Rank = "<<i<<" Sum= "<<rbuf[i]<<std::endl;
        }
        std::cout<<"Total Time Spending For: "<<nProcs<<" Is "<<EndTime-StartTime<<" :Second"<<std::endl;
    }

    //release the buffers before shutting down MPI
    delete [] sendcounts;
    delete [] displs;
    free(RecPosX);
    free(rbuf);

    MPI_Finalize();
    return 0;

}

With np=105 and nProcs=4 (for example, launched with mpirun -np 4), the output looks like this:

Rank=0 Sum=325
Rank=1 Sum=1003.6
Rank=2 Sum=1682.2
Rank=3 Sum=2465.1
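
As a sanity check (an addition, not part of the original output), the sums can be reproduced without MPI: rank 1, for example, receives elements 26 through 51 and adds 1/10 to each of its 26 elements, so its sum is (26+51)*26/2 + 26*0.1 = 1001 + 2.6 = 1003.6. The short serial program below recomputes all four values.

#include <iostream>

//Serial re-computation of the gathered sums for np=105, nprocs=4;
//it mirrors the arithmetic each rank performs on its block.
int main()
{
    const int np = 105, nprocs = 4;
    int start = 0;
    for (int rank = 0; rank < nprocs; rank++)
    {
        int n = np / nprocs + (rank == nprocs - 1 ? np % nprocs : 0);
        double sum = 0;
        for (int i = start; i < start + n; i++)
            sum += i * 1.0 + rank / 10.0; //element value plus the per-rank shift
        std::cout << "Rank = " << rank << " Sum= " << sum << std::endl;
        start += n;
    }
    return 0;
}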
