Example 1.4 Numerical Integration with MPI_Gather Collective Communication

This example demonstrates parallel integration (see Example 1 for details) using the following MPI library routines: MPI_Init, MPI_Comm_rank, MPI_Comm_size, MPI_Gather, and MPI_Finalize. Each process computes a partial sum over its own sub-interval, and MPI_Gather collects these partial sums into an array on the master process, which adds them to form the total integral.
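
For reference, the partial sums are midpoint-rule approximations of cos(x), and the decomposition used by both versions below is (the exact value of the integral is 1):

      integral of cos(x) dx from 0 to pi/2
          = sum over myid = 0,...,p-1 of the partial sum on process myid
         ~= sum over myid of { sum over j = 0,...,n-1 of cos(ai + (j+0.5)*h)*h }

      where  h  = (b-a)/(n*p)      length of one increment
             ai = a + myid*n*h     lower integration limit for process myid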

Example 1.4 Fortran Code


      Program Example1_4
c#######################################################################
c#                                                                     #
c# This is an MPI example on parallel integration to demonstrate the   #
c# use of:                                                             #
c#                                                                     #
c# * MPI_Init, MPI_Comm_rank, MPI_Comm_size, MPI_Finalize              #
c# * MPI_Gather                                                        #
c#                                                                     #
c# Dr. Kadin Tseng                                                     #
c# Scientific Computing and Visualization                              #
c# Boston University                                                   #
c# 1998                                                                #
c#                                                                     #
c#######################################################################

      implicit none
      integer n, p, i, j, proc, ierr, master, myid, tag, comm
      real h, a, b, integral, pi, ai, my_int, integral_sum, buf(50)
      include "mpif.h"  ! brings in pre-defined MPI constants, ...
      data master/0/    ! processor 0 collects integral sums from other processors

      comm = MPI_COMM_WORLD       
      call MPI_Init(ierr)                        ! starts MPI
      call MPI_Comm_rank(comm, myid, ierr)      ! get current proc ID
      call MPI_Comm_size(comm, p, ierr)          ! get number of procs

      pi = acos(-1.0)   !  = 3.14159...
      a = 0.0           ! lower limit of integration
      b = pi*1./2.      ! upper limit of integration
      n = 500           ! number of increments within each process
      h = (b-a)/n/p     ! length of increment

      ai = a + myid*n*h ! lower limit of integration for partition myid
      my_int = integral(ai, h, n) 
      write(*,"('Process ',i2,' has the partial sum of',f10.6)")
     &          myid,my_int

      call MPI_Gather( 
     &     my_int, 1, MPI_REAL,      ! Send buffer, size, data type
     &     buf, 1, MPI_REAL,      ! Receive buffer, size, data type
     &     master,      ! root process that gathers the partial sums
     &     comm, ierr)  ! communicator, error code

      if(myid .eq. master) then
        integral_sum = 0.0
        do i=1,p
          integral_sum = integral_sum + buf(i)
        enddo
        print *,'The Integral =',integral_sum
      endif

      call MPI_Finalize(ierr)                            ! let MPI finish up ...

      end
      real function integral(ai, h, n)
      implicit none
      integer n, j
      real h, ai, aij

      integral = 0.0                ! initialize integral
      do j=0,n-1                    ! sum integrals
        aij = ai +(j+0.5)*h         ! abscissa mid-point
        integral = integral + cos(aij)*h
      enddo

      return
      end
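
A typical way to build and run the Fortran version is sketched below; the compiler wrapper and launcher names vary by MPI implementation (mpif77, mpifort, mpirun, mpiexec, ...), and the file name example1_4.f and the process count of 4 are only illustrative:

      mpif77 example1_4.f -o example1_4
      mpirun -np 4 ./example1_4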

Example 1.4 C Code


#include <mpi.h>
#include <math.h>
#include <stdio.h>
/* Prototype */
float integral(float ai, float h, int n);
int main(int argc, char* argv[])
/*######################################################################
 #                                                                     #
 # This is an MPI example on parallel integration to demonstrate the   #
 # use of:                                                             #
 #                                                                     #
 # * MPI_Init, MPI_Comm_rank, MPI_Comm_size, MPI_Finalize              #
 # * MPI_Gather                                                        #
 #                                                                     #
 # Dr. Kadin Tseng                                                     #
 # Scientific Computing and Visualization                              #
 # Boston University                                                   #
 # 1998                                                                #
 #                                                                     #
 #####################################################################*/
{
      int n, p, myid, tag, proc, ierr, i;
      float h, integral_sum, a, b, ai, pi, my_int, buf[50];
      int master = 0;  /* processor performing total sum */
      MPI_Comm comm;

      comm = MPI_COMM_WORLD;       
      ierr = MPI_Init(&argc,&argv);           /* starts MPI */
      MPI_Comm_rank(comm, &myid);           /* get current process id */
      MPI_Comm_size(comm, &p);               /* get number of processes */

      pi = acos(-1.0);  /* = 3.14159... */
      a = 0.;           /* lower limit of integration */
      b = pi*1./2.;     /* upper limit of integration */
      n = 500;          /* number of increments within each process */
      tag = 123;        /* message tag (unused here: collective calls such as MPI_Gather do not use tags) */
      h = (b-a)/n/p;    /* length of increment */

      ai = a + myid*n*h;  /* lower limit of integration for partition myid */
      my_int = integral(ai, h, n);   /* 0<=myid<=p-1 */

      printf("Process %d has the partial sum of %fn", myid,my_int);

      MPI_Gather(      /* collects my_int from all processes to master */
            &my_int, 1, MPI_FLOAT,      /* send buffer, size, data type */
            buf, 1, MPI_FLOAT,     /* receive buffer, size, data type */
            master, comm);         /* root process, communicator */

      if(myid == master) {
        integral_sum = 0.0;
        for (i=0; i< i++) {
          integral_sum += buf[i];
        }
        printf("The Integral =%fn",integral_sum);
      }

      MPI_Finalize();                  /* let MPI finish up ... */

      return 0;
}

float integral(float ai, float h, int n)
{
      int j;
      float aij, integ;

      integ = 0.0;                 /* initialize */
      for (j=0;j<j++) {          /* sum integrals */
        aij = ai + (j+0.5)*h;      /* mid-point */
        integ += cos(aij)*h;
      }
      return integ;
}
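
The C version builds and runs the same way; again the wrapper and launcher names depend on the MPI installation (mpicc and mpirun are common), and example1_4.c with 4 processes is only illustrative:

      mpicc example1_4.c -o example1_4 -lm
      mpirun -np 4 ./example1_4

Each process prints its own partial sum, and the master prints the gathered total, which should be very close to 1.0, the exact value of the integral of cos(x) over [0, pi/2].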
