Message Passing Interface

MPI is a standard that defines how multiple computers communicate with each other across a network. It is mainly used for parallel computing; on the_hivemind, MPI is used to perform calculations across the nodes.
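A minimal sketch of the MPI programming model, assuming an MPI implementation such as Open MPI or MPICH is installed: every launched process runs the same executable, asks the library for its rank and for the total number of processes, and can branch on that rank.

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv) {
    int size, rank;

    MPI_Init(&argc, &argv);                 /* start the MPI runtime */
    MPI_Comm_size(MPI_COMM_WORLD, &size);   /* total number of processes */
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);   /* this process's id, 0..size-1 */

    printf("Hello from rank %d of %d\n", rank, size);

    MPI_Finalize();                         /* shut the runtime down */
    return 0;
}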

pi.c

This program calculates Pi using the Leibniz formula: pi/4 = 1 - 1/3 + 1/5 - 1/7 + ..., i.e. the sum of (-1)^i / (2*i + 1) over all i >= 0. The terms are split across the worker ranks in an interleaved fashion, and rank 0 adds up the partial sums.
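For reference, a serial sketch of the same sum (hypothetical, not part of the cluster code) shows the series itself before it is parallelised:

#include <stdio.h>

int main(void) {
    double sum = 0.0, sign = 1.0;

    for (long i = 0; i < 100000000; ++i) {
        sum += sign / (double)(2 * i + 1);  /* the i-th Leibniz term */
        sign = -sign;                       /* terms alternate in sign */
    }

    printf("pi ~= %.10f\n", 4.0 * sum);     /* the series converges to pi/4 */
    return 0;
}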

#include <mpi.h>
#include <time.h>
#include <stdio.h>

#define PI_SIZE 500000000   /* number of terms of the series to sum */
typedef long I;             /* index type */
typedef double D;           /* accumulator type */

void worker(I id, I sz) {
    D result = 0.0;
    /* Term i of the series has sign (-1)^i, so this worker's first term
       (i = id) starts at (-1)^id and the sign flips by (-1)^sz on every
       step of sz. */
    D mult = sz % 2 == 0 ? 1.0 : -1.0;
    D sign = id % 2 == 0 ? 1.0 : -1.0;

    I complete = 0;
    I size_div20 = PI_SIZE / 20;

    for (I i = id; i < PI_SIZE; i += sz) {
        /* Progress in 5% steps (assumes the worker count divides PI_SIZE / 20). */
        if ((i - id) % size_div20 == 0) printf("Node %ld: %ld%%\n", id, complete += 5);
        result += sign / (double)(2 * i + 1);
        sign *= mult;
    }

    /* Hand the partial sum to the gatherer (rank 0), using tag 1. */
    MPI_Send(&result, 1, MPI_DOUBLE, 0, 1, MPI_COMM_WORLD);
}

void gatherer(I sz) {
    I start = time(NULL);
    MPI_Status status;

    printf("Computing pi...\n");

    /* Collect one partial sum from every worker rank (1 .. sz - 1). */
    D result = 0.0, x;
    for (I i = 1; i < sz; ++i) {
        MPI_Recv(&x, 1, MPI_DOUBLE, i, 1, MPI_COMM_WORLD, &status);
        printf("Received node #%ld! %.10lf\n", i, x);
        result += x;
    }

    /* The series converges to pi / 4, so scale the total by 4. */
    printf("Result: %.15lf\n", result * 4.0);

    FILE *f = fopen("pi.txt", "w");
    if (f) {
        /* Note: a double only carries about 15-16 significant digits. */
        fprintf(f, "pi = %0.40lf\nTime taken: %ld\n", result * 4.0, (long)(time(NULL) - start));
        fclose(f);
    }
}

int main(void) {
    int size, rank;
    MPI_Init(NULL, NULL);
 
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* Rank 0 gathers the results; each of the other size - 1 ranks computes
       an interleaved slice of the series. */
    if (rank == 0)
        gatherer(size);
    else
        worker(rank - 1, size - 1);

    MPI_Finalize();
    return 0;
}
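
A typical way to build and run the program, assuming Open MPI's mpicc and mpirun wrappers (MPICH's differ slightly; the hostfile name is only an example):

mpicc pi.c -o pi
mpirun -np 8 ./pi                    # 8 processes on the local machine
mpirun --hostfile hosts -np 8 ./pi   # or spread across the cluster's nodes

Rank 0 only gathers, so at least two processes are needed for any terms of the series to be computed.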