/*
 * jacobi_mpi_line.c (JacobiHPC/mpi_line)
 *
 * MPI version of the Jacobi iteration with the matrix subdivided by
 * "lines": each process owns a contiguous block of rows and exchanges
 * its border rows with its neighbours after every sweep.
 */
#include <stdio.h>
#include <math.h>
#include <mpi.h>
#include "../config/config.h"
#include "../utils/utils.h"
/* Tag used for every halo-row exchange; one tag suffices because each
   send/recv pair is already identified by its source and destination. */
#define TAG_BORDER 0

/* NOTE: declared here but never defined or called in this file. */
void compute_jacobi(int n, double init_value, double threshold, borders b);
int main(int argc, char* argv[]) {
    int rank, numprocs;
    int n;
    double init_value, threshold;
    double north, south, east, west;
    borders b;
    int config_loaded;
    configuration config;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
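
    /* Only the root process reads the configuration file; the values are
       broadcast to every other process below. */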
    if (rank == 0) {
        config_loaded = load_config(&config);
        if (config_loaded != 0) {
            MPI_Abort(MPI_COMM_WORLD, 1);
        }
        n = config.n;
        threshold = config.threshold;
        init_value = config.init_value;
        north = config.north;
        south = config.south;
        east = config.east;
        west = config.west;
    }
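
    /* Broadcast the scalar configuration to every process. (The six doubles
       could be packed into a single broadcast of an array, but separate
       broadcasts keep the code obvious.) */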
    MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Bcast(&init_value, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&threshold, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&north, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&south, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&east, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    MPI_Bcast(&west, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);

    b.north = north;
    b.south = south;
    b.east = east;
    b.west = west;
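
    /* Row-block decomposition: each process owns n / numprocs rows; rank 0
       also absorbs the remainder when numprocs does not divide n evenly. */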
    int rows;
    if (rank == 0) {
        rows = n - (n / numprocs) * (numprocs - 1);
    } else {
        rows = n / numprocs;
    }
    LOG(printf("[Process %d/%d] rows: %d\n", rank, numprocs, rows));
    double **x;
    double max_diff, global_max_diff, new_x;
    int i, j, iterations;
    MPI_Status status;
    double startwtime = 0.0, endwtime;

    if (rank == 0) {
        startwtime = MPI_Wtime();
    }
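
    /* The local block carries one halo row above and below (rows + 2 rows in
       total) and one halo column on each side (n + 2 columns), so the stencil
       below needs no special cases at block edges. */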
/* LOG(printf("[Process %d/%d] initializing matrix\n", rank, numprocs)); */
/* Initialize the matrix */
x = create_matrix(rows + 2, n + 2);
for (i = 0; i < rows + 2; i++) {
for (j = 1; j <= n; j++) {
x[i][j] = init_value;
}
}
    /* Initialize boundary regions; both the north and the south loops cover
       columns 1..n, leaving the west/east halo columns intact. */
    for (i = 0; i < rows + 2; i++) {
        x[i][0] = b.west;
        x[i][n + 1] = b.east;
    }
    if (rank == 0) {
        for (i = 1; i <= n; i++) {
            x[0][i] = b.north;
        }
    }
    if (rank == numprocs - 1) {
        for (i = 1; i <= n; i++) {
            x[rows + 1][i] = b.south;
        }
    }
/* LOG(printf("[Process %d/%d] matrix initialized\n", rank, numprocs)); */
/* Iterative refinement of x until values converge */
iterations = 0;
do {
max_diff = 0;
global_max_diff = 0;
for (i = 1; i <= rows; i++) {
for (j = 1; j <= n; j++) {
new_x = 0.25 * (x[i - 1][j] + x[i][j + 1] + x[i + 1][j] + x[i][j - 1]);
max_diff = (double) fmax(max_diff, fabs(new_x - x[i][j]));
x[i][j] = new_x;
}
}
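
        /* Halo exchange with the neighbouring row blocks. Even ranks send
           first and odd ranks receive first, so every blocking send meets a
           matching receive and the exchange cannot deadlock. */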
        if (rank % 2 == 0) {
            if (rank != numprocs - 1) {
                /* Send and receive south border */
                MPI_Send(&x[rows][0], n + 2, MPI_DOUBLE, rank + 1, TAG_BORDER, MPI_COMM_WORLD);
                MPI_Recv(&x[rows + 1][0], n + 2, MPI_DOUBLE, rank + 1, TAG_BORDER, MPI_COMM_WORLD, &status);
            }
            if (rank != 0) {
                /* Send and receive north border */
                MPI_Send(&x[1][0], n + 2, MPI_DOUBLE, rank - 1, TAG_BORDER, MPI_COMM_WORLD);
                MPI_Recv(&x[0][0], n + 2, MPI_DOUBLE, rank - 1, TAG_BORDER, MPI_COMM_WORLD, &status);
            }
        } else {
            /* Receive and send north border; odd ranks always have a
               northern neighbour, since rank - 1 >= 0. */
            MPI_Recv(&x[0][0], n + 2, MPI_DOUBLE, rank - 1, TAG_BORDER, MPI_COMM_WORLD, &status);
            MPI_Send(&x[1][0], n + 2, MPI_DOUBLE, rank - 1, TAG_BORDER, MPI_COMM_WORLD);
            if (rank != numprocs - 1) {
                /* Receive and send south border */
                MPI_Recv(&x[rows + 1][0], n + 2, MPI_DOUBLE, rank + 1, TAG_BORDER, MPI_COMM_WORLD, &status);
                MPI_Send(&x[rows][0], n + 2, MPI_DOUBLE, rank + 1, TAG_BORDER, MPI_COMM_WORLD);
            }
        }
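
        /* Agree on the worst residual across all ranks so that every process
           evaluates the same convergence test. */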
/* LOG(printf("[Process %d/%d] max_diff: %f\n", rank, numprocs, max_diff)); */
MPI_Allreduce(&max_diff, &global_max_diff, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
/* LOG(printf("[Process %d/%d] global_max_diff: %f\n", rank, numprocs, global_max_diff)); */
iterations++;
} while (global_max_diff > threshold);

    if (rank == 0) {
        endwtime = MPI_Wtime();
        printf("Wall clock time: %fs\n", endwtime - startwtime);
        printf("Iterations: %d\n", iterations);
    }

    MPI_Finalize();
    return 0;
}
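
/*
 * A plausible build-and-run invocation (assumed, not taken from the project's
 * build files; ../utils/utils.c is presumed to provide load_config and
 * create_matrix):
 *
 *   mpicc -O2 jacobi_mpi_line.c ../utils/utils.c -o jacobi_mpi_line -lm
 *   mpirun -np 4 ./jacobi_mpi_line
 */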