Commit 30b25976 authored by abdullh.alsoleman

initial

parent eac77e30
# Makefile for MPI program
# Source file
SRC = Q1.c
# Number of processes
NP = 8
# Compiler
CC = mpicc
# Executable name
EXE = ./out
all: $(EXE)

$(EXE): $(SRC)
	$(CC) -o $(EXE) $(SRC)

clean:
	rm -f $(EXE)

run:
	mpirun -np $(NP) -f mpi_hosts $(EXE)
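
# Typical usage (a sketch; assumes an MPICH-style mpirun where -f names the
# host file, and that mpi_hosts sits next to this Makefile):
#   make        # builds ./out from Q1.c
#   make run    # launches NP processes across the hosts listed in mpi_hosts
#   make clean  # removes the executable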
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <mpi.h>

#define ARRAY_SIZE 20

// Function to calculate the local (inclusive) prefix sum of one block
void calculate_local_prefix_sum(int* block_array, int block_size, int* local_prefix) {
    local_prefix[0] = block_array[0];
    for (int i = 1; i < block_size; i++) {
        local_prefix[i] = local_prefix[i - 1] + block_array[i];
    }
}
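
// For example, a block {2, 3, 1, 4} yields the inclusive local prefix sums {2, 5, 6, 9}.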

// Function to calculate the MPI-based (global) prefix sum for a block
void prefix_mpi(int* block_array, int block_size, int* block_prefix, MPI_Comm communicator) {
    int rank, size;
    MPI_Comm_rank(communicator, &rank);
    MPI_Comm_size(communicator, &size);

    int* local_prefix = (int*)malloc(block_size * sizeof(int));

    // Calculate the local prefix sum for this block
    calculate_local_prefix_sum(block_array, block_size, local_prefix);

    int* prefix_sums = (int*)malloc(size * sizeof(int));

    // Gather the last element of each local prefix (the block total) at rank 0
    MPI_Gather(&local_prefix[block_size - 1], 1, MPI_INT, prefix_sums, 1, MPI_INT, 0, communicator);

    // Rank 0 turns the block totals into an inclusive prefix over blocks,
    // then broadcasts the result to all processes
    if (rank == 0) {
        int accum = 0;
        for (int i = 0; i < size; i++) {
            accum += prefix_sums[i];
            prefix_sums[i] = accum;
        }
    }
    MPI_Bcast(prefix_sums, size, MPI_INT, 0, communicator);

    // Final prefix for this block: local prefix plus the sum of all preceding blocks
    // (prefix_sums[rank] includes this block's own total, so subtract it back out)
    for (int i = 0; i < block_size; i++) {
        block_prefix[i] = local_prefix[i] + prefix_sums[rank] - local_prefix[block_size - 1];
    }

    // Free allocated memory
    free(local_prefix);
    free(prefix_sums);
}
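
// A possible simplification (a sketch, not part of the original code; the name
// prefix_mpi_exscan is hypothetical): the gather/adjust/broadcast of block totals
// above can be replaced by MPI_Exscan, which directly gives each rank the sum of
// the block totals of all lower-ranked processes.
void prefix_mpi_exscan(int* block_array, int block_size, int* block_prefix, MPI_Comm communicator) {
    int rank;
    MPI_Comm_rank(communicator, &rank);

    int* local_prefix = (int*)malloc(block_size * sizeof(int));
    calculate_local_prefix_sum(block_array, block_size, local_prefix);

    // Exclusive scan over the per-block totals: each rank receives the sum of
    // the totals of all lower-ranked blocks.
    int block_total = local_prefix[block_size - 1];
    int offset = 0;
    MPI_Exscan(&block_total, &offset, 1, MPI_INT, MPI_SUM, communicator);
    if (rank == 0)
        offset = 0;  // MPI leaves rank 0's receive buffer undefined for MPI_Exscan

    // Shift the local prefix by the offset of all preceding blocks
    for (int i = 0; i < block_size; i++)
        block_prefix[i] = local_prefix[i] + offset;

    free(local_prefix);
}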

// Function to calculate the sequential prefix sum (used for verification)
int* calculate_sequential_prefix_sum(int* array, int size) {
    int* prefix_sum = (int*)malloc(size * sizeof(int));
    prefix_sum[0] = array[0];
    for (int i = 1; i < size; i++) {
        prefix_sum[i] = prefix_sum[i - 1] + array[i];
    }
    return prefix_sum;
}

int main(int argc, char** args) {
    MPI_Init(&argc, &args);

    int my_rank;
    int com_size;
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &com_size);

    int total_array_size = 2048;
    // Adjust the total array size to be a multiple of the number of processes
    if (total_array_size % com_size != 0)
        total_array_size = (total_array_size / com_size + 1) * com_size;
    int block_size = total_array_size / com_size;

    int* total_array = NULL;
    int* total_prefix = NULL;

    // Only process 0 initializes and fills the total array with random values
    if (my_rank == 0) {
        total_array = (int*)malloc(total_array_size * sizeof(int));
        total_prefix = (int*)malloc(total_array_size * sizeof(int));

        // Seed the generator and fill the total array with random values in [0, 10]
        srand(time(NULL));
        for (int i = 0; i < total_array_size; i++)
            total_array[i] = rand() % 11;
    }

    int* block_array = (int*)malloc(block_size * sizeof(int));
    int* block_prefix = (int*)malloc(block_size * sizeof(int));

    // Scatter the total array among the processes
    MPI_Scatter(total_array, block_size, MPI_INT, block_array, block_size, MPI_INT, 0, MPI_COMM_WORLD);

    // Calculate the prefix sum for this block in parallel
    prefix_mpi(block_array, block_size, block_prefix, MPI_COMM_WORLD);

    // Gather the per-block prefix sums to assemble the total prefix array
    MPI_Gather(block_prefix, block_size, MPI_INT, total_prefix, block_size, MPI_INT, 0, MPI_COMM_WORLD);

    if (my_rank == 0) {
        // Verify the correctness of the MPI-based prefix sum
        int accum = 0;
        for (int i = 0; i < total_array_size; i++) {
            accum += total_array[i];
            if (total_prefix[i] != accum)
                printf("Error at index %i: %i expected, %i computed\n", i, accum, total_prefix[i]);
        }
        printf("Test completed!\n");

        // Sequential prefix sum calculation for a second verification
        int* seq_prefix = calculate_sequential_prefix_sum(total_array, total_array_size);
        for (int i = 0; i < total_array_size; i++) {
            if (seq_prefix[i] != total_prefix[i])
                printf("Verification error at index %i: %i expected, %i computed\n", i, seq_prefix[i], total_prefix[i]);
        }
        free(seq_prefix);

        // Free allocated memory
        free(total_array);
        free(total_prefix);
    }

    free(block_array);
    free(block_prefix);

    MPI_Finalize();
    return 0;
}
master
slave1 user=mpiuser
slave2 user=mpiuser
# Makefile for MPI program
# Source file
SRC = Q2.c
# Number of processes
NP = 4
# Compiler
CC = mpicc
# Executable name
EXE = ./out
all: $(EXE)

$(EXE): $(SRC)
	$(CC) -o $(EXE) $(SRC)

clean:
	rm -f $(EXE)

run:
	mpirun -np $(NP) -f mpi_hosts $(EXE)
#include <mpi.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <stdbool.h>

// Function to perform parallel reduction using a binary tree
void reduce_tree(
    int* send_data,
    int* recv_data,
    int count,
    MPI_Comm communicator)
{
    int my_rank;
    int size;
    MPI_Comm_rank(communicator, &my_rank);
    MPI_Comm_size(communicator, &size);

    // Allocate memory for the local partial result
    int* local_result = (int*)malloc(count * sizeof(int));
    memcpy(local_result, send_data, count * sizeof(int));

    int step = 1;
    while (step < size)
    {
        if (my_rank % (2 * step) == 0)
        {
            int source = my_rank + step;
            if (source < size)
            {
                int* received_data = (int*)malloc(count * sizeof(int));

                // Receive data from the source process
                MPI_Recv(received_data, count, MPI_INT, source, 0, communicator, MPI_STATUS_IGNORE);

                // Perform element-wise addition with the received data
                for (int i = 0; i < count; i++)
                {
                    local_result[i] += received_data[i];
                }
                free(received_data);
            }
        }
        else
        {
            int destination = my_rank - step;

            // Send the local result to the destination process and leave the tree
            MPI_Send(local_result, count, MPI_INT, destination, 0, communicator);
            break;
        }
        step *= 2;
    }

    // Copy the final result to the recv_data buffer on the root process
    if (my_rank == 0)
    {
        memcpy(recv_data, local_result, count * sizeof(int));
    }
    free(local_result);
}
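
// For reference (a sketch, not part of the original code): the hand-written tree
// above computes the same result as MPI's built-in sum reduction to rank 0.
// With the same arguments, that call would be:
//
//     MPI_Reduce(send_data, recv_data, count, MPI_INT, MPI_SUM, 0, communicator);
//
// Keeping the explicit tree is what allows its timing to be compared against
// reduce_sequential() below.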

// Function to perform sequential reduction (gather everything at the root, then sum)
void reduce_sequential(
    int* send_data,
    int* recv_data,
    int count,
    MPI_Comm communicator)
{
    int my_rank;
    int com_size;
    MPI_Comm_rank(communicator, &my_rank);
    MPI_Comm_size(communicator, &com_size);

    int* gather_buffer = NULL;
    if (my_rank == 0)
    {
        // Allocate memory for the gather buffer on the root process
        gather_buffer = (int*)calloc(count * com_size, sizeof(int));
    }

    // Gather data from all processes to the root process
    MPI_Gather(send_data, count, MPI_INT, gather_buffer, count, MPI_INT, 0, communicator);

    if (my_rank == 0)
    {
        // Initialize the recv_data buffer on the root process
        memset(recv_data, 0, count * sizeof(int));

        // Perform element-wise addition over the gathered data
        for (int p = 0; p < com_size; p++)
            for (int i = 0; i < count; i++)
                recv_data[i] += gather_buffer[count * p + i];

        free(gather_buffer);
    }
}

int main(int argc, char** args)
{
    MPI_Init(&argc, &args);

    int count = 40;
    int* recv_array_tree = NULL;
    int* recv_array_sequential = NULL;

    int my_rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
    if (my_rank == 0)
    {
        // Allocate memory for the result arrays on the root process
        recv_array_tree = (int*)malloc(count * sizeof(int));
        recv_array_sequential = (int*)malloc(count * sizeof(int));
    }

    // Each process contributes an array filled with its own rank
    int* send_array = (int*)malloc(count * sizeof(int));
    for (int i = 0; i < count; i++)
        send_array[i] = my_rank;

    // Time the tree-based (parallel) reduction
    double start_parallel = MPI_Wtime();
    reduce_tree(send_array, recv_array_tree, count, MPI_COMM_WORLD);
    double end_parallel = MPI_Wtime();

    // Time the gather-based (sequential) reduction
    double start_sequential = MPI_Wtime();
    reduce_sequential(send_array, recv_array_sequential, count, MPI_COMM_WORLD);
    double end_sequential = MPI_Wtime();

    if (my_rank == 0)
    {
        // Compare the two results and report any mismatches
        for (int i = 0; i < count; i++)
            if (recv_array_tree[i] != recv_array_sequential[i])
                printf("Mismatch at index %i: reduce_tree is %i, reduce_sequential is %i\n",
                       i, recv_array_tree[i], recv_array_sequential[i]);

        // Print execution times
        printf("Parallel Algorithm Time: %f seconds\n", end_parallel - start_parallel);
        printf("Sequential Algorithm Time: %f seconds\n", end_sequential - start_sequential);

        // Free the result arrays on the root process
        free(recv_array_tree);
        free(recv_array_sequential);
    }

    // Free the input array on each process
    free(send_array);

    MPI_Finalize();
    return 0;
}
master
slave1 user=mpiuser
slave2 user=mpiuser