Commit d21a6a1b authored by sahar.moalla's avatar sahar.moalla

INITIAL

parent 226261c0
# Makefile for MPI program (question 1)
# Source file
SRC = question1.c
# Number of processes
NP = 4
# Compiler
CC = mpicc
# Executable name
EXE = ./out

# all/run/clean produce no file named after themselves, so mark them phony.
.PHONY: all run clean

all: $(EXE)

$(EXE): $(SRC)
	$(CC) -o $(EXE) $(SRC)

# Build (if needed) before launching the MPI job.
run: $(EXE)
	mpirun -np $(NP) -f mpihost $(EXE)

clean:
	rm -f $(EXE)
\ No newline at end of file
master
slave1 user=mpiuser
slave2 user=mpiuser
File added
#include <mpi.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <stdbool.h>
/*
 * Computes the exclusive prefix sum of an array distributed in equal
 * blocks across the ranks of `communicator`.
 *
 * block_array  - this rank's block of input values (block_size elements)
 * block_size   - number of elements in this rank's block
 * block_prefix - output: block_prefix[i] holds the sum of every element
 *                that precedes this block's element i in the global array
 * communicator - MPI communicator over which the array is distributed
 *
 * Fix vs. original: malloc result is checked, and block_prefix[0] is no
 * longer written when block_size == 0 (out-of-bounds write).
 */
void prefix_mpi(int* block_array, int block_size, int* block_prefix, MPI_Comm communicator)
{
    int my_rank, com_size;
    MPI_Comm_rank(communicator, &my_rank);
    MPI_Comm_size(communicator, &com_size);

    // Step 1: local sum of this rank's block.
    int block_sum = 0;
    for (int i = 0; i < block_size; i++)
        block_sum += block_array[i];

    // Step 2: gather every rank's block sum, then add up the sums of all
    // lower-ranked blocks to get this block's global offset.
    int* all_block_sums = malloc(com_size * sizeof *all_block_sums);
    if (all_block_sums == NULL)
        MPI_Abort(communicator, EXIT_FAILURE);
    MPI_Allgather(&block_sum, 1, MPI_INT, all_block_sums, 1, MPI_INT, communicator);
    int block_prefix_sum = 0;
    for (int i = 0; i < my_rank; i++)
        block_prefix_sum += all_block_sums[i];
    free(all_block_sums);

    // Step 3: exclusive scan within the block, seeded with the global
    // offset. Guard against empty blocks before touching block_prefix[0].
    if (block_size > 0)
    {
        block_prefix[0] = block_prefix_sum;
        for (int i = 1; i < block_size; i++)
            block_prefix[i] = block_prefix[i - 1] + block_array[i - 1];
    }
}
/*
 * Driver: rank 0 builds a random array (padded so its length divides the
 * communicator size), scatters it, runs prefix_mpi, gathers the result,
 * and verifies it against a sequential exclusive prefix sum.
 *
 * Fixes vs. original: allocations are checked, total_prefix[0] (which
 * must be 0 for an exclusive scan) is now verified, and the success
 * message is only printed when no mismatch was found.
 */
int main(int argc, char** args)
{
    MPI_Init(&argc, &args);
    int my_rank;
    int com_size;
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &com_size);

    // Round the array size up to a multiple of com_size so every rank
    // receives an equally sized block.
    int total_array_size = 2048;
    if (total_array_size % com_size != 0)
        total_array_size = (total_array_size / com_size + 1) * com_size;
    int block_size = total_array_size / com_size;

    int* total_array = NULL;
    int* total_prefix = NULL;
    if (my_rank == 0)
    {
        total_array = malloc(total_array_size * sizeof *total_array);
        total_prefix = malloc(total_array_size * sizeof *total_prefix);
        if (total_array == NULL || total_prefix == NULL)
            MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
        for (int i = 0; i < total_array_size; i++)
            total_array[i] = rand() % 11;   // small values: no overflow risk
    }

    int* block_array = malloc(block_size * sizeof *block_array);
    int* block_prefix = malloc(block_size * sizeof *block_prefix);
    if (block_array == NULL || block_prefix == NULL)
        MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);

    MPI_Scatter(total_array, block_size, MPI_INT,
                block_array, block_size, MPI_INT, 0, MPI_COMM_WORLD);
    prefix_mpi(block_array, block_size, block_prefix, MPI_COMM_WORLD);
    MPI_Gather(block_prefix, block_size, MPI_INT,
               total_prefix, block_size, MPI_INT, 0, MPI_COMM_WORLD);

    if (my_rank == 0)
    {
        int errors = 0;
        // Exclusive scan: position 0 must be 0.
        if (total_prefix[0] != 0)
        {
            printf("Error at index 0: 0 expected, %i computed\n", total_prefix[0]);
            errors++;
        }
        int accum = 0;
        for (int i = 1; i < total_array_size; i++)
        {
            accum += total_array[i - 1];
            if (total_prefix[i] != accum)
            {
                printf("Error at index %i: %i expected, %i computed\n",
                       i, accum, total_prefix[i]);
                errors++;
            }
        }
        if (errors == 0)
            printf("Test completed!\n");
        free(total_array);
        free(total_prefix);
    }
    free(block_array);
    free(block_prefix);
    MPI_Finalize();
    return 0;
}
# Makefile for MPI program (question 2)
# Source file
SRC = question2.c
# Number of processes
NP = 8
# Compiler
CC = mpicc
# Executable name
EXE = ./out

# all/run/clean produce no file named after themselves, so mark them phony.
.PHONY: all run clean

all: $(EXE)

$(EXE): $(SRC)
	$(CC) -o $(EXE) $(SRC)

# Build (if needed) before launching the MPI job.
run: $(EXE)
	mpirun -np $(NP) -f mpihost $(EXE)

clean:
	rm -f $(EXE)
\ No newline at end of file
master
slave1 user=mpiuser
slave2 user=mpiuser
File added
#include <mpi.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <stdbool.h>
/*
 * Element-wise accumulation: adds each of the `count` values in
 * `received_data` into the corresponding slot of `local_result`.
 * This is the sum operator applied at one step of the tree reduction.
 */
void perform_reduction_step(int* local_result, int* received_data, int count) {
    int idx = 0;
    while (idx < count) {
        local_result[idx] = local_result[idx] + received_data[idx];
        idx++;
    }
}
/*
 * Binomial-tree sum reduction to rank 0.
 *
 * At step s (1, 2, 4, ...), ranks that are multiples of 2*s receive a
 * partial sum from rank+s (if that rank exists) and fold it in; all
 * other active ranks send their partial sum to rank-s and drop out.
 * After log2(size) steps rank 0 holds the full sum, which it copies
 * into recv_data. recv_data is only written on rank 0; other ranks may
 * pass NULL. Handles non-power-of-two sizes via the `source < size`
 * check.
 *
 * Fix vs. original: both malloc results are now checked.
 */
void reduce_tree(int* send_data, int* recv_data, int count, MPI_Comm communicator) {
    int my_rank, size;
    MPI_Comm_rank(communicator, &my_rank);
    MPI_Comm_size(communicator, &size);

    // Working copy of this rank's contribution; partial sums accumulate here.
    int* local_result = malloc(count * sizeof *local_result);
    if (local_result == NULL)
        MPI_Abort(communicator, EXIT_FAILURE);
    memcpy(local_result, send_data, count * sizeof(int));

    int step = 1;
    while (step < size) {
        if (my_rank % (2 * step) == 0) {
            // Receiver role: fold in the partner's partial sum, if the
            // partner rank exists (non-power-of-two communicators).
            int source = my_rank + step;
            if (source < size) {
                int* received_data = malloc(count * sizeof *received_data);
                if (received_data == NULL)
                    MPI_Abort(communicator, EXIT_FAILURE);
                MPI_Recv(received_data, count, MPI_INT, source, 0, communicator, MPI_STATUS_IGNORE);
                perform_reduction_step(local_result, received_data, count);
                free(received_data);
            }
        } else {
            // Sender role: hand off the partial sum and leave the tree.
            int destination = my_rank - step;
            MPI_Send(local_result, count, MPI_INT, destination, 0, communicator);
            break;
        }
        step *= 2;
    }

    // Only the root delivers the final result to the caller's buffer.
    if (my_rank == 0) {
        memcpy(recv_data, local_result, count * sizeof(int));
    }
    free(local_result);
}
/*
 * Reference sum reduction to rank 0: gathers every rank's `count`
 * values onto the root, then sums them sequentially into recv_data.
 * recv_data is only written on rank 0; other ranks may pass NULL.
 *
 * Fix vs. original: the calloc result is now checked.
 */
void reduce_sequential(
    int* send_data,
    int* recv_data,
    int count,
    MPI_Comm communicator)
{
    int my_rank;
    int com_size;
    MPI_Comm_rank(communicator, &my_rank);
    MPI_Comm_size(communicator, &com_size);

    int* gather_buffer = NULL;
    if (my_rank == 0)
    {
        // One count-sized slot per rank; calloc checks n*size overflow.
        gather_buffer = calloc((size_t)count * com_size, sizeof *gather_buffer);
        if (gather_buffer == NULL)
            MPI_Abort(communicator, EXIT_FAILURE);
    }
    MPI_Gather(send_data, count, MPI_INT, gather_buffer, count, MPI_INT, 0, communicator);
    if (my_rank == 0)
    {
        memset(recv_data, 0, count * sizeof(int));
        for (int p = 0; p < com_size; p++)
            for (int i = 0; i < count; i++)
                recv_data[i] += gather_buffer[count * p + i];
        free(gather_buffer);
    }
}
/*
 * Driver: each rank contributes an array filled with its own rank id,
 * reduces it to rank 0 with both the tree algorithm and the sequential
 * reference, then compares results and timings.
 *
 * Fixes vs. original: the comparison was inverted — it printed a line
 * for every MATCHING index and stayed silent on mismatches. It now
 * reports only mismatches and prints a summary. Allocations are checked.
 */
int main(int argc, char** args)
{
    MPI_Init(&argc, &args);
    int count = 50;
    int* recv_array_tree = NULL;
    int* recv_array_sequential = NULL;
    int my_rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);

    if (my_rank == 0)
    {
        recv_array_tree = malloc(count * sizeof *recv_array_tree);
        recv_array_sequential = malloc(count * sizeof *recv_array_sequential);
        if (recv_array_tree == NULL || recv_array_sequential == NULL)
            MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
    }

    // Every element of this rank's contribution is its rank id, so the
    // reduced value at every index is 0+1+...+(size-1).
    int* send_array = malloc(count * sizeof *send_array);
    if (send_array == NULL)
        MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
    for (int i = 0; i < count; i++)
        send_array[i] = my_rank;

    // Measure time for tree-based reduction.
    double start_tree = MPI_Wtime();
    reduce_tree(send_array, recv_array_tree, count, MPI_COMM_WORLD);
    double end_tree = MPI_Wtime();

    // Measure time for sequential reduction.
    double start_sequential = MPI_Wtime();
    reduce_sequential(send_array, recv_array_sequential, count, MPI_COMM_WORLD);
    double end_sequential = MPI_Wtime();

    if (my_rank == 0)
    {
        int mismatches = 0;
        for (int i = 0; i < count; i++)
        {
            if (recv_array_tree[i] != recv_array_sequential[i])
            {
                printf("Mismatch at index %i: reduce_tree is %i, reduce_sequential is %i\n",
                       i, recv_array_tree[i], recv_array_sequential[i]);
                mismatches++;
            }
        }
        if (mismatches == 0)
            printf("Results match at all %i indices\n", count);
        printf("Time taken by reduce_tree: %f seconds\n", end_tree - start_tree);
        printf("Time taken by reduce_sequential: %f seconds\n", end_sequential - start_sequential);
        free(recv_array_tree);
        free(recv_array_sequential);
    }
    free(send_array);
    MPI_Finalize();
    return 0;
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment