Commit 18d79ec1 authored by hasan.bahjat's avatar hasan.bahjat 💬

Update All reduce code

parent 2571ecd7
...@@ -7,27 +7,27 @@ ...@@ -7,27 +7,27 @@
using namespace std; using namespace std;
// Naïve implementation of MPI_Allreduce // Naïve implementation of MPI_Allreduce
void naive_allreduce(int local_value, int& global_value, MPI_Comm comm) { void naiveAllreduce(int localValue, int& globalValue, MPI_Comm comm) {
int rank, size; int rank, size;
MPI_Comm_rank(comm, &rank); MPI_Comm_rank(comm, &rank);
MPI_Comm_size(comm, &size); MPI_Comm_size(comm, &size);
// to store values from all processes // to store values from all processes
vector<int> all_values(size, 0); vector<int> allValues(size, 0);
// [1] Send the local value to all processes // [1] Send the local value to all processes
for (int i = 0; i < size; i++) { for (int i = 0; i < size; i++) {
if (i != rank) { if (i != rank) {
MPI_Send(&local_value, 1, MPI_INT, i, 0, comm); MPI_Send(&localValue, 1, MPI_INT, i, 0, comm);
} }
} }
// [2] Receive values from all processes // [2] Receive values from all processes
// Store own value first // Store own value first
all_values[rank] = local_value; allValues[rank] = localValue;
    // Receive the values from other processes // Receive the values from other processes
for (int i = 0; i < size; i++) { for (int i = 0; i < size; i++) {
...@@ -35,7 +35,7 @@ void naive_allreduce(int local_value, int& global_value, MPI_Comm comm) { ...@@ -35,7 +35,7 @@ void naive_allreduce(int local_value, int& global_value, MPI_Comm comm) {
if (i != rank) { if (i != rank) {
            // Receive the value from rank i // Receive the value from rank i
MPI_Recv( MPI_Recv(
&all_values[i], &allValues[i],
1, 1,
MPI_INT, MPI_INT,
i, 0, i, 0,
...@@ -48,40 +48,40 @@ void naive_allreduce(int local_value, int& global_value, MPI_Comm comm) { ...@@ -48,40 +48,40 @@ void naive_allreduce(int local_value, int& global_value, MPI_Comm comm) {
// [3] Perform reduction (sum) // [3] Perform reduction (sum)
// global value // global value
global_value = 0; globalValue = 0;
for (int i = 0; i < size; i++) { for (int i = 0; i < size; i++) {
//reduction //reduction
global_value += all_values[i]; globalValue += allValues[i];
} }
} }
void naive_allreduce_array(const vector<int>& local_array, vector<int>& global_array, MPI_Comm comm) { void naiveAllreduceArray(const vector<int>& localArray, vector<int>& globalArray, MPI_Comm comm) {
// init // init
int rank, size; int rank, size;
MPI_Comm_rank(comm, &rank); MPI_Comm_rank(comm, &rank);
MPI_Comm_size(comm, &size); MPI_Comm_size(comm, &size);
int array_size = local_array.size(); int arraySize = localArray.size();
// Temporary buffer to store received arrays // Temporary buffer to store received arrays
vector<int> received_array(array_size, 0); vector<int> receivedArray(arraySize, 0);
// Initialize global_array with local_array // Initialize global_array with localArray
global_array = local_array; globalArray = localArray;
// [1] Exchange data with all other processes // [1] Exchange data with all other processes
for (int i = 0; i < size; i++) { for (int i = 0; i < size; i++) {
if (i != rank) { if (i != rank) {
// Send local_array to process i // Send localArray to process i
MPI_Send(local_array.data(), array_size, MPI_INT, i, 0, comm); MPI_Send(localArray.data(), arraySize, MPI_INT, i, 0, comm);
// Receive array from process i // Receive array from process i
MPI_Recv(received_array.data(), array_size, MPI_INT, i, 0, comm, MPI_STATUS_IGNORE); MPI_Recv(receivedArray.data(), arraySize, MPI_INT, i, 0, comm, MPI_STATUS_IGNORE);
// Add the received array to the global_array // Add the received array to the global_array
for (int j = 0; j < array_size; j++) { for (int j = 0; j < arraySize; j++) {
global_array[j] += received_array[j]; globalArray[j] += receivedArray[j];
} }
} }
} }
...@@ -89,14 +89,14 @@ void naive_allreduce_array(const vector<int>& local_array, vector<int>& global_a ...@@ -89,14 +89,14 @@ void naive_allreduce_array(const vector<int>& local_array, vector<int>& global_a
// Print an integer array prefixed with the worker rank and a caller-supplied label.
// Output format (unchanged): "[Worker @ <rank>] <message>: v0 v1 ... \n"
void print(vector<int>& array ,int rank, string message){
    // Build the full line first so a single printf emits it in one shot.
    string line = "[Worker @ " + to_string(rank) + "] " + message + ": ";
    for (size_t i = 0; i < array.size(); ++i) {
        line += to_string(array[i]) + " ";
    }
    printf("%s \n", line.c_str());
}
...@@ -115,40 +115,40 @@ int main(int argc, char** argv) { ...@@ -115,40 +115,40 @@ int main(int argc, char** argv) {
srand(time(0) + rank); srand(time(0) + rank);
    // Random integer between 0 and 99 (this is first simple intuition in the next stage I will replace it with array )	    // Random integer between 0 and 99 (this is first simple intuition in the next stage I will replace it with array )
int local_value = rand() % 100; int localValue = rand() % 100;
const int ARRAY_SIZE = 5; // Length of each process's array const int ARRAY_SIZE = 5; // Length of each process's array
vector<int> local_array(ARRAY_SIZE); vector<int> localArray(ARRAY_SIZE);
vector<int> global_array_naive(ARRAY_SIZE); vector<int> globalArrayNaive(ARRAY_SIZE);
vector<int> global_array_builtin(ARRAY_SIZE); vector<int> globalArrayBuiltin(ARRAY_SIZE);
// Generate random integers for the local array // Generate random integers for the local array
for (int i = 0; i < ARRAY_SIZE; i++) { for (int i = 0; i < ARRAY_SIZE; i++) {
local_array[i] = rand() % 100; // Random integers between 0 and 99 localArray[i] = rand() % 100; // Random integers between 0 and 99
} }
// Print local values for each process // Print local values for each process
    // printf("[Worker @ %d] I have local value: %d \n",rank,local_value );	    // printf("[Worker @ %d] I have local value: %d \n",rank,localValue );
print(local_array,rank,"Local array"); print(localArray,rank,"Local array");
//[1] Use the naive implementation of MPI_Allreduce //[1] Use the naive implementation of MPI_Allreduce
int global_value_naive = 0; int globalValue = 0;
// Start timing // Start timing
double start_time_naive = MPI_Wtime(); double start_time_naive = MPI_Wtime();
// naive MPI All Reduce // naive MPI All Reduce
// naive_allreduce( // naive_allreduce(
// local_value, // localValue,
// global_value_naive, // globalValue,
// MPI_COMM_WORLD // MPI_COMM_WORLD
// ); // );
naive_allreduce_array( naiveAllreduceArray(
local_array, localArray,
global_array_naive, globalArrayNaive,
MPI_COMM_WORLD MPI_COMM_WORLD
); );
...@@ -157,31 +157,31 @@ int main(int argc, char** argv) { ...@@ -157,31 +157,31 @@ int main(int argc, char** argv) {
double time_naive = end_time_naive - start_time_naive; double time_naive = end_time_naive - start_time_naive;
// printf("[Worker @ %d] My Naive MPI_Allreduce: Global sum = %d, Time = %.6f seconds\n", // printf("[Worker @ %d] My Naive MPI_Allreduce: Global sum = %d, Time = %.6f seconds\n",
// rank, global_value_naive, time_naive); // rank, globalValue, time_naive);
string message ="My Naive MPI_Allreduce "; string message ="My Naive MPI_Allreduce ";
message += "Time: " + to_string(time_naive) + " seconds | Result"; message += "Time: " + to_string(time_naive) + " seconds | Result";
print(global_array_naive,rank,message); print(globalArrayNaive,rank,message);
// [2] Use the built-in MPI_Allreduce for comparison // [2] Use the built-in MPI_Allreduce for comparison
int global_value_builtin = 0; int globalValueBuiltIn = 0;
// Start timing // Start timing
double start_time_builtin = MPI_Wtime(); double start_time_builtin = MPI_Wtime();
    // MPI Allreduce built-in	    // MPI Allreduce built-in
// MPI_Allreduce( // MPI_Allreduce(
// &local_value, // &localValue,
// &global_value_builtin, // &globalValueBuiltIn,
// 1, MPI_INT, // 1, MPI_INT,
// MPI_SUM, // MPI_SUM,
// MPI_COMM_WORLD // MPI_COMM_WORLD
// ); // );
MPI_Allreduce( MPI_Allreduce(
local_array.data(), localArray.data(),
global_array_builtin.data(), globalArrayBuiltin.data(),
ARRAY_SIZE, ARRAY_SIZE,
MPI_INT, MPI_INT,
MPI_SUM, MPI_SUM,
...@@ -193,12 +193,12 @@ int main(int argc, char** argv) { ...@@ -193,12 +193,12 @@ int main(int argc, char** argv) {
double time_builtin = end_time_builtin - start_time_builtin; double time_builtin = end_time_builtin - start_time_builtin;
// printf("[Worker @ %d] Built-in MPI_Allreduce: Global sum = %d, Time = %.6f seconds\n", // printf("[Worker @ %d] Built-in MPI_Allreduce: Global sum = %d, Time = %.6f seconds\n",
// rank, global_value_builtin, time_builtin); // rank, globalArrayBuiltin, time_builtin);
message ="Built-in MPI_Allreduce"; message ="Built-in MPI_Allreduce";
message += " Time: " + to_string(time_builtin) + " seconds | Result"; message += " Time: " + to_string(time_builtin) + " seconds | Result";
print(global_array_builtin,rank,message); print(globalArrayBuiltin,rank,message);
    // Finalize	    // Finalize
MPI_Finalize(); MPI_Finalize();
......
...@@ -133,6 +133,9 @@ int main(int argc, char** argv) { ...@@ -133,6 +133,9 @@ int main(int argc, char** argv) {
// seed with time and rank for unique values // seed with time and rank for unique values
srand(time(0) + rank); srand(time(0) + rank);
    // random integer between 0 and 99 (this is first simple intuition in the next stage I will replace it with array )
int localValue = rand() % 100;
// define a : // define a :
// local array for the local reduction // local array for the local reduction
vector<int> localArray(ARRAY_SIZE); vector<int> localArray(ARRAY_SIZE);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment