hasan.bahjat / MPI_HW · Commits

Commit 22d51672
authored Nov 18, 2024 by hasan.bahjat
Add Very Simple Implementation of the Naive MPI Allreduce
parent 6c7640e3

Showing 1 changed file with 100 additions and 0 deletions:
MPI_Allrduce.c  +100  -0
MPI_Allrduce.c (view file @ 22d51672)
#include <mpi.h>
#include <iostream>
#include <vector>
#include <cstdio>
#include <cstdlib>
#include <ctime>

using namespace std;

// Naïve implementation of MPI_Allreduce
void naive_allreduce(int local_value, int &global_value, MPI_Comm comm) {
    int rank, size;
    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &size);

    // Buffer to store the values from all processes
    vector<int> all_values(size, 0);

    // [1] Send the local value to every other process
    for (int i = 0; i < size; i++) {
        if (i != rank) {
            MPI_Send(&local_value, 1, MPI_INT, i, 0, comm);
        }
    }

    // [2] Receive values from all processes
    // Store our own value first
    all_values[rank] = local_value;

    // Receive the values from the other processes
    for (int i = 0; i < size; i++) {
        // Skip the process itself
        if (i != rank) {
            // Receive the value from rank i
            MPI_Recv(&all_values[i], 1, MPI_INT, i, 0, comm, MPI_STATUS_IGNORE);
        }
    }

    // [3] Perform the reduction (sum)
    global_value = 0;
    for (int i = 0; i < size; i++) {
        global_value += all_values[i];
    }
}

int main(int argc, char **argv) {
    // Initialize MPI
    MPI_Init(&argc, &argv);

    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    // Initialize the local value
    // Seed with time and rank so each process gets a different value
    srand(time(0) + rank);

    // Random integer between 0 and 99 (this is the first simple version;
    // in the next stage it will be replaced with an array)
    int local_value = rand() % 100;

    // Print the local value of each process
    printf("[Worker @ %d] I have local value: %d\n", rank, local_value);

    // [1] Use the naive implementation of MPI_Allreduce
    int global_value_naive = 0;
    naive_allreduce(local_value, global_value_naive, MPI_COMM_WORLD);
    printf("[Worker @ %d] I received global sum (naive): %d\n", rank, global_value_naive);

    // [2] Use the built-in MPI_Allreduce for comparison
    int global_value_builtin = 0;
    MPI_Allreduce(&local_value, &global_value_builtin, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
    printf("[Worker @ %d] I received global sum (MPI_Allreduce): %d\n", rank, global_value_builtin);

    // Finalize
    MPI_Finalize();
    return 0;
}
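
A note on the committed function: all of the MPI_Send calls in step [1] are posted before any receive in step [2]. MPI_Send is allowed to block until a matching receive is posted, so this pattern relies on the implementation's eager buffering of small messages; it works for a single int, but the same structure can deadlock with larger payloads. A minimal deadlock-free sketch of the same exchange, assuming the same scalar payload and not part of this commit, pairs each send with its receive via MPI_Sendrecv:

// Hypothetical alternative to steps [1] and [2] (not in the committed file):
// pairing the send and the receive avoids relying on eager buffering.
all_values[rank] = local_value;
for (int i = 0; i < size; i++) {
    if (i != rank) {
        MPI_Sendrecv(&local_value, 1, MPI_INT, i, 0,
                     &all_values[i], 1, MPI_INT, i, 0,
                     comm, MPI_STATUS_IGNORE);
    }
}

Either way, the naive approach exchanges size * (size - 1) point-to-point messages in total, while the library MPI_Allreduce typically uses a tree or recursive-doubling schedule.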
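
Since main prints both the naive and the built-in sum on every rank, the comparison can also be made explicit with a small check just before MPI_Finalize. The lines below are a hedged sketch of such a check, not part of the committed file:

// Hypothetical sanity check (would go just before MPI_Finalize):
// abort if the naive result ever disagrees with the library result.
if (global_value_naive != global_value_builtin) {
    printf("[Worker @ %d] Mismatch: naive=%d, MPI_Allreduce=%d\n",
           rank, global_value_naive, global_value_builtin);
    MPI_Abort(MPI_COMM_WORLD, 1);
}

With a typical MPI toolchain the file builds with a C++ compiler wrapper such as mpicxx (it uses <iostream> and <vector> despite the .c extension) and can be launched with mpirun using a few processes, for example -np 4.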