hasan.bahjat / MPI_HW · Commits

Commit 18d79ec1
Authored Nov 19, 2024 by hasan.bahjat
Update Allreduce code
parent 2571ecd7

Showing 2 changed files with 45 additions and 42 deletions (+45 −42):
    MPI_Allrduce.c    +42 −42
    RingAllreduce.c   +3 −0
MPI_Allrduce.c (view file @ 18d79ec1)
@@ -7,27 +7,27 @@
 using namespace std;
 
 // Naïve implementation of MPI_Allreduce
-void naive_allreduce(int local_value, int& global_value, MPI_Comm comm) {
+void naiveAllreduce(int localValue, int& globalValue, MPI_Comm comm) {
     int rank, size;
     MPI_Comm_rank(comm, &rank);
     MPI_Comm_size(comm, &size);
 
     // to store values from all processes
-    vector<int> all_values(size, 0);
+    vector<int> allValues(size, 0);
 
     // [1] Send the local value to all processes
     for (int i = 0; i < size; i++) {
         if (i != rank) {
-            MPI_Send(&local_value, 1, MPI_INT, i, 0, comm);
+            MPI_Send(&localValue, 1, MPI_INT, i, 0, comm);
         }
     }
 
     // [2] Receive values from all processes
     // Store own value first
-    all_values[rank] = local_value;
+    allValues[rank] = localValue;
 
     // Receive the values from the other processes
     for (int i = 0; i < size; i++) {
@@ -35,7 +35,7 @@ void naive_allreduce(int local_value, int& global_value, MPI_Comm comm) {
         if (i != rank) {
             // Receive the value from rank i
             MPI_Recv(
-                &all_values[i],
+                &allValues[i],
                 1,
                 MPI_INT,
                 i, 0,
@@ -48,40 +48,40 @@ void naive_allreduce(int local_value, int& global_value, MPI_Comm comm) {
     // [3] Perform reduction (sum)
     // global value
-    global_value = 0;
+    globalValue = 0;
     for (int i = 0; i < size; i++) {
         //reduction
-        global_value += all_values[i];
+        globalValue += allValues[i];
     }
 }
 
-void naive_allreduce_array(const vector<int>& local_array, vector<int>& global_array, MPI_Comm comm) {
+void naiveAllreduceArray(const vector<int>& localArray, vector<int>& globalArray, MPI_Comm comm) {
     // init
     int rank, size;
     MPI_Comm_rank(comm, &rank);
     MPI_Comm_size(comm, &size);
 
-    int array_size = local_array.size();
+    int arraySize = localArray.size();
 
     // Temporary buffer to store received arrays
-    vector<int> received_array(array_size, 0);
+    vector<int> receivedArray(arraySize, 0);
 
-    // Initialize global_array with local_array
-    global_array = local_array;
+    // Initialize global_array with localArray
+    globalArray = localArray;
 
     // [1] Exchange data with all other processes
     for (int i = 0; i < size; i++) {
         if (i != rank) {
-            // Send local_array to process i
-            MPI_Send(local_array.data(), array_size, MPI_INT, i, 0, comm);
+            // Send localArray to process i
+            MPI_Send(localArray.data(), arraySize, MPI_INT, i, 0, comm);
 
             // Receive array from process i
-            MPI_Recv(received_array.data(), array_size, MPI_INT, i, 0, comm, MPI_STATUS_IGNORE);
+            MPI_Recv(receivedArray.data(), arraySize, MPI_INT, i, 0, comm, MPI_STATUS_IGNORE);
 
             // Add the received array to the global_array
-            for (int j = 0; j < array_size; j++) {
-                global_array[j] += received_array[j];
+            for (int j = 0; j < arraySize; j++) {
+                globalArray[j] += receivedArray[j];
             }
         }
     }
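A note on the exchange pattern above: each rank posts MPI_Send to a peer before the matching MPI_Recv, so the loop only completes safely while the arrays are small enough for MPI's eager buffering. The same pairwise exchange can be written with MPI_Sendrecv, which removes that assumption. A minimal sketch follows; the helper name exchangeWith is illustrative, not taken from this file.

    #include <mpi.h>
    #include <vector>
    using std::vector;

    // Combined send and receive with one peer: neither rank can block the other in MPI_Send.
    void exchangeWith(int peer, const vector<int>& localArray,
                      vector<int>& receivedArray, MPI_Comm comm) {
        MPI_Sendrecv(localArray.data(),    (int)localArray.size(),    MPI_INT, peer, 0,
                     receivedArray.data(), (int)receivedArray.size(), MPI_INT, peer, 0,
                     comm, MPI_STATUS_IGNORE);
    }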
@@ -89,14 +89,14 @@ void naive_allreduce_array(const vector<int>& local_array, vector<int>& global_a
 void print(vector<int>& array, int rank, string message){
-    string output_local = "[Worker @ " + to_string(rank) + "] " + message + ": ";
+    string output = "[Worker @ " + to_string(rank) + "] " + message + ": ";
 
     for (int val : array) {
-        output_local += to_string(val) + " ";
+        output += to_string(val) + " ";
     }
 
-    printf("%s\n", output_local.c_str());
+    printf("%s\n", output.c_str());
 }
@@ -115,40 +115,40 @@ int main(int argc, char** argv) {
     srand(time(0) + rank);
 
     // Random integer between 0 and 99 (this is a first simple intuition; in the next stage I will replace it with an array)
-    int local_value = rand() % 100;
+    int localValue = rand() % 100;
 
     const int ARRAY_SIZE = 5; // Length of each process's array
-    vector<int> local_array(ARRAY_SIZE);
-    vector<int> global_array_naive(ARRAY_SIZE);
-    vector<int> global_array_builtin(ARRAY_SIZE);
+    vector<int> localArray(ARRAY_SIZE);
+    vector<int> globalArrayNaive(ARRAY_SIZE);
+    vector<int> globalArrayBuiltin(ARRAY_SIZE);
 
     // Generate random integers for the local array
     for (int i = 0; i < ARRAY_SIZE; i++) {
-        local_array[i] = rand() % 100; // Random integers between 0 and 99
+        localArray[i] = rand() % 100; // Random integers between 0 and 99
     }
 
     // Print local values for each process
-    // printf("[Worker @ %d] I have local value: %d \n", rank, local_value);
-    print(local_array, rank, "Local array");
+    // printf("[Worker @ %d] I have local value: %d \n", rank, localValue);
+    print(localArray, rank, "Local array");
 
     //[1] Use the naive implementation of MPI_Allreduce
-    int global_value_naive = 0;
+    int globalValue = 0;
 
     // Start timing
     double start_time_naive = MPI_Wtime();
 
     // naive MPI All Reduce
     // naive_allreduce(
-    //     local_value,
-    //     global_value_naive,
+    //     localValue,
+    //     globalValue,
     //     MPI_COMM_WORLD
     // );
 
-    naive_allreduce_array(
-        local_array,
-        global_array_naive,
+    naiveAllreduceArray(
+        localArray,
+        globalArrayNaive,
         MPI_COMM_WORLD
     );
@@ -157,31 +157,31 @@ int main(int argc, char** argv) {
     double time_naive = end_time_naive - start_time_naive;
 
     // printf("[Worker @ %d] My Naive MPI_Allreduce: Global sum = %d, Time = %.6f seconds\n",
-    //        rank, global_value_naive, time_naive);
+    //        rank, globalValue, time_naive);
 
     string message = "My Naive MPI_Allreduce ";
     message += "Time: " + to_string(time_naive) + " seconds | Result";
-    print(global_array_naive, rank, message);
+    print(globalArrayNaive, rank, message);
 
     // [2] Use the built-in MPI_Allreduce for comparison
-    int global_value_builtin = 0;
+    int globalValueBuiltIn = 0;
 
     // Start timing
     double start_time_builtin = MPI_Wtime();
 
     // MPI Allreduce built-in
     // MPI_Allreduce(
-    //     &local_value,
-    //     &global_value_builtin,
+    //     &localValue,
+    //     &globalValueBuiltIn,
     //     1, MPI_INT,
     //     MPI_SUM,
     //     MPI_COMM_WORLD
     // );
 
     MPI_Allreduce(
-        local_array.data(),
-        global_array_builtin.data(),
+        localArray.data(),
+        globalArrayBuiltin.data(),
         ARRAY_SIZE,
         MPI_INT,
         MPI_SUM,
@@ -193,12 +193,12 @@ int main(int argc, char** argv) {
     double time_builtin = end_time_builtin - start_time_builtin;
 
     // printf("[Worker @ %d] Built-in MPI_Allreduce: Global sum = %d, Time = %.6f seconds\n",
-    //        rank, global_value_builtin, time_builtin);
+    //        rank, globalArrayBuiltin, time_builtin);
 
     message = "Built-in MPI_Allreduce";
     message += " Time: " + to_string(time_builtin) + " seconds | Result";
-    print(global_array_builtin, rank, message);
+    print(globalArrayBuiltin, rank, message);
 
     // Finalize
     MPI_Finalize();
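For reference, the built-in call above performs the whole element-wise reduction in one collective: MPI_Allreduce(sendbuf, recvbuf, count, datatype, op, comm) leaves every rank holding the reduction of all ranks' buffers, so with three ranks holding {1, 2}, {3, 4} and {5, 6} under MPI_SUM, every rank ends up with {9, 12}. Since the file uses vector and string despite its .c extension, it presumably needs to be compiled as C++, e.g. mpicxx MPI_Allrduce.c -o allreduce, and run with something like mpirun -np 4 ./allreduce (wrapper and launcher names depend on the MPI installation).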
RingAllreduce.c (view file @ 18d79ec1)
@@ -133,6 +133,9 @@ int main(int argc, char** argv) {
     // seed with time and rank for unique values
     srand(time(0) + rank);
 
+    // random integer between 0 and 99 (this is a first simple intuition; in the next stage I will replace it with an array)
+    int localValue = rand() % 100;
+
     // define a:
     // local array for the local reduction
     vector<int> localArray(ARRAY_SIZE);
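The rest of RingAllreduce.c is unchanged in this commit, so its implementation is not visible here. For orientation only, one simple variant of the ring allreduce pattern the file name refers to is sketched below (a generic illustration, not code taken from the file): each rank keeps forwarding the value it last received to its right neighbour while receiving from its left, so after size - 1 steps every rank has accumulated every contribution.

    #include <mpi.h>
    #include <vector>
    using std::vector;

    // Generic ring allreduce (sum) sketch; not taken from RingAllreduce.c.
    void ringAllreduceSum(vector<int>& data, MPI_Comm comm) {
        int rank, size;
        MPI_Comm_rank(comm, &rank);
        MPI_Comm_size(comm, &size);
        int left  = (rank - 1 + size) % size;   // neighbour we receive from
        int right = (rank + 1) % size;          // neighbour we send to
        vector<int> sendBuf = data;             // contribution currently being forwarded
        vector<int> recvBuf(data.size());
        for (int step = 0; step < size - 1; step++) {
            // Pass the current block one position around the ring and accumulate what arrives.
            MPI_Sendrecv(sendBuf.data(), (int)sendBuf.size(), MPI_INT, right, 0,
                         recvBuf.data(), (int)recvBuf.size(), MPI_INT, left, 0,
                         comm, MPI_STATUS_IGNORE);
            for (size_t j = 0; j < data.size(); j++) data[j] += recvBuf[j];
            sendBuf = recvBuf;
        }
    }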