saad.aswad / MPI_Distributed_Programming / Commits

Commit 087777e8
authored Jan 04, 2026 by saad.aswad

[P2] Impl Manual Reduce logic

parent 7a70f960

Showing 1 changed file with 65 additions and 12 deletions (+65 -12)

src/problem2/manual_reduce.py  View file @ 087777e8
...
...
@@ -2,7 +2,7 @@
 from mpi4py import MPI
 import numpy as np
 
-def manual_reduce(sendbuf, recvbuf, op, root, comm):
+def manual_reduce(sendbuf, recvbuf, op=MPI.SUM, root=0, comm=MPI.COMM_WORLD):
     """
     Implementation of MPI_Reduce using tree-based communication.
     Topology:
...
...
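Side note (not part of this commit): the docstring's Topology section is collapsed above. Below is a minimal sketch of the binary-tree layout implied by the index arithmetic in the next hunk (left = 2 * rank + 1, right = 2 * rank + 2, parent = (rank - 1) // 2); the print_tree helper is illustrative only and does not exist in the repository.

# Illustrative helper, not in the repository: prints the reduction tree
# implied by the child/parent index arithmetic, for a given number of ranks.
def print_tree(size):
    for rank in range(size):
        left, right = 2 * rank + 1, 2 * rank + 2
        children = [c for c in (left, right) if c < size]
        parent = (rank - 1) // 2 if rank != 0 else None
        print(f"rank {rank}: parent={parent}, children={children}")

print_tree(7)
# rank 0: parent=None, children=[1, 2]
# rank 1: parent=0, children=[3, 4]
# rank 2: parent=0, children=[5, 6]
# rank 3: parent=1, children=[]
# ...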
@@ -12,18 +12,71 @@ def manual_reduce(sendbuf, recvbuf, op, root, comm):
     rank = comm.Get_rank()
     size = comm.Get_size()
 
-    # Start with local data
-    # We assume sendbuf is a numpy array (for addition)
-    # Copy sendbuf to a local accumulator (or directly to recvbuf if leaf,
-    # but we need a temp buffer for accumulation)
+    # Initialize the accumulator with the local data.
+    # We assume sendbuf is a numpy array; recvbuf is where the result goes on root.
+    # We need a temporary buffer matching sendbuf's size/type.
+    # Logic:
+    #   1. Receive from children (if any).
+    #   2. Add into the local accumulator.
+    #   3. If rank != root, Send the accumulator to the parent.
+    #   4. If rank == root, copy the accumulator to recvbuf.
+    # For simplicity, sendbuf is assumed to be a numpy array (a scalar would
+    # need wrapping/unwrapping); numpy is assumed for this assignment.
-    import numpy as np
-    pass
+    # Local accumulator
+    acc = np.copy(sendbuf)
+
+    # Children in the binary tree
+    left = 2 * rank + 1
+    right = 2 * rank + 2
+
+    # Receive from the left child
+    if left < size:
+        # In MPI_Reduce the count/datatype are known; here the expected
+        # shape and dtype are taken from sendbuf via np.empty_like.
+        temp_recv = np.empty_like(sendbuf)
+        comm.Recv(temp_recv, source=left, tag=111)
+        acc += temp_recv
+
+    # Receive from the right child
+    if right < size:
+        temp_recv = np.empty_like(sendbuf)
+        comm.Recv(temp_recv, source=right, tag=111)
+        acc += temp_recv
+
+    # Send to the parent, or store the result on root
+    if rank == root:
+        # We are root; the result is in acc, so copy it to recvbuf.
+        if recvbuf is not None:
+            # In mpi4py recvbuf can be None on non-root ranks, but here we are
+            # root, so recvbuf must be a mutable buffer.
+            recvbuf[:] = acc[:]
+    else:
+        parent = (rank - 1) // 2
+        comm.Send(acc, dest=parent, tag=111)
 
 
 def main():
-    pass
+    comm = MPI.COMM_WORLD
+    rank = comm.Get_rank()
+    size = comm.Get_size()
+
+    # Generate a random local vector
+    N = 10
+    local_vec = np.random.randint(0, 10, N).astype(np.int32)
+    # print(f"Rank {rank}: {local_vec}")
+
+    # Verify against the standard Reduce to check correctness
+    std_result = np.zeros(N, dtype=np.int32) if rank == 0 else None
+    comm.Reduce(local_vec, std_result, op=MPI.SUM, root=0)
+
+    # Manual Reduce
+    manual_result = np.zeros(N, dtype=np.int32) if rank == 0 else None
+    manual_reduce(local_vec, manual_result, op=MPI.SUM, root=0, comm=comm)
+
+    if rank == 0:
+        if np.array_equal(std_result, manual_result):
+            print("Rank 0: Manual Tree Reduce SUCCESS")
+            print(f"Result: {manual_result[:5]}...")
+        else:
+            print("Rank 0: Manual Tree Reduce FAILURE")
+            print(f"Std: {std_result}")
+            print(f"Man: {manual_result}")