Commit 4f59304b authored by drnull03

Solved the first question and created a shared Python virtual environment (venv)

Note for Diaa: don't forget to run this on the cluster when you are at HIAST.
Also, don't forget to pass the host file as an argument:
mpirun -np 3 --hostfile mpi_hosts python .py
master
slave1 user=mpiuser
slave2 user=mpiuser
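This hostfile lists the three nodes that the mpirun -np 3 command above expects: the master plus slave1 and slave2, both accessed as the mpiuser account.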
from mpi4py import MPI
import numpy as np


def prefix_mpi(local_in, comm):
    rank = comm.Get_rank()
    size = comm.Get_size()

    # Step 1: each rank computes the sum of its local block and the root
    # gathers all block sums.
    local_sum = np.sum(local_in)
    block_sums = comm.gather(local_sum, root=0)

    # Step 2: the root computes the exclusive prefix of the block sums,
    # i.e. the offset each rank must add to its local prefix values.
    if rank == 0:
        block_prefix = np.zeros(size, dtype=int)
        for i in range(1, size):
            block_prefix[i] = block_prefix[i - 1] + block_sums[i - 1]
    else:
        block_prefix = None
    offset = comm.scatter(block_prefix, root=0)

    # Step 3: each rank computes the exclusive prefix sum of its own block,
    # starting from the received offset.
    local_out = np.zeros(len(local_in), dtype=int)
    current = offset
    for i in range(len(local_in)):
        local_out[i] = current
        current += local_in[i]
    return local_out


def prefix_sequential(arr):
    # Sequential reference: exclusive prefix sum, out[0] = 0,
    # out[i] = arr[0] + ... + arr[i - 1].
    out = np.zeros(len(arr), dtype=int)
    for i in range(1, len(arr)):
        out[i] = out[i - 1] + arr[i - 1]
    return out


# Start of program
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()

N = 12
local_n = N // size  # N must be divisible by the number of ranks

# The root generates the input data; every rank receives one block of it.
if rank == 0:
    np.random.seed(42)
    data = np.random.randint(0, 11, size=N)
    print("Input array:", data)
else:
    data = None

local_data = np.zeros(local_n, dtype=int)
comm.Scatter(data, local_data, root=0)

local_result = prefix_mpi(local_data, comm)

# The root collects the per-rank results and checks them against the
# sequential reference.
if rank == 0:
    result = np.zeros(N, dtype=int)
else:
    result = None
comm.Gather(local_result, result, root=0)

if rank == 0:
    seq = prefix_sequential(data)
    print("Parallel result :", result)
    print("Sequential result:", seq)
    print("Correct:", np.array_equal(result, seq))
# Simple hello-world test to verify that MPI can launch processes on all cluster nodes.
from mpi4py import MPI
import socket

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
print(f"Hello from rank {rank}/{size} on {socket.gethostname()}")