Example #1
# connect back to the parent (spawning) process through the intercommunicator
parent = mpi.mpi_comm_get_parent()
parentSize = mpi.mpi_comm_size(parent)
print "parentSize", parentSize

# build a per-worker output file name from the prefix passed in as sys.argv[1]
tod = stamp()
s = sys.argv[1] + "%2.2d" % myid
print "hello from python worker", myid, " writing to ", s

# receive the parent's broadcast (root 0 of the parent intercommunicator)
# and record it, along with a time stamp, in this worker's output file
x = array([5, 3, 4, 2], 'i')
print "starting bcast"
buffer = mpi.mpi_bcast(x, 4, mpi.MPI_INT, 0, parent)
out = open(s, "w")
out.write(str(buffer))
out.write(tod + "\n")
out.close()

print myid, " got ", buffer
# receive this worker's element of the parent's scatter
junk = mpi.mpi_scatter(x, 1, mpi.MPI_INT, 1, mpi.MPI_INT, 0, parent)
print myid, " got scatter ", junk

# receive a value from the parent (tag 1234), increment it, and send it back (tag 5678)
back = mpi.mpi_recv(1, mpi.MPI_INT, 0, 1234, parent)
back[0] = back[0] + 1
mpi.mpi_send(back, 1, mpi.MPI_INT, 0, 5678, parent)

# contribute this worker's rank to a sum reduction rooted at the parent, then shut down
dummy = myid
final = mpi.mpi_reduce(dummy, 1, mpi.MPI_INT, mpi.MPI_SUM, 0, parent)

sleep(10)
mpi.mpi_comm_free(parent)
mpi.mpi_finalize()
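
The excerpt above begins after MPI start-up. A minimal sketch of the setup it assumes is shown below; the imports, the mpi_init / mpi_comm_rank calls, and the stamp() helper are assumptions, not lines from the original project.

# Assumed preamble for the worker excerpt above (not part of the original snippet).
import sys
from time import sleep, strftime
from numpy import array   # assumption: the project may instead use "from numpy import *"
import mpi                 # pydusa-style MPI wrapper

def stamp():
    # hypothetical stand-in for the project's time-stamp helper
    return strftime("%H:%M:%S")

# assumed pydusa-style initialization and rank query
mpi.mpi_init(len(sys.argv), sys.argv)
myid = mpi.mpi_comm_rank(mpi.MPI_COMM_WORLD)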
Example #2
File: tspawn.py  Project: timkphd/examples
# spawn the workers; the string "from_P_" is handed to them on their command line
newcom1 = mpi.mpi_comm_spawn(toRun, "from_P_", copies, mpi.MPI_INFO_NULL,
                             0, mpi.MPI_COMM_WORLD)
errors = mpi.mpi_array_of_errcodes()
print "errors=", errors
# on an intercommunicator, mpi_comm_size reports the local group size
newcom1Size = mpi.mpi_comm_size(newcom1)
print "newcom1Size", newcom1Size, " yes it is strange but it should be 1"

##### bcast #####
# MPI_ROOT marks this process as the source of the broadcast to the workers
x = array([1, 2, 3, 4], "i")
count = 4
print "head starting bcast", x
junk = mpi.mpi_bcast(x, count, mpi.MPI_INT, mpi.MPI_ROOT, newcom1)
print "head did bcast"

##### scatter #####
scat = array([10, 20, 30], "i")
junk = mpi.mpi_scatter(scat, 1, mpi.MPI_INT, 1, mpi.MPI_INT, mpi.MPI_ROOT, newcom1)

##### send/recv #####
# hand each worker a distinct value and read back its incremented reply
for i in range(0, copies):
    k = (i + 1) * 100
    mpi.mpi_send(k, 1, mpi.MPI_INT, i, 1234, newcom1)
    back = mpi.mpi_recv(1, mpi.MPI_INT, i, 5678, newcom1)
    print "from ", i, back

##### reduce #####
# receive the sum of the workers' contributions at the spawning side
dummy = 1000
final = mpi.mpi_reduce(dummy, 1, mpi.MPI_INT, mpi.MPI_SUM, mpi.MPI_ROOT, newcom1)


sleep(5)
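
The spawning excerpt assumes MPI is already initialized and that toRun and copies are defined earlier in tspawn.py. A minimal sketch of that setup is below; the values of toRun and copies are illustrative guesses, not lines from the project, and mpi_init is assumed to follow the usual pydusa call shape. The "from_P_" argument given to mpi_comm_spawn appears to be what each worker reads as sys.argv[1] in Example #1, producing output files such as from_P_00, from_P_01, and so on.

# Assumed preamble for the spawning excerpt above (illustrative values only).
import sys
from time import sleep
from numpy import array
import mpi

mpi.mpi_init(len(sys.argv), sys.argv)   # assumed pydusa-style initialization

toRun = "./child.py"   # hypothetical: an executable worker script like Example #1
copies = 4             # hypothetical: number of workers to spawn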
Example #3
mpi_root = 0

#each processor will get count elements from the root
count = 4
# in Python we do not need to preallocate the receive array myray,
# but we do need to give send_ray a dummy value on the non-root ranks
send_ray = zeros(0, "i")
if myid == mpi_root:
    size = count * numnodes
    send_ray = zeros(size, "i")
    for i in range(0, size):
        send_ray[i] = i

#send different data to each processor
myray = mpi.mpi_scatter(send_ray, count, mpi.MPI_INT, count, mpi.MPI_INT,
                        mpi_root, mpi.MPI_COMM_WORLD)

#each processor does a local sum
total = 0
for i in range(0, count):
    total = total + myray[i]
print "myid=", myid, "total=", total

#gather back to the root and sum/print
back_ray = mpi.mpi_gather(total, 1, mpi.MPI_INT, 1, mpi.MPI_INT, mpi_root,
                          mpi.MPI_COMM_WORLD)
if myid == mpi_root:
    total = 0
    for i in range(0, numnodes):
        total = total + back_ray[i]
    print "results from all processors=", total
Example #4
File: p_ex05d.py  Project: mstabrin/pydusa
mpi_root = 0

#each processor will get count elements from the root
count = 4
# in Python we do not need to preallocate the receive array myray,
# but we do need to give send_ray a dummy value on the non-root ranks
send_ray = zeros(0, "d")
if myid == mpi_root:
    size = count * numnodes
    send_ray = zeros(size, "d")
    for i in range(0, size):
        send_ray[i] = i

#send different data to each processor
myray = mpi.mpi_scatter(send_ray, count, mpi.MPI_DOUBLE, count, mpi.MPI_DOUBLE,
                        mpi_root, mpi.MPI_COMM_WORLD)

#each processor does a local sum
total = 0.0
for i in range(0, count):
    total = total + myray[i]
print "myid=", myid, "total=", total

#gather back to the root and sum/print
back_ray = mpi.mpi_gather(total, 1, mpi.MPI_DOUBLE, 1, mpi.MPI_DOUBLE,
                          mpi_root, mpi.MPI_COMM_WORLD)
if myid == mpi_root:
    total = 0.0
    for i in range(0, numnodes):
        total = total + back_ray[i]
    print "results from all processors=", total