Example #1
import numpy

from mpi import MPI

mpi = MPI()
world = mpi.MPI_COMM_WORLD
rank = world.rank()
size = world.size()

# The top of this excerpt is missing; the setup below restores the
# pieces the rest of the code needs.  The concrete dimensions and
# tolerance are illustrative values, not taken from the source;
# global_height should be divisible by the number of ranks so the
# scatter splits the rows evenly.
global_width = 100
global_height = 100
epsilon = 0.001

if rank == 0:
    # Initialize state
    global_state = numpy.zeros(global_width * global_height).reshape(
        global_height, global_width)
    global_state[[0, -1], :] = 1.0  # fill top and bottom row with 1.0
    global_state[:, [0, -1]] = 0.5  # fill left and right column with 0.5
else:
    global_state = []
    epsilon = 0

if rank == 0:
    datasize = global_height * global_width * global_state.dtype.itemsize
else:
    datasize = -1

# All procs receive their local state and add empty ghost rows
epsilon = world.bcast(epsilon, root=0)
local_state = world.scatter(global_state, root=0)
height, width = local_state.shape
empty = numpy.zeros((1, width))
local_state = numpy.concatenate(
    (empty, local_state, empty))  # add ghost rows top and bottom

if rank == 0:
    print "Starting to solve for np:%i w:%i h:%i e:%s" % (
        size, global_width, global_height, epsilon)

stencil_solver(local_state, epsilon)

mpi.finalize()
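
The stencil_solver used above is not included in the excerpt. Below is a minimal sketch of what it might look like, assuming a plain Jacobi sweep over the local slab; the ghost-row exchange with the neighbouring ranks and the global convergence test that a complete solver needs are only noted in comments, since the pupyMPI point-to-point calls are not shown in the source.

def stencil_solver(local_state, epsilon):
    # Sketch only, not the original implementation.  Jacobi-iterate
    # the interior points until the largest local update falls below
    # epsilon.  A complete solver would, per iteration, refresh the
    # ghost rows local_state[0] and local_state[-1] from the
    # neighbouring ranks and allreduce delta so all ranks agree on
    # when to stop; both steps are omitted here.
    delta = epsilon + 1.0
    while delta > epsilon:
        interior = local_state[1:-1, 1:-1]
        new = 0.25 * (local_state[:-2, 1:-1] + local_state[2:, 1:-1] +
                      local_state[1:-1, :-2] + local_state[1:-1, 2:])
        delta = numpy.abs(new - interior).max()
        interior[:] = new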
Example #2
from mpi import MPI

mpi = MPI()

world = mpi.MPI_COMM_WORLD

rank = world.rank()
size = world.size()
chunk_size = 4

iterations = 1000

data = [str(i) * chunk_size for i in range(size)]

#print "DATA:" + str(data)

recv_data = world.alltoall(data)

for i in xrange(iterations):
    if rank == 0:
        print "doing iteration %i of %i" % (i + 1, iterations)
    recv_data = world.alltoall(data)

#if rank == 0:
#    print "\tdata:%s \n\trecv_data:%s" % (data,recv_data)
#print "Rank:%i done (%i iterations)" %(rank,iterations)

# alltoall sends element j of `data` to rank j; rank r therefore
# receives element r from every rank.  Since all ranks build the same
# `data` list, rank r ends up with `size` copies of
# str(r) * chunk_size, e.g. ['1111', '1111', '1111'] on rank 1 when
# size == 3.
expected_data = [str(rank) * chunk_size for _ in range(size)]
assert expected_data == recv_data

mpi.finalize()
Example #3
from mpi import MPI
# Benchmark and MPI_sum are also required here; their import paths
# are not shown in this excerpt.


def main(reps):
    mpi = MPI()
    world = mpi.MPI_COMM_WORLD
    rank = world.rank()
    size = world.size()
    handle_list = []
    obj = None  # Replaced by the request handle each non-blocking collective (NBC) returns.

    if rank == 0:
        print "Benchmarking with %d reps" % reps

    # Round the element count up to the next multiple of size so the
    # data divides evenly among the ranks.
    datacount = 1000
    while datacount % size != 0:
        datacount += 1

    data = range(datacount)
    b = Benchmark(communicator=world, roots=range(size))
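    # Each "with bw" block below times only the initiation of a
    # non-blocking collective; completion happens later, in the
    # waitall outside the timed region.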

    # Testing allgather
    bw, _ = b.get_tester("allgather", datasize=rank)
    for _ in range(reps):
        with bw:
            obj = world.iallgather(data)
        handle_list.append(obj)
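    # handle_list is shared by every test in this function and never
    # cleared, so each waitall also re-waits handles that earlier
    # tests have already completed.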
    world.waitall(handle_list)

    world.barrier()

    # Testing barrier
    bw, _ = b.get_tester("barrier", datasize=rank)
    for _ in range(reps):
        with bw:
            obj = world.ibarrier()
        handle_list.append(obj)
    world.waitall(handle_list)

    world.barrier()

    # Testing bcast
    bw, _ = b.get_tester("bcast", datasize=rank)
    for _ in range(reps):
        with bw:
            obj = world.ibcast(data, root=0)
        handle_list.append(obj)
    world.waitall(handle_list)

    world.barrier()

    # Allreduce
    bw, _ = b.get_tester("allreduce", datasize=rank)
    for _ in range(reps):
        with bw:
            obj = world.iallreduce(data, MPI_sum)
        handle_list.append(obj)
    world.waitall(handle_list)

    world.barrier()

    # Alltoall
    bw, _ = b.get_tester("alltoall", datasize=rank)
    for _ in range(reps):
        with bw:
            obj = world.ialltoall(data)
        handle_list.append(obj)
    world.waitall(handle_list)

    world.barrier()

    # Gather
    bw, _ = b.get_tester("gather", datasize=rank)
    for _ in range(reps):
        with bw:
            obj = world.igather(data)
        handle_list.append(obj)
    world.waitall(handle_list)

    world.barrier()
    # Reduce
    bw, _ = b.get_tester("reduce", datasize=rank)
    for _ in range(reps):
        with bw:
            obj = world.ireduce(data, MPI_sum, 0)
        handle_list.append(obj)
    world.waitall(handle_list)

    b.flush()
    mpi.finalize()
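
The excerpt defines main but never calls it. A driver along these lines, assumed here rather than taken from the source, would read the repetition count from the command line:

if __name__ == "__main__":
    import sys
    reps = int(sys.argv[1]) if len(sys.argv) > 1 else 10
    main(reps)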