Example #1
def test_Reduce(size, max_iterations):
    comm = ci.communicator
    num_procs = ci.num_procs

    def Reduce(data, max_iterations):
        """docstring for Reduce"""
        current_root = 0
        for _ in xrange(max_iterations):
            # Reduce with pupyMPI's built-in MPI_sum operator
            comm.reduce(data, MPI_sum, current_root)
            # Switch root
            current_root = (current_root + 1) % num_procs
    # end of test

    # reduce makes no sense for size < 1
    if size <= 0:
        return None

    ci.synchronize_processes()
    t1 = ci.clock_function()
    
    # do magic
    Reduce(ci.reduce_data[:size], max_iterations)

    t2 = ci.clock_function()
    time = t2 - t1
    return time
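All of these benchmarks share the same skeleton: synchronize the processes, read the clock, run the operation in a tight loop, read the clock again, and return the elapsed time. A minimal self-contained sketch of that pattern, with time.time and a dummy operation standing in for the ci helpers (which are not shown on this page), might look like:

import time

def benchmark(operation, max_iterations):
    # In the real tests ci.synchronize_processes() aligns the processes
    # here and ci.clock_function() supplies the timer
    t1 = time.time()
    for _ in xrange(max_iterations):
        operation()
    t2 = time.time()
    return t2 - t1

# Example usage: time a no-op 1000 times
elapsed = benchmark(lambda: None, 1000)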
Example #2
def test_Bcast(size, max_iterations):
    comm = ci.communicator
    num_procs = ci.num_procs

    def Bcast(data, max_iterations):
        """docstring for Bcast"""
        root = 0
        for _ in xrange(max_iterations):
            comm.bcast(data, root)

            # Switch root
            root = (root + 1) % num_procs

    # end of test
    ci.synchronize_processes()

    t1 = ci.clock_function()

    # Do it
    Bcast(ci.data[:size], max_iterations)

    t2 = ci.clock_function()

    time = (t2 - t1)
    return time
Example #3
def test_Scatter(size, max_iterations):
    num_procs = ci.num_procs
    comm = ci.communicator

    data = ci.data[:(size*num_procs)]

    def Scatter(data, max_iterations):
        current_root = 0
        for _ in xrange(max_iterations):
            comm.scatter(data, current_root)

            # Switch root
            current_root = (current_root + 1) % num_procs
    # end of test

    # scatter makes no sense for size < 1
    if size <= 0:
        return None

    ci.synchronize_processes()
    t1 = ci.clock_function()

    # do magic
    Scatter(data, max_iterations)

    t2 = ci.clock_function()
    time = t2 - t1
    return time
Example #4
def test_PingPong(size, max_iterations):
    def PingPong(s_tag, r_tag, source, dest, data, max_iterations):
        for _ in xrange(max_iterations):
            if ci.rank == ci.pair0:
                ci.communicator.send(data, dest, s_tag)
                ci.communicator.recv(source, r_tag)
            elif ci.rank == ci.pair1:
                ci.communicator.recv(source, r_tag)
                ci.communicator.send(data, dest, s_tag)
            else:
                raise Exception("Broken state")

    # end of test

    (s_tag, r_tag) = ci.get_tags_single()
    (source, dest) = ci.get_srcdest_paired()  # source for purposes of recv, rank-relative
    data = ci.data[0:size]

    ci.synchronize_processes()

    t1 = ci.clock_function()

    # do magic
    PingPong(s_tag, r_tag, source, dest, data, max_iterations)

    t2 = ci.clock_function()
    time = (t2 - t1)

    return time
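Each PingPong iteration is a full round trip, so the usual derived metrics are the one-way latency (half the per-iteration time) and the bandwidth (message size divided by the one-way latency). A small helper along these lines, with illustrative names that are not part of the suite:

def pingpong_metrics(total_time, size_bytes, max_iterations):
    # One iteration = one send plus the matching receive = one round trip
    one_way_latency = total_time / (2.0 * max_iterations)
    bandwidth = size_bytes / one_way_latency  # bytes per second
    return (one_way_latency, bandwidth)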
Example #5
def test_Sendrecv(size, max_iterations):
    def get_srcdest_chained():
        dest = (ci.rank + 1) % ci.num_procs
        source = (ci.rank + ci.num_procs - 1) % ci.num_procs
        return (source, dest)

    def Sendrecv(s_tag, r_tag, source, dest, data, max_iterations):
        for _ in xrange(max_iterations):
            ci.communicator.sendrecv(data, dest, s_tag, source, r_tag)

    # end of test

    (s_tag, r_tag) = ci.get_tags_single()
    data = ci.data[0:size]
    ci.synchronize_processes()

    (source, dest) = get_srcdest_chained()
    t1 = ci.clock_function()

    # do magic
    Sendrecv(s_tag, r_tag, source, dest, data, max_iterations)

    t2 = ci.clock_function()
    time = (t2 - t1)

    return time
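get_srcdest_chained arranges the processes in a ring: every rank sends to its successor and receives from its predecessor, and sendrecv pairs the two operations so the ring cannot deadlock. Enumerating the (source, dest) pairs for four processes makes the pattern concrete:

num_procs = 4
for rank in xrange(num_procs):
    dest = (rank + 1) % num_procs
    source = (rank + num_procs - 1) % num_procs
    print rank, (source, dest)
# prints: 0 (3, 1), then 1 (0, 2), then 2 (1, 3), then 3 (2, 0)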
Example #6
def test_MCPi(size, max_iterations):
    import pi.parallel

    ### Setup parameters

    # The problem size of Monte Carlo pi scales differently
    problemsize = size * 1000

    rank = ci.communicator.rank()
    world_size = ci.communicator.size()

    ci.synchronize_processes()

    t1 = ci.clock_function()

    hits = pi.parallel.approximate(rank, problemsize)

    # Combine the local hit counts
    global_hits = ci.communicator.reduce(hits, sum)

    t2 = ci.clock_function()

    time = t2 - t1

    return time
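pi.parallel.approximate is not shown on this page, but a Monte Carlo pi kernel samples random points in the unit square and counts those inside the quarter circle; the estimate is then 4 * hits / samples. A hypothetical serial stand-in with the same signature:

import random

def approximate(rank, problemsize):
    # rank is unused in this serial sketch
    hits = 0
    for _ in xrange(problemsize):
        x, y = random.random(), random.random()
        if x * x + y * y <= 1.0:
            hits += 1
    return hits

With the reduced global_hits, the estimate would be 4.0 * global_hits / (problemsize * world_size).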
Example #7
def test_Alltoall(size, max_iterations):
    comm = ci.communicator
    def Alltoall(data, max_iterations):
        """docstring for Alltoall"""
        for _ in xrange(max_iterations):
            comm.alltoall(data)
    # end of test

    # Slice the data to the size needed
    dataslice = ci.data[:(size*ci.num_procs)]
    ci.synchronize_processes()
    t1 = ci.clock_function()

    # do magic
    Alltoall(dataslice, max_iterations)

    t2 = ci.clock_function()
    time = t2 - t1
    return time
Example #8
def test_Allgather(size, max_iterations):
    comm = ci.communicator

    def Allgather(data, max_iterations):
        """docstring for Allgather"""
        for _ in xrange(max_iterations):
            comm.allgather(data)
    # end of test

    ci.synchronize_processes()

    t1 = ci.clock_function()

    # do magic
    Allgather(ci.data[:size], max_iterations)

    t2 = ci.clock_function()
    time = t2 - t1

    return time
Example #9
def test_Barrier(size, max_iterations):
    comm = ci.communicator
    def Barrier(max_iterations):
        """docstring for Barrier"""
        for _ in xrange(max_iterations):
            comm.barrier()
    # end of test

    if size != 0:
        return None # We don't care about barrier for increasing sizes

    ci.synchronize_processes()
    t1 = ci.clock_function()

    # do magic
    Barrier(max_iterations)

    t2 = ci.clock_function()
    time = t2 - t1
    return time
Example #10
def test_Gather(size, max_iterations):
    comm = ci.communicator
    num_procs = ci.num_procs

    def Gather(data, max_iterations):
        current_root = 0
        for _ in xrange(max_iterations):
            comm.gather(data, current_root)
            # Switch root
            current_root = (current_root + 1) % num_procs
    # end of test

    ci.synchronize_processes()
    t1 = ci.clock_function()

    # do magic
    Gather(ci.data[:size], max_iterations)

    t2 = ci.clock_function()
    time = t2 - t1
    return time
Example #11
def test_Allreduce(size, max_iterations):
    comm = ci.communicator

    def Allreduce(data, max_iterations):
        for _ in xrange(max_iterations):
            # Allreduce with pupyMPI's built-in MPI_sum operator
            comm.allreduce(data, MPI_sum)
    # end of test

    # allreduce makes no sense for size < 1
    if size <= 0:
        return None

    ci.synchronize_processes()
    t1 = ci.clock_function()

    # do magic
    Allreduce(ci.reduce_data[:size], max_iterations)

    t2 = ci.clock_function()
    time = t2 - t1
    return time
Example #12
def test_ThreadSaturationBcast(size, max_iterations):
    def Bcast(data, max_iterations):
        root = 0
        for _ in xrange(max_iterations):
            my_data = data
            ci.communicator.bcast(my_data, root)
            # Switch root
            root = (root + 1) % ci.num_procs

    # end of test

    ci.synchronize_processes()

    t1 = ci.clock_function()

    # Do it
    Bcast(ci.data[:size], max_iterations)

    t2 = ci.clock_function()

    time = (t2 - t1)
    return time
Example #13
def test_SOR(size, max_iterations):
    import sor.parallel
    import copy

    ### Setup parameters

    # Problem size
    xsize = size
    ysize = size

    epsilonFactor = 0.1

    update_freq = 0
    useGraphics = 0

    rank = ci.communicator.rank()
    world_size = ci.communicator.size()

    ci.synchronize_processes()

    t1 = ci.clock_function()

    (local_state, global_state, rboffset, epsilon) = sor.parallel.setup_problem(
        rank, world_size, xsize, ysize, epsilonFactor)

    # An odd number of rows combined with an odd rank means the local state
    # starts with a black point instead of a red one, so recompute the offset
    rboffset = (rank % 2) * (ysize % 2)

    # Start solving the heat equation
    sor.parallel.solve(rank, world_size, local_state, rboffset, epsilon,
                       update_freq, useGraphics, ci.communicator)

    t2 = ci.clock_function()

    time = t2 - t1

    return time
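sor.parallel.solve is likewise not shown. Red-black SOR colours the grid like a checkerboard and updates one colour per half-sweep, so every update only reads neighbours of the other colour; rboffset shifts the colouring on odd-ranked strips. A minimal serial sketch of one sweep over a list-of-lists grid (the five-point stencil and the omega parameter are assumptions, not taken from the sor module):

def redblack_sweep(grid, omega, offset=0):
    rows, cols = len(grid), len(grid[0])
    for colour in (0, 1):
        for i in xrange(1, rows - 1):
            # Pick the starting column so each pass touches only one colour
            start = 1 + ((i + colour + offset) % 2)
            for j in xrange(start, cols - 1, 2):
                new = 0.25 * (grid[i-1][j] + grid[i+1][j] +
                              grid[i][j-1] + grid[i][j+1])
                grid[i][j] = (1.0 - omega) * grid[i][j] + omega * new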
Example #14
def test_ThreadSaturationExchange(size, max_iterations):
    def get_leftright_chained():
        # Neighbours in a ring: wrap around at both ends
        right = (ci.rank + 1) % ci.num_procs
        left = (ci.rank - 1) % ci.num_procs
        return (left, right)

    def Exchange(s_tag, r_tag, left, right, data, max_iterations):
        for _ in xrange(max_iterations):
            ci.communicator.isend(data, right, s_tag)
            ci.communicator.isend(data, left, s_tag)
            ci.communicator.recv(left, r_tag)
            ci.communicator.recv(right, r_tag)

    # end of test
    (s_tag, r_tag) = ci.get_tags_single()
    data = ci.data[0:size]
    ci.synchronize_processes()

    (left, right) = get_leftright_chained()

    t1 = ci.clock_function()

    # do magic
    Exchange(s_tag, r_tag, left, right, data, max_iterations)

    t2 = ci.clock_function()
    time = (t2 - t1)

    return time
Example #15
def test_PingPing(size, max_iterations):
    def PingPing(s_tag, r_tag, source, dest, data, max_iterations):
        for _ in xrange(max_iterations):
            request = ci.communicator.isend(data, dest, s_tag)
            ci.communicator.recv(source, r_tag)
            request.wait()

    # end of test

    (s_tag, r_tag) = ci.get_tags_single()
    (source, dest) = ci.get_srcdest_paired()  # source for purposes of recv, rank-relative
    data = ci.data[0:size]
    ci.synchronize_processes()

    t1 = ci.clock_function()

    # do magic
    PingPing(s_tag, r_tag, source, dest, data, max_iterations)

    t2 = ci.clock_function()
    time = (t2 - t1)

    return time
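Unlike PingPong, PingPing has both processes send at the same time, which is why the non-blocking isend is needed to avoid deadlock. Each iteration therefore measures a single one-way message under bidirectional load, so the per-iteration time is used directly rather than halved; an illustrative helper (not part of the suite):

def pingping_latency(total_time, max_iterations):
    # Each iteration covers one message sent while another travels
    # the opposite way, so no halving as in ping-pong
    return total_time / max_iterations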