Example #1
def arange(size, iters=10000):
    #Average time per call over iters mpi_np.arange invocations
    time = measure_time()
    for _ in range(iters):
        mpi_np.arange(size, dtype=np.float64)
    time = measure_time() - time
    gc.collect()
    return time / iters
Example #2
def arange(size, iters=10000, comm=MPI.COMM_WORLD):
    #Synchronize ranks, then average time per call over iters
    #block-distributed mpi_np.arange invocations
    comm.Barrier()
    time = measure_time()
    for _ in range(iters):
        mpi_np.arange(size, dtype=np.float64, comm=comm, dist='b')
    time = measure_time() - time
    comm.reduce(time, op=MPI.MAX, root=0)
    return time / iters
Example #3
def arange(size, iters=10000, comm=MPI.COMM_WORLD):
    time = measure_time()
    for _ in range(iters):
        mpi_np.arange(size, dtype=np.float64)
    time = measure_time() - time
    gc.collect()
    comm.reduce(time, op=MPI.MAX, root=0)
    return time / iters
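
The three arange benchmark excerpts above rely on imports and a measure_time() helper that are not shown in this listing. A minimal sketch of that surrounding context, assuming measure_time simply returns a wall-clock timestamp in seconds (the actual helper in the benchmark suite may differ):

import gc
import numpy as np
from mpi4py import MPI
import mpids.MPInumpy as mpi_np

def measure_time():
    #Assumed timing helper: current wall-clock time in seconds
    return MPI.Wtime()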
Example #4
    def test_process_observations_providing_mpi_np_array(self):
        #Default block distribution
        mpi_np_observations = mpi_np.arange(8, dist='b')
        processed_obs, num_features, labels  = \
            _process_observations(mpi_np_observations, self.comm)
        self.assertTrue(isinstance(processed_obs, Block))
        self.assertEqual(num_features, 1)
        self.assertTrue(isinstance(labels, Block))

        #Replicated distribution
        mpi_np_observations = mpi_np.arange(8, dist='r')
        processed_obs, num_features, labels  = \
            _process_observations(mpi_np_observations, self.comm)
        self.assertTrue(isinstance(processed_obs, Block))
        self.assertEqual(num_features, 1)
        self.assertTrue(isinstance(labels, Block))
Example #5
    def test_return_behavior_from_all_ranks_float_stop(self):
        np_arange = np.arange(20.0)
        for root in range(self.size):
            stop = None
            self.assertTrue(stop is None)
            if self.rank == root:
                stop = 20.0
            mpi_np_arange = mpi_np.arange(stop,
                                          comm=self.comm,
                                          root=root,
                                          dist=self.dist)
            self.assertTrue(isinstance(mpi_np_arange, mpi_np.MPIArray))
            self.assertTrue(isinstance(mpi_np_arange, self.dist_class))
            self.assertEqual(mpi_np_arange.comm, self.comm)
            self.assertEqual(mpi_np_arange.dist, self.dist)
            self.assertTrue(np.alltrue(mpi_np_arange[:] == np_arange))
Example #6
import mpids.MPInumpy as mpi_np
import numpy as np
from mpi4py import MPI
from operations import add, sub, mul, div

if __name__ == '__main__':
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    n_procs = comm.Get_size()
    local_size = 2**16
    size = n_procs * local_size
    iters = 1000
    mpi_np_arr = mpi_np.arange(size, dtype=np.float64)

    add_time = add(mpi_np_arr, iters=iters)
    sub_time = sub(mpi_np_arr, iters=iters)
    mul_time = mul(mpi_np_arr, iters=iters)
    div_time = div(mpi_np_arr, iters=iters)

    if rank == 0:
        print("mpi_np,add,%d,%d,%.9f" % (n_procs, local_size, add_time))
        print("mpi_np,sub,%d,%d,%.9f" % (n_procs, local_size, sub_time))
        print("mpi_np,mul,%d,%d,%.9f" % (n_procs, local_size, mul_time))
        print("mpi_np,div,%d,%d,%.9f" % (n_procs, local_size, div_time))
Example #7
from mpi4py import MPI
import numpy as np

import mpids.MPInumpy as mpi_np

if __name__ == "__main__":

    #Capture default communicator, MPI process rank, and number of MPI processes
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()

    note = "Note: creation routines are using their default MPI related kwargs."
    note += "\nDefault kwargs:"
    note += " routine(..., comm=MPI.COMM_WORLD, root=0, dist='b')\n"
    print(note) if rank == 0 else None

    #Arange, evenly spaced values within specified interval
    print('From arange(start, stop, step) Routine') if rank == 0 else None
    mpi_arange = mpi_np.arange(size * 5)
    print('Local Arange Result Rank {}: {}'.format(rank, mpi_arange))
    print() if rank == 0 else None
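
For comparison, the same call with the MPI-related kwargs written out explicitly, assuming the defaults stated in the note above; this is equivalent to the one-argument form used in the example:

mpi_arange_explicit = mpi_np.arange(
    size * 5, comm=MPI.COMM_WORLD, root=0, dist='b')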
Example #8
from mpi4py import MPI

import mpids.MPInumpy as mpi_np

#Sample normalization of all columns by their mean value

if __name__ == "__main__":

    #Capture default communicator and MPI process rank
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()

    #Array elements (values 0-24)
    block_mpi_array = mpi_np.arange(25, dist='b').reshape(5, 5)

    #Capture distributed array data
    replicated_mpi_array = block_mpi_array[:]

    if rank == 0:
        print('Distributed Array Contents:\n{}\n'.format(replicated_mpi_array))
    comm.Barrier()

    block_mpi_array_col_mean = block_mpi_array.mean(axis=0)
    block_mpi_array_col_normalized = block_mpi_array / block_mpi_array_col_mean

    #Capture distributed array data after column normalization
    updated_replicated_mpi_array = block_mpi_array_col_normalized[:]
    if rank == 0:
        print('Distributed Array Contents After Column Normalization: \n{}\n'\
            .format(updated_replicated_mpi_array))
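
For reference, the same column normalization performed on a plain NumPy array, with no distribution involved; this is shown only to make explicit what the distributed version computes:

import numpy as np

arr = np.arange(25).reshape(5, 5)
col_mean = arr.mean(axis=0)        #[10. 11. 12. 13. 14.]
normalized = arr / col_mean        #every column divided by its own mean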
Example #9
from mpi4py import MPI

import mpids.MPInumpy as mpi_np

if __name__ == "__main__":

    #Capture default communicator and MPI process rank
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()

    #Array elements (values 0-9)
    block_mpi_array = mpi_np.arange(10)

    #Capture distributed array data
    replicated_mpi_array = block_mpi_array[:]

    if rank == 0:
        print('Distributed Array Contents: {}\n'.format(replicated_mpi_array))
    comm.Barrier()

    output = 'Rank {} Local Array Contents: {}\n'.format(
        rank, block_mpi_array)
    output += '\t local_array * 2 = {} \n'.format(block_mpi_array * 2)
    output += '\t local_array - 3 = {} \n'.format(block_mpi_array - 3)
    output += '\t local_array + 7 = {} \n'.format(block_mpi_array + 7)
    output += '\t local_array / 0.5 = {} \n'.format(block_mpi_array / 0.5)
    output += '\t local_array // 3 = {} \n'.format(block_mpi_array // 3)
    output += '\t local_array % 2 = {} \n'.format(block_mpi_array % 2)
    output += '\t local_array ** 2 = {} \n'.format(block_mpi_array**2)
    print(output)
Example #10
from mpi4py import MPI

import mpids.MPInumpy as mpi_np

if __name__ == "__main__":

    #Capture default communicator and MPI process rank
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()

    #Array elements (values 0-20)
    mpi_array = mpi_np.arange(21, dist='b').reshape(7, 3)
    #Reshape (Redistribute) Array Data
    reshaped_mpi_array = mpi_array.reshape(3, 7)

    #Capture distributed array data
    replicated_mpi_array = mpi_array[:]
    #Capture reshaped distributed array data
    replicated_reshaped_mpi_array = reshaped_mpi_array[:]

    if rank == 0:
        print('Original Distributed Array Contents: \n{}\n'\
            .format(replicated_mpi_array))
    if rank == 0:
        print('Reshaped Distributed Array Contents: \n{}\n'\
            .format(replicated_reshaped_mpi_array))
    comm.Barrier()

    print('Rank {} Original Local Array Contents: \n{}\n'\
        .format(rank, mpi_array))
    comm.Barrier()
Example #11
from mpi4py import MPI

import mpids.MPInumpy as mpi_np

if __name__ == "__main__":

    #Capture default communicator and MPI process rank
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()

    #Array elements (values 0-15)
    block_mpi_array = mpi_np.arange(16).reshape(4, 4)

    #Capture distributed array data
    replicated_mpi_array = block_mpi_array[:]

    if rank == 0:
        print('Distributed Array Contents:\n{}\n'.format(replicated_mpi_array))
    comm.Barrier()

    output = 'Rank {} Local Array Contents:\n{}\n\n'.format(
        rank, block_mpi_array)
    output += 'Rank {} Reduction Results:\n'.format(rank)
    output += '\tarray.max() = {}\n'.format(block_mpi_array.max())
    output += '\tarray.max(axis=0) = {}\n'.format(block_mpi_array.max(axis=0))
    output += '\tarray.max(axis=1) = {}\n'.format(block_mpi_array.max(axis=1))
    output += '\tarray.mean() = {}\n'.format(block_mpi_array.mean())
    output += '\tarray.mean(axis=0) = {}\n'.format(
        block_mpi_array.mean(axis=0))
    output += '\tarray.mean(axis=1) = {}\n'.format(
        block_mpi_array.mean(axis=1))