Example 1
    def init(self, calling_realm):

        self.realm = calling_realm
        # Build a communicator mpi4py python object from the
        # handle returned by the CPL_init function.
        try:
            if MPI._sizeof(MPI.Comm) == ctypes.sizeof(c_int):
                MPI_Comm = c_int
            else:
                MPI_Comm = c_void_p
        #Some versions of MPI4py have no _sizeof method.
        except AttributeError:
            MPI_Comm = c_int

        # Call the wrapped CPL_init to get the realm communicator handle
        returned_realm_comm = c_int()
        self._py_init(calling_realm, byref(returned_realm_comm))

        # Use an intracomm object as the template and override value
        newcomm = MPI.Intracomm()
        newcomm_ptr = MPI._addressof(newcomm)
        comm_val = MPI_Comm.from_address(newcomm_ptr)
        comm_val.value = returned_realm_comm.value
        self.COMM = newcomm

        return newcomm
Example 2
def get_task_comm():
    from mpi4py import MPI
    import ctypes

    # print("turbine_helpers.task_comm: %i" % task_comm)
    # sys.stdout.flush()

    mpi4py_comm = MPI.Intracomm()
    if MPI._sizeof(MPI.Comm) == ctypes.sizeof(ctypes.c_int):
        # MPICH
        comm_int = ctypes.c_int
        mpi4py_comm_ptr = comm_int.from_address(MPI._addressof(mpi4py_comm))
        mpi4py_comm_ptr.value = task_comm
    elif MPI._sizeof(MPI.Comm) == ctypes.sizeof(ctypes.c_void_p):
        # OpenMPI
        comm_pointer = ctypes.c_void_p
        handle = comm_pointer.from_address(MPI._addressof(mpi4py_comm))
        handle.value = task_comm

    return mpi4py_comm
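
Examples 1 and 2 share the same trick: default-construct an MPI.Intracomm (which wraps MPI_COMM_NULL) and then overwrite its underlying C handle through ctypes, choosing c_int or c_void_p depending on whether the MPI build uses integer handles (MPICH-style) or pointer handles (Open MPI). A minimal, self-contained sketch of that pattern follows; comm_from_handle is an illustrative helper, and the demo simply round-trips the handle of MPI.COMM_WORLD so the snippet can run on its own.

import ctypes
from mpi4py import MPI

def _handle_type():
    # MPICH-style builds use an int handle, Open MPI uses a pointer.
    if MPI._sizeof(MPI.Comm) == ctypes.sizeof(ctypes.c_int):
        return ctypes.c_int
    return ctypes.c_void_p

def comm_from_handle(raw_handle):
    """Wrap a raw MPI_Comm handle in an mpi4py Intracomm (sketch)."""
    comm = MPI.Intracomm()  # starts out as MPI_COMM_NULL
    _handle_type().from_address(MPI._addressof(comm)).value = raw_handle
    return comm

# Demo: read COMM_WORLD's raw handle, then wrap it again.
raw = _handle_type().from_address(MPI._addressof(MPI.COMM_WORLD)).value
print(comm_from_handle(raw).Get_size())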
Example 3
    def testIntraNull(self):
        comm_null = MPI.Intracomm()
        self.assertRaisesMPI(MPI.ERR_COMM, comm_null.Dup)
        self.assertRaisesMPI(MPI.ERR_COMM, comm_null.Create, MPI.GROUP_EMPTY)
        self.assertRaisesMPI(MPI.ERR_COMM, comm_null.Split, color=0, key=0)
Example 4
    def testContructorIntra(self):
        comm_null = MPI.Intracomm()
        self.assertFalse(comm_null is MPI.COMM_NULL)
        self.assertEqual(comm_null, MPI.COMM_NULL)
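
Examples 3 and 4 are unit tests that rely on the fact that a default-constructed MPI.Intracomm() wraps MPI_COMM_NULL: it compares equal to MPI.COMM_NULL but is a distinct Python object, and any communication call on it fails with error class MPI.ERR_COMM. A small sketch of those properties, assuming the MPI implementation reports errors on the null communicator as Python exceptions rather than aborting:

from mpi4py import MPI

null_comm = MPI.Intracomm()            # wraps MPI_COMM_NULL
print(null_comm == MPI.COMM_NULL)      # True: same underlying handle
print(null_comm is MPI.COMM_NULL)      # False: distinct Python object

try:
    null_comm.Dup()                    # operations on a null comm are invalid
except MPI.Exception as exc:
    print(exc.Get_error_class() == MPI.ERR_COMM)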
Example 5
import time

import numpy as np
import scipy as sp
from numpy import linalg as LA
from ConsensusMethods import *
from mpi4py import MPI
from MNIST_Loader import *
from MainMethods import *

## Network and Global Variable Setup

comm = MPI.COMM_WORLD
comm1 = MPI.Intracomm(comm)
rank = MPI.COMM_WORLD.Get_rank()
size = MPI.COMM_WORLD.Get_size()
name = MPI.Get_processor_name()
index = [4,8,12,16,20] 
edges = [1,2,3,4,0,2,3,4,0,1,3,4,0,1,2,4,0,1,2,3] #fully connected network!
c = comm1.Create_graph(index, edges, reorder = False) 
np.set_printoptions(precision=3)
node_names = list('ABCDE')
transmissionTag = 4
degrees = discoverDegrees(c,comm,node_names)
time.sleep(0.5) #keeps node outputs clean
weights = writeWeights(comm,c,degrees)

tic = time.time()

##################################################
################ Debugging option ################
option = ['transfer','consensus','power']
# Choose an option to test here
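
Example 5 (and Examples 7-8 further down) builds a graph communicator with Create_graph. In MPI's graph-topology format, index[i] is the cumulative edge count for nodes 0..i and edges lists each node's neighbors in order, so index = [4, 8, 12, 16, 20] with the edges list above describes a fully connected graph of five nodes. A minimal sketch of the same construction, to be launched with mpiexec -n 5 (variable names are illustrative):

from mpi4py import MPI

comm = MPI.COMM_WORLD  # must be exactly 5 processes for this index/edges pair

# Node 0's neighbors are edges[0:index[0]]; node i's are edges[index[i-1]:index[i]].
index = [4, 8, 12, 16, 20]
edges = [1, 2, 3, 4, 0, 2, 3, 4, 0, 1, 3, 4, 0, 1, 2, 4, 0, 1, 2, 3]

graph = comm.Create_graph(index, edges, reorder=False)
rank = graph.Get_rank()
print(rank, graph.Get_neighbors(rank))  # every rank lists the other four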
Example 6
import time

from mpi4py import MPI
from deap import creator, base, tools, algorithms
from deap110 import emo
from JSPEval.jspsolution import JspSolution
from JSPEval.jspmodel import JspModel
from JSPEval.jspeval import JspEvaluator
import params
import operators
import output

# ---  Setup  ---

# MPI environment
comm = MPI.COMM_WORLD
size = comm.Get_size()
topology = params.calculate_topology(size)
cart = MPI.Intracomm(comm).Create_cart([topology[0], topology[1]],
                                       [True, True])
rank = cart.Get_rank()

# calculate neighbors
coord = cart.Get_coords(rank)
neighbor_coords = [[coord[0] - 1, coord[1]], [coord[0], coord[1] - 1],
                   [coord[0] + 1, coord[1]], [coord[0], coord[1] + 1]]
neighbors = set(map(cart.Get_cart_rank, neighbor_coords))
print('rank: {}, neighbors: {}'.format(rank, neighbors))

# read parameters
term_m, term_v, pop_size, f_out, f_model, migr_int, migr_size,\
        mut_prob, mut_eta, xover_prob, xover_eta = params.get()

# start multiple runs
start = time.time()
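
Example 6 builds a periodic Cartesian grid with Create_cart and derives each rank's four neighbors by hand from Get_coords and Get_cart_rank. On a periodic grid the standard Shift call yields the same neighbors directly; a minimal sketch of that alternative (illustrative, using MPI.Compute_dims to pick the grid shape):

from mpi4py import MPI

comm = MPI.COMM_WORLD
dims = MPI.Compute_dims(comm.Get_size(), 2)      # e.g. [2, 2] for 4 ranks
cart = comm.Create_cart(dims, periods=[True, True])
rank = cart.Get_rank()

# Shift(direction, displacement) returns the (source, destination) ranks.
left, right = cart.Shift(0, 1)
down, up = cart.Shift(1, 1)
print('rank: {}, neighbors: {}'.format(rank, sorted({left, right, down, up})))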
Example 7
def start():
    '''Start running a debugging version of the Cloud K-SVD Algorithm'''
    ## Network and Global Variable Setup
    comm = MPI.COMM_WORLD
    comm1 = MPI.Intracomm(comm)
    rank = MPI.COMM_WORLD.Get_rank()
    size = MPI.COMM_WORLD.Get_size()
    name = MPI.Get_processor_name()
    index = [4,8,12,16,20] 
    edges = [1,2,3,4,0,2,3,4,0,1,3,4,0,1,2,4,0,1,2,3] #fully connected network!
    c = comm1.Create_graph(index, edges, reorder = False) 
    np.set_printoptions(precision=3)
    node_names = list('ABCDE')
    transmissionTag = 4
    degrees = discoverDegrees(c,comm,node_names)
    time.sleep(0.5) #keeps node outputs clean
    weights = writeWeights(comm,c,degrees)

    tic = time.time()

    ##################################################
    ################ Debugging option ################
    option = ['transfer','consensus','power']
    # Choose an option to test here
    test = option[2]
    ##################################################

    # Various debug methods to test individual functions in "ConsensusMethods.py"

    if test == 'transfer':  #Test raw data transfer between nodes

        data_size = 2000 
        timeOut   = 0.100
        mat = np.matrix(np.floor(np.random.rand(data_size,1)*100))
        

        tic = time.time()
        data = transmitData(mat,comm,c,node_names,transmissionTag,timeOut)

        print("%s: Time(ms) taken= %d" % (node_names[rank],((time.time()-tic)*1000)))
        
        time.sleep(0.5)
        print("%s: data dropped= %d" % (node_names[rank],
            (size-len([i for x,i in enumerate(data) if i is not None]))))

    elif test == 'consensus':

        data_size = 10
        tc = 3
        CorrectiveSpacing = 1000
        timeOut =     0.100
        mat = np.matrix(np.floor(np.random.rand(data_size,1)*10))


        tic = time.time()
        average = correctiveConsensus(mat,tc,weights,comm,c,node_names,transmissionTag,
            CorrectiveSpacing,timeOut)

        print("%s: Time(ms) taken= %d" % (node_names[rank],((time.time()-tic)*1000)))

        time.sleep((rank+2)/2)
        print("%s: Average reached: " % (node_names[rank]), np.transpose(average))

    elif test == 'power':

        tc,tp = 2,2 #consensus, power ; iterations
        CorrectiveSpacing = 3
        defaultWait = 0.100
        timeOut =     0.100
        mat = np.floor(np.random.rand(2,2)*100)

        tic = time.time()
        average = powerMethod(mat,tc,tp,weights,comm,c,node_names,
            transmissionTag,CorrectiveSpacing,timeOut)

        print("%s: Time(ms) taken= %d" % (node_names[rank],((time.time()-tic)*1000)))

        time.sleep((rank+2)/2)
        print("%s: Eigenvector estimate reached: " % (node_names[rank]), (average))  
Example 8
def start():
    ## Network Setup
    comm = MPI.COMM_WORLD
    comm1 = MPI.Intracomm(comm)
    rank = MPI.COMM_WORLD.Get_rank()
    size = MPI.COMM_WORLD.Get_size()
    name = MPI.Get_processor_name()
    index = [3, 6, 9, 12]
    edges = [1, 2, 3, 0, 2, 3, 0, 1, 3, 0, 1,
             2]  #Sparsely connected network of 4 nodes
    c = comm1.Create_graph(index, edges)
    np.set_printoptions(precision=3)
    tic = time.time()

    ## Prelims
    node_names = list('ABCDE')
    degrees = discoverDegrees(c, comm, node_names)
    weights = writeWeights(comm, c, degrees)

    ## Signals
    folder = 'python-mnist/data'
    resolution = (24, 24)
    ddim = resolution[0] * resolution[1]  #Data dimension
    classes = [0, 3, 5, 8, 9]  #MNIST digits to collect
    amount = 10  #Samples of atoms for each class; K = len(classes)*amount
    signals = 20  #Samples in Y

    ## Collect MNIST data
    time.sleep(0.1)
    tic = time.time()
    D, D_labels, Y, Y_labels = importMNIST(folder, resolution, classes, amount,
                                           signals)

    # D = np.matrix(np.random.rand(10,15)) #random matricies for debugging
    # Y = np.matrix(np.random.rand(10,20))

    S = signals  #Same as in paper
    K = np.shape(D)[1]  #Same as in paper
    ddim = np.shape(D)[0]
    print('done')

    ## Cloud params (some may be command line args)

    # check for command line args
    try:
        args = sys.argv
        tD = int(args[1])  # cloud kSVD iterations
        t0 = int(args[2])  # sparsity
        tc = int(args[3])  # consensus iterations
        tp = int(args[4])  # power iterations
    except IndexError:
        print(
            'Using default parameters because error: incorrect number of arguments detected'
        )
        print(
            'Usage: mpiexec -n <N> --hostfile <./hostfile> python CloudkSVD.py <tD> <t0> <tc> <tp>'
        )
        tD, t0, tc, tp = 5, 5, 2, 2
    except ValueError:
        print(
            'Using default parameters because error: arguments must be integers'
        )
        tD, t0, tc, tp = 5, 5, 2, 2

    print(tD, t0, tc, tp, args)

    refvec = np.matrix(np.ones(
        (ddim, 1)))  #Q_init for power method, sets direction of result
    Tag = 11  #Transmission tag, ensures MPI transmissions don't interfere
    CorrectiveSpacing = 3  #Regular iterations before a corrective iteration
    timeOut = 0.150  #Time nodes wait before they move on

    ## Main
    time_sync(tic, 15)  #NODES NEED TO START AROUND SAME TIME!
    #else segmentation fault or massive packet loss
    print('starting C-KSVD')

    rt0 = time.time()
    D, X, rerror = CloudKSVD(D, Y, refvec, tD, t0, tc, tp, weights, comm, c,
                             node_names, Tag, CorrectiveSpacing, timeOut)
    rt1 = time.time()
    rt = rt1 - rt0  # total run time of cloud kSVD

    ## Data Collection
    f = open('running_time_log.txt', 'a+')
    buf1 = '\nResolution: ' + str(resolution) + '\nClasses: ' + str(
        classes) + '\nAmount per class: %d \nSignals: %d \n' % (amount,
                                                                signals)
    buf2 = '\ntD: %d \nt0: %d \ntc: %d \ntp: %d \n' % (tD, t0, tc, tp)
    buf3 = '\nTotal ckSVD running time: %9.6f \n' % rt
    f.write(buf1 + buf2 + buf3)
    f.close()

    ## Results
    error = np.linalg.norm(Y - np.dot(D, X))**2  #L2 norm squared for error

    ## Residuals
    c_atoms = []

    for c in classes:
        c_atoms.append(list([i for i, x in enumerate(D_labels)
                             if x == c]))  #atoms corresponding to each class

    residual = np.matrix(np.zeros((S, len(classes))))  #residuals

    for s in range(0, S):  #for each signal
        for c in range(0, len(classes)):  #for each class
            if len(c_atoms[c]) > 0:
                # use signal s's own coefficients for the class-c atoms
                residual[s, c] = LA.norm(
                    Y[:, s] - D[:, c_atoms[c]] * X[c_atoms[c], s])
            else:
                # magic-number residual if no relevant atoms were found;
                # unlikely to occur in practice
                residual[s, c] = 1000

    guesses = [classes[np.argmin(residual[s, :])]
               for s in range(S)]  #predicted classes
    actual = list(Y_labels)  #actual classes

    if rank == 0:
        print(residual)
        print(guesses)
        print(actual)

    accuracy = float(sum(
        np.array(guesses) == np.array(actual))) / S  #classification accuracy

    print("Node %s accuracy: %d%s" % (node_names[rank], accuracy * 100, "%"))