Beispiel #1
0
def gather( array, root, comm ):
    """
    Gather a numeric array from every processor onto `root`.

    Root broadcasts the array's MPI datatype, rank, shape and
    per-processor size first, so all processors can post a matching
    core.gather call.  On root, the gathered data is reassembled into
    an array whose leading dimension is scaled by the processor
    count; every other processor returns None.
    """
    myid = core.comm_rank(comm)
    nprocs = core.comm_size(comm)
    if( myid==root ):
        datatype = getMpiType(array)
        size = nm.size(array)
        shape = nm.shape(array)
        rank = nm.rank(array)
    else:
        # Placeholders; real metadata arrives via the broadcasts below.
        datatype = 0
        size = 0
        shape = 0
        rank = 0
    datatype = core.bcast( datatype, 1, core.MPI_INT, root, comm )
    rank = core.bcast( rank, 1, core.MPI_INT, root, comm )
    shape = core.bcast( shape, rank, core.MPI_INT, root, comm )
    size = core.bcast( size, 1, core.MPI_INT, root, comm )
    data = core.gather( array, size, datatype,
                        size, datatype,
                        root, comm )
    if(myid == root):
        # The gathered buffer holds one copy of the array per
        # processor, so the leading dimension grows by that factor.
        shape[0] = shape[0] * nprocs
        array = nm.asarray(data)
        array.shape = shape
        return array
    else:
        return None
Beispiel #2
0
    def rank(self):
        """
        rank = comm.rank()

        Return the rank (processor ID) of the calling processor
        within this communicator.
        """
        my_rank = core.comm_rank( self.id )
        return my_rank
Beispiel #3
0
def allreduce( message, function, root=0, comm=core.MPI_COMM_WORLD ):
    """
    All-reduce: gather `message` from every processor and apply
    `function` to the gathered list.  Because allgather delivers the
    data everywhere, every processor receives the reduced result.
    """
    dataToReduce = allgather( message, root, comm )
    return function( dataToReduce )
Beispiel #4
0
def bcast( message, root=0, comm=core.MPI_COMM_WORLD ):
    """
    value = bcast( rootvalue[, root=0, communicator=mpi.COMM_WORLD] )

    Broadcast:  A collective operation that communicates the value on processor 'root' of
    of communicator mpi.COMM_WORLD to all processors in communicator mpi.COMM_WORLD

    rootvalue's value is ignored on non-root processors.

    Example of use:

    # generate / read data:
    if (mpi.COMM_WORLD.comm_rank()==0):
        mydata = somefile.read()
    else:
        mydata = 0
    # broadcast my data from root to all processors:
    mydata = mpi.bcast( mydata )
    # Or equivalently:
    mydata = mpi.bcast( mydata, 0, mpi.COMM_WORLD )
    # Or even:
    mydata = mpi.COMM_WORLD.bcast( mydata )
    # Yet another:
    mydata = mpi.COMM_WORLD.bcast( mydata, 0 )
    # Finally: do something with mydata
    # on all processors.
    """
    rank = core.comm_rank( comm )
    # First agree on the message category so every processor takes
    # the same branch below.
    if( rank == root ):
        messageType = getMessageType( message )
        core.bcast( messageType, 1, core.MPI_INT, root, comm )
    else:
        messageType = int(core.bcast( 0, 1, core.MPI_INT, root, comm ))

    if messageType == SINGLE:
        # Single scalar: broadcast its element type, then the value.
        dataType = getSingleType( message )
        dataType = int(core.bcast( dataType, 1, core.MPI_INT, root, comm ))
        returnvalue = core.bcast( message, 1, dataType, root, comm )
        returnvalue = formatReturnValue( returnvalue, "single" )
    elif messageType == SEQUENCE: #non-array sequences
        # Homogeneous non-array sequence: broadcast element type and
        # length before the payload itself.
        dataType = getSequenceType( message )
        dataType = int(core.bcast( dataType, 1, core.MPI_INT, root, comm ))
        length = int(core.bcast( len(message), 1, core.MPI_INT, root, comm))
        returnvalue = core.bcast( message, length, dataType, root, comm )
        returnvalue = formatReturnValue( returnvalue, "sequence" )
    elif messageType == ARRAY: #Array Case
        returnvalue = array.bcast(message, root, comm )
        returnvalue = formatReturnValue( returnvalue, "array" )
    else: #object case
        # Anything else is handled generically (pickled object path).
        returnvalue = sobj.bcast( message, root, comm )# (2) are the same
        returnvalue = formatReturnValue( returnvalue, "object" )
    return returnvalue
Beispiel #5
0
def reduce( array, function, root=0, comm=core.MPI_COMM_WORLD ):
    """
    Gather `array` onto `root` and apply `function` to the gathered
    data there.  Returns the reduced value on root, None elsewhere.
    """
    gathered = gather( array, root, comm )
    if core.comm_rank( comm ) != root:
        return None
    return function( gathered )
Beispiel #6
0
def scan( message, function, comm=core.MPI_COMM_WORLD ):
    """
    Inclusive scan: gather `message` from every processor and apply
    `function` to the contributions of ranks 0 up to and including
    this processor's rank.
    """
    # BUG FIX: the original passed the Python 2 builtin `buffer`
    # instead of the `message` parameter, so the caller's data was
    # never actually communicated.
    data = allgather( message, 0, comm )
    rank = core.comm_rank( comm )
    return function( data[0:rank+1] )
Beispiel #7
0
def allreduce( message, function, root=0, comm=core.MPI_COMM_WORLD ):
    """
    All-reduce: gather `message` from every processor and apply
    `function` to the gathered list on every processor.

    i.e.:
    global_ave = comm.allreduce( local_ave, mpi.MPI_SUM )
    """
    data = allgather( message, root, comm )
    return function(data)
Beispiel #8
0
def reduce( message, function, root=0, comm=core.MPI_COMM_WORLD ):
    """
    result = comm.reduce( value(s), function(operation), root(defaults to 0) )

    i.e.:
        global_ave = comm.reduce( local_ave, mpi.MPI_SUM )

    Returns the reduced value on root, None on all other processors.
    """
    my_rank = core.comm_rank( comm )
    gathered = gather( message, root, comm )
    if my_rank != root:
        return None
    return function( gathered )
Beispiel #9
0
    def reduce(self, message, function, root=0):
        """
        result = comm.reduce( value(s), function(operation), root(defaults to 0) )

        i.e.:
          global_ave = comm.reduce( local_ave, mpi.MPI_SUM )

        Returns the reduced value on root, None elsewhere.
        """
        gathered = self.gather(message, root)
        if core.comm_rank(self.id) != root:
            return None
        return function(gathered)
Beispiel #10
0
def gatherv(  message, root = 0,comm=core.MPI_COMM_WORLD ):
    """
    Gather an arbitrary picklable object from each processor onto
    `root`; per-processor pickles differ in length, hence gatherv.

    Returns the list of unpickled objects (one per processor, in
    rank order) on root, and None on all other processors.
    """
    rank = core.comm_rank( comm )
    size = core.comm_size( comm )
    # Serialize the local message; its length varies per processor.
    s = pickle.dumps(message)
    
    # Exchange each processor's pickle length so root can size its
    # receive buffer.
    # NOTE(review): the displacement loop below iterates recvlengths
    # on every rank -- this assumes core.gather returns a sequence on
    # non-root ranks too; confirm against core's implementation.
    recvlengths = core.gather(len(s),1,core.MPI_INT,
                              1, core.MPI_INT,
                              root, comm)
        
    # Byte offset of each processor's chunk in the receive buffer:
    # a running sum of the preceding pickle lengths.
    displacements = [0]
    displ = 0
    for rl in recvlengths[:-1]:
        displacements.append( displ+rl )
        displ += rl
        
    data = core.gatherv(s, len(s), core.MPI_CHAR,
                        recvlengths, displacements, core.MPI_CHAR,
                        root, comm)
    if rank==root:
        # Convert the raw character buffer to a byte string, then
        # slice it back into the individual per-processor pickles.
        data = data.tostring()
        i,n=0,0
        realdata = []
        
        for length in recvlengths:
            n += length
            realdata.append(pickle.loads(data[i:n]))
            i += length       
    else:
        realdata = None
    return realdata
Beispiel #11
0
def bcast( array, root, comm ):
    """
    Broadcast a numeric array from `root` to every processor.

    Metadata (MPI datatype, rank, shape, size) is broadcast first so
    non-root processors can receive and reshape the payload; every
    processor returns the resulting array.
    """
    me = core.comm_rank(comm)
    if me != root:
        # Dummies; the true metadata arrives in the broadcasts below.
        datatype, size, shape, rank = 0, 0, 0, 0
    else:
        datatype = getMpiType(array)
        size = nm.size( array )
        shape = nm.shape( array )
        rank = nm.rank( array )
    datatype = core.bcast( datatype, 1, core.MPI_INT, root, comm )
    rank = core.bcast( rank, 1, core.MPI_INT, root, comm )
    shape = core.bcast( shape, rank, core.MPI_INT, root, comm )
    size = core.bcast( size, 1, core.MPI_INT, root, comm )
    payload = core.bcast( array, size, datatype, root, comm )
    result = nm.asarray(payload)
    result.shape = shape
    return result
Beispiel #12
0
def scatterv(buffer, root=0, comm=core.MPI_COMM_WORLD):
    """
    Scatter the picklable items of `buffer` from `root` across the
    communicator; each processor unpickles and returns its chunk.

    Input Parameters
      sendbuf: address of send buffer (choice, significant only at root)
      sendcounts: integer array (of length group size) specifying the number of elements to send to each processor
      displs: integer array (of length group size). Entry i specifies the displacement (relative to sendbuf from which to take the outgoing data to process i
      sendtype: data type of send buffer elements (handle)
      recvcount: number of elements in receive buffer (integer)
      recvtype: data type of receive buffer elements (handle)
      root: rank of sending process (integer)
      comm: communicator (handle)

    Output Parameter
      recvbuf: address of receive buffer (choice)
    """
    size = core.comm_size( comm )
    rank = core.comm_rank( comm )

    length = len(buffer)
    sendbuffer = []
    displacements = []
    sendcounts = []
    charbuffer = ""
    if( rank == root ):
        # Pickle each item individually, then group the pickles into
        # per-processor chunks below.
        for b in buffer:
            sendbuffer.append( pickle.dumps( b ) )
        if( length % size == 0 ):
            # NOTE(review): chunks are sliced with a stride of `size`
            # (the processor count) rather than length/size items per
            # processor -- verify intent for length != size**2.
            for x in xrange( size ):
                start = x * size
                end = start + size
                elements = sendbuffer[start:end]
                es = pickle.dumps(elements)
                s = 0
                d = 0
                s = len(es)
                sendcounts.append(s)
                displacements.append(d)
                # NOTE(review): `d` is reset to 0 at the top of every
                # iteration, so each appended displacement is 0 --
                # looks like a bug (running offset never accumulates).
                d += s
                charbuffer += es
        else:
            remainder = length % size
            # NOTE(review): the slicing/appending block after this
            # loop sits OUTSIDE the loop body, so only the final
            # processor's chunk is ever pickled and appended --
            # almost certainly an indentation bug.
            for x in xrange( size ):
                if( x == 0 ):
                    start = 0
                    end = size + remainder
                else:
                    start = remainder + (x * size)
                    end = start + size
            elements = sendbuffer[start:end]
            es = pickle.dumps(elements)
            s = 0
            d = 0
            s = len(es)
            sendcounts.append(s)
            displacements.append(d)
            d+=s
            charbuffer+=es
        # SEND:
    # NOTE(review): on non-root ranks `sendcounts` is still empty, so
    # sendcounts[rank] raises IndexError -- confirm how non-root
    # processors are expected to learn their receive count.
    recvbuffer = core.scatterv( charbuffer, sendcounts, displacements, core.MPI_CHAR,
                   sendcounts[rank], core.MPI_CHAR, root, comm )

    data = pickle.loads(recvbuffer)
    return data
Beispiel #13
0
def scan( buffer, function, comm=core.MPI_COMM_WORLD):
    """
    Inclusive scan: apply `function` to the gathered contributions of
    ranks 0 through this processor's rank (inclusive).
    """
    contributions = allgather( buffer, 0, comm )
    my_rank = core.comm_rank( comm )
    return function( contributions[:my_rank+1] )
Beispiel #14
0
 def __init__(self, id):
     """
     Wrap an existing MPI communicator handle and cache its size and
     this process's rank.
     """
     communicator.Communicator.__init__(self, int(id))
     self.rank = core.comm_rank(self.id)
     self.size = core.comm_size(self.id)
Beispiel #15
0
 def comm_rank(self):
     """Return this process's rank within the wrapped communicator."""
     my_rank = core.comm_rank(self.id)
     return my_rank
Beispiel #16
0
def scan( array, function, comm ):
    """
    Inclusive scan: apply `function` to the gathered contributions of
    ranks 0 through this processor's rank (inclusive).
    """
    # BUG FIX: `root` was referenced without ever being defined
    # (NameError); the other scan variants in this file use root 0.
    data = allgather( array, 0, comm )
    rank = core.comm_rank( comm )
    return function(data[0:rank+1])
Beispiel #17
0
def allreduce( **args ):
    """
    All-reduce via allgather.

    Keyword arguments:
      array    -- this processor's contribution
      function -- reduction applied to the gathered list
      root     -- root passed to allgather (default 0)
      comm     -- communicator (default core.MPI_COMM_WORLD)
    """
    # BUG FIX: the original body referenced array/function/root/comm
    # without ever extracting them from **args, so every call raised
    # NameError.  Defaults mirror the sibling allreduce variants.
    array = args["array"]
    function = args["function"]
    root = args.get("root", 0)
    comm = args.get("comm", core.MPI_COMM_WORLD)
    data = allgather( array, root, comm )
    return function(data)