Example #1
def test():
    # access the package
    import mpi
    # initialize
    mpi.init()
    # grab the world communicator
    world = mpi.world
    # access the world process group
    whole = world.group()
    # build a tuple of the even ranks
    ranks = tuple(rank for rank in range(world.size) if (rank % 2 == 0))

    # build two groups
    odds = whole.exclude(ranks)
    evens = whole.include(ranks)

    # compute the union of the two
    union = odds.union(evens)
    # verify that the size is right
    assert union.size == world.size

    # compute the intersection of the two
    intersection = odds.intersection(evens)
    # verify that this is an empty group
    assert intersection.isEmpty()

    # compute the difference (world - odd)
    difference = whole.difference(odds)
    # verify it is the same size as evens
    assert difference.size == evens.size

    return
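
These aggregated snippets drop each file's module-level driver; in the pyre test files the convention is typically to invoke the test function when the module is run as a script, along these lines:

# main - a minimal driver, following the usual convention of these test files
if __name__ == "__main__":
    test()
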
Example #2
File: port.py Project: pyre/pyre
def test():
    # access the package
    import mpi
    # initialize
    mpi.init()
    # get the world communicator
    world = mpi.world
    # its size
    size = world.size
    # and my rank
    rank = world.rank

    # check that the world has at least two tasks
    if size < 2: return

    # the source of the message I will receive
    source = world.port(peer=(rank-1)%size)
    # the destination of the message I will send
    destination = world.port(peer=(rank+1)%size)

    # send my message to the guy to my right
    destination.sendString("Hello {}!".format(destination.peer))
    # and receive a message from the guy to my left
    message = source.recvString()
    # and check its contents
    assert message == "Hello {}!".format(rank)

    # repeat by exchanging pickled objects
    destination.send("Hello {}!".format(destination.peer))
    message = source.recv()
    # checks
    assert message == "Hello {}!".format(rank)

    # all done
    return
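
The wrap-around neighbor arithmetic behind the two ports is worth seeing on its own; a plain-Python sketch, no MPI required:

# each rank receives from its left neighbor and sends to its right one;
# the modulus closes the ring, so rank 0's left neighbor is rank size-1
size = 4
for rank in range(size):
    print(rank, "receives from", (rank - 1) % size,
          "and sends to", (rank + 1) % size)
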
Example #3
def test():
    # access the package
    import mpi
    # initialize
    mpi.init()
    # grab the world communicator
    world = mpi.world
    # access the world process group
    whole = world.group()
    # build a tuple of the even ranks
    ranks = tuple(rank for rank in range(world.size) if (rank % 2 == 0))
    # convert it into a group
    even = whole.include(ranks)

    # build a group with the odd ranks from the difference (world - even)
    odd = whole.difference(even)
    # and the matching communicator
    new = world.restrict(odd)

    # check
    # if I have even rank
    if world.rank % 2 == 0:
        # then {new} must be {None}
        assert new is None
    # otherwise
    else:
        # I must have a valid communicator
        assert new is not None
        # whose size is related to the world size
        assert new.size == world.size // 2
        # and my ranks must be related
        assert new.rank == world.rank // 2

    return
Example #4
File: max.py Project: jlmaurer/pyre
def test():
    # access the package
    import mpi
    # initialize
    mpi.init()
    # get the world communicator
    world = mpi.world
    # and its structure
    rank = world.rank
    size = world.size
    # set up a destination for the reduction
    destination = int(size / 2)
    # create a value
    number = rank**2
    # perform the reduction
    largest = world.max(item=number, destination=destination)
    # check it
    if rank == destination:
        assert largest == (size - 1)**2
    else:
        assert largest is None
    # perform the all process reduction
    largest = world.max(item=number)
    # check it
    assert largest == (size - 1)**2
    # all done
    return
Example #5
def test():
    # setup the workload
    samples = 8
    value = 7.0

    # externals
    import mpi
    import cuda

    mpi.init()
    # get the world communicator
    world = mpi.world
    # figure out its geometry
    rank = world.rank
    tasks = world.size

    # decide which task is the source
    source = 0

    # vector test
    v = cuda.vector(shape=samples)

    # set values at the source task
    if rank == source:
        v.fill(value)

    # broadcast
    v.bcast(communicator=world, source=source)

    # verify that i got the correct part
    cv = v.copy_to_host()
    for index in range(samples):
        assert cv[index] == value

    # matrix test
    m = cuda.matrix(shape=(samples, samples))

    # set values at the source task
    if rank == source:
        m.fill(value)

    # broadcast
    m.bcast(communicator=world, source=source)

    # verify that i got the correct part
    cm = m.copy_to_host()
    for i in range(samples):
        for j in range(samples):
            assert cm[i, j] == value

    # all done
    return
Example #6
def test():
    # setup the workload
    sampleSize = 4
    samplesPerTask = 4
    workload = (samplesPerTask, sampleSize)

    # externals
    import mpi
    import gsl

    # initialize
    mpi.init()
    # get the world communicator
    world = mpi.world
    # figure out its geometry
    rank = world.rank
    tasks = world.size

    # decide which task is the source
    source = 0
    # at the source task
    if rank == source:
        # allocate a matrix
        θ = gsl.matrix(shape=(tasks * samplesPerTask, sampleSize))
        # initialize it
        for task in range(tasks):
            for sample in range(samplesPerTask):
                for dof in range(sampleSize):
                    offset = task * samplesPerTask + sample
                    θ[offset, dof] = offset * sampleSize + dof
        # print it out
        # θ.print(format="{}")
    # the other tasks
    else:
        # have a dummy source matrix
        θ = None

    # build the destination matrix
    part = gsl.matrix(shape=workload)
    # make a partition
    part.excerpt(communicator=world, source=source, matrix=θ)

    # verify that i got the correct part
    for row in range(samplesPerTask):
        for column in range(sampleSize):
            assert part[row, column] == (rank * samplesPerTask + row) * sampleSize + column

    # all done
    return
Example #7
def test():
    # access the package
    import mpi
    # initialize mpi
    mpi.init()
    # grab the world communicator
    world = mpi.world
    # access the world process group
    whole = world.group()

    # check that I can compute ranks correctly
    assert world.rank == whole.rank

    return
Example #8
import random
import mpi

# assumed value; the original defines {problemlength} elsewhere - any
# multiple of the number of tasks works
problemlength = 1000

def main():
    myrank, size = mpi.init()

    # split the problem in chunks
    if problemlength % size == 0:
        blocksize = problemlength // size
    else:
        print("Sorry, I don't know how to split up the problem, aborting!")
        mpi.finalize()
        return

    if myrank == 0:
        data = list(range(1, problemlength + 1))  # create a toy dataset...
        random.shuffle(data)                      # ...modifies data in place

        mydata = data[0:blocksize]  # keep some data for myself...
                                    # and send the rest to the other tasks
        for host in range(1, size):
            hisdata = data[blocksize*host:blocksize*(host+1)]
            mpi.send(hisdata, blocksize, mpi.MPI_INT, host, 0, mpi.MPI_COMM_WORLD)
    else:
        mydata = mpi.recv(blocksize, mpi.MPI_INT, 0, 0, mpi.MPI_COMM_WORLD)

    mymax = max(mydata)

    # gather every task's local maximum at rank 0
    maximums = mpi.gather(mymax, 1, mpi.MPI_INT, size, mpi.MPI_INT, 0, mpi.MPI_COMM_WORLD)

    if myrank == 0:
        mymax = max(maximums)
        print("The maximum value is:", mymax)

    mpi.finalize()
Example #9
import random
import mpi

def main():
    # Start MPI
    myrank, size = mpi.init()
    # Create a toy dataset:
    data = list(range(1, 1001))  # We know what the max will be already :-)
    random.shuffle(data)         # Modifies data in place

    # Divide up the problem (if we can divide it evenly)
    if len(data) % size == 0:
        blocksize = len(data) // size
        start = blocksize * myrank
        end = start + blocksize
        mydata = data[start:end]
        localmax = max(mydata)
        # gather every task's local maximum at rank 0
        maximums = mpi.gather(localmax, 1, mpi.MPI_INT, size, mpi.MPI_INT, 0,
                              mpi.MPI_COMM_WORLD)
        if myrank == 0:
            print("The maximum value is:", max(maximums))
    else:
        print("Sorry, I don't know how to split up the problem, aborting!")
    # every task must finalize, not just rank 0
    mpi.finalize()
Example #10
File: ip.py Project: pyre/pyre
def test():
    # externals
    import mpi
    import socket

    # initialize mpi
    mpi.init()
    # get the world communicator
    world = mpi.world
    # get my ip address
    host = socket.gethostname()

    print("{0.rank:03}/{0.size:03}: {1}".format(world, host))

    # all done
    return
Example #11
# minimal stand-in for the {message} class defined elsewhere in the original
# test file; assumed to hold a payload and to compare by value
class message:
    def __init__(self, data):
        self.data = data
    def __eq__(self, other):
        return self.data == other.data

def test():
    # access the package
    import mpi
    # initialize
    mpi.init()
    # get the world communicator
    world = mpi.world
    # set up a source for the broadcast
    source = int(world.size / 2)
    # create a message
    item = message(data="Hello from {}".format(source))
    # broadcast it
    received = world.bcast(item=item, source=source)
    # check it
    assert received == item
    # all done
    return
Example #12
def test():
    # setup the workload
    sampleSize = 4
    samplesPerTask = 1
    workload = (samplesPerTask, sampleSize)

    # externals
    import mpi
    import gsl

    # initialize
    mpi.init()
    # get the world communicator
    world = mpi.world
    # figure out its geometry
    rank = world.rank
    tasks = world.size

    # build my contribution
    θ = gsl.matrix(shape=workload)
    # and initialize it
    for row in range(samplesPerTask):
        for column in range(sampleSize):
            θ[row, column] = (rank * samplesPerTask + row) * sampleSize + column

    # decide on the destination task
    destination = 0
    # exercise it
    result = gsl.matrix.collect(matrix=θ,
                                communicator=world,
                                destination=destination)

    # at the destination task
    if rank == destination:
        # verify that i got the correct parts
        for task in range(tasks):
            for sample in range(samplesPerTask):
                for dof in range(sampleSize):
                    offset = task * samplesPerTask + sample
                    assert result[offset, dof] == offset * sampleSize + dof
        # print it out
        # result.print(format='{}')

    # all done
    return
Example #13
def test():
    # setup the workload
    samples = 4
    parameters = 8
    workload = (samples, parameters)

    # externals
    import mpi
    import gsl

    # initialize
    mpi.init()
    # get the world communicator
    world = mpi.world
    # figure out its geometry
    rank = world.rank
    tasks = world.size

    # decide which task is the source
    source = 0
    # at the source task
    if rank == source:
        # allocate a matrix
        θ = gsl.matrix(shape=workload)
        # initialize it
        for sample in range(samples):
            for dof in range(parameters):
                θ[sample, dof] = sample * parameters + dof
        # print it out
        # θ.print(format="{}")
    # the other tasks
    else:
        # have a dummy source matrix
        θ = None

    # broadcast
    result = gsl.matrix.bcast(source=source, matrix=θ)

    # verify that i got the correct part
    for sample in range(samples):
        for dof in range(parameters):
            assert result[sample, dof] == sample * parameters + dof

    # all done
    return
Example #14
def test():
    # setup the workload
    samplesPerTask = 8
    workload = samplesPerTask

    # externals
    import mpi
    import gsl

    # initialize
    mpi.init()
    # get the world communicator
    world = mpi.world
    # figure out its geometry
    rank = world.rank
    tasks = world.size

    # decide which task is the source
    source = 0
    # at the source task
    if rank == source:
        # allocate a vector
        θ = gsl.vector(shape=tasks * samplesPerTask)
        # initialize it
        for task in range(tasks):
            for sample in range(samplesPerTask):
                offset = task * samplesPerTask + sample
                θ[offset] = offset
        # print it out
        # θ.print(format="{}")
    # the other tasks
    else:
        # have a dummy source vector
        θ = None

    # make a partition
    part = gsl.vector(shape=workload)
    part.excerpt(communicator=world, source=source, vector=θ)

    # verify that i got the correct part
    for index in range(samplesPerTask):
        assert part[index] == rank * samplesPerTask + index

    # all done
    return
Example #15
def test():
    # access the package
    import mpi
    # initialize it
    mpi.init()
    # get the world communicator
    world = mpi.world
    # extract the size of the communicator and my rank within it
    size = world.size
    rank = world.rank
    # verify that my rank is within range
    assert rank in range(size)

    # for debugging purposes:
    # import platform
    # print("Hello from {}/{}: {}".format(rank, size, platform.node()))

    # all done
    return
Example #16
def test():
    # setup the workload
    samplesPerTask = 8
    workload = samplesPerTask

    # externals
    import mpi
    import gsl

    # initialize
    mpi.init()
    # get the world communicator
    world = mpi.world
    # figure out its geometry
    rank = world.rank
    tasks = world.size

    # build my contribution
    θ = gsl.vector(shape=workload)
    # and initialize it
    for index in range(samplesPerTask):
        θ[index] = rank * samplesPerTask + index

    # decide on the destination task
    destination = 0
    # exercise it
    result = gsl.vector.collect(vector=θ,
                                communicator=world,
                                destination=destination)

    # at the destination task
    if rank == destination:
        # verify that i got the correct parts
        for task in range(tasks):
            for index in range(samplesPerTask):
                offset = task * samplesPerTask + index
                assert result[offset] == offset
        # print it out
        # result.print(format='{}')

    # all done
    return
Example #17
def test():
    # access the package
    import mpi
    # initialize
    mpi.init()
    # grab the world communicator
    world = mpi.world
    # access the world group
    whole = world.group()
    # slice just the even ranks
    evens = whole.include(rank for rank in range(world.size) if (rank % 2 == 0))

    # check that the size of this group is half the number of processors, rounded up
    assert evens.size == (world.size+1) // 2

    # and check my rank
    if world.rank % 2 == 0:
        assert evens.rank == world.rank // 2
    else:
        assert evens.rank == evens.mpi.undefined

    return
Example #18
def test():
    # access the package
    import mpi
    # initialize
    mpi.init()
    # grab the world communicator
    world = mpi.world
    # access the world group
    whole = world.group()
    # slice just the even ranks
    evens = whole.exclude(rank for rank in range(world.size) if (rank % 2 != 0))

    # check that the size of this group is half the number of processors, rounded up
    assert evens.size == (world.size+1) // 2

    # and check my rank
    if world.rank % 2 == 0:
        assert evens.rank == world.rank // 2
    else:
        assert evens.rank == evens.mpi.undefined

    return
Example #19
File: sum.py Project: lijun99/pyre
def test():
    # access the package
    import mpi
    # initialize
    mpi.init()
    # get the world communicator
    world = mpi.world
    # and its structure
    rank = world.rank
    size = world.size
    # set up a destination for the reduction
    destination = int(size / 2)
    # create a value
    number = rank**2
    # perform the reduction
    total = world.sum(item=number, destination=destination)
    # check it
    if rank == destination:
        assert total == (size - 1) * size * (2 * size - 1) / 6
    else:
        assert total is None
    # all done
    return
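
The asserted value is the closed form for the sum of the squared ranks; the identity itself is easy to spot-check in plain Python:

# sum of r**2 for r in 0..n-1 equals (n-1)*n*(2n-1)/6; here n = 8
size = 8
assert sum(rank**2 for rank in range(size)) == (size - 1) * size * (2 * size - 1) // 6
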
Example #20
def test():
    # externals
    import mpi
    import math
    # initialize
    mpi.init()
    # get the world communicator
    world = mpi.world
    # and its structure
    rank = world.rank
    size = world.size
    # set up a destination for the reduction
    destination = int(size / 2)
    # create a value
    number = rank + 1
    # perform the reduction
    product = world.product(item=number, destination=destination)
    # check it
    if rank == destination:
        assert product == math.factorial(size)
    else:
        assert product is None
    # all done
    return
Example #21
import pickle
import mpi

# assumed sample payload; the original defines {somedict} elsewhere
somedict = {"a": 1, "b": 2}

def main():
    rank, size = mpi.init()

    serial_dict = pickle.dumps(somedict)

    # non-blocking send to rank 0, matched by the blocking receive below
    mpi.isend(serial_dict, len(serial_dict), mpi.MPI_CHAR, 0, 0, mpi.MPI_COMM_WORLD)

    new_serial_dict = mpi.recv(len(serial_dict), mpi.MPI_CHAR, 0, 0, mpi.MPI_COMM_WORLD)
    print(new_serial_dict)

    mpi.finalize()

    newdict = pickle.loads(new_serial_dict)
    print(newdict)
    return
Example #22
def test():
    # access the extension module
    import mpi
    # initialize it
    ext = mpi.init()
    # get the world communicator
    world = ext.world
    # extract the size of the communicator and my rank within it
    size = ext.communicatorSize(world)
    rank = ext.communicatorRank(world)
    # verify that my rank is within range
    assert rank in range(size)

    # for debugging purposes:
    # print("Hello from {}/{}!".format(rank, size))

    # all done
    return
Example #23
    def parallel(self, *args, **kwds):
        """
        Called after the parallel machine has been built and it is time to invoke the
        user's code on every node
        """
        # pull the runtime support
        import mpi
        # and try to initialize it
        if mpi.init():
            # if all goes well, grant access to the global communicator
            self.world = mpi.world
            # launch the application and return its exit code
            return super().launch(*args, **kwds)

        # if something went wrong, get the journal
        import journal
        # make a channel
        channel = journal.error("mpi.init")
        # complain
        channel.log("failed to initialize the mpi runtime support")
        # and bail with an error code
        return 1
Example #24
import Numeric as nm
import mpi

mpi.init()
rank = mpi.comm_rank(mpi.MPI_COMM_WORLD)
size = mpi.comm_size(mpi.MPI_COMM_WORLD)
root = 0

# every task contributes a different number of elements
message = [rank] * (size + rank)
print("Sending:", message)

# gather the per-task element counts at the root
recvcounts = mpi.gather(len(message), 1, mpi.MPI_INT, 1, mpi.MPI_INT, root, mpi.MPI_COMM_WORLD)

# displacements are the running sums of the receive counts
displacements = [0]
for count in recvcounts[:-1]:
    displacements.append(displacements[-1] + count)

result = mpi.gatherv(
    message, len(message), mpi.MPI_INT, recvcounts, displacements, mpi.MPI_INT, root, mpi.MPI_COMM_WORLD
)
if rank == root:
    print("Received:", result)

mpi.finalize()
Example #25
    name = "test"
    local_rank = mpi.comm_rank( local_comm )
    local_size = mpi.comm_size( local_comm )
    
    print "%s (%s,%s): creating root communicator!"%(name,local_rank,local_size)
    sys.stdout.flush()
    if local_rank == 0:
        tmp_comm = mpi.comm_split( mpi.MPI_COMM_WORLD, 5, 0 )
        print "%s (%s,%s): joined root communicator %s"%(name,local_rank,local_size,tmp_comm)
        sys.stdout.flush()
        ncomponents = mpi.comm_size( tmp_comm )
    else:
        tmp_comm = mpi.comm_split( mpi.MPI_COMM_WORLD, 6, 0 )
        print "%s (%s,%s): Joined non-root communicator %s"%(name,local_rank,local_size,tmp_comm)
        sys.stdout.flush()
        ncomponents = 0
    print "%s (%s,%s): Distributing root communicator!"%(name,local_rank,local_size)
    ncomponents = mpi.bcast( ncomponents, 1, mpi.MPI_INT, 0, local_comm )
    ncomponents = ncomponents[0]
    print "%s (%s,%s): Distributed root communicator!"%(name,local_rank,local_size)
    # Get total number of components and distribute to every node:
    # ncomponents = mpi.allreduce( ncomponents, 1, mpi.MPI_INT, mpi.MPI_SUM, root_comm )
    #print "%s (%s,%s): Root Comm = %s"%(name,local_rank,local_size,root_comm)
    #ncomponents = mpi.comm_size( root_comm )
    print "%s(%s,%s): ncomponents = %s"%(name, local_rank, local_size, ncomponents )

if __name__ == "__main__":
    rank, size = mpi.init(len(sys.argv), sys.argv)
    main(mpi.MPI_COMM_WORLD)
    mpi.finalize()
Example #26
import sys
import mpi

def main():
    # note: {init} is called twice; the original snippet appears to
    # exercise double initialization on purpose
    mpi.init(len(sys.argv), sys.argv)
    mpi.init(len(sys.argv), sys.argv)
    mpi.finalize()
Example #27
import mpi
import random

def computePi(size, nsamples):
    inside = 0
    # Monte Carlo bit: sample points in the unit square and count how many
    # fall inside the quarter unit circle
    for i in range(nsamples):
        x = random.random()
        y = random.random()
        if (x*x) + (y*y) < 1:
            inside += 1
    # combine the counts from every task
    sum_inside = mpi.allreduce(inside, 1, mpi.MPI_INT, mpi.MPI_SUM, mpi.MPI_COMM_WORLD)
    # The "* 4" is needed because we're computing the number of points inside
    # a QUARTER unit circle.  So we're really computing (PI / 4).
    pi = (sum_inside[0] / (nsamples * size)) * 4
    return pi

if __name__ == "__main__":
    rank, size = mpi.init()
    # More sample points should make a more accurate value for pi.
    pi = computePi(size, 10000)
    if rank == 0:
        print("Computed value of pi on", size, "processors is", pi)
    mpi.finalize()

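The factor of four is easy to check without any MPI machinery: the fraction of uniform points that lands inside the quarter circle tends to pi/4, so scaling by 4 recovers pi. A serial spot-check in plain Python:

import random

# serial Monte Carlo estimate of pi - no MPI involved
nsamples = 100000
inside = sum(1 for _ in range(nsamples)
             if random.random()**2 + random.random()**2 < 1)
print("pi is approximately", 4 * inside / nsamples)
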
Example #28
    def __init__(self):
        """Constructor. See above."""
        self.rank, self.size = mpi.init()
Example #29
import sys
import mpi

def main():
    print(sys.argv)
    rank, size = mpi.init()
    print("size: %d, rank: %d" % (size, rank))
    # print argv again to see whether init modified it
    print(sys.argv)
    mpi.finalize()