Example 1
def slave():

    sys.stderr.write("[SLAVE %d]: I am processor %d of %d on node %s\n" % (MPI_myid, MPI_myid, MPI_numproc, MPI_node))

    while True:
        # raw_receive fills the caller-supplied buffer in place; the status
        # object exposes the tag and source of the message that arrived
        result = " "
        err, status = pypar.raw_receive(result, pypar.any_source, pypar.any_tag, return_status=True)
        sys.stderr.write(
            "[SLAVE %d]: received work '%s' with tag '%d' from node '%d'\n"
            % (MPI_myid, result, status.tag, status.source)
        )

        if status.tag == DIETAG:
            sys.stderr.write("[SLAVE %d]: received termination from node '%d'\n" % (MPI_myid, 0))
            return
        else:
            result = "X" + result
            pypar.raw_send(result, 0)
            sys.stderr.write("[SLAVE %d]: sent result '%s' to node '%d'\n" % (MPI_myid, result, 0))
Example 2
def slave():

    print "[SLAVE %d]: I am processor %d of %d on node %s" % (
        MPI_myid, MPI_myid, MPI_numproc, MPI_node)

    while True:
        result = ' '
        # older pypar API: raw_receive also returns status information,
        # with err[1][0] holding the message source and err[1][1] its tag
        err = pypar.raw_receive(result, pypar.MPI_ANY_SOURCE,
                                pypar.MPI_ANY_TAG)
        print "[SLAVE %d]: received work '%s' with tag '%d' from node '%d'" % (
            MPI_myid, result, err[1][1], err[1][0])

        if (err[1][1] == DIETAG):
            print "[SLAVE %d]: received termination from node '%d'" % (
                MPI_myid, 0)
            return
        else:
            result = 'X' + result
            pypar.raw_send(result, 0)
            print "[SLAVE %d]: sent result '%s' to node '%d'" % (MPI_myid,
                                                                 result, 0)
Example 3
def master():
    numCompleted = 0

    print "[MASTER]: I am processor %d of %d on node %s" % (
        MPI_myid, MPI_numproc, MPI_node)

    # start the slaves by handing each one its first work item
    for i in range(1, MPI_numproc):
        work = workList[i]
        pypar.raw_send(work, i, WORKTAG)
        print "[MASTER]: sent work '%s' to node '%d'" % (work, i)

    # dispatch the remaining work items with a dynamic load-balancing policy:
    # the quicker a slave finishes, the more jobs it gets
    for work in workList[MPI_numproc:]:
        result = '  '
        err = pypar.raw_receive(result, pypar.MPI_ANY_SOURCE,
                                pypar.MPI_ANY_TAG)
        print "[MASTER]: received result '%s' from node '%d'" % (result,
                                                                 err[1][0])
        numCompleted += 1
        pypar.raw_send(work, err[1][0], WORKTAG)
        print "[MASTER]: sent work '%s' to node '%d'" % (work, err[1][0])

    # all work items have been dispatched
    print "[MASTER]: toDo : %d" % numWorks
    print "[MASTER]: done : %d" % numCompleted

    # collect the remaining completions
    while (numCompleted < numWorks):
        result = '  '
        err = pypar.raw_receive(result, pypar.MPI_ANY_SOURCE,
                                pypar.MPI_ANY_TAG)
        print "[MASTER]: received (final) result '%s' from node '%d'" % (
            result, err[1][0])
        numCompleted += 1
        print "[MASTER]: %d completed" % numCompleted

    print "[MASTER]: about to terminate slaves"

    # tell the slaves to stop working
    for i in range(1, MPI_numproc):
        pypar.raw_send('#', i, DIETAG)
        print "[MASTER]: sent (final) work '%s' to node '%d'" % (0, i)

    return
Example 4
def master():
    numCompleted = 0

    sys.stderr.write("[MASTER]: I am processor %d of %d on node %s\n" % (MPI_myid, MPI_numproc, MPI_node))

    # start the slaves by handing each one its first work item
    for i in range(1, min(MPI_numproc, numWorks)):
        work = workList[i]
        pypar.raw_send(work, i, WORKTAG)
        sys.stderr.write("[MASTER]: sent work '%s' to node '%d'\n" % (work, i))

    # dispatch the remaining work items with a dynamic load-balancing policy:
    # the quicker a slave finishes, the more jobs it gets
    for work in workList[MPI_numproc:]:
        result = "  "
        err, status = pypar.raw_receive(result, pypar.any_source, pypar.any_tag, return_status=True)
        sys.stderr.write("[MASTER]: received result '%s' from node '%d'\n" % (result, status.source))
        numCompleted += 1
        pypar.raw_send(work, status.source, WORKTAG)
        sys.stderr.write("[MASTER]: sent work '%s' to node '%d'\n" % (work, status.source))

    # all work items have been dispatched
    sys.stderr.write("[MASTER]: toDo : %d\n" % numWorks)
    sys.stderr.write("[MASTER]: done : %d\n" % numCompleted)

    # collect the remaining completions
    while numCompleted < numWorks:
        result = "  "
        err, status = pypar.raw_receive(result, pypar.any_source, pypar.any_tag, return_status=True)
        sys.stderr.write("[MASTER]: received (final) result '%s' from node '%d'\n" % (result, status.source))
        numCompleted += 1
        sys.stderr.write("[MASTER]: %d completed\n" % numCompleted)

    sys.stderr.write("[MASTER]: about to terminate slaves\n")

    # tell the slaves to stop working
    for i in range(1, MPI_numproc):
        pypar.raw_send("#", i, DIETAG)
        sys.stderr.write("[MASTER]: sent (final) work '%s' to node '%d'\n" % (0, i))

    return
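
Both master() and slave() above depend on module-level names (MPI_myid, MPI_numproc, MPI_node, workList, numWorks, WORKTAG and DIETAG) that the excerpts do not show. The driver below is only a minimal sketch of how the pieces might be wired together: the tag values and the work list are placeholders, and it assumes pypar's rank(), size(), get_processor_name() and finalize() helpers.

import sys
import pypar

# message tags for the master/slave protocol; the values are placeholders
# and only need to differ so a slave can tell work from termination
WORKTAG = 1
DIETAG = 2

MPI_myid = pypar.rank()                # rank of this process
MPI_numproc = pypar.size()             # total number of processes
MPI_node = pypar.get_processor_name()  # host this process runs on

# placeholder work items; the first entry is a dummy because master()
# starts handing out work at index 1 and never dispatches workList[0]
workList = ["_dummy_"] + ["job_%d" % i for i in range(15)]
numWorks = len(workList) - 1           # items that will actually be sent

if MPI_numproc < 2:
    sys.stderr.write("At least two processes are needed for master/slave\n")
elif MPI_myid == 0:
    master()   # rank 0 distributes work and collects results
else:
    slave()    # all other ranks process work until DIETAG arrives

pypar.finalize()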
Example 5
print "I am processor %d of %d on node %s" %(myid, numproc, node)
pypar.Barrier()


if numproc > 1:
  # Test simple raw communication (arrays, strings and general)
  #
  N = 17 #Number of elements
  
  if myid == 0:
    # Integer arrays
    #
    A = Numeric.array(range(N))
    B = Numeric.zeros(N)    
    pypar.raw_send(A,1)
    pypar.raw_receive(B,numproc-1)
    
    assert Numeric.allclose(A, B)
    print "Raw communication of numeric integer arrays OK"

    # Real arrays
    #
    A = Numeric.array(range(N)).astype('f')
    B = Numeric.zeros(N).astype('f')    
    pypar.raw_send(A,1)
    pypar.raw_receive(B,numproc-1)
    
    assert Numeric.allclose(A, B)    
    print "Raw communication of numeric real arrays OK"