import sys
import numpy as np
from mpi4py import MPI


def runWorker(comm, histogramSize, gatherInterval, numEventsScaleFactor):
    """Worker side of a shared memory example that reduces a histogram to a viewer.

    Maintains a local histogram. Periodically updates the communicator and
    reduces to the viewer, then removes itself from the communicator once its
    events are exhausted.

    Each reduce covers only the worker events seen since the last reduce.

    ARGS:
      comm - initial communicator to use
      histogramSize - size of the histogram to reduce
      gatherInterval - number of events to process between reduces
      numEventsScaleFactor - scale factor for the number of mock events per worker
    """
    sharedMemDs = MockDataSource(numEventsScaleFactor)
    localHistogram = np.zeros(histogramSize, np.int64)
    idx = None
    for idx, evt in enumerate(sharedMemDs.events()):
        # mock fill: bin by event index rather than by real event data
        localHistogram[idx % histogramSize] += 1
        if (idx % gatherInterval) == 0:
            comm = collectiveCommUpdate(comm, stayInComm=True)
            workerReduce(comm, localHistogram)
            localHistogram[:] = 0

    # send any leftover counts from a partial final interval
    if (idx is not None) and (idx % gatherInterval) != 0:
        comm = collectiveCommUpdate(comm, stayInComm=True)
        workerReduce(comm, localHistogram)

    # remove self from collective communication
    dprint("before final comm update to remove self")
    collectiveCommUpdate(comm, stayInComm=False)
    dprint("after final comm update")


def runViewerReduceHistogram(comm, histogramSize, finalReducedHistFilename):
    """Viewer side of a shared memory example that reduces a histogram from worker ranks.

    Maintains a histogram. Continues to update the communicator and reduce until
    no more workers participate in the communicator.

    Each reduce covers only the worker events seen since the last reduce.

    ARGS:
      comm - initial communicator to use
      histogramSize - size of the histogram to reduce
      finalReducedHistFilename - filename for the numpy .npy file with the final histogram
    """
    assert comm.Get_rank() == 0
    histogram = np.zeros(histogramSize, np.int64)
    while True:
        comm = collectiveCommUpdate(comm, stayInComm=True)

        # check if only the viewer is left in communicator and exit if so
        if comm.Get_size() == 1:
            break

        # the viewer contributes zeros; the reduced sum lands in recvHistogram
        localHistogram = np.zeros(histogramSize, np.int64)
        recvHistogram = np.zeros(histogramSize, np.int64)
        dprint("before Reduce")
        comm.Reduce(sendbuf=[localHistogram, MPI.INT64_T], recvbuf=[recvHistogram, MPI.INT64_T], op=MPI.SUM, root=0)
        histogram[:] += recvHistogram[:]
        print "rank=0 After Reduce. reduced histogram: %s" % histogram
    np.save(finalReducedHistFilename, histogram)
    print "rank=0 saved final reduced histogram=%s in file: %s" % (histogram, finalReducedHistFilename)


def collectiveCommUpdate(comm, stayInComm):
    '''Collectively update a communicator based on which ranks continue.

    Note: requires rank 0 to be the root/viewer/master.
    Rank 0 must always pass stayInComm=True.

    ARGS:
      comm - MPI communicator
      stayInComm - Bool - True if this rank will stay in the communicator

    RETURN:
      comm - a potentially new communicator built from the ranks that stayed
             in; the existing comm is returned unchanged if no ranks dropped.
             The returned comm always contains at least one rank (rank 0).

    Example:
      comm = collectiveCommUpdate(comm, True)
    '''
    dprint("collectiveCommUpdate start: before gathering stayInComm at root. Sending %r" % stayInComm)
    if comm.Get_rank() == 0:
        assert stayInComm, "root always stays in comm"
        recvObject = comm.gather(sendobj=True, root=0)
    else:
        comm.gather(sendobj=stayInComm, root=0)

    dprint("  after gather. Before bcast to inform all in old communicatior of ranks to drop")
    if comm.Get_rank()==0:
        droppedRanks = [idx for idx, hasData in enumerate(recvObject) if not hasData]
        comm.bcast(obj=droppedRanks, root=0)
    else:
        droppedRanks = comm.bcast()

    dprint("  after bcast. droppedRanks=%r" % droppedRanks)
    if len(droppedRanks)==0:
        return comm

    assert 0 not in droppedRanks
    group = comm.Get_group()
    newGroup = group.Excl(droppedRanks)
    dprint("  before comm.Create(newGroup) where newGroup excludes ranks %s" % droppedRanks)
    newComm = comm.Create(newGroup)
    dprint("  after comm.Create(newGroup)")
    return newComm


def workerReduce(comm, localHistogram):
    """Worker side of the histogram reduce; contributes localHistogram to the sum at rank 0 (the viewer)."""
    dprint("before Reduce sending=%s" % localHistogram)
    comm.Reduce(sendbuf=[localHistogram, MPI.INT64_T], recvbuf=[None, MPI.INT64_T], op=MPI.SUM, root=0)
    dprint("after Reduce")


class MockDataSource(object):
    """Mock stand-in for a shared memory data source; each worker rank gets a
    rank-dependent number of events so workers finish at different times."""

    def __init__(self, numEventsScaleFactor=1):
        # rank 0 is the viewer, so worker rank r gets numEventsScaleFactor*(r-1) events
        self.remainingEvents = max(0, numEventsScaleFactor * (MPI.COMM_WORLD.Get_rank() - 1))
        dprint("MockDataSource with %d events" % self.remainingEvents)