def checkreload():
    """Check for a pending reload request and keep the set of subscribed
    data sources up to date."""
    global subscribed
    if ipc.zmq() is not None:
        if ipc.zmq().reloadmaster == True:
            ipc.zmq().reloadmaster = False
            if reload_comm is not None:
                for i in xrange(1, size):
                    reload_comm.send(['__reload__'], i)
            return True
    if is_slave():
        if reload_comm.Iprobe():
            msg = reload_comm.recv()
            if msg[0] == '__reload__':
                logging.debug('Got reload')
                return True
            elif msg[0] == '__subscribed__':
                logging.debug('Got subscribed %s' % msg[1])
                subscribed = msg[1]
    return False
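# The sketch below is a hedged illustration of the control messages that
# checkreload() reacts to on a slave: ['__reload__'] requests a code reload,
# while ['__subscribed__', <set of md5 digests>] updates the set of data
# sources the interface wants. A plain Python list stands in for reload_comm
# here; handle_control_message() and inbox are illustrative names, not part
# of the module above.
import hashlib
import logging

subscribed = set()

def handle_control_message(msg):
    # Mirrors the slave branch of checkreload(): return True on a reload
    # request, otherwise remember the new subscription set.
    global subscribed
    if msg[0] == '__reload__':
        logging.debug('Got reload')
        return True
    elif msg[0] == '__subscribed__':
        logging.debug('Got subscribed %s' % msg[1])
        subscribed = msg[1]
    return False

inbox = [['__subscribed__', {hashlib.md5(b'Hit rate').digest()}],
         ['__reload__']]
for message in inbox:
    print(message[0], '->', handle_control_message(message))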
def new_data(title, data_y, mpi_reduce=False, **kwds):
    """Send a new data item, which will be appended to any existing values
    at the interface. If mpi_reduce is True data_y will be summed over all
    the slaves. All keyword pairs given will also be transmitted and
    available at the interface."""
    global sent_time
    _check_type(title, data_y)
    event_id = evt.event_id()
    # If send_rate is given, limit the send rate to it
    if 'send_rate' in kwds and kwds['send_rate'] is not None:
        send_rate = float(kwds['send_rate']) / ipc.mpi.nr_workers()
        cur_time = event_id
        if title in sent_time:
            send_probability = (cur_time - sent_time[title]) * send_rate
        else:
            send_probability = 1
        sent_time[title] = cur_time
        if numpy.random.random() > send_probability:
            # Do not send the data
            return
    if ipc.mpi.is_slave():
        if mpi_reduce:
            ipc.mpi.send_reduce(title, 'new_data', data_y, event_id, **kwds)
        else:
            m = hashlib.md5()
            m.update(title.encode('UTF-8'))
            if m.digest() in ipc.mpi.subscribed:
                ipc.mpi.send(title, [ipc.uuid, 'new_data', title, data_y,
                                     event_id, kwds])
            else:
                logging.debug('%s not subscribed, not sending' % (title))
    else:
        ipc.zmq().send(title, [ipc.uuid, 'new_data', title, data_y,
                               event_id, kwds])
        logging.debug("Sending data on source '%s'" % title)
        if data_conf[title]["data_type"] == "scalar":
            ipc.influx.write(title, data_y, event_id, kwds)
def new_data(title, data_y, mpi_reduce=False, **kwds):
    """Send a new data item, which will be appended to any existing values
    at the interface. If mpi_reduce is True data_y will be summed over all
    the slaves. All keyword pairs given will also be transmitted and
    available at the interface."""
    global sent_time
    _check_type(title, data_y)
    event_id = evt.event_id()
    # If send_rate is given, limit the send rate to it
    if 'send_rate' in kwds and kwds['send_rate'] is not None:
        send_rate = float(kwds['send_rate']) / ipc.mpi.nr_workers()
        cur_time = event_id
        if title in sent_time:
            send_probability = (cur_time - sent_time[title]) * send_rate
        else:
            send_probability = 1
        sent_time[title] = cur_time
        if numpy.random.random() > send_probability:
            # Do not send the data
            return
    if ipc.mpi.is_slave():
        if mpi_reduce:
            ipc.mpi.send_reduce(title, 'new_data', data_y, event_id, **kwds)
        else:
            m = hashlib.md5()
            m.update(bytes(title))
            if m.digest() in ipc.mpi.subscribed:
                ipc.mpi.send(title, [ipc.uuid, 'new_data', title, data_y,
                                     event_id, kwds])
            else:
                logging.debug('%s not subscribed, not sending' % (title))
    else:
        ipc.zmq().send(title, [ipc.uuid, 'new_data', title, data_y,
                               event_id, kwds])
        logging.debug("Sending data on source '%s'" % title)
        if data_conf[title]["data_type"] == "scalar":
            ipc.influx.write(title, data_y, event_id, kwds)
def new_data(title, data_y, mpi_reduce=False, **kwds):
    """Send a new data item, which will be appended to any existing values
    at the interface. If mpi_reduce is True data_y will be summed over all
    the slaves. All keyword pairs given will also be transmitted and
    available at the interface."""
    _check_type(title, data_y)
    event_id = evt.event_id()
    if ipc.mpi.is_slave():
        if mpi_reduce:
            ipc.mpi.send_reduce(title, 'new_data', data_y, event_id, **kwds)
        else:
            m = hashlib.md5()
            m.update(bytes(title))
            if m.digest() in ipc.mpi.subscribed:
                ipc.mpi.send(title, [ipc.uuid, 'new_data', title, data_y,
                                     event_id, kwds])
            else:
                logging.debug('%s not subscribed, not sending' % (title))
    else:
        ipc.zmq().send(title, [ipc.uuid, 'new_data', title, data_y,
                               event_id, kwds])
        logging.debug("Sending data on source '%s'" % title)
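# Hedged sketch of the send_rate throttling used in the first two new_data()
# variants above: sent_time remembers the last event_id at which a title was
# offered, and the probability of actually broadcasting scales with the
# event_id gap times send_rate (divided by the number of workers). With
# consecutive events roughly a fraction send_rate of them gets through.
# should_send() and its arguments are illustrative stand-ins, not the
# module's API.
import numpy

sent_time = {}

def should_send(title, event_id, send_rate, nr_workers=1):
    rate = float(send_rate) / nr_workers
    if title in sent_time:
        send_probability = (event_id - sent_time[title]) * rate
    else:
        send_probability = 1
    sent_time[title] = event_id
    # new_data() returns early (does not send) when the random draw exceeds
    # the probability; sending happens in the opposite case.
    return numpy.random.random() <= send_probability

sent = sum(should_send('Hit rate', event_id, send_rate=0.1)
           for event_id in range(1000))
print('sent %d of 1000 events' % sent)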
def master_loop():
    """Run the main loop on the master process. It retransmits all received
    messages using its zmqserver and handles any possible reductions."""
    status = MPI.Status()
    msg = comm.recv(None, MPI.ANY_SOURCE, status=status)
    if msg[0] == '__data_conf__':
        ipc.broadcast.data_conf.update(msg[1])
    elif msg[0] == '__reduce__':
        cmd = msg[1]
        # msg[2] carries the shape of the reduced array, or () for scalars
        if msg[2] != ():
            data_y = numpy.zeros(msg[2])
        else:
            data_y = 0
        incomingdata = msg[3]
        getback = msg[4]
        source = status.Get_source()
        # This indicates that we really should have an object for the state
        if cmd not in reducedata:
            reducedata[cmd] = {}
        reducedata[cmd][source] = incomingdata
        if getback:
            for data in reducedata[cmd]:
                data_y = data_y + reducedata[cmd][data]
            comm.send(data_y, source)
    elif msg[0] == '__exit__':
        slavesdone.append(True)
        logging.warning("Slave with rank = %d reports to be done" % msg[1])
        if len(slavesdone) == nr_workers():
            MPI.Finalize()
            return True
    else:
        # Inject a proper UUID
        msg[1][0] = ipc.uuid
        ipc.zmq().send(msg[0], msg[1])
def master_loop():
    """Run the main loop on the master process. It retransmits all received
    messages using its zmqserver and handles any possible reductions."""
    status = MPI.Status()
    msg = comm.recv(None, MPI.ANY_SOURCE, status=status)
    if msg[0] == '__data_conf__':
        ipc.broadcast.data_conf.update(msg[1])
    elif msg[0] == '__reduce__':
        cmd = msg[1]
        # msg[2] carries the shape of the reduced array, or () for scalars
        if msg[2] != ():
            data_y = numpy.zeros(msg[2])
        else:
            data_y = 0
        incomingdata = msg[3]
        getback = msg[4]
        source = status.Get_source()
        # This indicates that we really should have an object for the state
        if cmd not in reducedata:
            reducedata[cmd] = {}
        reducedata[cmd][source] = incomingdata
        if getback:
            for data in reducedata[cmd]:
                data_y = data_y + reducedata[cmd][data]
            comm.send(data_y, source)
    elif msg[0] == '__exit__':
        slavesdone.append(True)
        logging.info("Slave with rank = %d reports to be done" % msg[1])
        if len(slavesdone) == nr_slaves():
            MPI.Finalize()
            return True
    else:
        # Inject a proper UUID
        msg[1][0] = ipc.uuid
        ipc.zmq().send(msg[0], msg[1])
def master_loop():
    """Run the main loop on the master process. It retransmits all received
    messages using its zmqserver and handles any possible reductions."""
    msg = comm.recv(None, MPI.ANY_SOURCE)
    if msg[0] == '__data_conf__':
        ipc.broadcast.data_conf.update(msg[1])
    elif msg[0] == '__reduce__':
        title = msg[1]
        cmd = msg[2]
        if msg[3] != ():
            data_y = numpy.zeros(msg[3])
        else:
            data_y = 0
        data_x = msg[4]
        kwds = msg[5]
        data_y = comm.reduce(data_y)
        if isinstance(data_y, numbers.Number):
            print("[%s] - %g" % (title, data_y))
        ipc.zmq().send(title, [ipc.uuid, cmd, title, data_y, data_x, kwds])
    else:
        # Inject a proper UUID
        msg[1][0] = ipc.uuid
        ipc.zmq().send(msg[0], msg[1])
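# Hedged sketch of the '__reduce__' bookkeeping used by the first two
# master_loop() variants: the master stores the latest contribution per slave
# rank under the reduction's command name and, when a slave asks for the
# result back (getback), sums over every rank it has heard from so far.
# handle_reduce() and the literal ranks below are illustrative only; the real
# traffic goes through comm.recv()/comm.send().
import numpy

reducedata = {}

def handle_reduce(cmd, shape, incomingdata, getback, source):
    # Keep only the newest contribution from each rank.
    reducedata.setdefault(cmd, {})[source] = incomingdata
    if not getback:
        return None
    # Accumulate over all stored contributions, starting from zeros of the
    # advertised shape (or a plain 0 for scalars).
    total = numpy.zeros(shape) if shape != () else 0
    for rank in reducedata[cmd]:
        total = total + reducedata[cmd][rank]
    return total

handle_reduce('hitcount', (), 3, False, source=1)
handle_reduce('hitcount', (), 5, False, source=2)
print(handle_reduce('hitcount', (), 4, True, source=1))  # prints 9 (4 + 5)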