def schedule(cls, msgs_to_process=None, timers_to_process=None):
    """Process pending messages, then pending timers, up to the given limits.

    Args:
        msgs_to_process: maximum number of messages to deliver before
            returning; None means effectively unbounded (32768).
        timers_to_process: maximum number of timers to pop before
            returning; None means effectively unbounded (32768).

    Returns once either limit is exhausted or there is no work left.
    """
    if msgs_to_process is None:
        msgs_to_process = 32768
    if timers_to_process is None:
        timers_to_process = 32768
    while cls._work_to_do():
        # Drain the message queue first; a delivery may enqueue more messages.
        while cls.queue:
            msg = cls.queue.popleft()
            if isinstance(msg, ResponseMessage):
                # A response cancels any retry timer tied to the request it
                # answers.  response_to may be a forwarded copy that carries
                # the true request in .original_msg; otherwise use it as-is.
                try:
                    reqmsg = msg.response_to.original_msg
                except AttributeError:
                    # Narrowed from a bare `except:` so unrelated errors
                    # (including KeyboardInterrupt) are no longer swallowed.
                    reqmsg = msg.response_to
                cls.remove_req_timer(reqmsg)
            msg.to_node.rcvmsg(msg)
            msgs_to_process -= 1
            if msgs_to_process == 0:
                return
        # Queue is empty: pop at most one timer, which may enqueue new work.
        if TimerManager.pending_count() > 0 and timers_to_process > 0:
            TimerManager.pop_timer()
            timers_to_process -= 1
            if timers_to_process == 0:
                return
def schedule(cls, msgs_to_process=None, timers_to_process=None):
    """Run the scheduler: deliver queued messages, then fire pending timers.

    Both limits default to 32768 (effectively "all").  The method returns
    as soon as either limit is exhausted or no work remains.
    """
    if msgs_to_process is None:
        msgs_to_process = 32768
    if timers_to_process is None:
        timers_to_process = 32768
    while cls._work_to_do():
        _logger.info(
            "Start of schedule: %d (limit %d) pending messages, %d (limit %d) pending timers",
            len(cls.queue), msgs_to_process,
            TimerManager.pending_count(), timers_to_process)
        # Drain everything currently queued; a delivery may enqueue more.
        while cls.queue:
            message = cls.queue.popleft()
            if message.to_node.failed:
                # Destination node is down: record the drop, don't deliver.
                _logger.info("Drop %s->%s: %s as destination down",
                             message.from_node, message.to_node, message)
                History.add("drop", message)
            elif not Framework.reachable(message.from_node, message.to_node):
                # Link between the nodes is cut: record it, don't deliver.
                _logger.info("Drop %s->%s: %s as route down",
                             message.from_node, message.to_node, message)
                History.add("cut", message)
            else:
                _logger.info("Dequeue %s->%s: %s",
                             message.from_node, message.to_node, message)
                if isinstance(message, ResponseMessage):
                    # Cancel any timer tied to the request this answers;
                    # the request may be wrapped in a forwarded copy.
                    try:
                        request = message.response_to.original_msg
                    except Exception:
                        request = message.response_to
                    cls.remove_req_timer(request)
                History.add("deliver", message)
                message.to_node.rcvmsg(message)
            msgs_to_process -= 1
            if msgs_to_process == 0:
                return
        # Queue is empty; fire at most one timer (which may enqueue work).
        if TimerManager.pending_count() > 0 and timers_to_process > 0:
            TimerManager.pop_timer()
            timers_to_process -= 1
            if timers_to_process == 0:
                return
def schedule(cls, msgs_to_process=None, timers_to_process=None):
    """Schedule given number of pending messages.

    Both limits default to 32768 (effectively unbounded); returns when a
    limit is exhausted or there is no more work to do.
    """
    if msgs_to_process is None:
        msgs_to_process = 32768
    if timers_to_process is None:
        timers_to_process = 32768
    while cls._work_to_do():
        _logger.info("Start of schedule: %d (limit %d) pending messages, %d (limit %d) pending timers", len(cls.queue), msgs_to_process, TimerManager.pending_count(), timers_to_process)
        # Process all the queued up messages (which may enqueue more along the way)
        while cls.queue:
            # NOTE(review): in this variant queue entries are (msg, con)
            # pairs; `con` is never used below — presumably a connection
            # handle whose use is still work-in-progress. TODO confirm.
            msg, con = cls.queue.popleft()
            if msg.to_node.failed:
                _logger.info("Drop %s->%s: %s as destination down", msg.from_node, msg.to_node, msg)
                History.add("drop", msg)
            elif not Framework.reachable(msg.from_node, msg.to_node):
                _logger.info("Drop %s->%s: %s as route down", msg.from_node, msg.to_node, msg)
                History.add("cut", msg)
            else:
                _logger.info("Dequeue %s->%s: %s", msg.from_node, msg.to_node, msg)
                if isinstance(msg, ResponseMessage):
                    # figure out the original request this is a response to
                    try:
                        reqmsg = msg.response_to.original_msg
                    except Exception:
                        reqmsg = msg.response_to
                    # cancel any timer associated with the original request
                    cls.remove_req_timer(reqmsg)
                # NOTE(review): the message is recorded as delivered but no
                # rcvmsg()-style delivery call is made here — verify the
                # message actually reaches its destination in this variant.
                History.add("deliver", msg)
            msgs_to_process = msgs_to_process - 1
            if msgs_to_process == 0:
                return
        # No pending messages; potentially pop a (single) timer
        if TimerManager.pending_count() > 0 and timers_to_process > 0:
            # Pop the first pending timer; this may enqueue work
            TimerManager.pop_timer()
            timers_to_process = timers_to_process - 1
            if timers_to_process == 0:
                return
def schedule(cls, msgs_to_process=None, timers_to_process=None):
    """Schedule given number of pending messages.

    Variant that sends each message to a remote node over zerorpc
    (pickled payload) instead of delivering in-process.  Both limits
    default to 32768 (effectively unbounded).
    """
    if msgs_to_process is None:
        msgs_to_process = 32768
    if timers_to_process is None:
        timers_to_process = 32768
    while cls._work_to_do():
        _logger.info("Start of schedule: %d (limit %d) pending messages, %d (limit %d) pending timers", len(cls.queue), msgs_to_process, TimerManager.pending_count(), timers_to_process)
        # Process all the queued up messages (which may enqueue more along the way)
        while cls.queue:
            msg = cls.queue.popleft()
            if msg.to_node in cls.block:
                # Destination is already on the block list: drop without
                # attempting to connect.
                _logger.info("Drop %s->%s: %s as destination down", msg.from_node, msg.to_node, msg)
                History.add("drop", msg)
            else:
                try:
                    # 1-second connect timeout; to_node appears to be a
                    # "host:port" string here — TODO confirm against callers.
                    c = zerorpc.Client(timeout=1)
                    c.connect('tcp://' + msg.to_node)
                except zerorpc.TimeoutExpired:
                    _logger.info("Drop %s->%s: %s as destination down", msg.from_node, msg.to_node, msg)
                    History.add("drop", msg)
                    continue
                if not Framework.reachable(msg.from_node, msg.to_node):
                    # NOTE(review): unlike the drop cases above there is no
                    # `continue` here, so a message recorded as "cut" still
                    # falls through to the send below — verify intended.
                    _logger.info("Drop %s->%s: %s as route down", msg.from_node, msg.to_node, msg)
                    History.add("cut", msg)
                    _logger.info("Dequeue %s->%s: %s", msg.from_node, msg.to_node, msg)
                elif isinstance(msg, ResponseMessage):
                    # figure out the original request this is a response to
                    try:
                        reqmsg = msg.response_to.original_msg
                    except Exception:
                        reqmsg = msg.response_to
                    # cancel any timer associated with the original request
                    cls.remove_req_timer(reqmsg)
                History.add("deliver", msg)
                # NOTE(review): pickled payloads over the network are unsafe
                # against untrusted peers (pickle.loads runs arbitrary code).
                m = pickle.dumps(msg)
                try:
                    c.rcvmsg(m)
                    c.close()
                except:
                    # NOTE(review): bare except, and Python 2 print statements
                    # below while other variants in this file use print().
                    # On send failure the message is requeued and every other
                    # node is notified (via BlockRsp) that to_node looks down.
                    print 'time out'
                    cls.queue.append(msg)
                    print cls.block
                    for node in cls.nodeList:
                        if node != msg.to_node:
                            bmsg = BlockRsp(from_node=msg.from_node, to_node=node, key=msg.to_node, msg_id=None)
                            cls.queue.append(bmsg)
            msgs_to_process = msgs_to_process - 1
            if msgs_to_process == 0:
                return
        # No pending messages; potentially pop a (single) timer
        if TimerManager.pending_count() > 0 and timers_to_process > 0:
            # Pop the first pending timer; this may enqueue work
            TimerManager.pop_timer()
            timers_to_process = timers_to_process - 1
            if timers_to_process == 0:
                return