def handle_recv(self, data): """called each time circusd sends an event""" # maintains a periodic callback to compute mem and cpu consumption for # each pid. logger.debug('Received an event from circusd: %s' % str(data)) topic, msg = data try: topic = s(topic) watcher = topic.split('.')[1:-1][0] action = topic.split('.')[-1] msg = json.loads(msg) if action in ('reap', 'kill'): # a process was reaped pid = msg['process_pid'] self.remove_pid(watcher, pid) elif action == 'spawn': # a process was added pid = msg['process_pid'] self._append_pid(watcher, pid) elif action == 'stop': # the whole watcher was stopped. self.stop_watcher(watcher) else: logger.debug('Unknown action: %r' % action) logger.debug(msg) except Exception: logger.exception('Failed to handle %r' % msg)
def handle_recv(self, data): """called each time circusd sends an event""" # maintains a periodic callback to compute mem and cpu consumption for # each pid. logger.debug("Received an event from circusd: %s" % data) topic, msg = data try: __, watcher, action = topic.split(".") msg = json.loads(msg) if action == "start" or (action != "start" and self.stopped): self._init() if action in ("reap", "kill"): # a process was reaped pid = msg["process_pid"] self.remove_pid(watcher, pid) elif action == "spawn": pid = msg["process_pid"] self.append_pid(watcher, pid) elif action == "start": self._init() elif action == "stop": self.stop() else: logger.debug("Unknown action: %r" % action) logger.debug(msg) except Exception: logger.exception("Failed to handle %r" % msg)
def handle_recv(self, data): """called each time circusd sends an event""" # maintains a periodic callback to compute mem and cpu consumption for # each pid. logger.debug('Received an event from circusd: %s' % data) topic, msg = data try: __, watcher, action = topic.split('.') msg = json.loads(msg) if action == 'start' or (action != 'start' and self.stopped): self._init() if action in ('reap', 'kill'): # a process was reaped pid = msg['process_pid'] self.remove_pid(watcher, pid) elif action == 'spawn': pid = msg['process_pid'] self.append_pid(watcher, pid) elif action == 'start': self._init() elif action == 'stop': self.stop() else: logger.debug('Unknown action: %r' % action) logger.debug(msg) except Exception: logger.exception('Failed to handle %r' % msg)
def collect_stats(self):
    aggregate = {}

    # sending by pids
    for pid in self.streamer.get_pids(self.name):
        name = None
        if self.name == 'circus':
            if pid in self.streamer.circus_pids:
                name = self.streamer.circus_pids[pid]

        try:
            info = util.get_info(pid)
            aggregate[pid] = info
            info['subtopic'] = pid
            info['name'] = name
            yield info
        except util.NoSuchProcess:
            # the process is gone !
            pass
        except Exception as e:
            logger.exception('Failed to get info for %d. %s'
                             % (pid, str(e)))

    # now sending the aggregation
    yield self._aggregate(aggregate)

def handle_recv(self, data): """called each time circusd sends an event""" # maintains a periodic callback to compute mem and cpu consumption for # each pid. logger.debug('Received an event from circusd: %s' % data) topic, msg = data try: watcher = topic.split('.')[1:-1][0] action = topic.split('.')[-1] msg = json.loads(msg) if action in ('reap', 'kill'): # a process was reaped pid = msg['process_pid'] self.remove_pid(watcher, pid) elif action == 'spawn': # a process was added pid = msg['process_pid'] self._append_pid(watcher, pid) elif action == 'stop': # the whole watcher was stopped. self.stop_watcher(watcher) else: logger.debug('Unknown action: %r' % action) logger.debug(msg) except Exception: logger.exception('Failed to handle %r' % msg)
def call_hook(self, hook_name, **kwargs): """Call a hook function""" hook_kwargs = { 'watcher': self, 'arbiter': self.arbiter, 'hook_name': hook_name } hook_kwargs.update(kwargs) if hook_name in self.hooks: try: result = self.hooks[hook_name](**hook_kwargs) self.notify_event("hook_success", { "name": hook_name, "time": time.time() }) except Exception as error: logger.exception('Hook %r failed' % hook_name) result = hook_name in self.ignore_hook_failure self.notify_event("hook_failure", { "name": hook_name, "time": time.time(), "error": str(error) }) return result else: return True
def call_hook(self, hook_name): """Call a hook function""" kwargs = {"watcher": self, "arbiter": self.arbiter, "hook_name": hook_name} if hook_name in self.hooks: try: result = self.hooks[hook_name](**kwargs) error = None self.notify_event("hook_success", {"name": hook_name, "time": time.time()}) except Exception, error: logger.exception("Hook %r failed" % hook_name) result = hook_name in self.ignore_hook_failure self.notify_event("hook_failure", {"name": hook_name, "time": time.time(), "error": str(error)}) return result
def collect_stats(self, watcher, pids):
    aggregate = {}

    # sending by pids
    for pid in pids:
        try:
            info = util.get_info(pid)
            aggregate[pid] = info
            yield (watcher, pid, info)
        except util.NoSuchProcess:
            # the process is gone !
            pass
        except Exception as e:
            logger.exception('Failed to get info for %d. %s'
                             % (pid, str(e)))

def clean_stop(watcher, arbiter, hook_name, pid, signum, **kwargs):
    if (len(watcher.processes) > watcher.numprocesses
            and signum == signal.SIGQUIT):
        name = watcher.name
        started = watcher.processes[pid].started
        newer_pids = [p for p, w in watcher.processes.items()
                      if p != pid and w.started > started]

        # if the one being stopped is actually the newer one, just do it
        if len(newer_pids) < watcher.numprocesses:
            return True

        wid = watcher.processes[pid].wid
        logger.info('%s pausing', name)
        watcher.send_signal(pid, signal.SIGTSTP)
        try:
            wait_for_workers(name, wid, 'paused')
            logger.info('%s workers idle', name)
        except Exception as e:
            logger.exception('trouble pausing %s: %s', name, e)
    return True

def call_hook(self, hook_name, **kwargs): """Call a hook function""" hook_kwargs = {'watcher': self, 'arbiter': self.arbiter, 'hook_name': hook_name} hook_kwargs.update(kwargs) if hook_name in self.hooks: try: result = self.hooks[hook_name](**hook_kwargs) error = None self.notify_event("hook_success", {"name": hook_name, "time": time.time()}) except Exception, error: logger.exception('Hook %r failed' % hook_name) result = hook_name in self.ignore_hook_failure self.notify_event("hook_failure", {"name": hook_name, "time": time.time(), "error": str(error)}) return result
def clean_stop(watcher, arbiter, hook_name, pid, signum, **kwargs):
    if (len(watcher.processes) > watcher.numprocesses
            and signum == signal.SIGQUIT):
        name = watcher.name
        started = watcher.processes[pid].started
        newer_pids = [
            p for p, w in watcher.processes.items()
            if p != pid and w.started > started
        ]

        # if the one being stopped is actually the newer one, just do it
        if len(newer_pids) < watcher.numprocesses:
            return True

        wid = watcher.processes[pid].wid
        logger.info('%s pausing', name)
        watcher.send_signal(pid, signal.SIGTSTP)
        try:
            wait_for_workers(name, wid, 'paused')
            logger.info('%s workers idle', name)
        except Exception as e:
            logger.exception('trouble pausing %s: %s', name, e)
    return True

def run(self):
    self.running = True
    results = self.streamer.results
    logger.debug('Starting the Publisher')

    while self.running:
        try:
            watcher, name, pid, stat = results.get(timeout=self.delay)
            topic = b'stat.%s' % str(watcher)
            if pid is not None:
                topic += '.%d' % pid
            stat['name'] = name
            self.socket.send_multipart([topic, json.dumps(stat)])
        except zmq.ZMQError:
            if self.socket.closed:
                self.running = False
            else:
                raise
        except Queue.Empty:
            pass
        except Exception:
            logger.exception('Failed to get some data from the queue')

def collect_stats(self):
    aggregate = {}

    # sending by pids
    for pid in self.streamer.get_pids(self.name):
        name = None
        if self.name == "circus":
            if pid in self.streamer.circus_pids:
                name = self.streamer.circus_pids[pid]

        try:
            info = util.get_info(pid)
            aggregate[pid] = info
            info["subtopic"] = pid
            info["name"] = name
            yield info
        except util.NoSuchProcess:
            # the process is gone !
            pass
        except Exception as e:
            logger.exception("Failed to get info for %d. %s"
                             % (pid, str(e)))

def run(self):
    self.running = True
    while self.running:
        aggregate = {}

        # sending by pids
        for name, pid in self._get_pids():
            try:
                info = util.get_info(pid, interval=0.0)
                aggregate[pid] = info
            except util.NoSuchProcess:
                # the process is gone !
                pass
            except Exception:
                logger.exception('Failed to get info for %d' % pid)
            else:
                self.results.put((self.name, name, pid, info))

        # now sending the aggregation
        self.results.put(
            (self.name, None, None, self._aggregate(aggregate)))

        # sleep for accuracy
        time.sleep(self.interval)

def handle_recv(self, data):
    topic, msg = data
    try:
        __, watcher, action = topic.split('.')
        msg = json.loads(msg)

        if action != 'start' and self.stopped:
            self._init()

        if action in ('reap', 'kill'):
            # a process was reaped
            pid = msg['process_pid']
            self.remove_pid(watcher, pid)
        elif action == 'spawn':
            pid = msg['process_pid']
            self.append_pid(watcher, pid)
        elif action == 'start':
            self._init()
        elif action == 'stop':
            # nothing to do
            self.stopped = True
        else:
            logger.debug('Unknown action: %r' % action)
            logger.debug(msg)
    except Exception:
        logger.exception('Failed to treat %r' % msg)

def run(self):
    self.running = True
    while self.running:
        aggregate = {}

        # sending by pids
        for name, pid in self._get_pids():
            try:
                info = util.get_info(pid, interval=0.0)
                aggregate[pid] = info
                self.results.put((self.name, name, pid, info))
            except util.NoSuchProcess:
                # the process is gone !
                pass
            except Exception as e:
                logger.exception('Failed to get info for %d. %s'
                                 % (pid, str(e)))

        # now sending the aggregation
        self.results.put((self.name, None, None,
                          self._aggregate(aggregate)))

        # sleep for accuracy
        time.sleep(self.interval)