def _handle_event(self):
    """Read one supervisor event from stdin and dispatch it.

    PROCESS_COMMUNICATION_STDOUT payloads consist of a header line and a
    JSON body with a ``type`` key; the event is dispatched to a matching
    ``handle_<type>`` method.  TICK_5 events accumulate until
    ``self.interval`` seconds have elapsed, then ``check_processes`` runs.
    The event is always acknowledged so supervisor can send the next one.
    """
    headers, payload = childutils.listener.wait(self.stdin, self.stdout)
    try:
        self.logger.debug("Event '%s' received: %r",
                          headers['eventname'], payload)
        if headers['eventname'] == PROCESS_COMMUNICATION_STDOUT:
            payload_raw_headers, payload_data = payload.split("\n", 1)
            payload_headers = childutils.get_headers(payload_raw_headers)
            event_type = None
            try:
                data = json.loads(payload_data)
                event_type = data.pop('type')
                method = getattr(self, 'handle_%s' % (event_type,), None)
                if method is None:
                    # BUG FIX: the original called the getattr() result
                    # unconditionally, raising TypeError on an unknown
                    # event type instead of a descriptive error.
                    raise ValueError(
                        "no handler for event type %r" % (event_type,))
                method(payload_headers['processname'],
                       payload_headers['groupname'],
                       payload_headers['pid'], data)
            except Exception:
                # failed to parse the payload or to dispatch the event
                # (was Python-2-only ``except Exception, e`` syntax)
                self.logger.exception(
                    "Unable to handle event type '%s' - %r",
                    event_type, payload)
        # handle the tick events to fire a check on the processes.
        elif headers['eventname'] == TICK_5:
            self.tick_count += 1
            if self.tick_count * 5 >= self.interval:
                self.tick_count = 0
                try:
                    self.check_processes()
                except Exception:
                    self.logger.exception(
                        "Oops, failed to check the processes.")
    finally:
        # Ack the event ("RESULT 2\nOK") so supervisor delivers the next
        # one — the companion tests expect exactly this on stdout.
        childutils.listener.ok(self.stdout)
def main():
    """Entry point: start supervised programs in their configured order.

    Locates the supervisord config (argv[1] or a default), builds a
    StartupPlan, then listens for PROCESS_STATE events and starts each
    program once its predecessor reaches its configured target state.
    """
    logging.basicConfig(filename='ordered_startup.log', level=logging.DEBUG)
    log = logging.getLogger('ordered_startup_supervisord.main')
    try:
        config_file = None
        if len(sys.argv) > 1:
            config_file = sys.argv[1]
        if config_file is None:
            config_file = get_default_config_file()
        if config_file is None:
            print("Unable to find a config file!", file=sys.stderr)
            sys.exit(1)
        if not os.path.exists(config_file):
            print("Config path {} does not exist!".format(config_file),
                  file=sys.stderr)
            sys.exit(1)
        parser = UnhosedConfigParser()
        parser.read(get_all_configs(config_file))
        startup_plan = StartupPlan(parser)
        rpcinterface = childutils.getRPCInterface(os.environ)
        log.info("programs in order: ")
        for prog in startup_plan.programs:
            log.info(prog.name)
        # Kick off the first program ourselves unless supervisor
        # autostarts it.
        if not startup_plan.programs[0].options.autostart:
            rpcinterface.supervisor.startProcess(
                startup_plan.programs[0].name, False)
        initial_start = 'STARTED'
        while 1:
            headers, payload = childutils.listener.wait()
            if headers['eventname'].startswith(
                    'PROCESS_STATE') and initial_start != 'FINISHED':
                pheaders = childutils.get_headers(payload)
                log.debug("headers = {}".format(repr(headers)))
                log.debug("payload = {}".format(repr(pheaders)))
                state = headers['eventname'][len('PROCESS_STATE_'):]
                start_next = False
                for program in startup_plan.programs:
                    if start_next:
                        log.info("Starting process: {}".format(program.name))
                        rpcinterface.supervisor.startProcess(program.name)
                        start_next = False
                        break
                    if (program.options.startinorder
                            and program.name == pheaders['processname']
                            and program.options.startnextafter == state):
                        log.info(
                            "Received process state of {} from {}, starting next process."
                            .format(state, program.name))
                        start_next = True
                else:
                    # for/else: loop ran out without a break, so the
                    # program that just reached its state was the last.
                    if start_next:
                        log.info(
                            "No more processes to start for initial startup, ignoring all future events."
                        )
                        initial_start = 'FINISHED'
            childutils.listener.ok()
    except Exception:
        # BUG FIX: was a bare ``except:`` which also caught the
        # SystemExit raised by sys.exit() above, logging the intended
        # exit as an error and preventing the process from exiting.
        log.error("ERROR: ", exc_info=sys.exc_info())
def handle_event(self, headers, payload):
    """Process one supervisor event, driving ordered service startup."""
    event_str = self.get_event_str(headers, payload)
    if self.startup_done:
        return
    log.info("")
    log.info("New event: %s" % event_str)
    if not headers['eventname'].startswith('PROCESS_STATE'):
        return
    # startup_done is known False at this point (checked above).
    pheaders = childutils.get_headers(payload)
    state = process_states.process_state_event_to_string(
        headers['eventname'])
    event_process = pheaders['processname']
    log.debug("Event from service '%s' (%s)" % (event_process, state))
    log.debug("headers = {}".format(repr(headers)))
    log.debug("payload = {}".format(repr(pheaders)))
    self.services_handler.update_state_event(event_process, state)
    self.services_handler.update_proc_info_all()
    if self.start_first:
        log.info("Starting immediately: %s" % self.start_first)
        self.services_handler.start_service(self.start_first, wait=False)
        log.info("Starting ordered services")
        self.services_handler.update_proc_info_service(self.start_first)
        self.start_first = None
    self.start_services()
def test_handle_event(self):
    """Test handle_event method."""
    # record every dispatch made to handle_heartbeat
    calls = []

    def fake_heartbeat(process_name, group_name, pid, payload):
        """Fake handle_heartbeat that records its arguments."""
        calls.append((process_name, group_name, pid, payload))

    self.listener.handle_heartbeat = fake_heartbeat
    payload_dict = {u"time": time.time(), "type": "heartbeat"}
    raw_data = ("processname:ticker groupname:ticker pid:42\n" +
                json.dumps(payload_dict))
    raw_header = ("ver:3.0 server:supervisor serial:1 pool:listener "
                  "poolserial:10 eventname:PROCESS_COMMUNICATION_STDOUT"
                  " len:%s\n" % len(raw_data))
    self.stdin.write(raw_header + raw_data)
    self.stdin.seek(0)
    headers = childutils.get_headers(raw_header)
    self.listener._handle_event()
    # exactly one dispatch, with the 'type' key stripped from the data
    self.assertEqual(1, len(calls))
    expected = dict(payload_dict)
    del expected['type']
    self.assertEqual(('ticker', 'ticker', '42', expected), calls[0])
    self.assertTrue(self.handler.check_debug(
        "Event '%s' received: %r" % (headers['eventname'], raw_data)))
    # the listener protocol lines written to stdout
    self.assertEqual(["READY", "RESULT 2", "OK"],
                     self.stdout.getvalue().split("\n"))
def test_handle_event(self):
    """Test handle_event method."""
    # collect each heartbeat dispatch as a tuple
    received = []

    def fake_heartbeat(process_name, group_name, pid, payload):
        """Fake handle_heartbeat that stores its arguments."""
        received.append((process_name, group_name, pid, payload))

    self.listener.handle_heartbeat = fake_heartbeat
    payload_dict = {u"time": time.time(), "type": "heartbeat"}
    raw_data = ("processname:ticker groupname:ticker pid:42\n" +
                json.dumps(payload_dict))
    raw_header = ("ver:3.0 server:supervisor serial:1 pool:listener "
                  "poolserial:10 eventname:PROCESS_COMMUNICATION_STDOUT"
                  " len:%s\n" % len(raw_data))
    self.stdin.write(raw_header + raw_data)
    self.stdin.seek(0)
    headers = childutils.get_headers(raw_header)
    self.listener._handle_event()
    # a single dispatch occurred, without the 'type' key
    self.assertEqual(1, len(received))
    expected = dict(payload_dict)
    del expected['type']
    self.assertEqual(('ticker', 'ticker', '42', expected), received[0])
    self.handler.assert_debug(
        "Event '%s' received: %r" % (headers['eventname'], raw_data))
    # the listener protocol lines written to stdout
    self.assertEqual(["READY", "RESULT 2", "OK"],
                     self.stdout.getvalue().split("\n"))
def _handle_event(self):
    """Read one supervisor event from stdin and dispatch it.

    PROCESS_COMMUNICATION_STDOUT payloads consist of a header line and a
    JSON body with a ``type`` key; the event is dispatched to a matching
    ``handle_<type>`` method.  TICK_5 events accumulate until
    ``self.interval`` seconds have elapsed, then ``check_processes`` runs.
    The event is always acknowledged so supervisor can send the next one.
    """
    headers, payload = childutils.listener.wait(self.stdin, self.stdout)
    try:
        self.logger.debug("Event '%s' received: %r",
                          headers['eventname'], payload)
        if headers['eventname'] == PROCESS_COMMUNICATION_STDOUT:
            payload_raw_headers, payload_data = payload.split("\n", 1)
            payload_headers = childutils.get_headers(payload_raw_headers)
            event_type = None
            try:
                data = json.loads(payload_data)
                event_type = data.pop('type')
                method = getattr(self, 'handle_%s' % (event_type,), None)
                if method is None:
                    # BUG FIX: the original called the getattr() result
                    # unconditionally, raising TypeError on an unknown
                    # event type instead of a descriptive error.
                    raise ValueError(
                        "no handler for event type %r" % (event_type,))
                method(payload_headers['processname'],
                       payload_headers['groupname'],
                       payload_headers['pid'], data)
            except Exception:
                # failed to parse the payload or to dispatch the event
                # (was Python-2-only ``except Exception, e`` syntax)
                self.logger.exception(
                    "Unable to handle event type '%s' - %r",
                    event_type, payload)
        # handle the tick events to fire a check on the processes.
        elif headers['eventname'] == TICK_5:
            self.tick_count += 1
            if self.tick_count * 5 >= self.interval:
                self.tick_count = 0
                try:
                    self.check_processes()
                except Exception:
                    self.logger.exception(
                        "Oops, failed to check the processes.")
    finally:
        # Ack the event ("RESULT 2\nOK") so supervisor delivers the next
        # one — the companion tests expect exactly this on stdout.
        childutils.listener.ok(self.stdout)
def get_event_str(self, headers, payload, short=True):
    """Render a supervisor event as a human-readable string.

    With ``short`` (the default) a one-line state-transition summary is
    returned; otherwise the raw headers and payload are dumped.
    """
    pheaders = childutils.get_headers(payload)
    pheaders.update(headers)
    if not short:
        return "headers: %s, payload: %s" % (headers, payload)
    new_state = process_states.process_state_event_to_string(
        pheaders['eventname'])
    return "Service %s went from %s to %s" % (
        pheaders['processname'], pheaders['from_state'], new_state)
def handle(self, headers, payload):
    """Map a supervisor process-state event onto an HaProxy status change.

    Unsupported events and unwatched programs are acked immediately.
    After repeated refused connections to the HaProxy stats socket,
    event handling is suspended for SKIP_TIMEOUT_AFTER_CONNECTION_REFUSED.
    Always finishes via self.ok() / self.fail() per the listener protocol.
    """
    event = headers.get('eventname')
    action = self.STATE_ACTIONS.get(event, None)
    if action is None:
        # Event is not supported.
        return self.ok()
    data = childutils.get_headers(payload)
    program_info = self.programs.get(data.get('processname'), None)
    if program_info is None:
        # We are not watching this program.
        return self.ok()
    self.log('{date} Received {event} (from {from_state})'
             ' for {supervisor_program},'
             ' sending {action} for '
             '{haproxy_backend}/{haproxy_server}.\n'.format(
                 date=datetime.now().isoformat(),
                 event=event,
                 from_state=data.get('from_state', '?'),
                 action=action,
                 **program_info))
    if self.skip_until is not None:
        if self.skip_until < datetime.now():
            # cool-down elapsed: resume normal handling
            self.consecutive_refused_connections = 0
            self.skip_until = None
            self.log('WARNING: resuming event handling.')
        else:
            self.log('WARNING: skipping event handling because too many'
                     ' connections were refused previously.')
            return self.ok()
    try:
        self.haproxy_control.set_server_status(
            program_info['haproxy_backend'],
            program_info['haproxy_server'], action)
    except HaProxyConnectionRefused:
        # BUG FIX: was Python-2-only ``except HaProxyConnectionRefused,
        # exc`` syntax (a SyntaxError on Python 3); ``exc`` was unused.
        self.consecutive_refused_connections += 1
        if (MAX_CONSECUTIVE_CONNECTION_REFUSED
                >= self.consecutive_refused_connections):
            self.log('ERROR: connection to HaProxy stats socket refused')
            return self.fail()
        self.log('WARNING: too many connections were refused,'
                 ' therefore this and all future events will be'
                 ' skipped unhandled for the next {} seconds.'.format(
                     SKIP_TIMEOUT_AFTER_CONNECTION_REFUSED.seconds))
        self.skip_until = (datetime.now()
                           + SKIP_TIMEOUT_AFTER_CONNECTION_REFUSED)
    return self.ok()
def wait(self, stdin=sys.stdin, stdout=sys.stdout):
    """Block until a complete supervisor event arrives on *stdin*.

    Sends READY on *stdout*, then returns ``(headers, payload)``.
    NOTE: the defaults bind sys.stdin/sys.stdout at definition time.
    """
    self.ready(stdout)
    while 1:
        # BUG FIX: the original selected on the global sys.stdin even
        # when a different ``stdin`` object was passed in, so a custom
        # stream would block forever (compare readline/read below,
        # which already use the parameter).
        if select.select([stdin], [], [])[0]:
            line = stdin.readline()
            if line is not None:
                # NOTE(review): readline() returns '' at EOF, never
                # None, so this branch is always taken — confirm intent.
                sys.stderr.write("wokeup and found a line\n")
                break
        else:
            sys.stderr.write("wokeup from select just like that\n")
    headers = childutils.get_headers(line)
    payload = stdin.read(int(headers['len']))
    return headers, payload
def main():
    """Parse CLI options and forward supervisor process events as alerts."""
    parser = argparse.ArgumentParser(
        description="Supervisor event listener to notify on process events.",
        epilog="Homepage: https://github.com/rahiel/supervisor-alert")
    parser.add_argument(
        "-c", "--command",
        help="Specify the command to process the event messages.")
    parser.add_argument("--telegram",
                        help="Use telegram-send to send event messages.",
                        action="store_true")
    parser.add_argument("--configure", help="configure %(prog)s",
                        action="store_true")
    parser.add_argument("--show-hostname", help="show hostname in messages",
                        action="store_true")
    parser.add_argument("--version", action="version",
                        version="%(prog)s {}".format(__version__))
    args = parser.parse_args()

    if args.configure:
        return configure()

    prefix = "PROCESS_STATE_"
    hostname = gethostname()

    # choose the delivery mechanism once, up front
    if args.telegram:
        alert = telegram
    elif args.command:
        alert = partial(send, command=shlex.split(args.command))
    else:
        raise Exception("No command specified.")

    while True:
        headers, payload = listener.wait()
        event_name = headers["eventname"]
        if not event_name.startswith(prefix):
            listener.ok()
            continue
        state = event_name[len(prefix):].lower()
        data = get_headers(payload)  # keys: from_state, pid, processname
        message = data["processname"] + " has entered state " + state
        if args.show_hostname:
            message = hostname + ": " + message
        alert(message=message)
def wait(self, stdin=sys.stdin, stdout=sys.stdout):
    """Block until a complete supervisor event arrives on *stdin*,
    updating the instance's event/error counters and timestamps.

    Sends READY on *stdout*, then returns ``(headers, payload)``.
    """
    self.ready(stdout)
    while 1:
        if select.select([stdin], [], [])[0]:
            line = stdin.readline()
            if line is not None:
                # NOTE(review): readline() returns '' at EOF, never
                # None, so this branch is always taken — confirm intent.
                self.supervisor_events_ctr += 1
                self.supervisor_events_timestamp = str(UTCTimestampUsec())
                break
        else:
            self.supervisor_events_error_ctr += 1
            # BUG FIX: was ``str(UTCTimestampUsec)`` — stringified the
            # function object itself instead of calling it (compare the
            # success branch above).
            self.supervisor_events_error_timestamp = str(UTCTimestampUsec())
    headers = childutils.get_headers(line)
    payload = stdin.read(int(headers['len']))
    return headers, payload
def update_state(self):
    """Consume one supervisor event and mirror the process state."""
    headers, payload = self.wait()
    payload = get_headers(payload)  # also useful for getting the payload :)
    if "PROCESS_STATE_" in headers['eventname']:
        name = payload['processname']
        state = headers['eventname'].replace("PROCESS_STATE_", "")
        # STARTING -> loading, RUNNING -> running,
        # everything else (incl. STOPPING) -> stopped
        status = {"STARTING": "loading",
                  "RUNNING": "running"}.get(state, "stopped")
        set_status(name, status, self.red)
    self.ok()
def _wait_for_supervisor_event(self):
    """Poll stdin for the next supervisor event.

    Returns the event name, or raises AboutToShutdown once the stop
    event has been set.
    """
    childutils.listener.ready(sys.stdout)
    while True:
        if self._stop_event.is_set():
            raise AboutToShutdown
        try:
            readable = select.select([sys.stdin], [], [], 0.5)[0]
        except InterruptedError:
            # interrupted by a signal; just poll again
            continue
        if not readable:
            continue
        headers = childutils.get_headers(readable[0].readline())
        # Drain the payload so the read buffer is left empty.
        sys.stdin.read(int(headers['len']))
        event_type = headers[EVENT_NAME_KEY]
        self._log('Received %s event from supervisor', event_type)
        return event_type
def main():
    """Parse CLI options and relay supervisor process events as alerts."""
    parser = argparse.ArgumentParser(
        description="Supervisor event listener to notify on process events.",
        epilog="Homepage: https://github.com/rahiel/supervisor-alert")
    parser.add_argument(
        "-c", "--command",
        help="Specify the command to process the event messages.")
    parser.add_argument("--telegram",
                        help="Use telegram-send to send event messages.",
                        action="store_true")
    parser.add_argument("--configure", help="configure %(prog)s",
                        action="store_true")
    parser.add_argument("--show-hostname", help="show hostname in messages",
                        action="store_true")
    parser.add_argument("--version", action="version",
                        version="%(prog)s {}".format(__version__))
    args = parser.parse_args()

    if args.configure:
        return configure()

    state_prefix = "PROCESS_STATE_"
    hostname = gethostname()

    # pick the delivery mechanism once, before the event loop
    if args.telegram:
        alert = telegram
    elif args.command:
        alert = partial(send, command=shlex.split(args.command))
    else:
        raise Exception("No command specified.")

    while True:
        headers, payload = listener.wait()
        event_name = headers["eventname"]
        if event_name.startswith(state_prefix):
            state = event_name[len(state_prefix):].lower()
            data = get_headers(payload)  # keys: from_state, pid, processname
            message = data["processname"] + " has entered state " + state
            if args.show_hostname:
                message = hostname + ": " + message
            alert(message=message)
        else:
            listener.ok()
def parse_event_headers(self, payload):
    """Parse and return the header dict from the first line of *payload*."""
    first_line, _, _ = payload.partition('\n')
    return childutils.get_headers(first_line)
def test_get_headers(self):
    """get_headers parses space-separated key:value pairs into a dict."""
    from supervisor.childutils import get_headers
    parsed = get_headers('a:1 b:2')
    self.assertEqual({'a': '1', 'b': '2'}, parsed)
def test_get_headers(self):
    """Parsing 'k:v' tokens yields the corresponding dict."""
    from supervisor.childutils import get_headers
    expected = {'a': '1', 'b': '2'}
    self.assertEqual(get_headers('a:1 b:2'), expected)
def main():
    """Event listener: once the monitored process reaches RUNNING, run
    mariadb-upgrade and the post-start securing script.

    The process name to watch is the sole command-line argument.  Every
    event is acknowledged so supervisor keeps delivering them.
    """
    logging.basicConfig(filename='/var/log/execstartpost-debug.log',
                        level=logging.DEBUG)
    log = logging.getLogger('listener1.main')
    # Your Process_Name should be the only command-line argument
    if len(sys.argv) != 2:
        log.error(
            "ERROR: You must specify the Process_Name that you want this Event Listener to monitor"
        )
        exit(1)
    monitored_process = sys.argv[1]
    while 1:
        headers, payload = childutils.listener.wait()
        if headers['eventname'].startswith('PROCESS_STATE'):
            pheaders = childutils.get_headers(payload)
            state = headers['eventname'][len('PROCESS_STATE_'):]
            processname = pheaders["processname"]
            if processname == monitored_process and state == "RUNNING":
                log.debug("Upgrading MariaDB if necessary!")
                time.sleep(20)
                try:
                    # BUG FIX: with check=True a nonzero exit raises
                    # CalledProcessError, so the original
                    # ``if cp.returncode != 0`` branch was dead code —
                    # and would have crashed anyway on the str + int
                    # concatenation in its log message.
                    cp = subprocess.run(["mariadb-upgrade"], check=True,
                                        universal_newlines=True,
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.PIPE)
                    log.debug("STDOUT: " + cp.stdout)
                    if cp.stderr != "":
                        log.debug("STDERR: " + cp.stderr)
                except Exception:
                    # BUG FIX: was a bare ``except:`` whose handler
                    # crashed on ``"..." + sys.exc_info()[0]`` (str +
                    # exception type); log the traceback instead.
                    log.exception("ERROR upgrading MariaDB")
                    childutils.listener.ok()
                    exit(1)
                log.debug("Done upgrading MariaDB")
                try:
                    log.debug("Securing MariaDB!")
                    cp = subprocess.run(
                        ["/usr/local/sbin/docker-mysql-poststart.sh"],
                        check=True, universal_newlines=True,
                        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                    log.debug("STDOUT: " + cp.stdout)
                    if cp.stderr != "":
                        log.debug("STDERR: " + cp.stderr)
                    log.debug("Done securing MariaDB")
                except Exception:
                    # BUG FIX: same bare-except / str-concat problems as
                    # the upgrade step above.
                    log.exception("ERROR securing MariaDB")
                    childutils.listener.ok()
                    exit(1)
        childutils.listener.ok()