def handle_process(self, task_id, process_id):
  all_processes = {}
  current_run = self._observer.process(task_id, process_id)
  if not current_run:
    HttpServer.abort(404, 'Invalid task/process combination: %s/%s' % (task_id, process_id))
  process = self._observer.process_from_name(task_id, process_id)
  if process is None:
    msg = 'Could not recover process: %s/%s' % (task_id, process_id)
    log.error(msg)
    HttpServer.abort(404, msg)
  current_run_number = current_run['process_run']
  all_processes[current_run_number] = current_run
  for run in range(current_run_number):
    all_processes[run] = self._observer.process(task_id, process_id, run)
  template = {
    'task_id': task_id,
    'process': {
      'name': process_id,
      'status': all_processes[current_run_number]["state"],
      'cmdline': process.cmdline().get()
    },
  }
  template['process'].update(**all_processes[current_run_number].get('used', {}))
  template['runs'] = all_processes
  log.debug('Rendering template is: %s' % template)
  return template
def handle_process(self, task_id, process_id):
  all_processes = {}
  current_run = self._observer.process(task_id, process_id)
  if not current_run:
    HttpServer.abort(
        404, 'Invalid task/process combination: %s/%s' % (task_id, process_id))
  process = self._observer.process_from_name(task_id, process_id)
  if process is None:
    msg = 'Could not recover process: %s/%s' % (task_id, process_id)
    log.error(msg)
    HttpServer.abort(404, msg)
  current_run_number = current_run['process_run']
  all_processes[current_run_number] = current_run
  for run in range(current_run_number):
    all_processes[run] = self._observer.process(task_id, process_id, run)
  template = {
    'task_id': task_id,
    'process': {
      'name': process_id,
      'status': all_processes[current_run_number]["state"],
      'cmdline': process.cmdline().get()
    },
  }
  template['process'].update(**all_processes[current_run_number].get('used', {}))
  template['runs'] = all_processes
  log.debug('Rendering template is: %s', template)
  return template
def __init__(self, iface, zkport, request_handler, reply_handler=None, event_handler=None):
  HttpServer.__init__(self)
  config = SnifferConfig(iface=iface)
  config.zookeeper_port = zkport
  config.update_filter()
  self._sniffer = Sniffer(config, request_handler, reply_handler, event_handler)
  self._sniffer.start()
def handle_vars(self, var=None):
  HttpServer.set_content_type('text/plain; charset=iso-8859-1')
  samples = self._monitor.sample()
  if var is None:
    return '\n'.join(
        '%s %s' % (key, val) for key, val in sorted(samples.items()))
  else:
    if var in samples:
      return samples[var]
    else:
      HttpServer.abort(404, 'Unknown exported variable')
def handle_vars(self, var=None):
  HttpServer.set_content_type('text/plain; charset=iso-8859-1')
  samples = self._monitor.sample()
  if var is None:
    return '\n'.join('%s %s' % (key, val) for key, val in sorted(samples.items()))
  else:
    if var in samples:
      return samples[var]
    else:
      HttpServer.abort(404, 'Unknown exported variable')
def handle_vars(self, var=None):
  samples = self._monitor.sample()
  if var is None:
    body = '<br>'.join(
        '%s %s' % (key, val) for key, val in samples.items())
    return '<html><body><pre>%s</pre></body></html>' % body
  else:
    if var in samples:
      return samples[var]
    else:
      HttpServer.abort(404, 'Unknown exported variable')
def handle_vars(self, var=None):
  samples = self._monitor.sample()
  if var is None:
    body = '<br>'.join('%s %s' % (key, val) for key, val in samples.items())
    return '<html><body><pre>%s</pre></body></html>' % body
  else:
    if var in samples:
      return samples[var]
    else:
      HttpServer.abort(404, 'Unknown exported variable')
def handle_vars(self, var=None):
  HttpServer.set_content_type('text/plain; charset=iso-8859-1')
  filtered = self._parse_filtered_arg()
  samples = self._monitor.sample()
  if var is None and filtered and self._stats_filter:
    return '\n'.join('%s %s' % (key, val) for key, val in sorted(samples.items())
                     if not self._stats_filter.match(key))
  elif var is None:
    return '\n'.join('%s %s' % (key, val) for key, val in sorted(samples.items()))
  else:
    if var in samples:
      return samples[var]
    else:
      HttpServer.abort(404, 'Unknown exported variable')
def handle_vars(self, var=None):
  HttpServer.set_content_type('text/plain; charset=iso-8859-1')
  filtered = self._parse_filtered_arg()
  samples = self._monitor.sample()
  if var is None and filtered and self._stats_filter:
    return '\n'.join(
        '%s %s' % (key, val)
        for key, val in sorted(samples.items())
        if not self._stats_filter.match(key))
  elif var is None:
    return '\n'.join(
        '%s %s' % (key, val) for key, val in sorted(samples.items()))
  else:
    if var in samples:
      return samples[var]
    else:
      HttpServer.abort(404, 'Unknown exported variable')
def configure_server(task_observer):
  bottle_wrapper = BottleObserver(task_observer)
  root_metrics = RootMetrics()
  server = HttpServer()
  server.mount_routes(bottle_wrapper)
  server.mount_routes(DiagnosticsEndpoints())
  server.mount_routes(VarsEndpoint())
  register_build_properties(root_metrics)
  register_diagnostics(root_metrics)
  return server
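# Usage sketch (an illustration, not from the source): assuming the TaskObserver, HttpServer
# and configure_server() shown in this file, the configured server can be started the same way
# main() starts it below. The checkpoint root '/var/run/thermos' and port 1338 are assumptions
# made up for the example.
observer = TaskObserver('/var/run/thermos')
observer.start()
server = configure_server(observer)
server.run('0.0.0.0', 1338, 'cherrypy')  # blocks; main() below runs this on a daemon thread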
def handle_task(self, task_id):
  task = self.get_task(task_id)
  processes = self._observer.processes([task_id])
  if not processes.get(task_id, None):
    HttpServer.abort(404, "Unknown task_id: %s" % task_id)
  processes = processes[task_id]
  state = self._observer.state(task_id)
  return dict(
      task_id=task_id,
      task=task,
      statuses=self._observer.task_statuses(task_id),
      user=task["user"],
      ports=task["ports"],
      processes=processes,
      chroot=state.get("sandbox", ""),
      launch_time=state.get("launch_time", 0),
      hostname=state.get("hostname", "localhost"),
  )
def handle_task(self, task_id):
  task = self.get_task(task_id)
  processes = self._observer.processes([task_id])
  if not processes.get(task_id, None):
    HttpServer.abort(404, 'Unknown task_id: %s' % task_id)
  processes = processes[task_id]
  state = self._observer.state(task_id)
  return dict(
      task_id=task_id,
      task=task,
      statuses=self._observer.task_statuses(task_id),
      user=task['user'],
      ports=task['ports'],
      processes=processes,
      chroot=state.get('sandbox', ''),
      launch_time=state.get('launch_time', 0),
      hostname=state.get('hostname', 'localhost'),
  )
def handle_task(self, task_id):
  task = self.get_task(task_id)
  processes = self._observer.processes([task_id])
  if not processes.get(task_id, None):
    HttpServer.abort(404, 'Unknown task_id: %s' % task_id)
  processes = processes[task_id]
  state = self._observer.state(task_id)
  return dict(
      task_id=task_id,
      task=task,
      statuses=self._observer.task_statuses(task_id),
      user=task['user'],
      ports=task['ports'],
      processes=processes,
      chroot=state.get('sandbox', ''),
      launch_time=state.get('launch_time', 0),
      hostname=state.get('hostname', 'localhost'),
  )
def handle_process(self, task_id, process_id):
  all_processes = {}
  current_run = self._observer.process(task_id, process_id)
  if not current_run:
    HttpServer.abort(
        404, 'Invalid task/process combination: %s/%s' % (task_id, process_id))
  process = self._observer.process_from_name(task_id, process_id)
  if process is None:
    msg = 'Could not recover process: %s/%s' % (task_id, process_id)
    log.error(msg)
    HttpServer.abort(404, msg)
  current_run_number = current_run['process_run']
  all_processes[current_run_number] = current_run
  for run in range(current_run_number):
    all_processes[run] = self._observer.process(task_id, process_id, run)

  def convert_process_tuple(run_tuple):
    process_tuple = dict(state=run_tuple['state'])
    if 'start_time' in run_tuple:
      process_tuple.update(start_time=run_tuple['start_time'])
    if 'stop_time' in run_tuple:
      process_tuple.update(stop_time=run_tuple['stop_time'])
    return process_tuple

  template = {
    'task_id': task_id,
    'process': {
      'name': process_id,
      'status': all_processes[current_run_number]["state"],
      'cmdline': process.cmdline().get()
    },
  }
  template['process'].update(**all_processes[current_run_number].get('used', {}))
  template['runs'] = dict((run, convert_process_tuple(run_tuple))
                          for run, run_tuple in all_processes.items())
  log.info('Rendering template is: %s' % template)
  return template
def test_basic_server_error_binding():
  BREAKAGE = '*****breakage*****'

  class MyServer(object):
    @HttpServer.route('/broken')
    def broken_handler(self):
      raise Exception('unhandled exception!')

    @HttpServer.error(404)
    @HttpServer.error(500)
    def error_handler(self, error):
      return BREAKAGE

  server = HttpServer()
  mserver = MyServer()
  server.mount_routes(mserver)

  # Test 404 error handling.
  resp = server.app(make_request('/nonexistent_page'),
                    functools.partial(response_asserter, 404))
  assert resp[0] == BREAKAGE

  # Test 500 error handling.
  resp = server.app(make_request('/broken'),
                    functools.partial(response_asserter, 500))
  assert resp[0] == BREAKAGE
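# Minimal sketch of the binding pattern exercised by the test above (illustrative only; the
# class name and route are assumptions): handlers are plain methods decorated with
# HttpServer.route / HttpServer.error on an ordinary class, and mount_routes() binds them to a
# server instance.
class ExampleEndpoints(object):
  @HttpServer.route('/hello')
  def hello_handler(self):
    return 'hello'

  @HttpServer.error(404)
  def not_found_handler(self, error):
    return 'not found'

example_server = HttpServer()
example_server.mount_routes(ExampleEndpoints())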
def main(args, opts):
  if args:
    print("ERROR: unrecognized arguments: %s\n" % (" ".join(args)), file=sys.stderr)
    app.help()
    sys.exit(1)

  root_server = HttpServer()
  root_server.mount_routes(DiagnosticsEndpoints())

  task_observer = TaskObserver(opts.root)
  task_observer.start()

  bottle_wrapper = BottleObserver(task_observer)
  root_server.mount_routes(bottle_wrapper)

  def run():
    root_server.run('0.0.0.0', opts.port, 'cherrypy')

  et = ExceptionalThread(target=run)
  et.daemon = True
  et.start()
  et.join()
def handle_process(self, task_id, process_id):
  all_processes = {}
  current_run = self._observer.process(task_id, process_id)
  if not current_run:
    HttpServer.abort(404, "Invalid task/process combination: %s/%s" % (task_id, process_id))
  process = self._observer.process_from_name(task_id, process_id)
  if process is None:
    msg = "Could not recover process: %s/%s" % (task_id, process_id)
    log.error(msg)
    HttpServer.abort(404, msg)
  current_run_number = current_run["process_run"]
  all_processes[current_run_number] = current_run
  for run in range(current_run_number):
    all_processes[run] = self._observer.process(task_id, process_id, run)

  def convert_process_tuple(run_tuple):
    process_tuple = dict(state=run_tuple["state"])
    if "start_time" in run_tuple:
      process_tuple.update(start_time=run_tuple["start_time"])
    if "stop_time" in run_tuple:
      process_tuple.update(stop_time=run_tuple["stop_time"])
    return process_tuple

  template = {
    "task_id": task_id,
    "process": {
      "name": process_id,
      "status": all_processes[current_run_number]["state"],
      "cmdline": process.cmdline().get(),
    },
  }
  template["process"].update(**all_processes[current_run_number].get("used", {}))
  template["runs"] = dict((run, convert_process_tuple(run_tuple))
                          for run, run_tuple in all_processes.items())
  log.info("Rendering template is: %s" % template)
  return template
def handle_process(self, task_id, process_id):
  all_processes = {}
  current_run = self._observer.process(task_id, process_id)
  if not current_run:
    HttpServer.abort(404, 'Invalid task/process combination: %s/%s' % (task_id, process_id))
  process = self._observer.process_from_name(task_id, process_id)
  if process is None:
    msg = 'Could not recover process: %s/%s' % (task_id, process_id)
    log.error(msg)
    HttpServer.abort(404, msg)
  current_run_number = current_run['process_run']
  all_processes[current_run_number] = current_run
  for run in range(current_run_number):
    all_processes[run] = self._observer.process(task_id, process_id, run)

  def convert_process_tuple(run_tuple):
    process_tuple = dict(state=run_tuple['state'])
    if 'start_time' in run_tuple:
      process_tuple.update(start_time=run_tuple['start_time'])
    if 'stop_time' in run_tuple:
      process_tuple.update(stop_time=run_tuple['stop_time'])
    return process_tuple

  template = {
    'task_id': task_id,
    'process': {
      'name': process_id,
      'status': all_processes[current_run_number]["state"],
      'cmdline': process.cmdline().get()
    },
  }
  template['process'].update(**all_processes[current_run_number].get('used', {}))
  template['runs'] = dict((run, convert_process_tuple(run_tuple))
                          for run, run_tuple in all_processes.items())
  log.info('Rendering template is: %s' % template)
  return template
def handle_main(self, type=None, offset=None, num=None):
  if type not in (None, 'all', 'finished', 'active'):
    HttpServer.abort(404, 'Invalid task type: %s' % type)
  if offset is not None:
    try:
      offset = int(offset)
    except ValueError:
      HttpServer.abort(404, 'Invalid offset: %s' % offset)
  if num is not None:
    try:
      num = int(num)
    except ValueError:
      HttpServer.abort(404, 'Invalid count: %s' % num)
  return self._observer.main(type, offset, num)
def main(args, options):
  log.info("Options in use: %s", options)

  if not options.api_port:
    app.error('Must specify --port')

  if not options.mesos_master:
    app.error('Must specify --mesos_master')

  if not options.framework_user:
    app.error('Must specify --framework_user')

  if not options.executor_uri:
    app.error('Must specify --executor_uri')

  if not options.executor_cmd:
    app.error('Must specify --executor_cmd')

  if not options.zk_url:
    app.error('Must specify --zk_url')

  if not options.admin_keypath:
    app.error('Must specify --admin_keypath')

  try:
    election_timeout = parse_time(options.election_timeout)
    framework_failover_timeout = parse_time(options.framework_failover_timeout)
  except InvalidTime as e:
    app.error(e.message)

  try:
    _, zk_servers, zk_root = zookeeper.parse(options.zk_url)
  except Exception as e:
    app.error("Invalid --zk_url: %s" % e.message)

  web_assets_dir = os.path.join(options.work_dir, "web")
  pkgutil.unpack_assets(web_assets_dir, MYSOS_MODULE, ASSET_RELPATH)
  log.info("Extracted web assets into %s" % options.work_dir)

  fw_principal = None
  fw_secret = None
  if options.framework_authentication_file:
    try:
      with open(options.framework_authentication_file, "r") as f:
        cred = yaml.load(f)
      fw_principal = cred["principal"]
      fw_secret = cred["secret"]
      log.info(
          "Loaded credential (principal=%s) for framework authentication" % fw_principal)
    except IOError as e:
      app.error("Unable to read the framework authentication key file: %s" % e)
    except (KeyError, yaml.YAMLError) as e:
      app.error("Invalid framework authentication key file format %s" % e)

  log.info("Starting Mysos scheduler")

  kazoo = KazooClient(zk_servers)
  kazoo.start()

  if options.state_storage == 'zk':
    log.info("Using ZooKeeper (path: %s) for state storage" % zk_root)
    state_provider = ZooKeeperStateProvider(kazoo, zk_root)
  else:
    log.info("Using local disk for state storage")
    state_provider = LocalStateProvider(options.work_dir)

  try:
    state = state_provider.load_scheduler_state()
  except StateProvider.Error as e:
    app.error(e.message)

  if state:
    log.info("Successfully restored scheduler state")
    framework_info = state.framework_info
    if framework_info.HasField('id'):
      log.info("Recovered scheduler's FrameworkID is %s" % framework_info.id.value)
  else:
    log.info("No scheduler state to restore")
    framework_info = FrameworkInfo(
        user=options.framework_user,
        name=FRAMEWORK_NAME,
        checkpoint=True,
        failover_timeout=framework_failover_timeout.as_(Time.SECONDS),
        role=options.framework_role)
    if fw_principal:
      framework_info.principal = fw_principal
    state = Scheduler(framework_info)
    state_provider.dump_scheduler_state(state)

  scheduler = MysosScheduler(
      state,
      state_provider,
      options.framework_user,
      options.executor_uri,
      options.executor_cmd,
      kazoo,
      options.zk_url,
      election_timeout,
      options.admin_keypath,
      installer_args=options.installer_args,
      backup_store_args=options.backup_store_args,
      executor_environ=options.executor_environ,
      framework_role=options.framework_role)

  if fw_principal and fw_secret:
    cred = Credential(principal=fw_principal, secret=fw_secret)
    scheduler_driver = mesos.native.MesosSchedulerDriver(
        scheduler, framework_info, options.mesos_master, cred)
  else:
    scheduler_driver = mesos.native.MesosSchedulerDriver(
        scheduler, framework_info, options.mesos_master)

  scheduler_driver.start()

  server = HttpServer()
  server.mount_routes(MysosServer(scheduler, web_assets_dir))

  et = ExceptionalThread(
      target=server.run, args=('0.0.0.0', options.api_port, 'cherrypy'))
  et.daemon = True
  et.start()

  try:
    # Wait for the scheduler to stop.
    # The use of 'stopped' event instead of scheduler_driver.join() is necessary to stop the
    # process with SIGINT.
    while not scheduler.stopped.wait(timeout=0.5):
      pass
  except KeyboardInterrupt:
    log.info('Interrupted, exiting.')
  else:
    log.info('Scheduler exited.')

  app.shutdown(1)  # Mysos scheduler is supposed to be long-running thus the use of exit status 1.
def get_task(self, task_id):
  task = self._observer._task(task_id)
  if not task:
    HttpServer.abort(
        404, "Failed to find task %s. Try again shortly." % task_id)
  return task
def __init__(self):
  HttpServer.__init__(self)
def __init__(self, observer):
  self._observer = observer
  StaticAssets.__init__(self)
  TaskObserverFileBrowser.__init__(self)
  TaskObserverJSONBindings.__init__(self)
  HttpServer.__init__(self)
def __init__(self):
  self._name = BaseServer.NAME
  HttpServer.__init__(self)
def main(args, options):
  log.info("Options in use: %s", options)

  if not options.api_port:
    app.error('Must specify --port')

  if not options.mesos_master:
    app.error('Must specify --mesos_master')

  if not options.framework_user:
    app.error('Must specify --framework_user')

  if not options.executor_uri:
    app.error('Must specify --executor_uri')

  if not options.executor_cmd:
    app.error('Must specify --executor_cmd')

  if not options.zk_url:
    app.error('Must specify --zk_url')

  if not options.admin_keypath:
    app.error('Must specify --admin_keypath')

  try:
    election_timeout = parse_time(options.election_timeout)
    framework_failover_timeout = parse_time(options.framework_failover_timeout)
  except InvalidTime as e:
    app.error(e.message)

  try:
    _, zk_servers, zk_root = zookeeper.parse(options.zk_url)
  except Exception as e:
    app.error("Invalid --zk_url: %s" % e.message)

  web_assets_dir = os.path.join(options.work_dir, "web")
  pkgutil.unpack_assets(web_assets_dir, MYSOS_MODULE, ASSET_RELPATH)
  log.info("Extracted web assets into %s" % options.work_dir)

  fw_principal = None
  fw_secret = None
  if options.framework_authentication_file:
    try:
      with open(options.framework_authentication_file, "r") as f:
        cred = yaml.load(f)
      fw_principal = cred["principal"]
      fw_secret = cred["secret"]
      log.info("Loaded credential (principal=%s) for framework authentication" % fw_principal)
    except IOError as e:
      app.error("Unable to read the framework authentication key file: %s" % e)
    except (KeyError, yaml.YAMLError) as e:
      app.error("Invalid framework authentication key file format %s" % e)

  log.info("Starting Mysos scheduler")

  kazoo = KazooClient(zk_servers)
  kazoo.start()

  if options.state_storage == 'zk':
    log.info("Using ZooKeeper (path: %s) for state storage" % zk_root)
    state_provider = ZooKeeperStateProvider(kazoo, zk_root)
  else:
    log.info("Using local disk for state storage")
    state_provider = LocalStateProvider(options.work_dir)

  try:
    state = state_provider.load_scheduler_state()
  except StateProvider.Error as e:
    app.error(e.message)

  if state:
    log.info("Successfully restored scheduler state")
    framework_info = state.framework_info
    if framework_info.HasField('id'):
      log.info("Recovered scheduler's FrameworkID is %s" % framework_info.id.value)
  else:
    log.info("No scheduler state to restore")
    framework_info = FrameworkInfo(
        user=options.framework_user,
        name=FRAMEWORK_NAME,
        checkpoint=True,
        failover_timeout=framework_failover_timeout.as_(Time.SECONDS),
        role=options.framework_role)
    if fw_principal:
      framework_info.principal = fw_principal
    state = Scheduler(framework_info)
    state_provider.dump_scheduler_state(state)

  scheduler = MysosScheduler(
      state,
      state_provider,
      options.framework_user,
      options.executor_uri,
      options.executor_cmd,
      kazoo,
      options.zk_url,
      election_timeout,
      options.admin_keypath,
      installer_args=options.installer_args,
      backup_store_args=options.backup_store_args,
      executor_environ=options.executor_environ,
      framework_role=options.framework_role)

  if fw_principal and fw_secret:
    cred = Credential(principal=fw_principal, secret=fw_secret)
    scheduler_driver = mesos.native.MesosSchedulerDriver(
        scheduler, framework_info, options.mesos_master, cred)
  else:
    scheduler_driver = mesos.native.MesosSchedulerDriver(
        scheduler, framework_info, options.mesos_master)

  scheduler_driver.start()

  server = HttpServer()
  server.mount_routes(MysosServer(scheduler, web_assets_dir))

  et = ExceptionalThread(
      target=server.run, args=('0.0.0.0', options.api_port, 'cherrypy'))
  et.daemon = True
  et.start()

  try:
    # Wait for the scheduler to stop.
    # The use of 'stopped' event instead of scheduler_driver.join() is necessary to stop the
    # process with SIGINT.
    while not scheduler.stopped.wait(timeout=0.5):
      pass
  except KeyboardInterrupt:
    log.info('Interrupted, exiting.')
  else:
    log.info('Scheduler exited.')

  app.shutdown(1)  # Mysos scheduler is supposed to be long-running thus the use of exit status 1.
def get_task(self, task_id):
  task = self._observer._task(task_id)
  if not task:
    HttpServer.abort(404, "Failed to find task %s. Try again shortly." % task_id)
  return task
def main(args, options):
  tracker = Tracker()
  server = HttpServer()
  server.mount_routes(tracker)
  server.run('0.0.0.0', port=options.port, server='tornado')