def main():
    """Run every registered command, parse its output, and dump the
    collected info as JSON into settings['info_file'].

    Relies on module-level state: `commands`, `deployment_type`, `myenv`,
    `settings`, `info` — TODO confirm these are defined at module scope.
    """
    check_args()
    iteration = 0
    # Parser of any specific command might add more commands to be executed.
    # Hence continue in a loop (capped at 10 iterations as a safety valve
    # against parsers that keep adding work).
    while True:
        if all_commands_executed(commands) or iteration >= 10:
            break
        iteration += 1
        status_update('Iteration: ' + str(iteration))
        # Run commands in their declared 'order'. (lambda takes the
        # (key, value) pair explicitly: py2 tuple-unpack lambdas are
        # invalid syntax in py3.)
        sorted_keys = sorted(commands.items(), key=lambda kv: kv[1]['order'])
        for (cmd, _entry) in sorted_keys:
            # Only collect stuff for which we have written a parser,
            # and skip anything already executed in a prior iteration.
            if not commands[cmd]['parser']:
                continue
            if commands[cmd].get('done', False):
                continue
            # 'in' replaces dict.has_key(), which is removed in Python 3.
            if 'help' in commands[cmd]:
                status_update(commands[cmd]['help'])
            shell = commands[cmd].get('shell', False)
            env = myenv if commands[cmd].get('env', False) else None
            sudo = commands[cmd].get('sudo', False)
            if deployment_type == 'multinode':
                # handling for network node
                if cmd.startswith('netns_'):
                    commands[cmd]['output'] = exec_on_remote(
                        commands[cmd]['cmd'])
                # BUG FIX: this was a separate 'if', so its 'else' branch
                # re-ran the command locally and clobbered the remote
                # output captured for 'netns_*' commands above.
                elif cmd == 'cat_instance':
                    commands[cmd]['output'] = get_vm_info_from_compute(
                        commands[cmd]['cmd'])
                    print(commands[cmd]['output'])
                else:
                    commands[cmd]['output'] = execute_cmd(
                        commands[cmd]['cmd'],
                        sudo=sudo, shell=shell, env=env).split('\n')
            else:
                commands[cmd]['output'] = execute_cmd(
                    commands[cmd]['cmd'],
                    sudo=sudo, shell=shell, env=env).split('\n')
            commands[cmd]['parser'](commands[cmd]['output'])
            commands[cmd]['done'] = True
    debug('============= COMMANDS =============')
    status_update('Writing collected info into ' + settings['info_file'])
    dump_json(info, settings['info_file'])
def get_agent_services(aid):
    """Return JSON: services of agent *aid* plus a map of the latest
    pidstat report keyed by service id."""
    services = SInfo.query_by_aid(aid)
    status_map = {}
    for report in SPidstatReport.lst_report_by_aid(aid, len(services)):
        status_map[report.service_id] = report
    payload = {'services': services, 'services_status_map': status_map}
    return dump_json(payload)
def main():
    """Execute all registered commands, parse their output, and write the
    aggregated info to settings['info_file'] as JSON.

    Uses module-level state: `commands`, `deployment_type`, `myenv`,
    `settings`, `info` — TODO confirm these are defined at module scope.
    """
    check_args()
    iteration = 0
    # Parser of any specific command might add more commands to be executed.
    # Hence continue in a loop (bounded to 10 iterations so a parser that
    # keeps queueing work cannot loop forever).
    while True:
        if all_commands_executed(commands) or iteration >= 10:
            break
        iteration += 1
        status_update('Iteration: ' + str(iteration))
        # Respect each command's declared 'order'. (Explicit pair lambda:
        # py2-only tuple-unpack lambdas break under Python 3.)
        sorted_keys = sorted(commands.items(), key=lambda kv: kv[1]['order'])
        for (cmd, _entry) in sorted_keys:
            # Only collect stuff for which we have written a parser;
            # skip commands already handled in an earlier iteration.
            if not commands[cmd]['parser']:
                continue
            if commands[cmd].get('done', False):
                continue
            # 'in' replaces dict.has_key(), removed in Python 3.
            if 'help' in commands[cmd]:
                status_update(commands[cmd]['help'])
            shell = commands[cmd].get('shell', False)
            env = myenv if commands[cmd].get('env', False) else None
            sudo = commands[cmd].get('sudo', False)
            if deployment_type == 'multinode':
                # handling for network node
                if cmd.startswith('netns_'):
                    commands[cmd]['output'] = exec_on_remote(
                        commands[cmd]['cmd'])
                # BUG FIX: previously an independent 'if' whose 'else'
                # re-executed the command locally, overwriting the remote
                # output just captured for 'netns_*' commands.
                elif cmd == 'cat_instance':
                    commands[cmd]['output'] = get_vm_info_from_compute(
                        commands[cmd]['cmd'])
                    print(commands[cmd]['output'])
                else:
                    commands[cmd]['output'] = execute_cmd(
                        commands[cmd]['cmd'],
                        sudo=sudo, shell=shell, env=env).split('\n')
            else:
                commands[cmd]['output'] = execute_cmd(
                    commands[cmd]['cmd'],
                    sudo=sudo, shell=shell, env=env).split('\n')
            commands[cmd]['parser'](commands[cmd]['output'])
            commands[cmd]['done'] = True
    debug('============= COMMANDS =============')
    status_update('Writing collected info into ' + settings['info_file'])
    dump_json(info, settings['info_file'])
def dashboard_summary():
    """Return JSON counts for the dashboard.

    alarm/sample counts are hard-coded to 0 — presumably not implemented
    yet; verify against the dashboard consumer.
    """
    return dump_json({
        'agent_count': Agent.count(),
        'service_count': SInfo.count(),
        'alarm_count': 0,
        'sample_count': 0,
    })
def exception_handler(error):
    """Top-level error handler: log the exception with traceback and
    return a JSON body, HTTP 400, and a JSON content-type header."""
    logging.exception('unexpected error occurs')
    # str(error) instead of error.message: BaseException.message is
    # deprecated since Python 2.6 and removed in Python 3, and many
    # exception classes never set it.
    body = dump_json({'code': 400, 'message': str(error)})
    return body, 400, {'Content-Type': 'application/json'}
def get_agents():
    """Return JSON: all agents (newest message first) with an
    'active'/'inactive' status flag, plus the master address."""
    master_host = os.getenv('MASTER_HOST', None) or socket.gethostname()
    master_port = _CONFIG['master']['server']['port']
    master_addr = '%s:%s' % (master_host, master_port)
    agents = Agent.query(orderby='last_msg_at DESC')
    # An agent counts as active if it reported within the last 5 minutes.
    cutoff = datetime.utcnow() - timedelta(minutes=5)
    for agent in agents:
        recently_seen = agent.last_msg_at and agent.last_msg_at >= cutoff
        agent.status = 'active' if recently_seen else 'inactive'
    return dump_json({'agents': agents, 'master_addr': master_addr})
def del_agent(aid):
    """Uninstall the agent identified by *aid* from its host (over an SSH
    session opened with the supplied credentials) and delete its record.

    Returns the removed agent record as JSON.
    """
    # Removed unused local 'connectType' (read from request but never used).
    username = request.args.get('username')
    password = request.args.get('password')
    agent = Agent.get_by_id(aid)
    logging.info('remove agent on %s@%s', username, agent)
    with NodeConnector(agent.host, username, password) as nc:
        nc.remove_agent()
    agent.remove()
    logging.info('agent removed on %s@%s finished.', username, agent)
    return dump_json(agent)
def get_service_jstatgc(aid, service_id):
    """Return JSON: raw jstat GC reports for *service_id* in the requested
    date range, plus two aggregate gcstats (whole-range and most-recent).

    The jstat counters are cumulative per process run, so a report whose
    timestamp-ordered counter drops below its predecessor marks a service
    restart; the per-run end reports are summed to get range totals.
    """
    start, end = calc_daterange(request)
    reports = SJstatGCReport.query_by_rtime(service_id, start, end)
    # shistory = SInfoHistory.query_by_rtime(service_id, start, end)
    # calculate gc stats and memory stats
    gcstat_recent, gcstat_range = None, None
    if reports:
        # Collect the last report of each process run (counter reset =>
        # restart boundary).
        end_reps = []
        for i, rep in enumerate(reports):
            # NOTE(review): 'i > 1' skips comparing reports[1] against
            # reports[0], so a restart between the first two reports would
            # be missed — looks like it may want 'i > 0'; confirm intent.
            if i > 1 and rep.ts < reports[i - 1].ts:
                end_reps.append(reports[i - 1])
        end_reps.append(reports[-1])
        # 1st end reprot - start report to remove data beyond the range
        end_reps[0] = end_reps[0] - reports[0]
        # Sum the per-run deltas into one report covering the whole range
        # (report objects support '+' and '-' — defined elsewhere).
        range_rep = reduce(lambda acc, r: acc + r, end_reps)
        final_rep = reports[-1]
        gcstat_range = range_rep.to_gcstat('range')
        gcstat_recent = final_rep.to_gcstat('recent')
    return dump_json({
        'reports': reports,
        'gcstats': [gcstat_range, gcstat_recent]
    })
def get_agents_byload1():
    """Return JSON: agents ordered by their 1-minute load average."""
    return dump_json(Agent.query_by_load1())
def get_agent(aid):
    """Return JSON for the single agent record identified by *aid*."""
    return dump_json(Agent.get_by_id(aid))
def get_service_pidstats(aid, service_id):
    """Return JSON: pidstat reports for *service_id* in the requested
    date range (range parsed from the current request)."""
    start, end = calc_daterange(request)
    reports = SPidstatReport.query_by_rtime(service_id, start, end)
    return dump_json(reports)
def get_service_info(aid, service_id):
    """Return JSON: the service record plus its history rows for the
    requested date range."""
    start, end = calc_daterange(request)
    history = SInfoHistory.query_by_rtime(service_id, start, end)
    return dump_json({
        'service': SInfo.byid(service_id),
        'service_history': history,
    })
def get_agent_diskreports(aid):
    """Return JSON: disk reports for agent *aid* in the requested range."""
    start, end = calc_daterange(request)
    return dump_json(NDiskReport.query_by_rtime(aid, start, end))
def get_agent_memreports(aid):
    """Return JSON: memory reports for agent *aid* in the requested range."""
    start, end = calc_daterange(request)
    return dump_json(NMemoryReport.query_by_rtime(aid, start, end))
def get_agent_sysreports(aid):
    """Return JSON: system reports for agent *aid* in the requested range."""
    start, end = calc_daterange(request)
    return dump_json(NSystemReport.query_by_rtime(aid, start, end))
def path_same_network (params, nms_hops = None):
    """Trace the data path between two IPs on the same network: start a
    ping from the source via the qrouter namespace, capture packets at
    src/dst (and intermediate network nodes when they differ), then
    process the captures and dump the results as JSON.

    params: dict with src_ip, dst_ip, json_file, username, passwd, count,
    timeout, qrouter, router, path_file, plot.
    nms_hops: extra hops for network-node capture — passed straight
    through to capture_network_packets.

    NOTE(review): reads module-level globals `src_info`, `dst_info`,
    `net_info` that are populated by the capture helpers — confirm they
    are set before this runs; indentation below is reconstructed from a
    single-line source, so branch nesting should be double-checked.
    """
    src_ip = params['src_ip']
    dst_ip = params['dst_ip']
    json_file = params['json_file']
    username = params['username']
    passwd = params['passwd']
    count = params['count']
    timeout = params['timeout']
    qrouter = params['qrouter']
    router = params['router']
    if qrouter_usable(qrouter, src_ip, dst_ip, username, passwd):
        outfile = 'path.ping.txt'
        # Ping runs in the background while tcpdump captures are running.
        ping_process = launch_ping(src_ip, dst_ip, username, passwd,
                                   count, timeout, qrouter, outfile)
        debug('Ping started with pid: %d' % ping_process.pid)
        capture_packets(params,'src')
        capture_packets(params, 'dst', src_tag = src_info['tag'])
        # Different tags => src and dst are on different nodes, so also
        # capture on the intermediate network node(s).
        if src_info['tag'] != dst_info['tag']:
            capture_network_packets(params, nms_hops)
        status_update('Waiting %s sec for tcpdump and ping processes to complete'
                      % (params['count'] + 2))
        # Sleeps slightly longer than the announced wait — presumably a
        # safety margin for process teardown; confirm.
        time.sleep(params['count'] + 4)
        status_update('if processes have not stopped, lets kill them')
        cleanup_processes([ping_process.pid] + src_info['pids'] + dst_info['pids'])
        if net_info:
            cleanup_processes(net_info['pids'])
        process_captures('src')
        process_captures('dst')
        if src_info['tag'] != dst_info['tag']:
            process_network_captures()
        ping_pass = process_ping(outfile)
        debug(pprint.pformat(src_info))
        debug(pprint.pformat(dst_info))
        debug(pprint.pformat(net_info))
        info = {
            'src' : src_ip,
            'dst' : dst_ip,
            'src_info' : src_info,
            'dst_info' : dst_info,
            'net_info' : net_info,
            'ping_pass' : ping_pass,
            'error' : '',
        }
        status_update('Dumping results into %s in JSON format' % params['path_file'])
        dump_json(info, params['path_file'])
        if params['plot']:
            # Hand off to the external plotting script.
            cmd = 'python plot.py --info_file %s --highlight_file %s --combined_file static/ping' % (json_file, params['path_file'])
            status_update('Running ' + cmd)
            output = execute_cmd(cmd, shell=True).split('\n')
            debug(pprint.pformat(output))
        status_update('Done')
    else:
        # Source unreachable via the router: record the failure with the
        # same result schema (note: no 'net_info' key in this branch).
        err_msg = 'Cannot reach %s via router %s' % (src_ip, router)
        info = {
            'src' : src_ip,
            'dst' : dst_ip,
            'src_info' : src_info,
            'dst_info' : dst_info,
            'ping_pass' : False,
            'error' : err_msg
        }
        error(err_msg)
        status_update('Dumping results into %s in JSON format' % params['path_file'])
        dump_json(info, params['path_file'])
        status_update('Done')
input_source, '--out', join(ROOT_DIR, 'experiments', test_name), '--grammar', ] command.extend(grammar) log_file = join(test_dir, 'reduction.log') with open(log_file, 'w') as stderr_file: run_command(command, test_dir, out=stderr_file, env=dict(environ, PYTHONOPTIMIZE='1')) input_lines, input_chars = process_source(input_source) fails, passes, cache_no, out_lines, out_chars = process_log(log_file) report[test_name] = dict() report[test_name] = { 'input_lines': input_lines, 'input_chars': input_chars, 'output_lines': out_lines, 'output_chars': out_chars, 'test_FAIL': fails, 'test_PASS': passes, 'test_ALL': fails + passes, 'test_CACHE': cache_no, } dump_json(report, print_out=True)
result[key] = value if isinstance(value, list): _TOKEN_COUNTS[key].update(set(value)) yield result if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('config', help='JSON processing config') parser.add_argument('infile', help='raw JSON records, one per line') parser.add_argument('outfile') parser.add_argument('--vocabulary', help='target JSON file of token-count mappings') parser.add_argument('--logging', help='JSON logging config') args = parser.parse_args() settings.configure_logging(args.logging) config = load_json(args.config) preprocessors = list( filter(None, [get_preprocessor(item) for item in config])) with open(args.infile) as f_in: with open(args.outfile, 'w') as f_out: LOGGER.info('writing %s', args.outfile) for record in preprocess(preprocessors, f_in): f_out.write('%s\n' % json.dumps(record)) if args.vocabulary: dump_json(args.vocabulary, _TOKEN_COUNTS)