# NOTE(review): stray top-level `def __init__(self)` — byte-for-byte duplicate of
# PingStats.__init__ below. The `self` parameter suggests it was meant to live
# inside a class; presumably a copy/paste leftover — confirm and remove.
def __init__(self):
    # Load host list and settings from their backing stores before reading config.
    self.host_manager = HostManager()
    self.settings = SettingManager()
    self.stat_writer = StatWriter()
    self.host_manager.read()
    self.settings.read()
    # Per the key names: interval between pings in seconds, timeout in milliseconds.
    self.ping_interval = self.settings['PING_INTERVAL_S']
    self.ping_timeout = self.settings['PING_TIMEOUT_MS']
class PingStats:
    """Periodically ping every configured host and record the latencies.

    Hosts come from ``HostManager``, timing configuration from
    ``SettingManager`` (``PING_INTERVAL_S`` seconds between pings,
    ``PING_TIMEOUT_MS`` milliseconds per-ping timeout), and results are
    accumulated in ``StatWriter`` and flushed on shutdown.
    """

    def __init__(self):
        self.host_manager = HostManager()
        self.settings = SettingManager()
        self.stat_writer = StatWriter()
        # Populate managers from their backing stores before reading values.
        self.host_manager.read()
        self.settings.read()
        # Seconds between ping attempts / per-ping timeout in milliseconds.
        self.ping_interval = self.settings['PING_INTERVAL_S']
        self.ping_timeout = self.settings['PING_TIMEOUT_MS']

    async def ping_host(self, host):
        """Ping ``host`` forever, recording each round-trip time in ms.

        A timed-out ping is recorded as the configured timeout value.
        BUGFIX: the original only slept on the success path, so once a host
        timed out it was re-pinged immediately in a tight loop; the sleep now
        runs after every attempt, keeping the configured cadence.
        """
        while True:
            try:
                # aioping takes/returns seconds; convert ms -> s and s -> ms.
                latency = await aioping.ping(
                    host, timeout=self.ping_timeout / 1000) * 1000
                self.stat_writer.record(host, latency)
            except TimeoutError:
                self.stat_writer.record(host, self.ping_timeout)
            await asyncio.sleep(self.ping_interval)

    async def await_run(self):
        """Schedule one ``ping_host`` task per configured host."""
        for host in self.host_manager:
            self.loop.create_task(self.ping_host(host))
            print('Querying %s every %s seconds...' % (host, self.ping_interval))
        if not self.host_manager:
            print('No hosts defined')
        print('All ping tasks scheduled.')

    def run(self):
        """Run the event loop until Ctrl-C, then flush collected stats."""
        self.loop = asyncio.get_event_loop()
        self.loop.create_task(self.await_run())
        try:
            self.loop.run_forever()
        except KeyboardInterrupt:
            print('Logging terminated!')
            self.stat_writer.write()
async def main():
    """CLI entry point: argv = <workdir> <case> <hostfile>.

    Connects to the remote hosts listed in the hostfile, then runs the
    evaluation for the single named case.
    """
    work_directory, case_name, hosts_path = sys.argv[1], sys.argv[2], sys.argv[3]
    logging.basicConfig(
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S',
        level=logging.DEBUG
    )
    manager = HostManager.from_hostfile(hosts_path)
    await manager.connect_remote()
    await evaluate(manager, work_directory, case_name)
async def main():
    """CLI entry point: argv = <workdir> <dat-dir> <hostfile>.

    Collects every ``*.dat`` file under the input directory (resolved to
    real paths), connects to the remote hosts, and evaluates the batch.
    """
    work_directory = sys.argv[1]
    dat_files = [os.path.realpath(p) for p in glob.glob(f"{sys.argv[2]}/*.dat")]
    hosts_path = sys.argv[3]
    logging.basicConfig(
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S',
        level=logging.DEBUG)
    manager = HostManager.from_hostfile(hosts_path)
    await manager.connect_remote()
    await evaluate(manager, work_directory, dat_files)
async def pipeline(fbfilename, hostfilename, workdir_prefix, rootname, maxDM,
                   Nsub, Nint, Tres, zmax):
    """Run a two-stage PRESTO-style pulsar search over remote hosts.

    Stage 1: de-disperse (prepsubband), FFT (realfft) and search
    (accelsearch) each DM trial in parallel. Stage 2: sift candidates and
    fold (prepfold) each one.

    NOTE(review): ``Tres`` and ``zmax`` are accepted but never used in this
    body (accelsearch is hard-coded to ``-zmax 0``) — confirm intent.
    """
    host_manager = HostManager.from_hostfile(hostfilename)
    base_executor = host_manager.get_base_executor()
    await host_manager.connect_remote()
    # TODO: change all remote & local executor's working directory
    # Timestamped working directory, e.g. "./<prefix>_20240101_120000/".
    workdir = "./" + workdir_prefix + "_" + datetime.datetime.now().strftime("%Y%m%d_%H%M%S") + "/"
    ret, output = await base_executor.execute(f"mkdir {workdir}")
    assert(ret == 0)
    logger.info(f"Created working directory {workdir}")
    # Probe the filterbank file's observation parameters via PRESTO's readfile.
    ret, output = await base_executor.execute(f"readfile {fbfilename}")
    assert(ret == 0)
    Nchan, tsamp, BandWidth, fcenter, Nsamp = parse_readfile(output)
    logger.info(f"Nchan={Nchan}, tsamp={tsamp}, BandWidth={BandWidth}, fcenter={fcenter}, Nsamp={Nsamp}")
    # Ask DDplan.py for a de-dispersion plan covering DM 0..maxDM.
    ret, output = await base_executor.execute(
        'DDplan.py -d %(maxDM)s -n %(Nchan)d -b %(BandWidth)s -t %(tsamp)f -f %(fcenter)f -s %(Nsub)s -o %(workdir)s/DDplan.ps' % {
            'maxDM': maxDM,
            'Nchan': Nchan,
            'tsamp': tsamp,
            'BandWidth': BandWidth,
            'fcenter': fcenter,
            'Nsub': Nsub,
            'workdir': workdir
        }
    )
    ddplan = parse_ddplan(output)
    logger.info(f"ddplan: {ddplan}")
    # prep, realfft & accelsearch
    # Todo: figure out ddm decision.
    # One runner per remote executor, weighted by that host's slot count.
    stage_1_task_executor = ParallelTaskExecutor()
    for executor in host_manager.all_executors():
        stage_1_task_executor.add_runner(
            host_manager.get_dispatch_hint(executor),
            executor,
            host_manager.get_slot(executor)
        )
    # prep tasks — this code only handles a single-pass DDplan.
    assert(len(ddplan) == 1)
    current_ddplan = ddplan[0]
    # NOTE(review): true division — Nout becomes a float here; it is later
    # formatted with %d (which truncates). Confirm integer division was intended.
    Nout = Nsamp / current_ddplan['DownSamp']
    Nout -= (Nout % 500)  # round output length down to a multiple of 500
    # (A long sample dump of a split dmlist was previously pasted here as a
    # comment; removed for readability.)
    dm_aggragated = np.arange(current_ddplan['lowDM'], current_ddplan['hiDM'], current_ddplan['dDM'])
    # Split into one single-DM chunk per trial, i.e. every dml below holds
    # exactly one DM. (An earlier variant split into `calls` chunks instead.)
    dmlist = np.split(
        dm_aggragated,
        len(dm_aggragated)
    )
    logger.info(f"dmlist: {dmlist}")
    # NOTE(review): subdownsamp and subnames (below) are computed but never
    # used — presumably leftovers from a subband workflow; confirm.
    subdownsamp = current_ddplan['DownSamp'] / 2
    datdownsamp = 2
    if current_ddplan['DownSamp'] < 2:
        subdownsamp = datdownsamp = 1
    prep_task_uuid = []
    # index by dm, todo: string or np.float notation?
    # NOTE(review): these three collections are filled but never read again
    # in this function — kept for possible external inspection; confirm.
    fft_tasks = {}
    accel_search_tasks = {}
    for i, dml in enumerate(dmlist):
        # generates Sband_DM*.dat & inf
        lodm = dml[0]
        subDM = np.mean(dml)
        subnames = rootname + "_DM%.2f.sub[0-9]" % subDM
        prepsubcmd = "cd %(workdir)s && prepsubband -nsub %(Nsub)d -lodm %(lowdm)f -dmstep %(dDM)f -numdms %(NDMs)d -numout %(Nout)d -downsamp %(DownSamp)d -o %(root)s ../%(filfile)s" % {
            'workdir': workdir,
            'Nsub': 1,
            'lowdm': lodm,
            'dDM': current_ddplan['dDM'],
            'NDMs': 1,
            'Nout': Nout,
            'DownSamp': datdownsamp,
            'root': rootname,
            'filfile': fbfilename
        }
        prep_task = stage_1_task_executor.add_task(f"prep_batch_{i}", [], [prepsubcmd], 1)
        prep_task_uuid.append(prep_task)
        # Chain per-DM FFT and accelsearch tasks onto their prep task.
        for dm in dml:
            fft_task = stage_1_task_executor.add_task(
                f"realfft_dm{dm}",
                [prep_task],
                [f"cd {workdir} && realfft {rootname}_DM{dm:.2f}.dat"],
                1
            )
            fft_tasks[dm] = fft_task
            accel_search_task = stage_1_task_executor.add_task(
                f"accelsearch_dm{dm}",
                [fft_task],
                [f"cd {workdir} && accelsearch -zmax 0 {rootname}_DM{dm:.2f}.fft"],
                1
            )
            accel_search_tasks[dm] = accel_search_task
    stage_1_task_executor.update_alloc()
    stage_1_task_executor.start_runners()
    await stage_1_task_executor.wait_until_finish()
    # sifting & prepfold
    logger.info("Stage 1 done.")
    # Run the sifting script remotely inside the working directory.
    ret, output = await base_executor.execute(f"cp PrestoSifting.py {workdir} && cd {workdir} && python3 PrestoSifting.py 0")
    assert(ret == 0)
    logger.debug(f"Sifting output: {output}")
    cands = parse_accel_sift(output)
    logger.info(f"Sifting cands: {cands}")
    # Stage 2: fold each surviving candidate, again fanned out over all hosts.
    stage_2_task_executor = ParallelTaskExecutor()
    for executor in host_manager.all_executors():
        stage_2_task_executor.add_runner(
            host_manager.get_dispatch_hint(executor),
            executor,
            host_manager.get_slot(executor)
        )
    for i, cand in enumerate(cands):
        # (An earlier accelcand-file-based prepfold invocation was commented
        # out here; the current version folds by explicit period instead.)
        foldcmd = "cd %(workdir)s && prepfold -n %(Nint)d -nsub %(Nsub)d -dm %(dm)f -p %(period)f %(filfile)s -o %(outfile)s -noxwin -nodmsearch" % {
            'workdir': workdir,
            'Nint': Nint,
            'Nsub': Nsub,
            'dm': cand['DM'],
            'period': cand['p'],
            'filfile': f"../{fbfilename}",
            'outfile': rootname + '_DM' + cand['DMstr']
        }
        cand_task = stage_2_task_executor.add_task(f"cand_{i}", [], [foldcmd], 1)
    stage_2_task_executor.update_alloc()
    stage_2_task_executor.start_runners()
    await stage_2_task_executor.wait_until_finish()
    # sifting & prepfold
    logger.info("Stage 2 done.")
    await host_manager.close_remote()
def main(argv=None):
    '''Command line options.

    Builds the optparse CLI for the SDN/OVS management tool, instantiates the
    DB/host/network managers, and dispatches on the parsed options.
    Returns 2 on any error (after printing it to stderr), None otherwise.

    BUGFIX: the original used Python 2 ``except Exception, e`` syntax, which
    is a SyntaxError on Python 3 (the rest of this file is Python 3 code);
    it is now ``except Exception as e``.
    '''
    program_name = os.path.basename(sys.argv[0])
    program_version = "v0.2"
    program_build_date = "%s" % __updated__
    program_version_string = '%%prog %s (%s)' % (program_version, program_build_date)
    #program_usage = '''usage: spam two eggs'''  # optional - will be autogenerated by optparse
    program_longdesc = ''''''  # optional - give further explanation about what the program does
    program_license = "Copyright 2014 user_name (organization_name) \
Licensed under the Apache License 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0"

    # Database element names shared by the -l (list) and -c (clear) options.
    db_elements = ("host", "topology", "link", "node", "sflow")

    if argv is None:
        argv = sys.argv[1:]
    try:
        # setup option parser
        usage = "\t-e --create-ovs=IPADDRESS -t=TOPOLOGYNAME (create OVS network)\n\t-p tcl/r10.tcl (parse r10.tcl file and save database)\n\t-s r20 (show the graph of r20 topology)"
        parser = OptionParser(version=program_version_string, epilog=program_longdesc, description=program_license, usage=usage)
        parser.add_option("-i", "--in", dest="infile", help="set input path [default: %default]", metavar="FILE")
        parser.add_option("-o", "--out", dest="outfile", help="set output path [default: %default]", metavar="FILE")
        parser.add_option("-l", dest="list", help="list elements of database [all, host, topology, link, node, sflow]")
        parser.add_option("-s", "--show", dest="show", help="show a network graph of topology", metavar="topology")
        parser.add_option("-c", "--clear", dest="clear", help="clear entries on the inputted argument [all, host, topology, link, node, sflow]", metavar="element")
        parser.add_option("-p", dest="parse", help="parse Brite topology file", metavar="FILE")
        parser.add_option("-f", "--flow", dest="flow", help="get a routing table of sdn controller ", metavar="IP-address of SDN controller")
        #parser.add_option("-a", "--alarm", action="store_true", dest="alarm", default=False, help="get the alarm number from IDS ")

        eostgroup = OptionGroup(parser, "Experiment options")
        eostgroup.add_option("-e", action="store_true", dest="experimental", default=False, help="create virtual SDN network")
        eostgroup.add_option("-j", dest="sampling", help="set sampling configuration json file", metavar="FILE")
        eostgroup.add_option("-t", "--topology", dest="topology", help="set topology ", metavar="NAME")
        eostgroup.add_option("--create-ovs", dest="create_ovs_target", help="create OVS network at the target host", metavar="TARGET-IP-ADDRESS")
        eostgroup.add_option("--clear-ovs", dest="clear_ovs_target", help="clear OVS network at the target host", metavar="TARGET-IP-ADDRESS")
        eostgroup.add_option("--create-sflow", dest="create_sflow_target", help="create sflow agnets at the target host", metavar="TARGET-IP-ADDRESS")
        eostgroup.add_option("--clear-sflow", dest="clear_sflow_target", help="clear sflow agnets at the target host", metavar="TARGET-IP-ADDRESS")
        parser.add_option_group(eostgroup)

        hostgroup = OptionGroup(parser, "Host registration options")
        hostgroup.add_option("-r", "--register-host", action="store_true", dest="register_host", default=False, help="register host")
        hostgroup.add_option("--host-ip", dest="hostip", help="set machine ip address", metavar="IP-ADDRESS")
        hostgroup.add_option("--user-id", dest="userid", help="set user id")
        hostgroup.add_option("--password", dest="password", help="set user password")
        hostgroup.add_option("--controller-ip", dest="controllerip", help="set SDN controller ip")
        hostgroup.add_option("--inter-ip", dest="interip", help="set the internal ip of the host")
        parser.add_option_group(hostgroup)

        # set defaults
        parser.set_defaults(outfile="./out.txt", infile="./in.txt", role="ovs-host", controllerip="", interip="")

        dbmanager = DBManager(debug=DEBUG)
        hostmanager = HostManager(dbmanager)
        networkmanager = NetworkManager(dbmanager, hostmanager, DEBUG)

        # process options
        (opts, args) = parser.parse_args(argv)

        # -l: list one database element, or all of them. Unknown names are
        # silently ignored (same as the original elif chain).
        if opts.list:
            for element in db_elements:
                if opts.list in ("all", element):
                    dbmanager.list_DB_element(element)

        if opts.parse:
            topologyparser = Parser(dbmanager, opts.parse, DEBUG)
            topologyparser.parse_topology()
        if opts.flow:
            networkmanager.get_route_table(opts.flow)
        if opts.show:
            networkmanager.draw_network(opts.show)

        # -e: experimental OVS/sFlow provisioning actions (mutually exclusive).
        if opts.experimental:
            if opts.create_ovs_target:
                print("create ovs")
                networkmanager.generate_OVS_network(opts.topology, opts.create_ovs_target)
            elif opts.clear_ovs_target:
                print("clear ovs")
                print(opts.clear_ovs_target)
                networkmanager.remove_OVS_network(opts.clear_ovs_target)
            elif opts.create_sflow_target:
                print("create sflow agents")
                node_data = None
                # Optional -j JSON file supplies per-node sampling settings.
                if opts.sampling is not None:
                    with open(opts.sampling) as json_file:
                        input_data = json.load(json_file)
                        node_data = input_data["nodes"]
                networkmanager.create_sflow_agnets(opts.create_sflow_target, node_data)
                print(opts.create_sflow_target)
            elif opts.clear_sflow_target:
                print("clear sflow agents")
                networkmanager.clear_sflow_agnets(opts.clear_sflow_target)
                print(opts.clear_sflow_target)

        # -c: clear one database element or all. "host" goes through the
        # host manager; the rest drop their backing tables directly.
        if opts.clear:
            if opts.clear in ("all", "host"):
                hostmanager.clear_hosts()
            for element in ("topology", "link", "node", "sflow"):
                if opts.clear in ("all", element):
                    dbmanager.execute_sql_command("drop table " + element)

        if opts.register_host:
            hostmanager.register_host(opts.hostip, opts.controllerip, opts.userid, opts.password, "None", opts.interip)

        # MAIN BODY #
        if len(argv) == 0:
            parser.print_help()

    except Exception as e:
        indent = len(program_name) * " "
        sys.stderr.write(program_name + ": " + repr(e) + "\n")
        sys.stderr.write(indent + " for help use --help")
        return 2