def __init__(self, schedule_config=None, web_server_config=None, host_machine_config=None, role=None):
    logging.basicConfig(level=logging.DEBUG,
                        format='%(asctime)s | %(levelname)-1s [%(filename)s:%(lineno)d] %(message)s',
                        datefmt='%Y-%m-%d:%H:%M:%S')
    self.logger = logging.getLogger(__name__)
    if role in ["client", "server", None]:
        self.role = role
    else:
        raise Exception("Need to define a role for a trace collector")
    if web_server_config is not None:
        self.web_server_config = web_server_config
    else:
        self.web_server_config = utils.parse_config("config/web_server_config.json")
    if host_machine_config is not None:
        self.host_machine_config = host_machine_config
    else:
        self.host_machine_config = utils.parse_config("config/host_machine_config.json")
    if schedule_config is not None:
        self.schedule_config = schedule_config
    else:
        self.schedule_config = utils.parse_config("config/schedule_config.json")
    self.hostname = socket.gethostname()
    self.config = self.host_machine_config[self.hostname]
    self.scheduling_list = self.schedule_config["scheduling_list"]
    self.scheduling_general_config = self.schedule_config["scheduling_general_config"]
    self.test_config_list = self.generate_test_config_list()
    self.scheduling_server_port = self.schedule_config["scheduling_server_port"]
    if self.role == "client":
        self.data_collector = collector.TraceDataCollectionClient(host_machine_config=self.host_machine_config)
    else:
        self.data_collector = collector.TraceDataCollectionServer(host_machine_config=self.host_machine_config)
    self.data_analyzer = analyzer.TraceDataAnalyzer(web_server_config=self.web_server_config)
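# A minimal sketch of the utils.parse_config used by the JSON-based snippets in
# this collection, assuming the "config/*.json" files hold plain JSON objects.
# This is an assumption for illustration, not the projects' actual implementation.
import json

def parse_config(path):
    """Load a JSON config file and return its contents as a dict."""
    with open(path) as fh:
        return json.load(fh)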
def main():
    parser = argparse.ArgumentParser(description="IT TESTS THE MIRACLES")
    parser.add_argument("-p", "--path", help='Path to test videos', type=str)
    parser.add_argument('--config', help='Configuration file in JSON', required=True)
    parser.add_argument('--output', required=True)
    args = parser.parse_args()
    conf = parse_config(args.config)
    video_conf = conf['format_video_to_save']
    bus_conf = conf['bus']
    person_conf = conf['person']
    deep_sort_conf = conf['deep_sort']
    geometry = parse_config(conf['geometry_filename'])
    TP = 0
    FP = 0
    FN = 0
    ALL = 0
    values = []
    all_folders = len(glob.glob(args.path + "*"))
    i = 1
    for folder in glob.glob(args.path + "*"):
        print(f'Start test for {i}/{all_folders}...')
        i += 1
        video = os.path.join(folder, os.path.split(folder)[-1] + ".asf")
        annotation = os.path.join(folder, 'annotation.csv')
        if os.path.isfile(video) and os.path.isfile(annotation):
            run_test(video, os.path.join(folder, os.path.split(folder)[-1]),
                     conf['model_filename'], conf['min_confidence'],
                     video_conf['fps'], video_conf['width'], video_conf['height'],
                     deep_sort_conf['max_cosine_distance'], deep_sort_conf['model_filename'],
                     bus_conf['count_to_change_state'], bus_conf["min_distance"],
                     bus_conf["distance_per_sec"], bus_conf["nn_age"],
                     bus_conf["zoom_in_x"], bus_conf["bottom_part"],
                     person_conf['nn_age'], geometry)
            tp, fp, fn, all_cases = stat(annotation, os.path.join(folder, os.path.split(folder)[-1]) + '.csv')
            values.append((video, int(tp), int(fp), int(fn), int(all_cases)))
            TP += tp
            FP += fp
            FN += fn
            ALL += all_cases
    print(f'result: TP={TP}, FP={FP}, FN={FN} ALL={ALL}')
    df = pd.DataFrame(values, columns=['Filename', 'TP', 'FP', 'FN', 'All'])
    df.to_csv(f"{args.output}.csv", index=None)
def __init__(self):
    """"""
    utils.parse_config()
    self.logging = getLogger(ContextManager.__class__.__name__)
    self.manager_loop = lambda: self.__isrunning
    utils.Cache(components={})
    utils.Cache(storage={})
    BackendServerMQ.__init__(self, **utils.Cache().get('config.core.backend.kwargs'))
    self.logging.info('IoT Manager started at pid: {}'.format(os.getpid()))
    self.logging.info('Starting components...')
    self.start_framework()
    threading.Thread.__init__(self)
def get_data_loaders_from_config(config):
    '''
    Returns a dict with the phases as keys and the respective dataloaders as values.
    '''
    classes_txt = config['datasets']['classes']['classes_txt']
    phase2dataset = {}
    for phase, phase_dict in config['datasets']['sets'].items():
        assert phase in PHASES, f'Phases configured must be one of {PHASES}.'
        print()
        print(phase)
        phase_aug_dicts = parse_config(phase_dict['aug'], verbose=False)
        img_transforms = build_transforms(phase_aug_dicts)
        phase2dataset[phase] = ImageCSVDataset(phase_dict['csv'], classes_txt,
                                               transform=img_transforms,
                                               target_transform=None)
    phase2shuffle = {'train': True, 'val': False, 'test': False}
    bs = config['training']['batch_size']
    num_workers = config['training']['data_num_workers']
    dataloaders_dict = {}
    for phase, dataset in phase2dataset.items():
        dataloaders_dict[phase] = DataLoader(dataset, batch_size=bs,
                                             shuffle=phase2shuffle[phase],
                                             num_workers=num_workers,
                                             sampler=None, pin_memory=True)
    return dataloaders_dict, phase2dataset['train'].classes
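# Illustrative only: the shape of the config dict the loader above expects.
# All file names are placeholders inferred from the keys read in the snippet.
example_config = {
    'datasets': {
        'classes': {'classes_txt': 'classes.txt'},
        'sets': {
            'train': {'csv': 'train.csv', 'aug': 'train_aug.yaml'},
            'val': {'csv': 'val.csv', 'aug': 'val_aug.yaml'},
            'test': {'csv': 'test.csv', 'aug': 'test_aug.yaml'},
        },
    },
    'training': {'batch_size': 32, 'data_num_workers': 4},
}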
def generate(self):
    config = utils.parse_config()
    commands = config.get('commands')
    output = ''
    separator = ' && '
    for repo in self.project.get_repos(**config):
        clone_info = repo.get('links').get('clone')
        for clone_info_item in clone_info:
            if clone_info_item.get('name') == 'http':
                clone_http_href = clone_info_item.get('href')
            if clone_info_item.get('name') == 'ssh':
                clone_ssh_href = clone_info_item.get('href')
        for command in commands:
            command_replaced = (command
                                .replace('{url}', clone_http_href)
                                .replace('{url:http}', clone_http_href)
                                .replace('{url:ssh}', clone_ssh_href)
                                .replace('{project}', config.get('bitbucket').get('project'))
                                .replace('{repo}', repo.get('slug')))
            output += textwrap.dedent(f'{command_replaced}{separator}')
    if output.endswith(separator):
        output = output[:-len(separator)]
    print(output)
def init_conf(argv):
    """
    Populates CONF with key-value pairs from configuration file.
    """
    CONF.update(utils.parse_config(argv[1], 'crawl'))
    CONF['seeders'] = CONF['seeders'].strip().split("\n")
    CONF['exclude_asns'] = CONF['exclude_asns'].strip().split("\n")
    CONF['exclude_ipv4_networks'] = list_excluded_networks(CONF['exclude_ipv4_networks'])
    CONF['exclude_ipv6_networks'] = list_excluded_networks(CONF['exclude_ipv6_networks'])
    CONF['initial_exclude_ipv4_networks'] = CONF['exclude_ipv4_networks']
    CONF['user_agent_blacklist'] = CONF['user_agent_blacklist'].strip().split("\n")
    if CONF['onion']:
        tor_proxy = CONF['tor_proxy'].split(":")
        CONF['tor_proxy'] = (tor_proxy[0], int(tor_proxy[1]))
    else:
        CONF['tor_proxy'] = None
    CONF['onion_nodes'] = CONF['onion_nodes'].strip().split("\n")

    # Set to True for master process
    CONF['master'] = argv[2] == "master"
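# Hedged sketch of the two-argument parse_config(path, section) form used
# above; a plausible reading is that it loads one section of an INI file via
# ConfigParser. An assumption, not the project's own code (which may target
# Python 2's ConfigParser module).
import configparser

def parse_config_section(path, section):
    """Return the key/value pairs of one INI section as a plain dict."""
    parser = configparser.ConfigParser()
    parser.read(path)
    return dict(parser.items(section))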
def __init__(self, cfgfile):
    super(Darknet, self).__init__()
    self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    self.config = parse_config(cfgfile)
    self.net_params, self.module_list = create_module_list(self.config, self.device)
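# Hypothetical usage of the constructor above (the .cfg path is a placeholder):
#   model = Darknet("cfg/yolov3.cfg")
#   model.to(model.device)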
def get_test_data_loader_from_config(config):
    '''
    Return a dataloader for the test set that also yields each image's path.
    '''
    classes_txt = config['datasets']['classes']['classes_txt']
    test_config_dict = config['datasets']['sets']['test']
    test_aug_dict = parse_config(test_config_dict['aug'], verbose=False)
    img_transforms = build_transforms(test_aug_dict)
    test_dataset = ImageCSVDatasetWithPath(test_config_dict['csv'], classes_txt,
                                           transform=img_transforms,
                                           target_transform=None)
    bs = config['training']['batch_size']
    num_workers = config['training']['data_num_workers']
    dataloader = DataLoader(test_dataset, batch_size=bs, shuffle=False,
                            num_workers=num_workers, sampler=None, pin_memory=True)
    return dataloader, test_dataset.classes
def set_config():
    if opt.config:
        config = parse_config(opt.config)
    else:
        raise_exception('--config must be specified.')

    if isinstance(config.DATA.SCALE, int):
        config.DATA.SCALE = (config.DATA.SCALE, config.DATA.SCALE)  # make tuple

    if not opt.tag:
        opt.tag = utils.get_file_name(opt.config)

    if opt.local_rank is not None:
        opt.gpu_id = opt.local_rank

    opt.device = 'cuda:' + str(opt.gpu_id) if torch.cuda.is_available() and opt.gpu_id != -1 else 'cpu'

    if opt.debug:
        config.MISC.SAVE_FREQ = 1
        config.MISC.VAL_FREQ = 1
        config.MISC.LOG_FREQ = 1

    if opt.tag != 'default':
        pid = f'[PID:{os.getpid()}]'
        with open('run_log.txt', 'a') as f:
            f.writelines(utils.get_time_str(fmt="%Y-%m-%d %H:%M:%S") + ' ' + pid + ' ' + get_command_run() + '\n')
    return config
def main(args):
    config = parse_config(args.config)
    out_dir = Path(config['training']['save_dir']) / config['training']['save_context']
    out_dir.mkdir(exist_ok=True, parents=True)
    model = build_model_from_config(config)
    test_dataloader, classes = get_test_data_loader_from_config(config)
    conf_mat, report, preds_np, labels_np, all_paths = test_model(model, test_dataloader, config, classes=classes)
    test_dir = out_dir / 'test'
    test_dir.mkdir(exist_ok=True, parents=True)
    test_out = test_dir / f"{config['training']['save_context']}_clfreport.log"
    with test_out.open('w') as wf:
        wf.write(report)
    sn_plot = sn.heatmap(conf_mat, annot=True, fmt='g', xticklabels=classes, yticklabels=classes)
    test_out_cm = test_dir / f"{config['training']['save_context']}_confmat.jpg"
    sn_plot.get_figure().savefig(test_out_cm)
    test_out_wrong = test_dir / "wrong"
    wrong_idx = np.where(preds_np != labels_np)[0]
    for i, (path, label, pred) in enumerate(zip(all_paths, labels_np, preds_np)):
        if i in wrong_idx:
            out_wrong_dir = test_out_wrong / classes[label]
            out_wrong_dir.mkdir(exist_ok=True, parents=True)
            wrong_impath = out_wrong_dir / f'{i}_{classes[pred]}.jpg'
            cv2.imwrite(str(wrong_impath), cv2.imread(path))
def _load(self, account):
    self.account = account

    # Create home directory
    utils.make_dir("")
    self.configfile = utils.get_root_filename("config.json")

    # Create user directory
    userfolder = "%s.%s" % (account["username"], account["api"])
    utils.make_dir(userfolder)

    self.msg.info(self.name, "Trackma v{0} - using account {1}({2}).".format(
        utils.VERSION, account["username"], account["api"]))
    self.msg.info(self.name, "Reading config files...")
    try:
        self.config = utils.parse_config(self.configfile, utils.config_defaults)
    except IOError:
        raise utils.EngineFatal("Couldn't open config file.")

    # Load hook file
    if os.path.exists(utils.get_root_filename("hook.py")):
        import sys
        sys.path[0:0] = [utils.get_root()]
        try:
            self.msg.info(self.name, "Importing user hooks (hook.py)...")
            global hook
            import hook
            self.hooks_available = True
        except ImportError:
            self.msg.warn(self.name, "Error importing hooks.")
        del sys.path[0]
def crawl_all():
    """
    Crawl URLs asynchronously and return each URL and its response.
    """
    sites = []
    failed = {}
    hosts = parse_config(hosts=True)
    for url in hosts:
        sites.append(url['url'])
    # do some multi crawl on them all
    requests = multi_get(sites, timeout=25.5)
    for url, data in requests:
        failed_data = []
        for host in hosts:
            if host['url'] == url:
                html = data.read()
                match = re.search(host['match'], html)
                if not match or data.code not in (200, 302):
                    failed_data.append(host['name'])
                    failed_data.append(data.code)
                    failed_data.append(html)
                    failed[url] = failed_data
    return failed
def __init__(self):
    motors = parse_config()['motors']
    self.motor_left = Motor(en_pin=motors['en_pin'],
                            slew_pin=motors['slew_pin'],
                            pwm_pin=motors['left']['pwm_pin'],
                            forward_pin=motors['left']['forward_pin'],
                            reverse_pin=motors['left']['reverse_pin'],
                            sf_pin=motors['left']['state_flag_pin'],
                            name='left')
    self.motor_right = Motor(en_pin=motors['en_pin'],
                             slew_pin=motors['slew_pin'],
                             pwm_pin=motors['right']['pwm_pin'],
                             forward_pin=motors['right']['forward_pin'],
                             reverse_pin=motors['right']['reverse_pin'],
                             sf_pin=motors['right']['state_flag_pin'],
                             name='right')
    self.motor_left.disable()
    self.motor_right.disable()
    # todo: Implement a 2d matrix to represent direction and duration of previous movement.
    self.travel_memory = list()
    self.turn_a = 22.5
    self.turn_limit = 180 / self.turn_a
    self.last_turn = None
    self.turns = 0
    self.turn_speed = 440
    self.opposites = {"left": "right", "right": "left"}
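# Illustrative shape of the parse_config()['motors'] mapping consumed above;
# the pin numbers are placeholders, only the key names come from the snippet.
example_motors = {
    'en_pin': 4,
    'slew_pin': 17,
    'left': {'pwm_pin': 18, 'forward_pin': 22, 'reverse_pin': 23, 'state_flag_pin': 24},
    'right': {'pwm_pin': 13, 'forward_pin': 5, 'reverse_pin': 6, 'state_flag_pin': 12},
}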
def create_modules():
    module_list = nn.ModuleList()
    config, prev_filters, output_filters = utils.parse_config(), 3, []
    # We need to skip the "net" layer and hence start iterating from index 1
    for index, config_dict in enumerate(config[1:]):
        if config_dict["type"] == constants.CONVOLUTIONAL_LAYER:
            conv_layer, filters = create_conv_layer(config_dict, index, prev_filters)
            module_list.append(conv_layer)
        elif config_dict["type"] == constants.SHORTCUT_LAYER:
            shortcut_layer = create_shortcut_layer(index)
            module_list.append(shortcut_layer)
            filters = prev_filters
        elif config_dict["type"] == constants.ROUTE_LAYER:
            route_layer, filters = create_route_layer(config_dict, index, output_filters)
            module_list.append(route_layer)
        elif config_dict["type"] == constants.UPSAMPLE_LAYER:
            upsample_layer = create_upsample_layer(config_dict, index)
            module_list.append(upsample_layer)
            filters = prev_filters
        elif config_dict["type"] == constants.YOLO_LAYER:
            yolo_layer = create_yolo_layer(config_dict, index)
            module_list.append(yolo_layer)
            filters = prev_filters
        output_filters.append(filters)
        prev_filters = filters
    return module_list
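# A minimal sketch (an assumption, not the project's parse_config) of how a
# Darknet-style .cfg file can be turned into the list of dicts iterated above,
# with the "net" block first:
def parse_darknet_cfg(path):
    """Return a list of {'type': ..., key: value} dicts, one per [section]."""
    blocks = []
    with open(path) as fh:
        for line in fh:
            line = line.strip()
            if not line or line.startswith('#'):
                continue  # skip blank lines and comments
            if line.startswith('['):
                blocks.append({'type': line[1:-1].strip()})
            else:
                key, _, value = line.partition('=')
                blocks[-1][key.strip()] = value.strip()
    return blocks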
def viz(args):
    config = parse_config(args.config)
    # get_data_loaders_from_config returns (dataloaders, classes); keep the dict only
    dataloaders, _ = get_data_loaders_from_config(config)
    for phase, dataloader in dataloaders.items():
        print(phase)
        next_phase = False
        for step, data in enumerate(tqdm(dataloader)):
            inputs, labels = data
            for input_, label in zip(inputs, labels):
                input_ = inverse_normalize(input_, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
                img = input_.permute(1, 2, 0).numpy()
                img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
                cv2.imshow(f'{label}', img)
                key = cv2.waitKey(0)
                if key == ord('q'):
                    exit()
                elif key == ord('n'):
                    next_phase = True
                    cv2.destroyAllWindows()
                    break
            if next_phase:
                break
def __init__(self):
    os.environ["TCL_LIBRARY"] = "/lib64/tcl8.5/"
    self.win = tk.Tk()
    self.win.title("Continuous Aggregate Monitor: Alarm Handler")
    self.win.configure(background=u.lightgrey_color)
    self.get_alarm_handler_style()
    self.delim = ','
    self.pdelim = '='
    parser = ArgumentParser()
    parser.add_argument("-f", "--file", dest="filename",
                        help="Configuration File Location", metavar="FILE",
                        default="alarmConfig.txt")
    args = vars(parser.parse_args())
    # The file array is literally an array of data with methods for reading and writing to disk
    self.conf = alarm_object.FILE_ARRAY(args['filename'], self.pdelim)
    # This method gets config data from the array
    u.parse_config(self.conf)
    # This update changes the config data for this instance of AlarmHandler
    # (probably should be more like self.update_config(), but this works)
    u.update_config(self)
    # This is the initial read of alarm handler data from disk
    self.fileArray = alarm_object.FILE_ARRAY(self.filename, self.delim)
    if len(self.fileArray.filearray) == 0:
        print("ERROR: Null alarm input file, please resolve in configure file")
        self.quit()
    # This is the initial read of the alarm handler's previous instance history data from disk
    self.HL = alarm_object.HISTORY_LIST(self.histfilename, self.delim, self.pdelim, self.timeWait)
    # This tacks "external" alarm information onto the end of the alarm handler data -
    # allows an online analyzer or standalone script to supplement alarms into this GUI
    if os.path.exists(self.externalFilename):
        # Special case for running in an external situation like Japan or camguin analysis
        self.externalFileArray = alarm_object.FILE_ARRAY(self.externalFilename, self.delim)
    else:
        self.externalFileArray = None
    # FIXME here is the instantiation into memory of alarm data
    self.OL = alarm_object.OBJECT_LIST(self.fileArray, self.cooldownLength)
    # Alarm indicator image, also serves as a sound checker and GUI refresh when clicked
    self.masterAlarmImage = tk.PhotoImage(file='ok.ppm').subsample(2)
    self.masterAlarmButton = tk.Label(self.win, image=self.masterAlarmImage,
                                      cursor="hand2", bg=u.lightgrey_color)
    # This is the TCP/IP connection to the alarm sound server
    self.alarmClient = bclient.sockClient(self.remoteName)
    # Loop that checks alarms
    self.alarmLoop = alarm_object.ALARM_LOOP(self)
    # Loop that controls GUI refreshes
    self.alarmLoopGUI = alarm_object.ALARM_LOOP_GUI(self)
    # Loop that controls sound making in the background
    self.alarmLoopMonitor = alarm_object.ALARM_LOOP_MONITOR(self)
    # Creates all GUI tabs
    self.tabs = self.create_widgets()
def main(*args, **kwargs):
    usage = """
        Usage: ./xes_comparator_script.py <.ini config filename>

        Config file options:
        %s

        NOTE: Do you have the needed environment variables?
        - XES : Path to .xes file with traces (for running PacH)
        - PNML : Path to .pnml file with Petri net model (for simplifying PROM models)
        - NXES : Path to .xes file with negative traces
        - PETRI : Path where simplified .pnml files should be moved to after script ends
        - STATS : Path where statistic files should be moved to after script ends
        IMPORTANT: NO PATH MUST END IN '/' (it is added automatically)
    """ % (config_options)
    if not check_argv(sys.argv, minimum=1, maximum=4):
        print usage
        ret = -1
    else:
        ret = 0
        try:
            config_file = sys.argv[1]
            if not config_file.endswith('.ini'):
                print config_file, ' does not end in .ini. It should...'
                raise Exception('Filename has wrong extension')
            if not isfile(config_file):
                raise Exception("No such file")
            if '--debug' in sys.argv:
                pdb.set_trace()
            for filename, arguments in parse_config(config_file):
                comparator = ComparatorXes(filename, **arguments)
                #comparator.comparator.check_hull(log_file=filename, event_dictionary=comparator.pach.event_dictionary)
                complexity = comparator.compare(log_file=filename, event_dictionary=comparator.pach.event_dictionary)
                comparator.generate_pnml()
                comparator.generate_outputs()
                if '--verbose' in sys.argv:
                    print complexity
            pnml_folder, out_folder = parse_config_output(config_file)
            pwd = os.getcwd()
            for basename in os.listdir(pwd):
                if basename.endswith('.pnml'):
                    pnml_file = os.path.join(pwd, basename)
                    if os.path.isfile(pnml_file):
                        shutil.copy2(pnml_file, pnml_folder)
                        os.remove(pnml_file)
                elif basename.endswith('.out'):
                    out_file = os.path.join(pwd, basename)
                    if os.path.isfile(out_file):
                        shutil.copy2(out_file, out_folder)
                        os.remove(out_file)
        except Exception, err:
            ret = 1
            if hasattr(err, 'message'):
                print 'Error: ', err.message
            else:
                print 'Error: ', err
            logger.error('Error: %s' % err, exc_info=True)
            raise
    return ret
def __init__(self, web_server_config=None):
    logging.basicConfig(level=logging.DEBUG,
                        format='%(asctime)s | %(levelname)-1s [%(filename)s:%(lineno)d] %(message)s',
                        datefmt='%Y-%m-%d:%H:%M:%S')
    self.logger = logging.getLogger(__name__)
    if web_server_config is not None:
        self.web_server_config = web_server_config
    else:
        self.web_server_config = utils.parse_config("config/web_server_config.json")
    self.web_server_ip = self.web_server_config["web_interface_server_ip"]
    self.web_interface_dir = self.web_server_config["web_interface_dir"]
def test_collector():
    host_machine_config = utils.parse_config("config/host_machine_config.json")
    test_config_download = utils.parse_config("config/test_config_all.json")["iperf_tcpdump_download"]
    test_config_upload = utils.parse_config("config/test_config_all.json")["iperf_tcpdump_upload"]
    role = host_machine_config[socket.gethostname()]["role"]
    if role == "server":
        data_collector = collector.TraceDataCollectionServer()
    if role == "client":
        data_collector = collector.TraceDataCollectionClient()
    data_collector.print_attribute()
    #download_result_log = data_collector.iperf_tcpdump_download(test_config_download)
    #upload_result_log = data_collector.iperf_tcp_dump_upload(test_config_upload)
    data_analyzer = analyzer.TraceDataAnalyzer()
    data_analyzer.draw_graph("log/iperf_tcpdump_result/CSL_4G_download_bbr_-1/")
    data_analyzer.post_file_to_server("log/iperf_tcpdump_result/CSL_4G_download_bbr_-1/")
def _load(self, account):
    self.account = account

    # Create home directory
    utils.make_dir('')
    self.configfile = utils.get_root_filename('config.json')

    # Create user directory
    userfolder = "%s.%s" % (account['username'], account['api'])
    utils.make_dir(userfolder)
    self.userconfigfile = utils.get_filename(userfolder, 'user.json')

    self.msg.info(self.name, 'Reading config files...')
    try:
        self.config = utils.parse_config(self.configfile, utils.config_defaults)
        self.userconfig = utils.parse_config(self.userconfigfile, utils.userconfig_defaults)
    except IOError:
        raise utils.EngineFatal("Couldn't open config file.")
def api_email(api_title, headers=None, html=None, fail=None):
    """
    Email sending in case an API is down.
    """
    msg = MIMEMultipart()
    attach_txt = txt = ''
    mail = parse_config(mails=True)
    if html:
        # attach html dump
        content = MIMEApplication(html)
        content.add_header('Content-Disposition', 'attachment',
                           filename=api_title.lower() + '_content.html')
        msg.attach(content)
    if headers:
        # attach header dump
        head = MIMEApplication(headers)
        head.add_header('Content-Disposition', 'attachment',
                        filename=api_title.lower() + '_headers.html')
        msg.attach(head)
    if fail:
        msg['Subject'] = "API headers test failed for %s \n" % api_title
        txt += "*** A gentle prod to remind you ***\n"
        txt += """
        %s API seems to be down.
        Fabricbot tried to crawl the api response but no valid content was retrieved.
        Also, invalid http headers were received from the server.
        -- The site monitoring automation""" % api_title
    else:
        msg['Subject'] = "API content test failed for %s \n" % api_title
        txt += "*** A gentle prod to remind you ***\n"
        txt += """
        %s API seems to be down.
        Fabricbot tried to crawl the api response but no valid content was retrieved.
        -- The site monitoring automation""" % api_title
    msg['From'] = 'fabricapi'
    msg['To'] = mail[1]
    # This is what you see if you don't have an email reader:
    msg.preamble = 'Multipart message.\n'
    # This is the textual part:
    part = MIMEText(txt)
    msg.attach(part)
    # Create an instance of the SMTP server
    smtp = SMTP('localhost')
    # Send the email
    smtp.sendmail(msg['From'], msg['To'], msg.as_string())
    smtp.quit()
def test_scheduler():
    host_machine_config = utils.parse_config("config/host_machine_config.json")
    role = host_machine_config[socket.gethostname()]["role"]
    if role == "server":
        my_scheduler = scheduler.TraceDataSchedulerServer()
    if role == "client":
        my_scheduler = scheduler.TraceDataSchedulerClient()
    my_scheduler.print_attribute()
    my_scheduler.scheduling(loop=True)
def download_iperf_wireshark(main_config=None):
    if main_config is None:
        main_config = utils.parse_config("config/config.json")
        main_config = main_config["download_iperf_wireshark"]
    exec_mode = main_config["mode"]
    selected_network = main_config["network"]
    selected_direction = main_config["direction"]
    selected_variant = main_config["variant"]
    server_ip = main_config["server_ip"]
    server_packet_sending_port = main_config["server_packet_sending_port"]
    server_iperf_port = main_config["iperf_port"]
    iperf_logging_interval = main_config["iperf_logging_interval"]
    server_address_port = (server_ip, server_packet_sending_port)
    pcap_result_path = os.path.join(main_config["pcap_path"], main_config["task_name"])
    pcap_result_subpath_variant = os.path.join(pcap_result_path, selected_variant)
    task_time = main_config["time_each_flow"]
    time_flow_interval = 5  # wait some time to keep stability
    logger.info("{}--> download_iperf_wireshark, Start~~, Mode: {}".format(current_script, exec_mode))
    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    if exec_mode == "scheduling":
        my_socket.retry_bind(server_socket, server_address_port)
        server_socket.listen(10)
        while True:
            client_socket, client_address = server_socket.accept()
            logger.debug("Receive from client {}".format(client_address))
            message = my_socket.doki_wait_receive_message(client_socket).replace("##DOKI##", "")
            if message == "download_iperf_start":
                client_socket.close()
                current_datetime = datetime.fromtimestamp(time.time())
                os.system("iperf3 -s -p {} -i {} 2> /dev/null &".format(server_iperf_port, iperf_logging_interval))
                time.sleep(task_time + 2 * time_flow_interval)
                os.system('killall iperf3 > /dev/null 2>&1')
            if message == "download_iperf_end":
                client_socket.close()
                logger.info("Server--> download_iperf_wireshark, Done~~")
                server_socket.close()
                exit()
    if exec_mode == "continue":
        os.system('killall iperf3 > /dev/null 2>&1')
        time.sleep(1)
        while True:
            logger.debug("{}_{}--> Start iperf server".format(current_script, inspect.currentframe().f_lineno))
            #os.system("iperf3 -s -p {} -i {} 2> /dev/null".format(server_iperf_port, iperf_logging_interval))
            os.system("iperf3 -s -p {} -i {}".format(server_iperf_port, iperf_logging_interval))
            logger.warning("Server iperf exit, resuming..")
            if selected_variant != "udp":
                os.system("sudo sysctl net.ipv4.tcp_congestion_control={}".format(selected_variant))
            time.sleep(5)
            os.system('killall iperf3 > /dev/null 2>&1')
            time.sleep(5)
def run_batch(self):
    config = utils.parse_config()
    bitbucket = config.get('bitbucket')
    operations = config.get('operations')
    project_operations = config.get('project-operations')
    self._exec_ops(project_operations, self.__project_ops__, **config)
    for repo in self.project.get_repos(**config):
        bitbucket['repo'] = repo.get('slug')
        self._exec_ops(operations, self.__ops__, **config)
def apis_check(sys_logger):
    """
    Check APIs.
    """
    # start counter
    bookie(apis_check)
    # get config values
    apis = parse_config(apis=True)
    for api in apis:
        ap = apitest(api['name'], api['url'], api['match'], api['status'], api['enabled'])
        ap.check(sys_logger)
def __init__(self, log, seed):
    self.pool_name = utils.generate_random_string(prefix="pool")
    self.wallet_name = utils.generate_random_string(prefix='wallet')
    self.pool_handle = self.wallet_handle = 0
    self.log = log
    self.seed = seed
    self.config = utils.parse_config()
    self.threads = list()
    self.passed_req = self.failed_req = 0
    self.start_time = self.finish_time = 0
    self.fastest_txn = self.lowest_txn = -1
def up(self):
    """
    Call the booter for the corresponding topology class.
    """
    if self.topo == 'fw_lb':
        config = utils.parse_config(self.config['FwLbTopo'])
        fw_config = utils.parse_fw_config(self.config['FWaaS'])
        FwLbTopo(opts=config, fw_opts=fw_config,
                 session=self.kwargs['session'],
                 token=self.kwargs['token'],
                 neutron_endpoint=self.kwargs['neutron_endpoint']).up()
def main():
    parser = argparse.ArgumentParser(description='For different background jobs')
    parser.add_argument('function', type=str, help='the job')
    parser.add_argument('--post', type=int, help='whether to post to the server')
    parser.add_argument('--display', type=int, help='whether to draw a graph')
    parser.add_argument('--config_path', type=str, help='path of config file')
    args = parser.parse_args()
    if args.function in ["download_iperf_wireshark", "upload_iperf_wireshark"]:
        if args.config_path:
            main_config = utils.parse_config(args.config_path)[args.function]
        else:
            main_config = utils.parse_config("config/config.json")[args.function]
        if args.post == 1:
            result_generate_iperf_wireshark(main_config, post_to_server=True)
        else:
            result_generate_iperf_wireshark(main_config, post_to_server=False)
        if args.display == 1:
            result_draw_iperf_wireshark(main_config)
def main(args):
    config = parse_config(args.config)
    out_dir = Path(config['training']['save_dir']) / config['training']['save_context']
    out_dir.mkdir(exist_ok=True, parents=True)
    config_out = out_dir / f"{config['training']['save_context']}_config.yaml"
    with config_out.open('w') as wf:
        yaml.dump(config, wf)
    print(f'Training config saved to {config_out}.')
    tb_logdir = out_dir / 'logdir'
    writer = SummaryWriter(log_dir=tb_logdir)
    model = build_model_from_config(config)
    dataloaders, classes = get_data_loaders_from_config(config)
    if config['datasets']['viz']:
        viz_to_tb(dataloaders['train'], writer, config['datasets']['classes']['num_classes'])
    losses = get_train_val_losses()
    optimizer = get_optim_from_config(model.parameters(), config)
    scheduler = get_scheduler_from_config(optimizer, config)
    model, best_acc, best_loss, best_epoch, total_epoch = train_model(
        model, dataloaders, losses, optimizer, scheduler, writer, config)
    weights_dir = out_dir / 'weights'
    weights_dir.mkdir(parents=True, exist_ok=True)
    save_path = weights_dir / (f"{config['training']['save_context']}_bestval"
                               f"_loss{best_loss:0.3f}_acc{best_acc:0.3f}_ep{best_epoch}of{total_epoch}.pth")
    torch.save(model.state_dict(), save_path)
    print(f'Best val weights saved to {save_path}')
    conf_mat, report, _, _, _ = test_model(model, dataloaders['test'], config, classes=classes)
    test_dir = out_dir / 'test'
    test_dir.mkdir(exist_ok=True, parents=True)
    test_out = test_dir / f"{config['training']['save_context']}_clfreport.log"
    with test_out.open('w') as wf:
        wf.write(report)
    sn_plot = sn.heatmap(conf_mat, annot=True, fmt='g', xticklabels=classes, yticklabels=classes)
    test_out_cm = test_dir / f"{config['training']['save_context']}_confmat.jpg"
    sn_plot.get_figure().savefig(test_out_cm)
def run(args: argparse.Namespace):
    dataset_path = args.target_dataset.name
    config = parse_config(args.config)
    classifier = get_classifier(config)
    gnb = train(classifier, dataset_path,
                print_confusion_matrix=args.confusion_matrix,
                **config['classifier_kwargs'])
    if args.predict:
        image_path = args.predict.name
        predict_character(config, gnb, image_path, max_workers=args.max_workers)
def main(*args, **kwargs):
    usage = """
        Usage: ./pnml_comparator_script.py <.ini config filename>
    """
    if not check_argv(sys.argv, minimum=1, maximum=4):
        print usage
        ret = -1
    else:
        ret = 0
        try:
            config_file = sys.argv[1]
            if not config_file.endswith('.ini'):
                print config_file, ' does not end in .ini. It should...'
                raise Exception('Filename has wrong extension')
            if not isfile(config_file):
                raise Exception("No such file")
            if '--debug' in sys.argv:
                pdb.set_trace()
            for filename, arguments in parse_config(config_file):
                comparator = ComparatorPnml(filename, **arguments)
                comparator.check_hull()
                complexity = comparator.compare()
                logger.info('%s complexity -> %s', filename, complexity)
                comparator.generate_pnml()
                comparator.generate_outputs()
                comparator.check_hull()
            pnml_folder, out_folder = parse_config_output(config_file)
            pwd = os.getcwd()
            for basename in os.listdir(pwd):
                if basename.endswith('.pnml'):
                    pnml_file = os.path.join(pwd, basename)
                    if os.path.isfile(pnml_file):
                        shutil.copy2(pnml_file, pnml_folder)
                        os.remove(pnml_file)
                elif basename.endswith('.out'):
                    out_file = os.path.join(pwd, basename)
                    if os.path.isfile(out_file):
                        shutil.copy2(out_file, out_folder)
                        os.remove(out_file)
        except Exception, err:
            ret = 1
            if hasattr(err, 'message'):
                print 'Error: ', err.message
            else:
                print 'Error: ', err
            raise err
    return ret
def init_conf(argv):
    """
    Populates CONF with key-value pairs from configuration file.
    """
    CONF.update(utils.parse_config(argv[1], 'ping'))
    if CONF['onion']:
        tor_proxy = CONF['tor_proxy'].split(":")
        CONF['tor_proxy'] = (tor_proxy[0], int(tor_proxy[1]))
    else:
        CONF['tor_proxy'] = None
    if not os.path.exists(CONF['crawl_dir']):
        os.makedirs(CONF['crawl_dir'])

    # Set to True for master process
    CONF['master'] = argv[2] == "master"
def main(*args, **kwargs):
    usage = """
        Usage: ./pach_script.py <.ini config filename>
    """
    if not check_argv(sys.argv, minimum=1, maximum=4):
        print usage
        ret = -1
    else:
        ret = 0
        try:
            config_file = sys.argv[1]
            if not config_file.endswith('.ini'):
                print config_file, ' does not end in .ini. It should...'
                raise Exception('Filename has wrong extension')
            if not isfile(config_file):
                raise Exception("No such file")
            if '--debug' in sys.argv:
                pdb.set_trace()
            for filename, arguments in parse_config(config_file):
                pach = PacH(filename, **arguments)
                complexity = pach.pach()
                pach.generate_pnml()
                if '--verbose' in sys.argv:
                    print complexity
            pnml_folder, out_folder = parse_config_output(config_file)
            pwd = os.getcwd()
            for basename in os.listdir(pwd):
                if basename.endswith('.pnml'):
                    pnml_file = os.path.join(pwd, basename)
                    if os.path.isfile(pnml_file):
                        shutil.copy2(pnml_file, pnml_folder)
                        os.remove(pnml_file)
                elif basename.endswith('.out'):
                    out_file = os.path.join(pwd, basename)
                    if os.path.isfile(out_file):
                        shutil.copy2(out_file, out_folder)
                        os.remove(out_file)
        except Exception, err:
            ret = 1
            if hasattr(err, 'message'):
                print 'Error: ', err.message
            else:
                print 'Error: ', err
            logger.error('Error: %s' % err, exc_info=True)
            raise err
    return ret
def main():
    utils.init_dir()
    parser = argparse.ArgumentParser(description='For different background jobs')
    parser.add_argument('function', type=str, help='the job')
    parser.add_argument('--config_path', type=str, help='path of config file')
    args = parser.parse_args()
    #udp_socket()
    main_config = None
    if args.config_path:
        main_config = utils.parse_config(args.config_path)
    if args.function == "upload_iperf_wireshark":
        upload_iperf_wireshark(main_config)
    if args.function == "download_iperf_wireshark":
        download_iperf_wireshark(main_config)
    if args.function == "download_socket":
        download_socket()
def main(argv):
    if len(argv) < 2 or not os.path.exists(argv[1]):
        print("Usage: resolve.py [config]")
        return 1

    # Initialize global conf
    CONF.update(utils.parse_config(argv[1], 'resolve'))

    # Initialize logger
    loglevel = logging.INFO
    if CONF['debug']:
        loglevel = logging.DEBUG
    logformat = ("%(asctime)s %(levelname)s (%(funcName)s) "
                 "%(message)s")
    logging.basicConfig(level=loglevel,
                        format=logformat,
                        filename=CONF['logfile'],
                        filemode='w')

    global REDIS_CONN
    REDIS_CONN = utils.new_redis_conn(db=CONF['db'])

    subscribe_key = 'snapshot:{}'.format(hexlify(CONF['magic_number']))
    publish_key = 'resolve:{}'.format(hexlify(CONF['magic_number']))

    pubsub = REDIS_CONN.pubsub()
    pubsub.subscribe(subscribe_key)
    while True:
        msg = pubsub.get_message()
        if msg is None:
            time.sleep(0.001)  # 1 ms artificial intrinsic latency.
            continue
        # 'snapshot' message is published by ping.py after establishing
        # connection with nodes from a new snapshot.
        if msg['channel'] == subscribe_key and msg['type'] == 'message':
            timestamp = int(msg['data'])
            logging.info("Timestamp: %d", timestamp)
            nodes = REDIS_CONN.smembers('opendata')
            logging.info("Nodes: %d", len(nodes))
            addresses = set([eval(node)[0] for node in nodes])
            resolve = Resolve(addresses=addresses)
            resolve.resolve_addresses()
            REDIS_CONN.publish(publish_key, timestamp)

    return 0
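# Illustrative trigger for the subscribe loop above: ping.py publishes the
# snapshot timestamp on the derived channel (magic number and timestamp are
# placeholders):
#   redis-cli PUBLISH "snapshot:<hexlified-magic-number>" 1558888888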
def result_analysis_udp_socket():
    # analyze result files and generate trace:
    main_config = utils.parse_config("config/config.json")["udp_socket"]
    #result_config = utils.parse_config(main_config["test_config_file"])
    file_list = os.listdir(main_config["result_path"])
    file_list.sort()
    assert len(file_list) != 0, "Empty analysis directory"
    df_main = pd.DataFrame()
    for file in file_list:
        if file.endswith(".txt"):
            input_path = os.path.join(main_config["result_path"], file)
            df_temp = pd.read_csv(input_path, names=["time", "Bandwidth"], header=None, sep="\t")
            df_main = pd.concat([df_main, df_temp], ignore_index=True)
    time_bin_size = 60
    _para_x_range = [1, -1]
    time_list = [int(x / time_bin_size) for x in df_main["time"].values]
    start_time = min(time_list)
    time_list = [x - start_time for x in time_list]
    Bandwidth_list = [round(x / 1000000, 3) for x in df_main["Bandwidth"].values]
    df_main = pd.DataFrame(data={"time": time_list, "Bandwidth": Bandwidth_list})
    df_main = df_main.groupby(["time"]).mean().reset_index()
    time_list = df_main["time"].values[_para_x_range[0]:_para_x_range[1]]
    Bandwidth_list = df_main["Bandwidth"].values[_para_x_range[0]:_para_x_range[1]]
    fig, axs = plt.subplots(nrows=1, ncols=1, **figure_config["single"])
    fig.suptitle('Cellular Capacity over Time')
    axs.plot(time_list, Bandwidth_list, label='Throughput', **plot_lines["normal"])
    axs.set_xlabel('Time, bin size = {}s'.format(time_bin_size))
    axs.set_ylabel('Throughput (Mbps)')
    axs.set_xlim(left=1, right=max(time_list))
    axs.set_ylim(bottom=0, top=max(Bandwidth_list) * 1.2)
    plt.show()
def run():
    from utils import parse_config
    config = parse_config('config.ini')
    if 'main' not in config:
        print("Missing main in config.")
        sys.exit(0)
    if 'slack' not in config:
        print("Missing slack in config.")
        sys.exit(0)
    slackbot = Bot(config)
    try:
        slackbot.start()
    except KeyboardInterrupt:
        slackbot.quit()
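# Illustrative config.ini for run() above. Only the 'main' and 'slack' section
# names come from the code; the keys shown are assumptions:
#   [main]
#   log_level = info
#   [slack]
#   token = xoxb-...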
def fix(failed, sys_logger):
    hostname = []
    htmlcode = []
    htmldata = []
    bookie(fix)
    failed_urls = ', '.join(failed.keys())
    insert_log(sys_logger, failed_urls + " - " + str(fix.count))
    values = failed.values()
    for name, code, html in values:
        hostname.append(name)
        htmlcode.append(code)
        htmldata.append(html)
    if fix.count <= 2:
        hosts = []
        hostconf = parse_config(hosts=True)
        for host in hostconf:
            for url, code in failed.iteritems():
                if host['url'] == url:
                    hosts.append(host['ip'])
        # define hosts
        env.hosts = hosts
        if fix.count == 1:
            #execute(apache_restart)
            print env.hosts, "execute(apache_restart)"
            site_email(hostname, failed.keys(), htmldata, htmlcode, status=True)
        elif fix.count == 2:
            #execute(clear_cache)
            print env.hosts, "execute(clear_cache)"
            site_email(hostname, failed.keys(), htmldata, htmlcode, status=True, rerun=True)
        time.sleep(5 * fix.count)
        web_monitor(sys_logger)
    elif fix.count > 2:
        site_email(hostname, failed.keys(), htmldata, htmlcode)
def publish():
    # parse the config file
    config = utils.parse_config("config.ini")

    # store all posts
    posts = []

    # path to our drafts
    path = os.path.join(os.path.dirname(os.path.abspath(__file__)), config['drafts'])

    # traverse the entire drafts dir
    for root, dir, files in os.walk(path):
        for file in files:
            with open(os.path.join(root, file)) as fh:
                post = parse_post(fh, config)
                if post:
                    make_post(post, config)
                    posts.append(post)
    make_index(posts, config)
app = Flask(__name__)
cors = CORS(app)


def get_token():
    cmd = app.config.get('token_command')
    p = subprocess.Popen([cmd], stdout=subprocess.PIPE)
    s = p.communicate()[0]
    return s.strip('\n')


def get_user():
    u = app.config.get('user')
    return u


@app.route('/token')
def token():
    k = request.args.get('key')
    if k != app.config.get('api_key'):
        abort(403)
    return jsonify({'token': get_token(), 'user': get_user()}), 200


if __name__ == '__main__':
    crt = os.path.join(os.path.dirname(__file__), 'ssl/server.crt')
    key = os.path.join(os.path.dirname(__file__), 'ssl/server.key')
    app.config.update(utils.parse_config())
    app.run(host='127.0.0.1', port=15000, ssl_context=(crt, key))
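# Example request against the /token route above (host, port, and query key
# come from the snippet; the API key value is a placeholder). -k skips
# verification of the self-signed certificate:
#   curl -k "https://127.0.0.1:15000/token?key=YOUR_API_KEY"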
def site_email(hostnames, failed_urls, content, status_code, status=None, rerun=None):
    msg = MIMEMultipart()
    attach_txt = txt = ''
    failed = []
    # merge the url and status_code lists into one list
    for x, y in zip(failed_urls, status_code):
        failed.append(x + " | response code: " + str(y))
    failed = ', \n\t'.join(failed)
    mail = parse_config(mails=True)
    if 200 in status_code:
        attach_txt = "Even though it is a valid response code, errors were detected in what was rendered. Please inspect the attached file."
    # prepare html and attach it to the mail message
    for (hostname, html) in zip(hostnames, content):
        part = MIMEApplication(html)
        part.add_header('Content-Disposition', 'attachment', filename=hostname.lower() + '.html')
        msg.attach(part)
    # send mail for the first two passes
    if status:
        if not rerun:
            msg['Subject'] = "NOTICE: The following web hosts seem to be down\n"
            txt += "*** A gentle prod to remind you ***\n"
            txt += """
            It has been detected that the following hosts are down. %s
            hostnames and response codes: \n: %s
            Fabricbot tried to clear the cache tables.
            If you do not receive any more mails, that means that the problem is gone.
            -- The site monitoring automation""" % (attach_txt, failed)
        else:
            msg['Subject'] = "NOTICE: The following web hosts seem to be down\n"
            txt += "*** A gentle prod to remind you ***\n"
            txt += """
            Again, it has been detected that the following hosts are down. %s
            hostnames and response codes: \n: %s
            Fabricbot tried to clear the APC cache.
            If you do not receive any more mails, that means that the problem is gone.
            -- The site monitoring automation""" % (attach_txt, failed)
    # send mail for the third pass:
    # the bot hasn't succeeded in fixing the problem
    else:
        msg['Subject'] = "WARNING: Apache server not responding!!!\n"
        txt += "*** A gentle prod to remind you ***\n"
        txt += """
        As you already know, it has been detected multiple times that the following hosts are down. %s
        hostnames and response codes: \n: %s
        Fabricbot tried to clear the drupal cache tables and the APC cache. That didn't work.
        Please log on to the servers, check the logs and fix the problem manually ASAP!!!
        -- The site monitoring automation""" % (attach_txt, failed)
    msg['From'] = 'fabricbot'
    msg['To'] = mail[1]
    # This is what you see if you don't have an email reader:
    msg.preamble = 'Multipart message.\n'
    # This is the textual part:
    part = MIMEText(txt)
    msg.attach(part)
    # Create an instance of the SMTP server
    smtp = SMTP('localhost')
    # Send the email
    smtp.sendmail(msg['From'], msg['To'], msg.as_string())
    smtp.quit()
        hist_output_path = hist_output_dir + '/' + data_name + '/' + feat + '.hist'
        write_script(output, "hadoop fs -cat %s | python draw_histogram.py %s" % (data_output_path, hist_output_path))
        write_script(output, 'echo "Done drawing feature %s"' % feat)
    write_script(output, 'echo "All Done"')
    output.close()

if __name__ == '__main__':
    if len(sys.argv) < 2:
        print "Usage: python [config name] [output]"
        sys.exit(-1)
    config_name = sys.argv[1]
    config_dic = parse_config(config_name)
    '''
    feat_idx_dic = dict()
    for i in range(len(config_dic['features'])):
        feat = config_dic['features'][i]
        feat_idx_dic[feat] = i
    '''
    check_list = config_dic['features']
    if config_dic['excludes']:
        check_list = list(set(config_dic['features']) - set(config_dic['excludes']))
    if config_dic['includes']:
        check_list = config_dic['includes']
def _load_userconfig(self):
    self.msg.debug(self.name, "Reading userconfig...")
    self.userconfig = utils.parse_config(self.userconfig_file, utils.userconfig_defaults)
from Page_Object import Experticity as EX
import os
import utils
import time
import uuid
from datetime import datetime

FILE_PATH = os.path.abspath(os.path.dirname(__file__))
CFG = utils.parse_config(FILE_PATH + "/config.ini")
STR_FMT = "%Y-%m-%d.%H-%M-%S"


class Test_Signup(EX):
    def test_signup(self):
        browser = CFG.get("signup", "browser")
        url = CFG.get("signup", "url")
        guid = uuid.uuid4()
        date = datetime.now().strftime(STR_FMT)
        email = "qa.{browser}.{date}@experticity.com".format(browser=browser, date=date)
        first_name = CFG.get("signup", "first_name")
        last_name = CFG.get("signup", "last_name")
        username = "******".format(guid=guid)
        password = CFG.get("signup", "password")
        position = CFG.get("signup", "position")
        hiredate = CFG.get("signup", "hire_date")
        referral = CFG.get("signup", "referral")
        store = CFG.get("signup", "store")
        zip = CFG.get("signup", "zip")
#!/usr/bin/python
# Monitor the state of our features (Brunet, IPOP, and Condor)
from SimpleXMLRPCServer import SimpleXMLRPCServer
import ip_monitor, time, thread, Queue, os, sys, utils

ipop = utils.parse_config("/etc/ipop.vpn.config")
ga = utils.parse_config("/etc/grid_appliance.config")


class monitor:
    """Queue commands"""
    IP = 0

    """States"""
    WAIT_ON_IPOP = 0
    CONFIGURING_CONDOR = 1
    RUNNING = 2
    RESTARTING_IPOP = 3

    """Timeouts"""
    MIN_TIMEOUT = 30
    MAX_TIMEOUT = 300

    def __init__(self):
        self.queue = Queue.Queue()
        self.print_timeout = False
        self.ipmon = ip_monitor.ip_monitor(self.ip_callback)
        self.ipmon.start_thread()
        self.ips = {}
import os
import sys
import time

from utils import can_read, is_build_running, parse_config

system_settings = '/etc/fjj/fjjrc'
home_settings_dir = os.path.expanduser(os.path.join('~', '.config', 'fjj'))
home_settings = os.path.join(home_settings_dir, 'fjjrc')
pwd_settings = os.path.join(os.getcwd(), '.fjjrc')

settings = {}
if can_read(system_settings):
    settings.update(parse_config(system_settings))
if can_read(home_settings):
    settings.update(parse_config(home_settings))
if can_read(pwd_settings):
    settings.update(parse_config(pwd_settings))

if os.environ.get('FJJ_USERNAME'):
    settings['username'] = os.environ.get('FJJ_USERNAME')
if os.environ.get('FJJ_PASSWORD'):
    settings['password'] = os.environ.get('FJJ_PASSWORD')
if os.environ.get('FJJ_JENKINS_URL'):
    settings['jenkins_url'] = os.environ.get('FJJ_JENKINS_URL')
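# Precedence implied above: system file < home file < per-directory .fjjrc <
# FJJ_* environment variables (later updates win). A hypothetical sanity check:
missing = [key for key in ('username', 'password', 'jenkins_url') if key not in settings]
if missing:
    sys.exit('fjj: missing settings: %s' % ', '.join(missing))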