def main(args):
    """CLI entry point: parse the server options and launch the CDN HTTP server.

    :param args: list of command-line argument strings (e.g. sys.argv[1:]).
    """
    parser = Parser(prog=utils.abs_path('./http_server.py'))
    parser.add_argument("-port", default=gv.cdn_port(),
                        help="port will run: default 8000")
    parser.add_argument("-address", default='',
                        help="address bind, default any")
    parser.add_argument("-path", default=gv.cdn_path(),
                        help="path will run http, default :" + utils.abs_path(gv.cdn_path()))
    arguments = parser.parse_args(args)
    run(arguments.port, arguments.address, arguments.path)
def gen_cutflow_yml(ntp1, ntp2, outyml1, outyml2, mode):
    """Generate the run1 and run2 cutflow YAML files from the given ntuple(s)."""
    # Accept either a single ntuple path or a list of them.
    ntp1_list = ntp1 if isinstance(ntp1, list) else [ntp1]
    ntp2_list = ntp2 if isinstance(ntp2, list) else [ntp2]
    run_cmd('cutflow_output_yml_gen.py {} -s -o {} -m run1-{}'.format(
        ' '.join(abs_path(n) for n in ntp1_list), outyml1, mode))
    run_cmd('cutflow_output_yml_gen.py {} -s -o {} -m run2-{}'.format(
        ' '.join(abs_path(n) for n in ntp2_list), outyml2, mode))
def workflow_misid(
        input_ntp, output_ntp='misid.root',
        misid_aux_ntp='../run2-rdx/reweight/misid/histos/dif.root',
        misid_config='../run2-rdx/reweight/misid/run2-rdx.yml',
        **kwargs):
    """Apply the misID weighting step to input_ntp, producing output_ntp.

    Extra keyword arguments are forwarded to workflow_cached_ntuple.
    """
    aux_ntp = abs_path(misid_aux_ntp)
    config = abs_path(misid_config)
    # The year is derived from the input ntuple's name/path.
    cmd = 'ApplyMisIDWeight -a -i {} -o {} -x {} -c {} -Y {}'.format(
        input_ntp, output_ntp, aux_ntp, config, find_year(input_ntp))
    return workflow_cached_ntuple(
        cmd, input_ntp, output_ntp, '--aux_misid', **kwargs)
def run(port=8000, address='', path="./"):
    """Serve `path` over HTTP on (address, port), blocking forever.

    Dispatches between the Python 2 (SimpleHTTPServer/SocketServer) and
    Python 3 (http.server) implementations based on `num_version`.

    :param port: TCP port to listen on
    :param address: bind address ('' = all interfaces)
    :param path: directory to serve (process chdirs into it)
    """
    utils.register_exit()
    # Fix: this is normal startup information, not an error condition.
    L.info("p %s a %s p %s", port, address, path)
    # The debug/chdir steps are identical for both branches; hoisted.
    L.debug(utils.abs_path(path))
    os.chdir(path)
    if num_version == 2:
        handler = SimpleHTTPServer.SimpleHTTPRequestHandler
        httpd = SocketServer.TCPServer((address, port), handler)
        httpd.serve_forever()
    else:
        http.server.test(HandlerClass=SimpleHTTPRequestHandler,
                         port=port, bind=address)
def flip(read_filename, verbose=True):
    """Run the full flipping pipeline on an ncsmc output file.

    read_filename: string, phase_shift / eigenphase_shift ncsmc output file path
    verbose: boolean, whether or not to print messages before/after flipping

    Returns the filename of the flipped output file.
    """
    if verbose:
        print("Flipping...\r", end="")
    source = utils.abs_path(read_filename)
    # Split the original file into textual header lines and numeric data lines.
    text_lines, number_lines = sanitize(source)
    sections = separate_into_sections(number_lines)
    # (apparently the column issue has been solved, no need to flip cols)
    sections = flip_all_sections(flip_columns(sections))
    # "Start from zero" = make sections start within -180 --> 180.
    sections = start_from_zero(sections)
    new_filename = write_data(sections, text_lines, source)
    if verbose:
        print("Your data has been flipped! Output:", new_filename)
    return new_filename
def __init__(self, parent, size, plot, msg):
    """Display the device photograph on a Tk canvas and place a marker dot
    on every terminal; clicks are dispatched to mouse_click."""
    self.parent = parent
    self.plot2d = plot
    self.msgwin = msg
    self.data = Data()
    # Images are kept along with the python code.
    im = Image.open(abs_path(photograph)).resize(size)
    self.size[0] = float(im.size[0])
    self.size[1] = float(im.size[1])
    self.image = ImageTk.PhotoImage(im)
    self.canvas = Canvas(parent, width=im.size[0], height=im.size[1])
    self.canvas.create_image(0, 0, image=self.image, anchor=NW)
    self.adblock = self.canvas.create_rectangle(self.dotxy(self.adctl), fill='white')
    # One black dot per terminal: first the grouped terminals, then the
    # single ones, in the same order as before.
    for group in (self.douts, self.dins, self.adcs, self.ls_ins):
        for terminal in group:
            terminal[2] = self.canvas.create_oval(
                self.dotxy(terminal), outline="", fill='black')
    for single in (self.led, self.pwg, self.dac, self.cntr, self.cmp):
        single[2] = self.canvas.create_oval(
            self.dotxy(single), outline="", fill='black')
    self.canvas.bind("<ButtonRelease-1>", self.mouse_click)
    self.canvas.pack()
def _show_diff(self):
    """View (generating it first if needed) a colored unified diff between
    two cached AUR snapshots in `less`."""
    # NOTE(review): utils.abs_path appears to return a single path string,
    # so the zip() below pairs the two strings character by character —
    # this looks like it was meant to pair two *file lists*; confirm intent.
    first = utils.abs_path(
        '/tmp/bezaur/aur-4369b2d562ca5a526c9e5d96df5949cb51f9cd6f/')
    second = utils.abs_path(
        '/tmp/bezaur/aur-f380837df5229c7196d7d9805b06795033b1f5cf')
    diff_file = '/tmp/bezaur/{}{}diffreview.txt'.format(
        self.name, self.version)
    # Try to view an existing diff file first; if `less` fails, build the
    # diff and view it again.
    if subprocess.call(['less', diff_file]) != 0:
        with open(diff_file, 'w+') as out:
            for file1, file2 in zip(first, second):
                subprocess.call(
                    ['diff', file1, file2, '--color=always', '--unified'],
                    stdout=out)
                subprocess.call(['echo', '\n'], stdout=out)
        subprocess.call(['less', diff_file])
def get_years_logs(from_year, to_year):
    """Return {year: log} for every year in [from_year, to_year] whose
    log file (logs/log-<year>.json) exists on disk."""
    return {
        year: read_log_only(year=year)
        for year in range(from_year, to_year + 1)
        if os.path.isfile(abs_path('logs/log-{}.json'.format(year)))
    }
def __init__(self, dosage_info_path, field_drug, field_dosage, max_ratio=0.85):
    """Load dosage-form metadata and remember the field names used for
    drug / dosage lookups.

    dosage_info_path: YAML file providing a 'list_dosage_forms' entry
    max_ratio: matching threshold stored for later use by callers
    """
    self.max_ratio = max_ratio
    self.field_drug = field_drug
    self.field_dosage = field_dosage
    dosage_infos = load_yaml(abs_path(dosage_info_path))
    # Longest-first ordering so the most specific dosage form matches first.
    forms = dosage_infos.get('list_dosage_forms', [])
    self.value_dosage_forms = sorted(forms, key=len, reverse=True)
def push_result(self, res, id=None):
    """Hand off a result: pickle it to netinfo/<id>.pickle when storing is
    enabled, otherwise enqueue it on the result queue."""
    if not self.store:
        self.res_queue.put({'uuid': id, 'result': res})
        return
    import os
    from utils import abs_path, save
    target = abs_path(os.path.join("netinfo", id + ".pickle"))
    # io_lock guards concurrent writes to the pickle store.
    with self.io_lock:
        save(res, target)
def save(json_manifest, json_assets, dest_path, dest_project_manifest):
    """Write the version manifest, then attach the asset table and write the
    project manifest both under dest_path and dest_project_manifest."""
    L.debug("save-ing ...")
    L.debug(json.dumps(json_manifest, indent=4))
    version_path = abs_path(join_path(dest_path, version_manifest_name))
    L.debug("%s", version_path)
    inout.write_json(version_path, json_manifest)
    # The project manifest is the version manifest plus the asset table.
    json_manifest[assets] = json_assets
    project_path = abs_path(join_path(dest_path, project_manifest_name))
    inout.write_json(project_path, json_manifest)
    L.debug("%s", project_path)
    project_path = abs_path(
        join_path(dest_project_manifest, project_manifest_name))
    inout.write_json(project_path, json_manifest)
    L.debug("project.manifest => %s", project_path)
    L.debug("save success !!!")
def __init__(self, num_classes, weight_path, image_size, cuda):
    """Load a MobileNetV2 card-rotation classifier in eval mode.

    cuda: when truthy, move the model to the GPU; weights are always
    loaded onto the CPU first.
    """
    super(CardRotator, self).__init__()
    self.image_size = image_size
    self.device = torch.device('cuda' if cuda else 'cpu')
    model = MobileNetV2(num_classes)
    state = torch.load(utils.abs_path(weight_path), map_location='cpu')
    model.load_state_dict(state)
    model.to(self.device)
    model.eval()
    self.model = model
def workflow_single_ntuple(input_ntp, input_yml, output_suffix, aux_workflows,
                           cpp_template='../postprocess/cpp_templates/rdx.cpp',
                           **kwargs):
    """Run babymaker on one ntuple, compile the generated C++ and execute
    it with --<output_suffix>.

    aux_workflows: callables producing friend ntuples fed to babymaker -f.
    """
    input_ntp = ensure_file(input_ntp)
    print('{}Working on {}...{}'.format(TC.GREEN, input_ntp, TC.END))
    cpp_template = abs_path(cpp_template)
    cmd_tmpl = 'babymaker -i {} -o baby.cpp -n {} -t {}'
    aux_ntuples = [workflow(input_ntp, **kwargs) for workflow in aux_workflows]
    if aux_ntuples:
        cmd_tmpl += ' -f ' + ' '.join(aux_ntuples)
    bm_cmd = workflow_bm_cli(cmd_tmpl, **kwargs).format(
        abs_path(input_yml), input_ntp, cpp_template)
    run_cmd(bm_cmd, **kwargs)
    workflow_compile_cpp('baby.cpp', **kwargs)
    run_cmd('./baby.exe --{}'.format(output_suffix), **kwargs)
def execute_func(key, args):
    """Dispatch a console command `key` (with its argument list) to the
    matching tool; keys not listed in config.key are ignored.

    :param key: command name (case-insensitive, surrounding spaces ignored)
    :param args: remaining argument strings forwarded to the command
    """
    key = key.strip().lower()
    if key not in config.key:
        return
    if key == 'info':
        info.main(args)
    elif key == 'gen':
        gen.main(args)
    elif key == 'cdn':
        update_manifest.main(args)
    elif key in ('update-dev', 'update-cdn-url'):
        # Both commands refresh the CDN package host the same way
        # (the two original branches were identical).
        gv.cdn_set_package_url(network.update_host(gv.cdn_package_url()))
        gv.save()
    elif key == 'android-gen':
        android_build.main(args)
    elif key == 'cdn-run':
        print(gv.ROOT_DIR)
        print(gv.cdn_package_url())
        http_server.run(gv.cdn_port(), "", utils.abs_path(gv.cdn_path()))
    elif key == 'jslist':
        cmd = 'jslist -f "{0}"'.format(
            utils.abs_path(
                utils.join_path(gv.client_path(), "./project.json")))
        print(cmd)
        os.system(cmd)
    elif key == 'run':
        cmd = 'cocos run --proj-dir="{0}" -p web -m debug'.format(
            utils.abs_path(gv.client_path()))
        print(cmd)
        os.system(cmd)
    elif key in ('quit', 'exit', 'q'):
        L.info(">>Quit!")
        sys.exit(0)
def save(json_content, path):
    """Serialize json_content into the save-file template and write it to
    `path` (resolved to an absolute path first)."""
    path = abs_path(path)
    # Fill the @content placeholder of the template with the JSON payload.
    content = pattern_save.replace("@content", json_stringify(json_content))
    write(path, content)
    L.debug("success => save to file:" + path)
def __init__(self):
    """Set up the offer-classifier project and hook up result storage."""
    self.project = Project(
        "offer_classifier",
        # Register the MultinomialNB model with the project.
        prediction_functions=[mnb_clf])
    # Writer that persists the prediction results.
    self.writer = StoreResult(
        abs_path("databases/offer_classification.db"))
    # Register the storage step as a Hyperplan post-processing hook
    # (see the official documentation).
    self.project.register_post_hook(self.writer)
def get_active_window_names():
    """Return the active window's names as reported by get_window.sh,
    or ['other'] when the script output is not the expected two tokens.

    The helper script is expected to print two comma-separated, quoted
    fields; the surrounding quotes are stripped from each field.
    """
    abs_filepath = abs_path('get_window.sh')
    win_id = subprocess.getoutput(abs_filepath).strip()
    if len(win_id.split()) != 2:
        return ['other']
    # Idiomatic rewrite of the index-based strip loop.
    return [name.strip('"') for name in win_id.split(', ')]
def get_all_app_names():
    """Return the distinct application names from app_names.json, in
    first-seen order, with 'other' appended."""
    with open(abs_path('app_names.json')) as f:
        app_names_dict = json.load(f)
    # dict.fromkeys deduplicates while preserving first-seen order,
    # matching the original manual membership loop.
    app_names_list = list(dict.fromkeys(app_names_dict.values()))
    app_names_list.append('other')
    return app_names_list
def __init__(self, parent, size, plot, msg):
    """Display the device photograph scaled to `size` on a Tk canvas."""
    self.parent = parent
    self.plot2d = plot
    self.msgwin = msg
    photo = Image.open(abs_path(photograph)).resize(size)
    # NOTE(review): the stored size uses the *requested* size, not the
    # resized image's actual size — confirm this is intended.
    self.size[0] = float(size[0])
    self.size[1] = float(size[1])
    self.image = ImageTk.PhotoImage(photo)
    self.canvas = Canvas(parent, width=size[0], height=size[1])
    self.canvas.create_image(0, 0, image=self.image, anchor=NW)
    self.data = Data()
def calc_mean_var(cfg):
    """Compute normalization statistics (mean/std) over the training tiles
    and write them to mean_std.txt.

    :param cfg: configuration mapping with dataset directories and options.
    """
    dataset_manager = BIC_Dataset(train_dir=abs_path(cfg['TRAIN_IMG_DIR']),
                                  test_dir=abs_path(cfg['TEST_IMG_DIR']),
                                  train_save_dir=abs_path(cfg['TRAIN_TILE_DIR']),
                                  test_save_dir=abs_path(cfg['TEST_TILE_DIR']),
                                  label_2_id=cfg['PURE_LABELS'],
                                  tile_size=cfg['TILE_SIZE'],
                                  stride=cfg['STRIDE'],
                                  dev_per=cfg['DEV_PER'],
                                  order=cfg['ORDER'],
                                  randomize=cfg['RANDOMIZE_TRAIN'],
                                  transform=transform,
                                  redo_preprocessing=cfg['REDO_PREPROCESSING'])
    mu, std = img_norm_calc(dataset_manager.train)
    with open("mean_std.txt", 'w') as f:
        f.write("mu=\n")
        # Fix: file.write requires str; mu/std are presumably
        # arrays/tensors from img_norm_calc.
        f.write(str(mu))
        f.write("std=\n")
        f.write(str(std))
def single_run(cfg):
    """Load a saved model and evaluate it once over the full dataset
    described by cfg, writing per-sample results to the result file."""
    inp_dir = cfg['INPUT_DIR']
    res_dir = cfg['RES_DIR']
    meta_name = cfg['META_NAME']

    print("Prepping Dataset ...")
    # Standard ImageNet normalization constants.
    transform = Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])])
    db_man = TUPACDataset(image_dir=abs_path(inp_dir, meta_name, cfg['IMG_DIR']),
                          label_dir=abs_path(inp_dir, meta_name, cfg['LABEL_DIR']),
                          label_2_id=cfg['PURE_LABELS'],
                          tile_size=cfg['TILE_SIZE'],
                          split=cfg['SPLIT'],
                          mu=cfg['MEAN'],
                          std=cfg['STD'],
                          randomize=cfg['RANDOMIZE_TRAIN'],
                          transform=transform,
                          pre_visualize=cfg['PRE_VISUALIZE'])
    test_loader = DataLoader(dataset=db_man.full,
                             batch_size=cfg['BATCH_SIZE'],
                             num_workers=cfg['NUM_WORKERS'],
                             collate_fn=db_man.batchify)

    print("Prepping Model ...")
    the_model = torch.load(cfg['MODEL_SAVE_FILE'])

    print("running test")
    result_file = abs_path(res_dir, meta_name, cfg['TEST_RES_FILE'])
    test_eval, pred_list, true_list = test_a_epoch(name="test",
                                                   data=test_loader,
                                                   model=the_model,
                                                   result_file=result_file,
                                                   label2id=cfg['PURE_LABELS'])
    test_eval.print_results()
def gen_folders(json_result, path, parent):
    """Recursively walk `path`, accumulating file entries into json_result;
    entry keys are built relative to `parent`. Returns the updated mapping."""
    path = abs_path(path)
    for name in os.listdir(path):
        full_path = join_path(path, name)
        if os.path.isdir(full_path):
            # Recurse into subdirectories, extending the relative parent.
            json_result = gen_folders(json_result, full_path,
                                      join_path(parent, name))
        elif os.path.isfile(full_path):
            json_result = gen_files(json_result, parent, name, full_path)
    return json_result
def main(args):
    """CLI entry point: generate the CDN manifests, optionally overriding
    the stored defaults with command-line flags, then bump the version when
    auto-increment is enabled."""
    # Load defaults from the global settings.
    num_version = gv.cdn_version()
    client_path = gv.client_path()
    package_url = gv.cdn_package_url()
    cdn_deploy_path = gv.cdn_path()
    dst_project_manifest = gv.client_path()
    folder_will_gen = gv.cdn_manifest_folder_gen()
    if args:
        parser = Parser(prog=utils.abs_path('./update_manifest.py'))
        # Each option simply overrides the corresponding default above.
        for flags, default in (
                (("-ver", "--version"), num_version),
                (("-cli", "--client_path"), client_path),
                (("-pak", "--package_url"), package_url),
                (("-cdn", "--cdn_path"), cdn_deploy_path),
                (("-dst", "--dst_project_manifest"), dst_project_manifest),
                (("-fol", "--folder_gen"), folder_will_gen)):
            parser.add_argument(*flags, default=default,
                                help="version manifest")
        arguments = parser.parse_args(args)
        num_version = arguments.version
        client_path = arguments.client_path
        package_url = arguments.package_url
        cdn_deploy_path = arguments.cdn_path
        dst_project_manifest = arguments.dst_project_manifest
        folder_will_gen = arguments.folder_gen
    gen_manifest(num_version, client_path, package_url, cdn_deploy_path,
                 dst_project_manifest, folder_will_gen)
    if gv.cdn_auto_increment():
        # Bump the last numeric component, e.g. 1.0.3 -> 1.0.4.
        parts = num_version.split(".")
        parts[-1] = str(int(parts[-1]) + 1)
        gv.cdn_set_version('.'.join(parts))
        gv.save()
def get_application_name(window_names):
    """Map the first recognizable window name to its application name via
    app_names.json; names not in the mapping yield 'other'. Returns ''
    when window_names is empty."""
    with open(abs_path('app_names.json')) as f:
        name_dict = json.load(f)
    app_name = ""
    for name in window_names:
        key = name.lower()
        if key in name_dict:
            app_name = name_dict[key]
            break
        app_name = "other"
    return app_name
def fetchMails(self):
    """Return every stored mail offer as a list of field lists."""
    reader = StoreMails(abs_path('databases/mail_offers.db'))
    # Materialize each mail record into a plain list of its fields.
    return [list(mail) for mail in reader.fetch_mails()]
def __init__(self, arch_config, warper_config, pred_score_threshold,
             nms_iou_threshold, card_area_threshold, weight_path,
             image_size, cuda):
    """Build the card-detection model plus its warper, load the weights and
    switch to eval mode on the selected device.

    cuda: when truthy, run inference on the GPU; weights are loaded onto
    the CPU first.
    """
    super(CardExtractor, self).__init__()
    self.image_size = image_size
    self.nms_iou_threshold = nms_iou_threshold
    self.card_area_threshold = card_area_threshold
    self.pred_score_threshold = pred_score_threshold
    self.device = torch.device('cuda' if cuda else 'cpu')
    self.card_warper = utils.create_instance(warper_config)
    model = utils.create_instance(arch_config)
    model.load_state_dict(
        torch.load(utils.abs_path(weight_path), map_location='cpu'))
    model.to(self.device)
    model.eval()
    self.model = model
def gen_hash(args):
    """Generate the Android key hash for the mode given via -m/--mode
    (release or debug); any other value logs an error and returns None."""
    mode = ""
    if args:
        parser = Parser(prog=utils.abs_path('./android_build.py'))
        parser.add_argument("-m", "--mode", default=mode,
                            help="gen_hash() failed! |help: -m release|debug")
        mode = parser.parse_args(args).mode
    if mode == "debug":
        return _gen_hash_debug()
    if mode == "release":
        return _gen_hash_release()
    L.error("gen_hash() failed! |help: -m release|debug")
def storeMails(self, imap, start=False):
    """Fetch mails from `imap` and persist them to the offers database.

    :param imap: IMAP connection/handle forwarded to getMails
    :param start: forwarded to getMails (presumably "fetch from the
        beginning" — confirm against getMails)
    :return: True when at least one mail was stored, False otherwise
    """
    mail_list = self.getMails(imap, start)
    if not mail_list:
        return False
    writer = StoreMails(abs_path('databases/mail_offers.db'))
    # Idiomatic iteration over records instead of range(len(...)).
    for mail in mail_list:
        writer.write_result(mail[0], mail[1], mail[2],
                            mail[3], mail[4], mail[5])
    print('DB updated\n')
    return True
def main(args):
    """Entry point for android-gen: consume the -g/--gen flag pair and run
    the keystore or hash generator with the remaining args."""
    gen = ""
    if len(args) > 1:
        parser = Parser(prog=utils.abs_path('./android_build.py'))
        parser.add_argument("-g", "--gen", default=gen,
                            help="gen() failed! |help: -g keystore|hash")
        # Consume the first two tokens (flag + value); the rest is passed
        # through to the chosen generator.
        head = [args.pop(0), args.pop(0)]
        gen = parser.parse_args(head).gen
    if gen == "keystore":
        gen_key(args)
    elif gen == "hash":
        gen_hash(args)
    else:
        L.error("failed! |help: android-gen -g keystore|hash")
def gen_manifest(num_version, client_path, package_url, dest_path,
                 dest_project_manifest, folder_will_gen):
    """Build the CDN manifest for `num_version`: copy the listed folders
    plus project.json/main.js from client_path into dest_path, then write
    the version/project manifest files.

    :param num_version: manifest version string
    :param client_path: source client directory
    :param package_url: base URL the manifest entries point at
    :param dest_path: CDN deploy directory
    :param dest_project_manifest: extra directory receiving project.manifest
    :param folder_will_gen: client subfolders to include in the manifest
    """
    json_manifest = {
        packageUrl: package_url,
        remoteManifestUrl: join_path(package_url, project_manifest_name),
        remoteVersionUrl: join_path(package_url, version_manifest_name),
        version: num_version
    }
    # Fix: this is diagnostic output, not an error condition.
    L.debug(utils.abs_path(dest_path))
    json_assets = {}
    for folder in folder_will_gen:
        json_assets = gen_folders(json_assets,
                                  join_path(client_path, folder), folder)
        copy_tree(join_path(client_path, folder),
                  join_path(dest_path, folder))
    # project.json and main.js live at the client root and are copied as-is
    # (the two original copy blocks were identical except for the name).
    for file_name in ("project.json", "main.js"):
        json_assets = gen_files(json_assets, '', file_name,
                                join_path(client_path, file_name))
        copy_file(join_path(client_path, file_name),
                  join_path(dest_path, file_name))
    save(json_manifest, json_assets, dest_path, dest_project_manifest)
def test_challenge31(self):
    """Cryptopals challenge 31: recover an HMAC-SHA1 signature one byte at
    a time via an HTTP timing side channel against a local server."""
    # This test relies on having the web server running on port 8080:
    # python leak.py
    # HMAC-sha1 produces a 160 bit (20 byte) hash
    expected_hmac_length = 20
    path = utils.abs_path('10.txt')
    # Start from an all-zero guess and refine one byte per outer iteration.
    signature = '\x00' * expected_hmac_length
    for i in range(expected_hmac_length):
        print '{}: {}'.format(i, convert.bytes_to_hex(signature))
        outlier_score = 0.0
        # Re-measure the current byte until its timing stands out clearly
        # (score >= 2.0) from the measurement noise.
        while outlier_score < 2.0:
            all_candidates = (chr(v) for v in range(256))
            timings = generate_timings(path, signature, i, all_candidates)
            print 'timings: {}'.format([(convert.bytes_to_hex(t), v) for t, v in timings[:5]])
            # The byte that produces the slowest response from the server
            # is likely the correct one. All of the incorrect bytes should
            # have the same response time (N). The correct byte should be
            # a little bit slower (N + x).
            #
            # However, there is some variance in the server response times
            # so check again if the calculated value of x is not
            # significantly larger than the next difference between two
            # incorrect bytes. If so, try again.
            most_likely, elapsed_time = timings[0]
            _, second_slowest_elapsed_time = timings[1]
            _, third_slowest_elapsed_time = timings[2]
            outlier_score = (
                (elapsed_time - second_slowest_elapsed_time) /
                (second_slowest_elapsed_time - third_slowest_elapsed_time))
            print 'outlier_score: {}'.format(outlier_score)
        # Lock in the byte whose timing was the clear outlier.
        signature = signature[:i] + most_likely + signature[i + 1:]
    # The fully recovered signature must be accepted by the server.
    response, _ = hmac_test(path, signature)
    self.assertEquals(200, response.status_code)
def predict(text) -> dict:
    """Heuristically classify `text` into job-offer categories by counting
    category keywords and passing the counts through a softmax.

    :param text: free-form offer text
    :return: dict produced by pandas ``to_dict`` over the label and
        probability columns
    """
    # Fix: raw string avoids the invalid-escape-sequence warning for \w.
    words = re.findall(r"[\w']+", text)
    # Count times of appearance of each word.
    frequency = dict(collections.Counter(words))
    dataframe = pd.DataFrame(frequency.items(), columns=["word", "n_appear"])

    # Cleanup: drop French stopwords.
    stopwords = set()
    stopwords_path = abs_path("algorithms/tf_idf_heuristic/stopWordsFR.txt")
    with open(stopwords_path) as file:
        for line in file:
            # Fix: rstrip('\n') instead of [:-1] so a final line without a
            # trailing newline is not silently truncated.
            stopwords.add(line.rstrip('\n'))
    dataframe['word'] = dataframe['word'].astype(str)
    filtered_df = dataframe[~dataframe["word"].isin(stopwords)]

    # Category assignment: keyword -> category index.
    keywords = {
        "développement": 0, "web": 0, "développeur": 0, "front": 0,
        "backend": 0, "c++": 0,
        "machine": 1, "learning": 1, "data": 1,
        "image": 2, "discretisation": 2, "points": 2, "objet": 2,
        "grille": 2, "3D": 2
    }
    categories = ["Developpement", "MachineLearning", "TraitementImage"]
    keyword_df = pd.DataFrame(keywords.items(), columns=["word", "category"])
    categories_df = pd.DataFrame(categories, columns=["label"])
    in_text_kw_df = keyword_df.merge(filtered_df, left_on='word',
                                     right_on='word')
    cat_count = in_text_kw_df.groupby(['category']).sum()
    categories_df['n_appear'] = cat_count['n_appear']

    # Softmax over per-category keyword counts; uniform distribution when
    # no keyword appeared at all.
    categories_df['exponential'] = np.exp(categories_df['n_appear'])
    exp_sum = np.sum(categories_df['exponential'])
    if exp_sum != 0:
        categories_df['probability'] = categories_df['exponential'] / exp_sum
    else:
        categories_df['probability'] = 1 / categories_df.shape[0]
    result_probability = categories_df[['label', 'probability']]
    result_probability = result_probability.fillna(0)
    labels_dict = result_probability.to_dict()
    return labels_dict