def _stop_wpa_supplicant(interface):
    """Stops wpa_supplicant from running on the given interface.

    Also removes the pid file and sets the interface down.

    Args:
        interface: The interface on which to stop wpa_supplicant.

    Returns:
        Whether wpa_supplicant was successfully stopped.
    """
    if not _is_wpa_supplicant_running(interface):
        utils.log('wpa_supplicant already not running.')
        return True

    pid_filename = utils.get_filename(
        'wpa_supplicant', utils.FILENAME_KIND.pid, interface, tmp=True)
    config_filename = utils.get_filename(
        'wpa_supplicant', utils.FILENAME_KIND.config, interface, tmp=True)

    if not utils.kill_pid('wpa_supplicant .* %s$' % config_filename,
                          pid_filename):
        return False

    try:
        subprocess.check_call(('ip', 'link', 'set', interface, 'down'))
    except subprocess.CalledProcessError:
        return False

    return True
def directory_merge(existing_people, new_people):
    perfect_matched = set()
    matches = []

    for new in new_people:
        best_similarity = 0
        best_match = None
        for existing in existing_people:
            similarity = calculate_similarity(existing, new)
            if similarity > 0.999:
                perfect_matched.add(new['id'])
                continue
            if similarity > best_similarity:
                best_similarity = similarity
                best_match = existing
        matches.append((best_similarity, new, best_match))

    click.secho(f'{len(perfect_matched)} were perfect matches', fg='green')

    unmatched = set(p['id'] for p in new_people) - perfect_matched

    for sim, new, old in sorted(matches, reverse=True, key=lambda x: x[0]):
        if sim < 0.001:
            break
        # discard() rather than remove(): perfect matches are also appended to
        # `matches` above, and their ids are already absent from `unmatched`
        unmatched.discard(new['id'])
        click.secho(' {:.2f} {} {}'.format(sim, get_filename(new),
                                           get_filename(old)), fg='yellow')

    click.secho(f'{len(unmatched)} were unmatched')
def apiV1Upload():
    # Is a valid file key present?
    filekey = flask.request.headers.get("X-Kyoppie-File-Key")
    if(filekey != config.file["file_key"]):
        return {"result":False,"error":"invalid-filekey"},400
    file = flask.request.files.get("file")
    if(not file):
        return {"result":False,"error":"file-is-required"},400
    # Save to a temporary location for now
    filename = "../upload_tmp/"+utils.get_temp_save_filename()
    file.save(filename)
    print(filename)
    # Determine the file type
    path = "../files"
    mimetype = magic.Magic(mime=True).from_file(filename)
    print(mimetype)
    img = None
    res_obj = {
        "type":mimetype.split("/")[0]
    }
    if(mimetype == "image/png" or mimetype == "image/bmp"):
        # Losslessly compressed image file
        new_filename = utils.get_filename("png")
        img = PIL.Image.open(filename)
        img.save(path+new_filename,"png")
        res_obj["url"] = new_filename
    elif(mimetype == "image/jpeg" or mimetype == "image/jpg"):
        new_filename = utils.get_filename("jpg")
        img = PIL.Image.open(filename)
        img.save(path+new_filename,"jpeg",quality=80)
        res_obj["url"] = new_filename
    elif(mimetype == "image/gif"):
        new_filename = utils.get_filename("png")
        img = PIL.Image.open(filename)
        try:
            img.seek(1)
        except EOFError:
            # Single-frame GIF: save as PNG
            img.save(path+new_filename,"png")
            res_obj["url"] = new_filename
        else:
            # Animated GIF: encode as video
            new_filename,img = utils.video_encode(filename)
            res_obj["type"] = "video"
            res_obj["url"] = new_filename
    elif(mimetype == "video/mp4" or mimetype == "video/quicktime"):
        new_filename,img = utils.video_encode(filename)
        res_obj["url"] = new_filename
    else:
        return {"result":False,"error":"invalid-file"},400
    if(img):
        img.thumbnail(utils.get_resize_size(img.size))
        if("".join(img.getbands()) == "RGBA"):
            img.save(path+new_filename+".thumbnail.png","png")
            res_obj["thumbnail"] = new_filename+".thumbnail.png"
        else:
            img.save(path+new_filename+".thumbnail.jpg","jpeg",quality=75)
            res_obj["thumbnail"] = new_filename+".thumbnail.jpg"
    return res_obj,200
def interactive_merge(abbr, old, new, name_match, role_match, retirement):
    """
    returns True iff a merge was done
    """
    oldfname = "data/{}/people/{}".format(abbr, get_filename(old))
    newfname = "incoming/{}/people/{}".format(abbr, get_filename(new))
    click.secho(" {} {}".format(oldfname, newfname), fg="yellow")

    # simulate difference
    changes = compute_merge(old, new, keep_both_ids=False)

    if not changes:
        click.secho(" perfect match, removing " + newfname, fg="green")
        os.remove(newfname)
        return True

    for change in changes:
        if change.key_name == "name" or change.key_name == "roles":
            click.secho("    " + str(change), fg="red", bold=True)
        else:
            click.echo("    " + str(change))

    ch = "~"
    if name_match and role_match:
        choices = "m"
        # automatically pick merge
        ch = "m"
        # there is one very specific case that this fails in, if someone is beaten
        # by someone with the exact same name, that'll need to be caught manually
    elif name_match:
        choices = "m"
        text = "(m)erge?"
    elif role_match:
        choices = "mr"
        text = f"(m)erge? (r)etire {old['name']}"

    while ch not in (choices + "sa"):
        click.secho(text + " (s)kip? (a)bort?", bold=True)
        ch = click.getchar()

    if ch == "a":
        raise SystemExit(-1)
    elif ch == "m":
        merged = merge_people(old, new, keep_both_ids=False)
        dump_obj(merged, filename=oldfname)
        click.secho(" merged.", fg="green")
        os.remove(newfname)
    elif ch == "r":
        copy_new_incoming(abbr, new, "people")
        retire(abbr, old, new, retirement)
    elif ch == "s":
        return False

    return True
def __init__(self):
    super(CsrBuildGadget, self).__init__()

    self.schedule_phase = 'pre_build'

    self.name = 'csr3'
    self.queue = 'build'
    self.interactive = False

    dir = os.path.join(gvars.RootDir, "verif/vkits/csr")
    self.stdoutPath = get_filename(os.path.join(dir, '.csr3_stdout'))
    self.mergeStderr = True
    self.done_file = get_filename(os.path.join(dir, ".csr3_done"))
def parseargs(self, train_none, test_none):
    print(train_none)
    argparser = ArgumentParser()
    if train_none:
        argparser.add_argument('--train', type=str, default='raw/train.csv')
    if test_none:
        argparser.add_argument('--test', type=str, default='raw/test.csv')
    args = argparser.parse_args(fixed_argv(__file__))
    self.train_name = get_filename(args.train)
    self.test_name = get_filename(args.test)
    return args
def __init__(self, entry):
    super(VkitGadget, self).__init__()

    # ensure that check_dependencies must only run once
    self.checked_dependencies = None

    # The vkit is either a dictionary, or a vcfg.py file located in the
    # specified path from vkits_dir, or it's simply a name that can be
    # applied to a default dictionary
    config = {}
    if type(entry) == dict:
        config = entry
    elif type(entry) == str:
        if entry.endswith('.py') and os.path.exists(entry):
            config = self.load_vcfg(entry)
        else:
            # create a simple default vkit
            config = {'NAME': entry, 'DIR': entry, 'FLIST': entry}
    self.make_assignments(config, entry)

    # these variables are necessary when running in genip mode
    self.schedule_phase = 'genip'
    self.resources = gvars.PROJ.LSF_VLOG_LICS
    self.queue = 'build'
    self.interactive = False

    try:
        self.runmod_modules = [gvars.PROJ.MODULES[key] for key in gvars.VLOG.MODULES]
    except KeyError:
        Log.critical("Unknown module in VLOG.MODULES: {}".format(gvars.VLOG.MODULES))

    self.cwd = self.dir_name
    self.lib_name = '{}_LIB'.format(self.name.upper())
    Log.debug("name={}, dir_name={}, lib_name={}".format(self.name, self.dir_name, self.lib_name))
    self.stdoutPath = utils.get_filename(os.path.join(self.dir_name, '{}.stdout'.format(self.lib_name)))
    self.mergeStderr = True
    self.genip_done_file = utils.get_filename(os.path.join(self.dir_name, '{}.genip_done'.format(self.lib_name)))
    self.genip_completed = False
    self.pkg_dir = os.path.join(self.dir_name, self.pkg_name)
    Log.debug("Sending to pkg_dir: {}".format(self.pkg_dir))

    # ensures that the "...waiting for..." message is only printed once per vkit
    self.printed_waiting_for = False

    # in genip mode, run as a gadget, add the ssim gadget to
    # ensure that the synopsys_sim.setup file is created.
    if gvars.VLOG.COMPTYPE == 'genip':
        import schedule
        import gadgets.ssim
        schedule.add_gadget(gadgets.ssim.SsimGadget(self))
def CBC_decry(init_vec):
    filename = utils.get_filename()
    print("File to Decrypt:", filename)

    # Grabbing original file extension
    file_extension = filename.split('.cypher', 1)[0]
    if len(file_extension.split('.', 1)) != 1:
        file_extension = '.' + file_extension.split('.', 1)[1]
    else:
        file_extension = ''

    M_bloc = []
    C_cut = utils.read_file(filename)

    # First block: m_0 = D(c_0) XOR IV
    C_cut_int = int_from_bytes(C_cut[0])
    m_0 = decryption(C_cut_int) ^ init_vec
    M_bloc.append(m_0)

    # Remaining blocks: m_i = D(c_i) XOR c_{i-1}
    for i in range(1, len(C_cut)):
        C_cut_int = int_from_bytes(C_cut[i])
        m_i = decryption(C_cut_int) ^ int_from_bytes(C_cut[i - 1])
        M_bloc.append(m_i)

    # Reassemble the plaintext from the decrypted blocks
    message_dechiffre = M_bloc[0]
    for i in range(1, len(M_bloc)):
        M_bloc_bytes = int_to_bytes(M_bloc[i])
        message_dechiffre = message_dechiffre << (len(M_bloc_bytes) * 8) ^ M_bloc[i]

    message_dechiffre_bytes = int_to_bytes(message_dechiffre)
    print(message_dechiffre_bytes)

    ######################<Generate a file>#########################
    utils.write_clair_file(message_dechiffre_bytes.decode("utf-8"), file_extension)
def CBC_encry(init_vec):
    filename = utils.get_filename()
    cypher_file = filename + '_CBC' + '.cypher'

    # Split the file into blocks
    M_cut = []
    M_cut = utils.read_file(filename)

    # List of encrypted blocks
    C_bloc = []

    # First block: c_0 = E(m_0 XOR IV)
    M_cut_int = int_from_bytes(M_cut[0])
    c_0 = encryption(M_cut_int ^ init_vec)
    C_bloc.append(c_0)

    # Remaining blocks: c_i = E(m_i XOR c_{i-1})
    for i in range(1, len(M_cut)):
        M_cut_int = int_from_bytes(M_cut[i])
        c_i = encryption(M_cut_int ^ C_bloc[i - 1])
        C_bloc.append(c_i)

    # Concatenate all encrypted blocks into a single integer
    message_chiffre = C_bloc[0]
    for i in range(1, len(C_bloc)):
        C_bloc_bytes = int_to_bytes(C_bloc[i])
        message_chiffre = message_chiffre << (len(C_bloc_bytes) * 8) ^ C_bloc[i]
        #print(C_bloc_bytes)

    message_chiffre_bytes = int_to_bytes(message_chiffre)

    ######################<Generate a file>#########################
    utils.write_cypher_file(cypher_file, message_chiffre_bytes)
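# A minimal round-trip sketch of the CBC chaining used by CBC_encry/CBC_decry
# above. The toy encryption/decryption pair (XOR with a fixed key) and the
# block values are assumptions for illustration only; the real block cipher
# lives elsewhere in this codebase.
_TOY_KEY = 0xA5A5A5A5A5A5A5A5

def _toy_encryption(block):
    return block ^ _TOY_KEY

def _toy_decryption(block):
    return block ^ _TOY_KEY

def _toy_cbc_roundtrip():
    iv = 0x0123456789ABCDEF
    plaintext_blocks = [0x1111, 0x2222, 0x3333]

    # Encrypt: c_i = E(m_i XOR c_{i-1}), with c_{-1} = IV
    cipher_blocks = []
    prev = iv
    for m in plaintext_blocks:
        c = _toy_encryption(m ^ prev)
        cipher_blocks.append(c)
        prev = c

    # Decrypt: m_i = D(c_i) XOR c_{i-1}
    recovered = []
    prev = iv
    for c in cipher_blocks:
        recovered.append(_toy_decryption(c) ^ prev)
        prev = c

    assert recovered == plaintext_blocks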
def main():
    for doc in posterous_docs(settings.posterous_primary_sitename):
        bucket = settings.archivedotorg_pdf_bucket
        if not archive.exists(bucket, utils.get_filename(doc)):
            item = utils.download(doc)
            archive.upload(bucket, item)
def do_add(self, arg):
    """Adds a song to the playlist specified as the argument to this command"""
    f = get_filename(arg)
    if f:
        self.playlist.put(f)
    else:
        print("Not a valid selection to add to the playlist.")
def download_subset_file(subset_url, dataset_dir):
    """
    Download a subset segments file from the given url to the given directory.

    Args:
        subset_url:   URL to subset segments file (Type: str)
        dataset_dir:  Dataset directory where subset segment file will be stored (Type: str)

    Returns:
        subset_path:  Path to subset segments file (Type: str)
    """
    # Get filename of the subset file
    subset_filename = get_filename(subset_url)
    subset_name = get_subset_name(subset_url)
    subset_path = os.path.join(dataset_dir, subset_filename)

    os.makedirs(dataset_dir, exist_ok=True)

    # Open subset file as a CSV
    if not os.path.exists(subset_path):
        LOGGER.info('Downloading subset file for "{}"'.format(subset_name))
        with open(subset_path, 'w') as f:
            subset_data = urllib.request.urlopen(subset_url).read().decode()
            f.write(subset_data)

    return subset_path
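# Hedged usage sketch for download_subset_file above. The URL and directory
# are placeholders for illustration, not endpoints from this project.
def _download_subset_example():
    subset_path = download_subset_file(
        'https://example.com/data/balanced_train_segments.csv',  # hypothetical URL
        '/tmp/dataset')
    print('Subset file cached at:', subset_path)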
def check_duplicates(self):
    """
    duplicates should already be stored in self.duplicate_values
    this method just needs to turn them into errors
    """
    errors = []
    for key, values in self.duplicate_values.items():
        for value, instances in values.items():
            if len(instances) > 1:
                if len(instances) > 3:
                    instance_str = ", ".join(get_filename(i) for i in instances[:3])
                    instance_str += " and {} more...".format(len(instances) - 3)
                else:
                    instance_str = ", ".join(get_filename(i) for i in instances)
                errors.append(f'duplicate {key}: "{value}" {instance_str}')
    return errors
def __init__(self, master, filepath: str, new_file=False):
    tk.Frame.__init__(self, master)
    self.new_file = new_file
    self.filepath = filepath
    self.filename = get_filename(filepath) if not new_file else filepath
    self.master = master
    self.modified = False

    self.text_editor = ScrolledText(self, font=("", 15), undo=True, maxundo=-1, wrap="none")
    self.text_editor.config(highlightthickness=0, bd=0)
    self.text_editor.grid(row=0, column=1, sticky=tk.NSEW)

    self.scrollbar_x = tk.Scrollbar(self, orient=tk.HORIZONTAL, command=self.text_editor.xview)
    self.scrollbar_x.grid(row=1, column=0, columnspan=2, sticky=tk.EW)
    self.text_editor.configure(xscrollcommand=self.scrollbar_x.set)

    self.line_nb_canvas = tk.Canvas(self, bg=self.text_editor.cget("bg"), bd=0, highlightthickness=0)
    self.line_nb_canvas.grid_propagate(False)
    self.line_nb_canvas.grid(row=0, column=0, sticky=tk.NS)

    self.grid_rowconfigure(0, weight=1)
    self.grid_columnconfigure(1, weight=1)

    self.default_file_content = str()
def upload(file_path): """Upload a file to Streamable.com Args: file_path (string): Path to the video to be uploaded. Returns: A string that represents the url to the uploaded video """ with open(file_path, 'rb') as file: filename = utils.get_filename(file_path) content = {'file': (filename, file)} auth = DataStore.get_streamable_secrets() headers = { 'user-agent': 'lol-highlights-enhancer/1.0.0 ([email protected])', 'Authorization': f'Basic {auth}' } api_url = 'https://api.streamable.com/upload' response = requests.post(api_url, files=content, headers=headers) shortcode = response.json()['shortcode'] video_url = f'https://streamable.com/{shortcode}' return video_url
def get_links_from_url(url):
    # extract all the links from this url page
    page = get_filename(url)
    with open(page) as f:
        html_page = f.read()
    return get_links_from_wiki(html_page)
def upload(file_path): """Uploads a video to gfycat Args: file_path (string): Path to the video to be uploaded. Returns: A string representing the url to the uploaded video. """ filename = utils.get_filename(file_path) body = { 'title': filename, 'tags': ['leagueoflegends', 'League of Legends'], 'noMd5': True } token = DataStore.get_gfycat_token() auth_header = {'Authorization': f'Bearer {token}'} url = f'{BASE_URL}/gfycats' response = requests.post(url, json=body, headers=auth_header) gfyname = response.json()['gfyname'] upload_url = f'https://filedrop.gfycat.com/{gfyname}' with open(file_path, 'rb') as video: response = requests.put(upload_url, video) video_url = f'https://gfycat.com/{gfyname}' return video_url
def nmt_check_overfitting(easy_config):
    training_filename = utils.get_filename(exp_config["training_corpus"])
    import commands
    lines = int(commands.getstatusoutput('wc -l ' + os.path.join(easy_config.easy_corpus, training_filename+'.clean.'+exp_config["source_id"]))[1].split(' ')[0])
    print "pairs ", lines
    # return
    from nmt import overfitting_prepare
    base = 10
    if lines / 3000 > base:
        base = lines / 3000
    overfitting_prepare(easy_config, training_filename, base)
    command1 = "python " + easy_config.nmt_path + "sample.py"\
        + " --beam-search "\
        + " --beam-size 12"\
        + " --state " + os.path.join(easy_config.easy_train, "search_state.pkl")\
        + " --source " + os.path.join(easy_config.easy_overfitting, "OF.clean." + exp_config["source_id"])\
        + " --trans " + os.path.join(easy_config.easy_overfitting, "ontrain." + exp_config["target_id"])\
        + " " + os.path.join(easy_config.easy_train, "search_model.npz")
    command2 = (easy_config.mosesdecoder_path + "scripts/generic/multi-bleu.perl "
        + " -lc " + easy_config.easy_overfitting + "/OF.clean." + exp_config["target_id"]
        + " < " + easy_config.easy_overfitting + "/ontrain." + exp_config["target_id"])
    write_step(command1, easy_config)
    os.system(command1)
    write_step(command2, easy_config)
    os.system(command2)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--path", help="input file path")
    parser.add_argument("--save", default="./", help="save file path")
    args = parser.parse_args()

    source_file_path = args.path
    save_file_path = args.save
    print("source from the path => " + source_file_path)
    print("save file path => " + save_file_path)

    file = open(source_file_path)
    lines = file.readlines()
    file.close()

    file_content = file_content_to_string(lines)
    save_file_name = save_file_path + get_filename(source_file_path)

    print("Now Parsing...")
    scanner = LexScanner(file_content)
    parser = LRParser(scanner)
    parser.parse()
    parser.write_tree_to_file(save_file_name)
    print("Finish!")

    print("Now Generating code...")
    tree = parser.get_ast_tree()
    generator = CodeGenerator(tree)
    generator.generate()
    generator.write_code_to_file(save_file_name)
    print("Finish!")
def _merge_tiles(path: Union[Path, str],
                 tiles_dir: Union[Path, str],
                 tms_bbox: Tuple[int, int, int, int],
                 zoom: int,
                 tiles_format: ImageFormat) -> None:
    # language=rst
    """
    Merge tiles from `tms_bbox` area from `tiles_dir` with `zoom` zoomlevel and
    `tiles_format` image format in image file with `path` without georeference

    :param path:
    :param tiles_dir:
    :param tms_bbox: area of the tms coordinates in the form of: `(min_x, min_y, max_x, max_y)`
    :param zoom:
    :param tiles_format:
    :return:
    """
    min_x, min_y, max_x, max_y = tms_bbox

    rows = list()
    for y in range(max_y, min_y - 1, -1):
        row = list()
        for x in range(min_x, max_x + 1):
            tile_path = get_filename(Tile.from_tms(x, y, zoom), tiles_format, tiles_dir)
            row.append(cv2.imread(str(tile_path)))
        row_img = cv2.hconcat(row)
        rows.append(row_img)

    data = cv2.vconcat(rows)
    cv2.imwrite(path, data)
def train_max_epochs(self, args, train0, train1, dev0, dev1, vocab,
                     no_of_epochs, writer, time, save_epochs_flag=False,
                     save_batch_flag=False, save_batch=5):
    print("No of epochs: ", no_of_epochs)
    self.train()
    self.enc_optim = optim.AdamW(self.encoder.parameters(), lr=args.learning_rate,
                                 betas=(self.beta1, self.beta2))
    self.gen_optim = optim.AdamW(self.generator.parameters(), lr=args.learning_rate,
                                 betas=(self.beta1, self.beta2))
    self.discrim1_optim = optim.AdamW(self.discriminator1.parameters(), lr=args.learning_rate,
                                      betas=(self.beta1, self.beta2))
    self.discrim2_optim = optim.AdamW(self.discriminator2.parameters(), lr=args.learning_rate,
                                      betas=(self.beta1, self.beta2))

    Path(args.saves_path).mkdir(parents=True, exist_ok=True)
    saves_path = os.path.join(args.saves_path, utils.get_filename(args, time, "model"))
    Path(saves_path).mkdir(parents=True, exist_ok=True)
    flag = True

    with autograd.detect_anomaly():
        for epoch in range(no_of_epochs):
            random.shuffle(train0)
            random.shuffle(train1)
            batches0, batches1, _1, _2 = utils.get_batches(train0, train1, vocab.word2id,
                                                           args.batch_size, noisy=True)

            dev_batches0 = []
            dev_batches1 = []
            if self.args.dev:
                dev_batches0, dev_batches1, _, _ = utils.get_batches(dev0, dev1, vocab.word2id,
                                                                     args.batch_size, noisy=True)
            # batches0, batches1, _1, _2 = utils.get_batches_bpe(train0, train1, vocab.word2id,
            #                                                    args.batch_size, noisy=True)

            random.shuffle(batches0)
            random.shuffle(batches1)
            print("Epoch: ", epoch)
            self.logger.info("Epoch: " + str(epoch))
            train_flag = self(args, batches0, batches1, dev_batches0, dev_batches1,
                              vocab, no_of_epochs, epoch, writer, time,
                              save_epochs_flag=False, save_batch_flag=False, save_batch=5)
            if train_flag:
                break
def download_tiles(tiles_dir: Union[Path, str],
                   bbox: Tuple[float, float, float, float],
                   zoom: int,
                   map_: Type[maps.Map],
                   proxies: Optional[dict] = None) -> None:
    # language=rst
    """
    Download tiles from `bbox` with `zoom` zoomlevel to `tiles_dir` from `map_` using `proxies`.

    :param tiles_dir:
    :param bbox: area of the geo coordinates in the form of: `(min_lat, min_lon, max_lat, max_lon)`
    :param zoom:
    :param map_:
    :param proxies:
    :return:
    """
    session = requests.session()
    if proxies is not None:
        session.proxies = proxies

    for tile in get_tile_gen(bbox, zoom):
        path = get_filename(tile, map_.tiles_format, tiles_dir)

        if not path.exists():
            for url in map_.get_urls_gen(tile):
                response = session.get(url)
                time.sleep(map_.get_timeout())

                if response.ok:
                    with open(path, 'wb') as file:
                        file.write(response.content)
                    break

    session.close()
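# Hedged usage sketch combining download_tiles and _merge_tiles into one
# pipeline. The map class (maps.OSM), the bbox values, the output paths, and
# the bbox_to_tms helper are illustrative assumptions, not names confirmed by
# this project.
def _fetch_and_merge_example():
    bbox = (55.70, 37.50, 55.80, 37.70)  # hypothetical (min_lat, min_lon, max_lat, max_lon)
    zoom = 12
    tiles_dir = Path('/tmp/tiles')
    tiles_dir.mkdir(parents=True, exist_ok=True)

    download_tiles(tiles_dir, bbox, zoom, maps.OSM)

    # _merge_tiles expects the same area expressed in TMS tile coordinates;
    # a conversion helper like this is assumed to exist alongside get_tile_gen.
    tms_bbox = bbox_to_tms(bbox, zoom)  # hypothetical helper
    _merge_tiles('/tmp/merged.png', tiles_dir, tms_bbox, zoom, maps.OSM.tiles_format)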
def download_sequence_group(accession, format, study_dir):
    print('Downloading sequences')
    update_accs = []
    dest_file = os.path.join(study_dir, utils.get_filename(accession + '_sequences', format))

    # sequence update
    temp_file = os.path.join(study_dir, 'temp.txt')
    download_report(utils.SEQUENCE, utils.SEQUENCE_UPDATE_RESULT, accession, temp_file)
    f = open(temp_file)
    header = True
    for line in f:
        if header:
            header = False
            continue
        data_accession = line.strip()
        update_accs.append(data_accession)
        sequenceGet.append_record(dest_file, data_accession, format)
    f.close()
    os.remove(temp_file)

    # sequence release
    temp_file = os.path.join(study_dir, 'temp.txt')
    download_report(utils.SEQUENCE, utils.SEQUENCE_RELEASE_RESULT, accession, temp_file)
    f = open(temp_file)
    header = True
    for line in f:
        if header:
            header = False
            continue
        data_accession = line.strip()
        if data_accession not in update_accs:
            sequenceGet.append_record(dest_file, data_accession, format)
    f.close()
    os.remove(temp_file)
def save_pos_samples(root_folder, folder_to, train=True, subset_size=None):
    """Loads all the positive samples."""
    lst_file_folder = 'Train' if train else 'Test'
    lst_pos_file = os.path.join(lst_file_folder, 'pos.lst')
    lst_annotations_file = os.path.join(lst_file_folder, 'annotations.lst')
    content_pos = open(os.path.join(root_folder, lst_pos_file))  # Open the list of positive images
    content_annotations = open(os.path.join(root_folder, lst_annotations_file))  # Open the list of annotations
    content_pos_lines = content_pos.readlines()
    content_annotations_lines = content_annotations.readlines()

    # If a subset size was specified, trim the list of lines
    if subset_size:
        combined = list(zip(content_pos_lines, content_annotations_lines))
        # Shuffle into random order when generating a subset
        random.shuffle(combined)
        content_pos_lines, content_annotations_lines = zip(*combined)
        # Trim the number of results
        content_pos_lines = content_pos_lines[0:subset_size]
        content_annotations_lines = content_annotations_lines[0:subset_size]

    for img_path, bounding_boxes_path in zip(content_pos_lines, content_annotations_lines):
        # Strip the newline character
        img_path = img_path.rstrip('\n')
        bounding_boxes_path = bounding_boxes_path.rstrip('\n')
        img_original = skimage.io.imread(os.path.join(root_folder, img_path))  # Load the image
        # Get the bounding boxes of the people to crop
        bounding_boxes_list = get_inria_bounding_boxes(root_folder, bounding_boxes_path)
        for bounding_box in bounding_boxes_list:
            persona = get_inria_bounding_box_cropped(img_original, bounding_box)  # Crop the person
            persona = utils.resize(persona)  # Rescale the image
            img_filename = utils.get_filename(img_path)  # Build the name the saved image will have
            utils.save_img(persona, folder_to, img_filename)  # Save the image into the positives folder
def delete_options(program, interface):
    """Deletes persisted program options from _CONFIG_DIR.

    Args:
        program: The program for which to delete options.
        interface: The interface for which to delete options.

    Returns:
        Whether deletion succeeded.
    """
    filename = utils.get_filename(program, utils.FILENAME_KIND.options, interface)
    if os.path.exists(filename):
        try:
            os.remove(filename)
            utils.log('Removed persisted options for %s %s.', interface, program)
        except OSError:
            utils.log('Failed to remove persisted options for %s %s.', interface, program)
            return False
    else:
        utils.log('No persisted options to remove for %s %s.', interface, program)

    return True
def load_options(program, interface, tmp):
    """Loads program options, if any have been saved.

    Args:
        program: The program for which to load options.
        interface: The interface for which to load options.
        tmp: Whether to load options from /tmp (i.e. what is currently running)
            or _CONFIG_DIR (the options from last time --persist was set).

    Returns:
        The stored options (which can be passed to wifi._run), or None if the
        file cannot be opened.
    """
    filename = utils.get_filename(program, utils.FILENAME_KIND.options, interface, tmp=tmp)
    try:
        with open(filename) as options_file:
            saved = ast.literal_eval(options_file.read())
            os.environ.update(saved['env'])
            return saved['argv']
    except IOError as e:
        if e.args[0] == errno.ENOENT:
            return None
        raise
def update_images_bBox(class_name, bag, topic="/dvs/image_raw",
                       data_folder="data_bBox", annot_file="annotate.txt"):
    bag_name = get_filename(bag.filename, char="/")
    bag_name = remove_extension(bag_name)

    save_folder = path.join(path.join(data_folder, class_name), bag_name)
    save_folder = path.join(save_folder, IMG_FOLDER)
    file_name = path.join(save_folder, annot_file)

    df = pd.read_csv(file_name)
    bridge = CvBridge()

    for msg in bag.read_messages(topic):
        cv_image = bridge.imgmsg_to_cv2(msg.message, "rgb8")
        filename = str(msg.message.header.stamp) + ".png"
        cols_of_interest = [
            "x", "y", "w", "h", "proba", "class_name", "tagger"
        ]
        for _, r in df.loc[df["boxImg_name"] == filename, cols_of_interest].iterrows():
            x, y, w, h, prob, cl, tagger = r
            bbox_color = (0, 255, 0) if tagger == "yolo" else (0, 0, 255)
            draw_bBox(cv_image, (x, y), (x + w, y + h), cl, prob, bbox_color=bbox_color)

        img_name = path.join(save_folder, filename)
        cv2.imwrite(img_name, cv_image)
def download_sequence_set(accession_list, mol_type, assembly_dir, output_format, expanded, quiet):
    failed_accessions = []
    count = 0
    sequence_cnt = len(accession_list)
    divisor = utils.get_divisor(sequence_cnt)
    if sequence_cnt > 0:
        if not quiet:
            print 'fetching {0} sequences: {1}'.format(sequence_cnt, mol_type)
        target_file_path = os.path.join(assembly_dir, utils.get_filename(mol_type, output_format))
        target_file = open(target_file_path, 'w')
        for accession in accession_list:
            success = sequenceGet.write_record(target_file, accession, output_format, expanded)
            if not success:
                failed_accessions.append(accession)
            else:
                count += 1
                if count % divisor == 0 and not quiet:
                    print 'downloaded {0} of {1} sequences'.format(count, sequence_cnt)
        if not quiet:
            print 'downloaded {0} of {1} sequences'.format(count, sequence_cnt)
        target_file.close()
    elif not quiet:
        print 'no sequences: ' + mol_type
    if len(failed_accessions) > 0:
        print 'Failed to fetch following {0}, format {1}'.format(mol_type, output_format)
        print ','.join(failed_accessions)
def get_links_from_url(url, force_download=False):
    # extract all the links from this url page
    page = get_filename(url)
    with open(page) as f:
        html_page = f.read()
    return get_links_from_text(html_page)
def scrape_website(seeds, base_url, force_download=False):
    # keep scraping and indexing until we run out of links to find
    # to keep track of what we need to find and where we're going
    links_to_follow = set(
        ["/" + "/".join(seed.split("/")[3:]) for seed in seeds])
    visited_links = set()
    index = {}

    # repeat until we run out of links
    while links_to_follow:
        print("Number of pages in queue: {}".format(len(links_to_follow)))
        new_url = links_to_follow.pop()
        visited_links.add(new_url)
        maybe_save_url(base_url + new_url, force_download=force_download)
        links_to_follow = (links_to_follow | get_links_from_url(
            base_url + new_url, force_download=force_download)) - visited_links
        add_page_to_index(base_url + new_url, index)

    # save the list of pages we've got
    with open(get_filename(base_url + "/scraped_pages.txt"), "w") as f:
        f.write(base_url + "\n{}".format(base_url).join(sorted(visited_links)))

    sort_and_store_index(index, base_url)
def __init__(self, messenger, config, account, mediatype):
    """Checks if the config is correct and creates an API object."""
    self.msg = messenger
    self.config = config
    self.msg.info(self.name, "Initializing...")

    # Get filenames
    userfolder = "%s.%s" % (account['username'], account['api'])
    self.userconfig_file = utils.get_filename(userfolder, 'user.json')

    # Handle userconfig and media type to load
    self._load_userconfig()
    if mediatype:
        self.userconfig['mediatype'] = mediatype
        self._save_userconfig()

    # Import the API
    libbase = account['api']
    libname = "lib{0}".format(libbase)
    try:
        modulename = "trackma.lib.{0}".format(libname)
        __import__(modulename)
        apimodule = sys.modules[modulename]
    except ImportError, e:
        raise utils.DataFatal("Couldn't import API module: %s" % e.message)
def compare_districts(expected, actual):
    errors = []

    if expected.keys() != actual.keys():
        errors.append(
            f"expected districts for {expected.keys()}, got {actual.keys()}")
        return errors

    for chamber in expected:
        expected_districts = set(expected[chamber].keys())
        actual_districts = set(actual[chamber].keys())
        for district in sorted(expected_districts - actual_districts):
            if expected[chamber][district]:
                errors.append(f"missing legislator for {chamber} {district}")
        for district in sorted(actual_districts - expected_districts):
            errors.append(
                f"extra legislator for unexpected seat {chamber} {district}")
        for district in sorted(actual_districts & expected_districts):
            if len(actual[chamber][district]) < expected[chamber][district]:
                errors.append(f"missing legislator for {chamber} {district}")
            if len(actual[chamber][district]) > expected[chamber][district]:
                people = "\n\t".join(
                    get_filename(o) for o in actual[chamber][district])
                errors.append(
                    f"extra legislator for {chamber} {district}:\n\t" + people)
    return errors
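# Sketch of the data shapes compare_districts consumes, inferred from the
# comparisons above: `expected` maps chamber -> district -> seat count, while
# `actual` maps chamber -> district -> list of person objects. The values
# below are illustrative only.
_expected_example = {
    "upper": {"1": 1, "2": 1},
}
_actual_example = {
    "upper": {"1": [], "2": [{"id": "p1", "name": "A"}, {"id": "p2", "name": "B"}]},
}
# compare_districts(_expected_example, _actual_example) would report a missing
# legislator for upper 1 and an extra legislator for upper 2.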
def get_input_fn(params, raw_data=False):
    batch_size = params['batch_size']
    global shard_id
    if FLAGS.reassign_label:
        assert FLAGS.data_type == 'tfrecord'
        filename = os.path.join(
            FLAGS.label_data_dir,
            '%s-%d-%05d-of-%05d' % (FLAGS.file_prefix, FLAGS.worker_id, shard_id, FLAGS.num_shards))
        tf.logging.info('processing {}'.format(filename))
        # do not use replica here
        dst = utils.get_dst_from_filename(filename, FLAGS.data_type)
    else:
        filename = utils.get_filename(FLAGS.label_data_dir, FLAGS.file_prefix,
                                      shard_id, FLAGS.num_shards)
        tf.logging.info('processing files: {}'.format(str(filename)))
        dst = utils.get_dst_from_filename(filename, FLAGS.data_type,
                                          FLAGS.total_replicas, FLAGS.worker_id)
    if raw_data:
        return dst
    dst = dst.apply(
        tf.data.experimental.map_and_batch(functools.partial(preprocess),
                                           batch_size=batch_size,
                                           num_parallel_batches=16,
                                           drop_remainder=False))
    dst = dst.map(functools.partial(set_shapes, batch_size))
    dst = dst.prefetch(tf.data.experimental.AUTOTUNE)
    return dst
def save_options(program, interface, argv, tmp=False):
    """Saves program options.

    Persistence options are stripped before saving to prevent rewriting
    identical options when they are loaded and run.

    Args:
        program: The program for which to save options.
        interface: The interface for which to save options.
        argv: The options to save.
        tmp: Whether to save options to /tmp or _CONFIG_DIR.
    """
    to_save = {
        'argv': [arg for arg in argv if arg not in ('-P', '--persist')],
        'env': {},
    }

    # Also save important environment variables.
    if program == 'hostapd' and 'WIFI_PSK' in os.environ:
        to_save['env']['WIFI_PSK'] = utils.validate_and_sanitize_psk(
            os.environ['WIFI_PSK'])
    if program == 'wpa_supplicant' and 'WIFI_CLIENT_PSK' in os.environ:
        to_save['env']['WIFI_CLIENT_PSK'] = utils.validate_and_sanitize_psk(
            os.environ['WIFI_CLIENT_PSK'])

    utils.atomic_write(
        utils.get_filename(program, utils.FILENAME_KIND.options, interface, tmp=tmp),
        repr(to_save))
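# A minimal sketch, assuming nothing beyond the standard library, of the
# persistence round trip shared by save_options and load_options above:
# options are written as repr() of a plain dict and read back with
# ast.literal_eval, so only Python literal types survive. The payload is
# illustrative.
import ast

_options_payload = {'argv': ['-e', 'MySSID'], 'env': {'WIFI_PSK': 'hunter2'}}
assert ast.literal_eval(repr(_options_payload)) == _options_payload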
def smt_language_model_training(easy_config):
    training_filename = utils.get_filename(exp_config["training_corpus"])
    write_step("start language_model_training", easy_config)
    generate_sb(easy_config, training_filename)
    generate_lm(easy_config, training_filename)
    generate_arpa(easy_config, training_filename)
    generate_blm(easy_config, training_filename)
    write_step("finish language_model_training", easy_config)
def smt_testing(easy_config):
    # t_start(easy_config)
    testfilename = utils.get_filename(exp_config["test_corpus"])
    # testfilename = utils.get_filename(exp_config["develop_corpus"])
    # t_tokenisation(easy_config, testfilename)
    # t_truecasing(easy_config, testfilename)
    # t_filter_model_given_input(easy_config)
    # t_filter_model_given_input(easy_config, easy_config.easy_evaluation, testfilename+'.true.'+exp_config['source_id'])
    run_test(easy_config, testfilename)
def smt_training_corpus_preparation(easy_config):
    training_filename = utils.get_filename(exp_config["training_corpus"])
    write_step("start training_corpus_preparation", easy_config)
    # print "corpus preparation"
    tokenisation(easy_config, training_filename)
    truecaser(easy_config, training_filename)
    truecasing(easy_config, training_filename)
    limiting_sentence_length(easy_config, training_filename)
    write_step("finish training_corpus_preparation", easy_config)
def bleu_score(easy_config):
    testfilename = utils.get_filename(exp_config["test_corpus"])
    command2 = (easy_config.mosesdecoder_path + "scripts/generic/multi-bleu.perl "
                + " -lc " + os.path.join(easy_config.easy_evaluation, testfilename + ".true." + exp_config["target_id"])
                + " < " + os.path.join(easy_config.easy_evaluation, testfilename + ".translated." + exp_config["target_id"])
                # + " < " + easy_config.easy_evaluation + testfilename + ".translated." + exp_config["target_id"] + ".9"
                )
    write_step(command2, easy_config)
    os.system(command2)
def leading_in_list(self):
    uri = WindowLoadPlaylist().run()
    try:
        p_name = utils.get_filename(uri)
        pl = MediaDB.create_playlist("local", p_name, [])
        new_item = ListTreeItem(pl)
        self.category_list.add_items([new_item])
        new_item.song_view.async_add_uris(uri)
    except:
        pass
def smt_check_train(easy_config):
    training_filename = utils.get_filename(exp_config["training_corpus"])
    import commands
    lines = int(commands.getstatusoutput('wc -l ' + os.path.join(easy_config.easy_corpus, training_filename+'.clean.'+exp_config["source_id"]))[1].split(' ')[0])
    print "pairs ", lines
    # return
    from nmt import overfitting_prepare
    base = 10
    if lines / 3000 > base:
        base = lines / 3000
    overfitting_prepare(easy_config, training_filename, base)
    test_on_train(easy_config)
def __init__(self, dataset_name, query, format):
    self.dataset_name = dataset_name
    self.query = query
    self.format = format

    self.dpi = 72.
    self.size = '11x9'

    self.filetype, self.mime = utils.get_mimetype(format)
    self.filename = utils.get_filename(
        self.plottype,
        dataset_name,
        self.filetype
    )
def run(self, edit):
    # Open the next file (alphabetically) in the current file's directory
    carpeta = utils.get_filedir()
    archivo = utils.get_filename()
    archivos = sorted(os.listdir(carpeta))
    i = 0
    for arc in archivos:
        if arc == archivo:
            i += 1
            break
        i += 1
    # note: raises IndexError when the current file is the last one
    nuevo_archivo = archivos[i]
    window = sublime.active_window()
    window.open_file(carpeta + os.sep + nuevo_archivo)
def nmt_test(easy_config):
    testfilename = utils.get_filename(exp_config["test_corpus"])
    t_tokenisation(easy_config, testfilename)
    t_truecasing(easy_config, testfilename)
    command1 = "python " + easy_config.nmt_path + "sample.py"\
        + " --beam-search "\
        + " --beam-size 12"\
        + " --state " + os.path.join(easy_config.easy_train, "search_state.pkl")\
        + " --source " + os.path.join(easy_config.easy_evaluation, testfilename + ".true." + exp_config["source_id"])\
        + " --trans " + os.path.join(easy_config.easy_evaluation, testfilename + ".translated." + exp_config["target_id"])\
        + " " + os.path.join(easy_config.easy_train, "search_model.npz")\
        + " >& " + os.path.join(easy_config.easy_evaluation, "trans_out.txt") + " &"
    write_step(command1, easy_config)
    os.system(command1)
def fan_analyze(easy_config):
    devfilename = utils.get_filename(exp_config["develop_corpus"]+str(1))
    print devfilename
    weight_dic = {}
    standard_line = "0.0105348\t0.0651135\t0.0532412\t0.00603957\t0.0532839\t0.0809408\t0.122954\t0.271147\t0.0633011\t0.0520243\t0.0513287\t0.0239177\t0.024159\t1\t-0.122014"
    paths = os.listdir(easy_config.easy_tuning)
    outfile = open(os.path.join(easy_config.easy_tuning, "cluster_weights.txt"), 'w')
    outfile_re = open(os.path.join(easy_config.easy_tuning, "cluster_weights_reference.txt"), 'w')
    # count_line = 0
    count0 = 0
    count = 0
    for path in paths:
        if os.path.isfile(os.path.join(easy_config.easy_tuning, path)):
            continue
        # if os.path.isfile(os.path.join(easy_config.easy_tuning, path+"/run20.moses.ini")):
        #     print path
        # outfile.write(infile.readline())
        # outfile.write(infile.readline())
        dic = read_moses_ini(os.path.join(easy_config.easy_tuning, path))
        new_line = ""
        if dic:
            print path + '\t' + dic['LM0'] + '\t' + dic['TranslationModel00'] + '\t' + dic['bleu']
            for k in sorted(dic):
                if k == "bleu":
                    continue
                # print k
                new_line += dic[k].strip() + '\t'
            # outfile.write(new_line+'\n')
            count += 1
            # break
            if not weight_dic.has_key(int(path)):
                weight_dic[int(path)] = new_line
        else:
            count0 += 1
            new_line = standard_line
            # outfile.write(standard_line + '\n')
        infile = open(os.path.join(easy_config.easy_tuning, path+"/"+devfilename + ".true." + exp_config["source_id"]), 'r')
        for line in infile.readlines():
            outfile.write(line)
            outfile.write(new_line+'\n')
        infile.close()
        infile = open(os.path.join(easy_config.easy_tuning, path+"/"+devfilename + ".true." + exp_config["target_id"]), 'r')
        for line in infile.readlines():
            outfile_re.write(line)
        infile.close()
    outfile.close()
    outfile_re.close()
    print count0, count, len(weight_dic), weight_dic.keys()
    return weight_dic
def nmt_prepare(easy_config):
    # cpnmt(easy_config)
    training_filename = utils.get_filename(exp_config["training_corpus"])
    # print exp_config["training_corpus"], training_filename
    # exit()
    write_step("start nmt_prepare", easy_config)
    tokenisation(easy_config, training_filename)
    truecaser(easy_config, training_filename)
    truecasing(easy_config, training_filename)
    limiting_sentence_length(easy_config, training_filename)
    pkl(easy_config, training_filename)
    invert(easy_config, training_filename)
    hdf5(easy_config, training_filename)
    shuff(easy_config, training_filename)
    write_step("finish nmt_prepare", easy_config)
def make_file(cls, ref):
    folder_path = utils.get_dirname(ref)
    file_path = utils.get_filename(ref)

    dest_exists = cls.exists(folder_path)
    if dest_exists:
        dest_exists = cls.exists(ref)
        if dest_exists:
            raise OSError("[Errno 17] File exists: '%s'" % ref)
        elif not cls.is_folder(folder_path):
            raise OSError("[Errno 20] Not a directory: '%s'" % folder_path)
    else:
        cls.make_folder(folder_path)
    fh = cls.temp_file_class(ref, cls._save_file)
    return fh
def make_file(cls, ref):
    folder_path = utils.get_dirname(ref)
    file_path = utils.get_filename(ref)

    dest_exists = cls.exists(folder_path)
    if dest_exists:
        dest_exists = cls.exists(ref)
        if dest_exists:
            raise OSError("[Errno 17] File exists: '%s'" % ref)
        elif not cls.is_folder(folder_path):
            raise OSError("[Errno 20] Not a directory: '%s'" % folder_path)
    else:
        try:
            cls.make_folder(folder_path)
        except IOError, e:
            raise OSError(e)
def update_naming(self):
    self.save()
    naming = utils.get_filename()
    self.naming_example.setText(naming)
    # indices 0 and 1 enable the same controls
    if self.naming.currentIndex() in (0, 1):
        self.amount.setEnabled(True)
        self.format.setEnabled(False)
    elif self.naming.currentIndex() == 2:
        self.amount.setEnabled(False)
        self.format.setEnabled(True)
    elif self.naming.currentIndex() == 3:
        self.amount.setEnabled(False)
        self.format.setEnabled(False)
def load_vars_file(filename):
    filterp = lambda x: x if x != [0] else []
    intc = lambda x: int(x) if x != '' else 0

    varstable = {}
    vars_data = parser.parse_vars_analyzer_file(filename)
    for key, data in vars_data.iteritems():
        key = utils.get_filename(key, True)
        pvars = {}
        for val in data:
            md = val[0]
            avars = map(intc, val[5].strip('[]').split(','))
            bvars = map(intc, val[6].strip('[]').split(','))
            cvars = map(intc, val[7].strip('[]').split(','))
            dvars = map(intc, val[8].strip('[]').split(','))
            pvars[md] = map(filterp, [avars, bvars, cvars, dvars])
        varstable[key] = pvars
    return varstable
def _put_file(self, data):
    try:
        with open(data, 'rb') as datafile:
            service_path_raw = utils.get_config_section(DropboxService.DESCRIPTION.fget(), constants.BACKUP_FOLDER)
            normalized_service_path = utils.normalize_folder(service_path_raw)
            final_path = normalized_service_path + utils.get_filename(data)
            print("File Destination: " + final_path)
            try:
                # db.put_file(<dest_path>, <object file>, <overwrite>)
                response = self.dropbox_instance.put_file(final_path, datafile, True)
                print(data + " was uploaded (" + response['size'] + ")")
            except dbrest.ErrorResponse as e:
                print("Error on upload. Skipped file!")
                print(e)
    except OSError as e:
        print("Could not open file. Skipped!")
        print(e)
def fan_decoder(easy_config, filename):
    testfilename = utils.get_filename(exp_config["test_corpus"])
    # testfilename = "C_B.Dev"
    # testfilename = "cluster_weights.dev"
    print testfilename

    # Build a moses.ini with alternate weight settings
    old_moses_ini = open(os.path.join(easy_config.easy_train, "model/moses.ini"), 'r')
    new_moses_ini = open(os.path.join(easy_config.easy_evaluation, filename+"_moses.ini"), 'w')
    for line in old_moses_ini.readlines():
        if line.strip() != "[weight]":
            new_moses_ini.write(line.strip() + '\n')
        else:
            new_moses_ini.write("[alternate-weight-setting]\n")
            break
    old_moses_ini.close()

    # Alternating input: a sentence line followed by its weight line
    infile = open(os.path.join(easy_config.easy_evaluation, filename), 'r')
    testfile = open(os.path.join(easy_config.easy_evaluation, filename+'.'+exp_config['source_id']), 'w')
    count = 0
    state = 0
    for line in infile.readlines():
        if state == 0:
            testfile.write("<seg weight-setting="+str(count)+">"+line.strip()+"</seg>\n")
            # outfile.close()
            state = 1
        elif state == 1:
            weight_dic = weights2weightsdic(line.strip())
            new_moses_ini.write(generate_weight_setting(count, weight_dic))
            count += 1
            state = 0
    infile.close()
    testfile.close()
    new_moses_ini.close()

    command1 = easy_config.mosesdecoder_path+"bin/moses "\
        + " -threads 1"\
        + " -alternate-weight-setting"\
        + " -f " + os.path.join(easy_config.easy_evaluation, filename+"_moses.ini ")\
        + " -i " + os.path.join(easy_config.easy_evaluation, filename + "." + exp_config["source_id"])\
        + " > " + os.path.join(easy_config.easy_evaluation, filename + ".translated." + exp_config["target_id"])\
        + " 2> " + os.path.join(easy_config.easy_evaluation, filename + ".out") + " "
    command2 = easy_config.mosesdecoder_path + "scripts/generic/multi-bleu.perl "\
        + " -lc " + os.path.join(easy_config.easy_evaluation, testfilename + ".true." + exp_config["target_id"])\
        + " < " + os.path.join(easy_config.easy_evaluation, filename + ".translated." + exp_config["target_id"])
    write_step(command1, easy_config)
    os.system(command1)
    write_step(command2, easy_config)
    os.system(command2)
def _load(self, account):
    self.account = account

    # Create home directory
    utils.make_dir('')
    self.configfile = utils.get_root_filename('config.json')

    # Create user directory
    userfolder = "%s.%s" % (account['username'], account['api'])
    utils.make_dir(userfolder)
    self.userconfigfile = utils.get_filename(userfolder, 'user.json')

    self.msg.info(self.name, 'Reading config files...')
    try:
        self.config = utils.parse_config(self.configfile, utils.config_defaults)
        self.userconfig = utils.parse_config(self.userconfigfile, utils.userconfig_defaults)
    except IOError:
        raise utils.EngineFatal("Couldn't open config file.")
def user_image_upload(request):
    # Check authentication
    if "username" not in request.session:
        return redirect("/login/")

    if request.method == 'GET':
        form = UploadForm()
        return render(request, 'upload.html', {
            'title': 'Upload',
            'form': form
        })
    elif request.method == 'POST':
        form = UploadForm(request.POST, request.FILES)
        if not form.is_valid():
            return render(request, 'upload.html', {
                'title': 'Upload',
                'form': form,
                'response_message': 'fail'
            })

        # Handle file
        fpath = handle_uploaded_file(request.FILES['file_image'])
        fname = get_filename(request.FILES['file_image'].name)

        # Send file to API
        params = {
            "id": fname,
            "name": form.cleaned_data['appname'],
            "status": form.cleaned_data['status'],
            "manifest_id": form.cleaned_data['manifest_id'],
            "storage_id": form.cleaned_data['storage_id'],
            "plugin_id": form.cleaned_data['plugin_id'],
            "disk_format": form.cleaned_data['disk_format'],
            "container_format": form.cleaned_data['container_format'],
        }
        files = {"image": open(fpath, 'rb')}
        try:
            putNewAppToAPI(fname, files, params)
            request.session['upload_status'] = 'success'
        except requests.exceptions.HTTPError:
            return render(request, 'upload.html', {
                'title': 'Upload',
                'form': form,
                'response_message': 'fail'
            })

        # Delete file
        os.remove(fpath)
        return redirect('/store/')
def build_tree(self):
    """
    Build tree using indentation level.
    Indentation indicates a change in hierarchy level.
    Current line ending in '/' or not indicates regular file or not.
    The question in this loop: where to put this new line?, which is
    asking "who is the parent of new line?"
    """
    parent_node = self.virtual_root
    prev_indent = -1
    for line in self.input:
        cur_indent = utils.get_indent_count(line, self.indent_size)
        distance = cur_indent - prev_indent

        # who is the parent?
        parent_node = self._find_new_parent(parent_node, distance)

        filename = (utils.get_dirname(line)
                    if utils.is_dir(line)
                    else utils.get_filename(line))
        child = dict(
            parent=parent_node,
            children=[] if utils.is_dir(line) else None,
            data={
                'filename': filename,
                'basedir': os.path.join(parent_node['data']['basedir'], filename)
            },
        )
        parent_node['children'].append(child)
        prev_indent = cur_indent
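# Illustrative sketch of the input build_tree consumes: lines whose leading
# indentation encodes hierarchy and whose trailing '/' marks directories.
# The indent size of 4 and the filenames are assumptions for illustration.
_example_input = [
    "src/",
    "    main.py",
    "    utils/",
    "        helpers.py",
]
# Feeding these lines to build_tree would nest "main.py" and "utils/" under
# "src/", with "helpers.py" inside "utils/"; each node's 'basedir' accumulates
# its ancestors' path via os.path.join.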
def bnplm(easy_config):
    training_filename = utils.get_filename(exp_config["training_corpus"])
    extract_training(easy_config, training_filename)
    train_nplm(easy_config, training_filename)
def smt_tuning(easy_config):
    devfilename = utils.get_filename(exp_config["develop_corpus"])
    # print "tuning"
    tuning_tokenizer(easy_config, devfilename)
    tuning_truecase(easy_config, devfilename)
    tuning_process(easy_config, devfilename)
def smt_translation_model_training(easy_config):
    training_filename = utils.get_filename(exp_config["training_corpus"])
    translation_model(easy_config, training_filename)