def download_audio_with_urls(data, out_ext="wav"):
    for d in data:
        original_path = os.path.join(base_dir, 'audio',
                os.path.basename(d.text_path)).replace('.txt', '.original.mp3')
        out_path = os.path.join(base_dir, 'audio',
                os.path.basename(d.text_path)).replace('.txt', '.wav')

        options = {
            'format': 'bestaudio/best',
            'outtmpl': original_path,
            'postprocessors': [{
                'key': 'FFmpegExtractAudio',
                'preferredcodec': 'mp3',
                'preferredquality': '320',
            }],
        }

        with youtube_dl.YoutubeDL(options) as ydl:
            ydl.download([d.video_url])

        audio = AudioSegment.from_file(original_path)
        audio[d.start:d.end].export(out_path, format=out_ext)

        remove_file(original_path)
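# Note: every snippet in this section depends on a small remove_file helper
# that is not itself shown. A minimal sketch of the single-argument variant,
# assuming it deletes the file if present and reports whether anything was
# removed (most call sites ignore the result; the test function below
# branches on it):
import os

def remove_file(path):
    # Delete `path` if it is an existing regular file; tolerate absence.
    if os.path.isfile(path):
        os.remove(path)
        return True
    return False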
def extract_forms(url, follow="false", cookie_jar=None, filename="forms.json"):
    utils.remove_file(os.path.join(os.path.dirname(__file__), filename))
    if cookie_jar is None:
        try:
            out = utils.run_command('{} && {}'.format(
                utils.cd(os.path.dirname(os.path.abspath(__file__))),
                'scrapy crawl form -o {} -a start_url="{}" -a follow={} -a proxy={}'.format(
                    filename, url, follow, HTTP_PROXY)),
                EXTRACT_WAIT_TIME)
        except Exception:
            out = utils.run_command('{} && {}'.format(
                utils.cd(os.path.dirname(os.path.abspath(__file__))),
                'scrapy crawl form -o {} -a start_url="{}" -a follow={}'.format(
                    filename, url, follow)),
                EXTRACT_WAIT_TIME)
    else:
        cookie_jar_path = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            filename.replace('.json', '.txt'))
        cookie_jar.save(cookie_jar_path)
        out = utils.run_command('{} && {}'.format(
            utils.cd(os.path.dirname(os.path.abspath(__file__))),
            'scrapy crawl form_with_cookie -o {} -a start_url="{}" -a cookie_jar={}'.format(
                filename, url, cookie_jar_path)),
            EXTRACT_WAIT_TIME)
    with open(os.path.join(os.path.dirname(__file__), filename)) as json_forms:
        forms = json.load(json_forms)
    utils.remove_file(os.path.join(os.path.dirname(__file__), filename))
    return forms
def get_chromagram(self, mode=MusicDataType.AUDIO):
    """Get chromagram for the Spotify track.

    Creates the chromagram either from mp3 (:meth:`utils.get_chromagram_from_audio`)
    or from the echonest analysis (:meth:`apiwrappers.echonest.get_echonestsong_by_spotifyid`)
    of the Spotify track. May return None if there is no matching echonest
    song for the Spotify track.

    Args:
        mode: :class:`.MusicDataType`; either AUDIO or ECHONEST.

    Returns:
        Chromagram representation.
    """
    directory = os.path.dirname(os.path.abspath(__file__))
    if mode == MusicDataType.AUDIO:
        audio_file_path = directory + '/' + self.id + '.mp3'
        utils.download_file(self.preview_url, audio_file_path)
        chromagram = utils.get_chromagram_from_audio(audio_file_path)
        utils.remove_file(audio_file_path)
        return chromagram
    elif mode == MusicDataType.ECHONEST:
        e = get_echonestsong_by_spotifyid(self.id)
        if e:
            self.echonest_features = e.features
            return e.features.chroma
        else:
            print("No echonest song found for spotify:track:" + self.id)
            return None
def get_chromagram(self, mode=MusicDataType.AUDIO):
    """Get chromagram for the score.

    Creates the chromagram either from mp3 (:meth:`utils.get_chromagram_from_audio`),
    midi (:meth:`utils.get_chromagram_from_midi`) or echonest analysis
    (:meth:`apiwrappers.echonest.get_echonest_features_for_local_file`) of the score.

    Args:
        mode: :class:`.MusicDataType`.

    Returns:
        Chromagram np.array
    """
    directory = os.path.dirname(os.path.abspath(__file__))
    base_path = directory + '/' + self.id
    # track the downloaded file in one variable so the cleanup below works
    # in every branch (the original referenced audio_file_path, which was
    # unbound in the MIDI branch)
    if mode == MusicDataType.AUDIO:
        file_path = base_path + '.mp3'
        utils.download_file(self.mp3, file_path)
        chromagram = utils.get_chromagram_from_audio(file_path)
    elif mode == MusicDataType.MIDI:
        file_path = base_path + '.mid'
        utils.download_file(self.midi, file_path)
        chromagram = utils.get_chromagram_from_midi(file_path)
    elif mode == MusicDataType.ECHONEST:
        file_path = base_path + '.mp3'
        utils.download_file(self.mp3, file_path)
        self.features = \
            echonest.get_echonest_features_for_local_file(file_path)
        chromagram = self.features.chroma
    np.savetxt(self.id + '.csv', chromagram)
    utils.remove_file(file_path)
    return chromagram
def extract_urls(url, follow="false", cookie_jar=None, filename="urls.json"):
    utils.remove_file(os.path.join(os.path.dirname(__file__), filename))
    if cookie_jar is None:
        try:
            out = utils.run_command(
                '{} && {}'.format(
                    utils.cd(os.path.dirname(os.path.abspath(__file__))),
                    'scrapy crawl url -o {} -a start_url="{}" -a follow={} -a proxy={}'
                    .format(filename, url, follow, HTTP_PROXY)),
                EXTRACT_WAIT_TIME)
        except Exception:
            out = utils.run_command(
                '{} && {}'.format(
                    utils.cd(os.path.dirname(os.path.abspath(__file__))),
                    'scrapy crawl url -o {} -a start_url="{}" -a follow={}'
                    .format(filename, url, follow)),
                EXTRACT_WAIT_TIME)
    else:
        cookie_jar_path = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            filename.replace('.json', '.txt'))
        cookie_jar.save(cookie_jar_path)
        out = utils.run_command(
            '{} && {}'.format(
                utils.cd(os.path.dirname(os.path.abspath(__file__))),
                'scrapy crawl url_with_cookie -o {} -a start_url="{}" -a cookie_jar={}'
                .format(filename, url, cookie_jar_path)),
            EXTRACT_WAIT_TIME)
    with open(os.path.join(os.path.dirname(__file__), filename)) as json_urls:
        urls = json.load(json_urls)
    utils.remove_file(os.path.join(os.path.dirname(__file__), filename))
    return urls
def test_utils(self):
    app.config['ENV'] = 'TESTING'

    # allowed file types
    self.assertTrue(utils.allowed_file('filename.jpg'))
    self.assertFalse(utils.allowed_file('filename.gif'))
    self.assertTrue(utils.allowed_file('filename.png'))
    self.assertFalse(utils.allowed_file('filename.pdf'))

    # file search function
    self.assertTrue(utils.find_file('1.jpg'))
    self.assertTrue(utils.find_file('2.jpg'))
    self.assertFalse(utils.find_file('3.jpg'))

    # download function
    file = utils._download(
        'http://www.reportingday.com/wp-content/uploads/2018/06/Cat-Sleeping-Pics.jpg'
    )
    self.assertTrue(file['success'])
    # NOTE: the original compared file['name'] to itself, which always passes;
    # asserting on the URL basename assumes _download keeps that name.
    self.assertEqual(file['name'], 'Cat-Sleeping-Pics.jpg')

    # remove function
    self.assertTrue(utils.remove_file(file['name']))
    self.assertFalse(utils.remove_file('randomname.jpg'))

    # predict function
    self.assertEqual(utils.make_prediction('1.jpg'), 'dog')
    self.assertEqual(utils.make_prediction('2.jpg'), 'cat')
    self.assertRaises(FileNotFoundError, utils.make_prediction, 'asd.jpg')
def __del_mask_with_transparency(self):
    mask_path = utils.find_file_in_folder_by_filename_substring(
        MASK_WITH_TRANSPARENT_FOLDER,
        self.__common_substring_of_wall_and_appropriate_mask_names)
    if mask_path is not None:
        utils.remove_file(mask_path)
def __del_mask_with_black(self):
    mask_path = utils.find_file_in_folder_by_filename_substring(
        MASK_WITH_BLACK_FOLDER,
        self.__common_substring_of_wall_and_appropriate_mask_names)
    if mask_path is not None:
        utils.remove_file(mask_path)
def _archive(self):
    """Archive the az logs.

    Return True if some az logs were archived; False if nothing was archived.
    """
    query_start, query_end = self.get_query_interval()
    if not query_end:
        logger.info(
            "All az logs have been archived; the end time of the last archiving is {}"
            .format(query_start))
        return False

    logger.info("Archive az logs between {} and {}".format(query_start, query_end))

    resource_id = self.get_resource_id(query_start)
    metadata = {
        "start_archive": timezone.now(),
        "resource_id": resource_id,
        self.ARCHIVE_STARTTIME: query_start,
        self.ARCHIVE_ENDTIME: query_end
    }
    self.set_metadata(metadata)

    dump_file = None
    try:
        with tempfile.NamedTemporaryFile(
                suffix=".json",
                prefix=self.settings.RESOURCE_NAME,
                delete=False) as f:
            dump_file = f.name

        if self.settings.TENANT:
            login_cmd = "az login --service-principal -u {} -p {} --tenant {}".format(
                self.settings.USER, self.settings.PASSWORD, self.settings.TENANT)
        else:
            login_cmd = "az login -u {} -p {}".format(
                self.settings.USER, self.settings.PASSWORD)

        cmd = "{}&&az monitor log-analytics query -w {} --analytics-query '{}' -t {}/{} > {}".format(
            login_cmd,
            self.settings.WORKSPACE,
            self.settings.QUERY,
            timezone.utctime(query_start).strftime("%Y-%m-%dT%H:%M:%SZ"),
            timezone.utctime(query_end).strftime("%Y-%m-%dT%H:%M:%SZ"),
            dump_file)
        subprocess.check_output(cmd, shell=True)

        with open(dump_file, 'r') as f:
            data = json.loads(f.read())
        metadata["log_records"] = len(data)

        resourcemetadata = self.resource_repository.push_file(
            dump_file, metadata, f_post_push=_set_end_datetime("end_archive"))
        return True
    finally:
        # dump_file stays None if the temporary file could not be created
        if dump_file:
            remove_file(dump_file)
def __init__(self, X_train, y_train, filename, lambd):
    self.X = X_train
    self.y = y_train
    self.filename = "results/" + filename
    self.loss_filename = "results/" + filename[:-4] + "_f_evals.txt"
    self.lambd = lambd
    self.iters = 0

    remove_file(self.filename)
    remove_file(self.loss_filename)
def __init__(self, veri_unzipped_path, feature_path, seed, typ, num_run):
    # mandatory
    self.veri_unzipped_path = veri_unzipped_path
    self.feature_path = feature_path
    self.num_run = num_run

    # optional but initialized
    self.seed = seed
    self.typ = typ

    # logging
    log_file = "metric.log"
    utils.remove_file(log_file)
    logging.basicConfig(filename=log_file, level=logging.DEBUG)
def _get_next_example(self, data_dir):
    '''Reads and processes a single npz file.
    Loads a single example (input, mel_target, linear_target, cost) from disk.'''
    data_paths = self.path_dict[data_dir]

    while True:
        if self._offset[data_dir] >= len(data_paths):
            self._offset[data_dir] = 0
            if self.data_type == 'train':
                self.rng.shuffle(data_paths)

        data_path = data_paths[self._offset[data_dir]]  # pick one npz file
        self._offset[data_dir] += 1

        try:
            if os.path.exists(data_path):
                # the npz holds "linear", "mel", "tokens", "loss_coeff"
                data = np.load(data_path)
            else:
                continue
        except Exception:
            remove_file(data_path)
            continue

        if not self.skip_path_filter:
            break
        if self.min_n_frame <= data["linear"].shape[0] <= self.max_n_frame \
                and len(data["tokens"]) > self.min_tokens:
            break

    input_data = data['tokens']  # 1-dim
    mel_target = data['mel']

    if 'loss_coeff' in data:
        loss_coeff = data['loss_coeff']
    else:
        loss_coeff = 1
    linear_target = data['linear']

    # mel_target is [n_frames, 80] and n_frames differs per example,
    # so build a stop-token target of zeros with matching length
    stop_token_target = np.asarray([0.] * len(mel_target))

    # In the single-speaker case the speaker_id would not need to be passed,
    # but the current implementation is a bit tangled, so it is passed
    # unconditionally.
    if self.is_multi_speaker:
        return (input_data, loss_coeff, mel_target, linear_target,
                stop_token_target, self.data_dir_to_id[data_dir],
                len(linear_target))
    else:
        return (input_data, loss_coeff, mel_target, linear_target,
                stop_token_target, len(linear_target))
def write_binary(pattern, bank_letter, pad_number):
    outputBIN = f'./export/PTN00{get_pad_code(bank_letter, pad_number)}.BIN'
    pattern_length_encoding = (
        f'00 8C 00 00 00 00 00 00 \n'
        f'00 {get_bar_code(pattern)} 00 00 00 00 00 00'
    )
    remove_file('./test.txt')
    with open(outputBIN, 'wb') as output_binary:
        notes = []
        for track in pattern.tracks:
            notes += track.notes
        notes = sorted(notes, key=lambda note: note.start_tick)
        for i, note in enumerate(notes):
            next_note_start = 0 if i + 1 == len(notes) else notes[i + 1].start_tick
            write_hex(output_binary, write_note(note, next_note_start))
        write_hex(output_binary, pattern_length_encoding)
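# The write_hex helper used by write_binary is not shown here. A minimal
# sketch, assuming it takes a string of whitespace-separated hex byte pairs
# (as built in pattern_length_encoding above) and writes the raw bytes:
def write_hex(output_binary, hex_string):
    # bytes.fromhex accepts spaces between byte pairs; normalize newlines first
    output_binary.write(bytes.fromhex(hex_string.replace('\n', ' ')))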
def parse(date):
    print(f'[PARSE] {date}')
    api = f"https://api.pixivic.com/ranks?page=1&date={date}&mode=day&pageSize=1000"
    rep = requests.get(api, headers=config.HEADERS, timeout=10)
    data = rep.json().get('data')  # type: list
    # '获取排行成功' is the API's "ranking fetched successfully" message
    if '获取排行成功' not in rep.text or data is None or len(data) == 0:
        print(f'No ranking data fetched for {date}')
        return None
    with open(os.path.join(config.DL_TASK_DIR, f'{date}.json'), 'w') as f:
        json.dump(data, f)
    utils.remove_file(os.path.join(config.PARSE_TASK_DIR, date))
    return None
def run(self):
    utils.info("BEGIN " + self.get_name())
    utils.make_tmppath()

    tarname = CONF["name"] + "-" + CONF["version"] + ".tar.gz"
    utils.remove_from_native(CONF)
    if os.path.exists(CONF["pathdst"]):
        utils.remove_path(CONF["pathdst"])
    if os.path.exists(utils.PATHTMP + os.sep + tarname):
        utils.remove_path(utils.PATHTMP + os.sep + tarname)
    if os.path.exists(utils.PATHTMP + os.sep + CONF["name"] + "-" + CONF["version"]):
        utils.remove_path(utils.PATHTMP + os.sep + CONF["name"] + "-" + CONF["version"])

    utils.download_file(CONF["urlsrc"], utils.PATHTMP + os.sep + tarname)
    utils.untar_file(utils.PATHTMP + os.sep + tarname, utils.PATHTMP)
    utils.remove_file(utils.PATHTMP + os.sep + tarname)
    os.rename(utils.PATHTMP + os.sep + CONF["name"] + "-" + CONF["version"], CONF["pathdst"])

    if utils.is_mac():
        # fix the configure script
        apppth = CONF["pathdst"] + os.sep + "configure"
        f = codecs.open(apppth, encoding='utf-8')
        appdata = f.read()
        f.close()
        appdata = appdata.replace('.dylib', '.so').replace("-dynamiclib", "-shared")
        os.remove(apppth)
        f = codecs.open(apppth, encoding='utf-8', mode='w+')
        f.write(appdata)
        f.close()
        os.chmod(apppth, stat.S_IRWXU)

    if utils.is_windows():
        utils.system_exec(["mingw32-make.exe", "-fwin32/Makefile.gcc"], CONF["pathdst"])
    else:
        utils.system_exec(["./configure"], CONF["pathdst"])
        utils.system_exec(["make"], CONF["pathdst"])

    if utils.is_mac():
        # fix zutil.h
        apppth = CONF["pathdst"] + os.sep + "zutil.h"
        f = codecs.open(apppth, encoding='utf-8')
        appdata = f.read()
        f.close()
        appdata = appdata.replace('# define local static', '//# define local static')
        os.remove(apppth)
        f = codecs.open(apppth, encoding='utf-8', mode='w+')
        f.write(appdata)
        f.close()

    utils.copy_to_native(CONF)
    utils.info("END " + self.get_name())
def get_chromagram(self, mode=MusicDataType.AUDIO):
    directory = os.path.dirname(os.path.abspath(__file__))
    if mode == MusicDataType.AUDIO:
        audio_file_path = directory + '/' + self.id + '.mp3'
        utils.download_file(self.preview_url, audio_file_path)
        chromagram = utils.get_chromagram_from_audio(audio_file_path)
        utils.remove_file(audio_file_path)
        return chromagram
    elif mode == MusicDataType.ECHONEST:
        if self.features:
            return self.features.chroma
        else:
            print("No echonest song found for: " + self.id)
            return None
def save_model(self):
    torch.save(self.shared.state_dict(), self.shared_path)
    logger.info(f'[*] SAVED: {self.shared_path}')

    torch.save(self.controller.state_dict(), self.controller_path)
    logger.info(f'[*] SAVED: {self.controller_path}')

    epochs, shared_steps, controller_steps = self.get_saved_models_info()

    # prune checkpoints, keeping only the most recent max_save_num epochs
    for epoch in epochs[:-self.args.max_save_num]:
        paths = glob.glob(
            os.path.join(self.args.model_dir, f'*_epoch{epoch}_*.pth'))
        for path in paths:
            utils.remove_file(path)
def _get_next_example(self, data_dir):
    '''Loads a single example (input, mel_target, linear_target, cost) from disk.'''
    data_paths = self.path_dict[data_dir]

    while True:
        if self._offset[data_dir] >= len(data_paths):
            self._offset[data_dir] = 0
            if self.data_type == 'train':
                self.rng.shuffle(data_paths)

        data_path = data_paths[self._offset[data_dir]]
        self._offset[data_dir] += 1

        try:
            if os.path.exists(data_path):
                data = np.load(data_path)
            else:
                continue
        except Exception:
            remove_file(data_path)
            continue

        if not self.skip_path_filter:
            break
        if self.min_n_frame <= data["linear"].shape[0] <= self.max_n_frame and \
                len(data["tokens"]) > self.min_tokens:
            break

    input_data = data['tokens']
    mel_target = data['mel']

    # cmudict enabled -> convert some characters in known words to ARPAbet
    # (with probability _p_cmudict)
    if self._cmudict and random.random() < _p_cmudict:
        txt = text.sequence_to_text(input_data, False, True)
        txt = ' '.join([self._maybe_get_arpabet(word) for word in txt.split(' ')])
        input_data = text.text_to_sequence(txt, as_token=False)

    if 'loss_coeff' in data:
        loss_coeff = data['loss_coeff']
    else:
        loss_coeff = 1
    linear_target = data['linear']

    return (input_data, loss_coeff, mel_target, linear_target,
            self.data_dir_to_id[data_dir], len(linear_target))
async def sing(ctx, *, search):
    user = ctx.message.author
    server = ctx.message.guild
    connected = ctx.author.voice
    vc = ctx.voice_client

    if connected:
        # only read the channel name once we know the author is in a channel
        channel_name = connected.channel.name
        if not vc:
            await connected.channel.connect()
        elif not vc.is_connected():
            await connected.channel.connect()
        elif vc.channel.name != channel_name:
            await vc.disconnect()
            await connected.channel.connect()

        async with ctx.typing():
            player, path = await YTDLSource.from_url(ctx, search, loop=client.loop)
            # player = await YTDLSource.create_source(ctx, search, loop=client.loop)
            ctx.voice_client.play(
                player,
                after=lambda e: print('Player error: %s' % e) if e else remove_file(path))
        await ctx.send(
            f'From {user.mention}, dedicated to everyone: 🎶 {player.title} 🎶'
        )
    else:
        await ctx.channel.send(f'{user.mention} is not in a channel.')
def pull(diff_dict):
    re_bool = True
    try:
        print("Pull start")
        diff_dict = get_tree_leaf(diff_dict, local_only)
        ip, remote_path, auth = read_config()
        local_path = "."
        for _, d in tqdm.tqdm(diff_dict[local_only], desc="Delete", ncols=0):
            if not utils.remove_file(local_path, d):
                print("{}: file remove fault".format(d))
                re_bool = False
        for _, m in tqdm.tqdm(diff_dict[modify], desc="Modify", ncols=0):
            if not remote_download(ip, remote_path, local_path, m, auth):
                print("{}: file modify fault".format(m))
                re_bool = False
        for is_dir, a in tqdm.tqdm(diff_dict[remote_only], desc="Add", ncols=0):
            if is_dir:
                local_file = os.path.join(local_path, a)
                if not os.path.exists(local_file):
                    os.makedirs(local_file)
                if not os.path.exists(local_file):
                    print("{}: dir create fault".format(a))
                    re_bool = False
            else:
                if not remote_download(ip, remote_path, local_path, a, auth):
                    print("{}: file add fault".format(a))
                    re_bool = False
        print("Pull end")
        return re_bool
    except BaseException as e:
        print("error: {}".format(e))
        return False
def remote_download(ip, remote_root, local_root, file, auth):
    _url = "{}/download?root={}&file={}&auth={}".format(
        ip, quote(remote_root), quote(file), auth)
    cache_file = os.path.join(local_root, utils.cache_dir,
                              os.path.basename(file) + "_" + str(uuid.uuid4()))
    try:
        response = requests.get(_url, timeout=30)
        if response.status_code == 200:
            # response.content is bytes, so compare against a bytes literal
            if response.content != b"ERROR":
                with open(cache_file, "wb") as f:
                    f.write(response.content)
                abs_file = os.path.join(local_root, file)
                if not utils.remove_file(local_root, file):
                    print("download remove local error:{}".format(abs_file))
                else:
                    if utils.move_file(cache_file, abs_file):
                        return True
                    else:
                        print("download file cache -> local error:{}".format(cache_file))
            else:
                print("download error,maybe server not root:{} file:{}".format(
                    remote_root, file))
        else:
            print("response.status_code: {}".format(response.status_code))
    except BaseException as e:
        import traceback
        traceback.print_exc()
        print(e)
    if os.path.exists(cache_file):
        os.remove(cache_file)
    return False
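# pull() and remote_download() above (and the Tornado handlers further down)
# rely on a different, two-argument contract: utils.remove_file(root,
# relative_file) returning True/False. A minimal sketch under that assumption:
import os

def remove_file(root, relative_file):
    # Remove `relative_file` beneath `root`; report success as a boolean,
    # since every caller branches on the result.
    abs_file = os.path.join(root, relative_file)
    try:
        if os.path.isdir(abs_file):
            os.rmdir(abs_file)  # pull() also deletes leftover directories
        elif os.path.exists(abs_file):
            os.remove(abs_file)
        return not os.path.exists(abs_file)
    except OSError:
        return False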
def remove_configuration_file(self, system):
    """Remove the sonar configuration file."""
    utils.print_(">> Removing configuration file ...")
    try:
        utils.remove_file(self.sonar_folder +
                          "{}.sonarsource.properties".format(system))
        utils.ok_text(
            "File {}.sonarsource.properties removed successfully.".format(system))
    except Exception:
        utils.error_text(
            "Could not remove the configuration file for system {}".format(system))
        utils.system_exit_ok()
def main():
    files = os.listdir(config.DL_TASK_DIR)
    if not files:
        print('No download task files; run parse first')
        return None
    while files:
        file = os.path.join(config.DL_TASK_DIR, files.pop())
        with open(file, 'r') as f:
            items = json.load(f)
        utils.remove_file(file)
        for i in items:
            queue.put(i)
        threads = [Thread(target=dl) for _ in range(10)]
        for t in threads:
            t.start()
        for t in threads:
            t.join()
def send_tweet_media(self, imgdata):
    try:
        with open(self.config["tmpfile"], "wb") as tmpfo:
            tmpfo.write(imgdata)
        self.api.update_with_media(self.config["tmpfile"],
                                   status=self.config["generichashtags"])
        utils.info("tweeted media (%s)" % (self.config["tmpfile"]))
        utils.remove_file(self.config["tmpfile"])
    except tweepy.error.TweepError as ex:
        utils.warn(ex)
        utils.enqueue(queuefile=self.config["tweetmediaqueue"], data=imgdata)
        time.sleep(self.config["tweetdelay"])
    except Exception:
        import traceback
        traceback.print_exc()
        self.send_dm("exception while sending media tweet: %dB" % (len(imgdata)))
        utils.warn("exception, sent dm to %s" %
                   (self.config["twitterusers"].split("|")[0]))
        utils.remove_file(self.config["tmpfile"])
def _get_next_example(self, data_dir):
    '''Reads and processes a single npz file.
    Loads a single example (input, mel_target, linear_target, cost) from disk.'''
    data_paths = self.path_dict[data_dir]

    while True:
        if self._offset[data_dir] >= len(data_paths):
            self._offset[data_dir] = 0
            if self.data_type == 'train':
                self.rng.shuffle(data_paths)

        data_path = data_paths[self._offset[data_dir]]  # pick one npz file
        self._offset[data_dir] += 1

        try:
            if os.path.exists(data_path):
                # the npz holds "linear", "mel", "tokens", "loss_coeff"
                data = np.load(data_path)
            else:
                continue
        except Exception:
            remove_file(data_path)
            continue

        if not self.skip_path_filter:
            break
        if self.min_n_frame <= data["linear"].shape[0] <= self.max_n_frame \
                and len(data["tokens"]) > self.min_tokens:
            break

    input_data = data['tokens']  # 1-dim
    mel_target = data['mel']

    if 'loss_coeff' in data:
        loss_coeff = data['loss_coeff']
    else:
        loss_coeff = 1
    linear_target = data['linear']

    return (input_data, loss_coeff, mel_target, linear_target,
            self.data_dir_to_id[data_dir], len(linear_target))
def _process_utterance(audio_path, data_dir, tokens, loss_coeff):
    audio_name = os.path.basename(audio_path)
    filename = audio_name.rsplit('.', 1)[0] + ".npz"
    numpy_path = os.path.join(data_dir, filename)

    if not os.path.exists(numpy_path):
        wav = load_audio(audio_path)
        try:
            linear_spectrogram = spectrogram(wav).astype(np.float32)
            mel_spectrogram = melspectrogram(wav).astype(np.float32)
        except Exception:
            return 0

        data = {
            "linear": linear_spectrogram.T,
            "mel": mel_spectrogram.T,
            "tokens": tokens,
            "loss_coeff": loss_coeff,
        }
        n_frame = linear_spectrogram.shape[1]

        if hparams.skip_inadequate:
            min_n_frame = hparams.reduction_factor * hparams.min_iters
            max_n_frame = hparams.reduction_factor * hparams.max_iters - hparams.reduction_factor
            if min_n_frame <= n_frame <= max_n_frame and len(tokens) >= hparams.min_tokens:
                return None

        np.savez(numpy_path, **data, allow_pickle=False)
    else:
        try:
            data = np.load(numpy_path)
            n_frame = data["linear"].shape[0]
        except Exception:
            # corrupt cache entry: delete it and recompute
            remove_file(numpy_path)
            return _process_utterance(audio_path, data_dir, tokens, loss_coeff)

    return n_frame
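# A hedged usage sketch for _process_utterance: the paths and token values
# below are illustrative, not from the original code. The function acts as a
# cache, recomputing (and re-saving) the npz only when it is missing or corrupt.
n_frame = _process_utterance(
    "datasets/son/audio/son-001.wav",  # hypothetical audio path
    "datasets/son/data",               # hypothetical npz cache directory
    tokens=[13, 5, 27, 9],             # hypothetical token id sequence
    loss_coeff=1.0,
)
if n_frame == 0:
    print("spectrogram extraction failed; utterance skipped")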
def main(args):
    """Main function for lattice preprocessing."""
    global LOGGER
    LOGGER = utils.get_logger(args.verbose)

    dst_dir = args.dst_dir
    utils.check_dir(dst_dir)
    file_list_dir = args.file_list_dir
    utils.check_dir(file_list_dir)

    wordvec_path = os.path.join(args.wordvec)
    wordvec = utils.load_wordvec(wordvec_path)
    subword_embedding_path = os.path.join(args.embedding)
    subword_embedding = utils.load_wordvec(subword_embedding_path)

    subset_list = ['train.lat.txt', 'cv.lat.txt', 'test.lat.txt']
    processed_subset_list = []
    for subset in subset_list:
        subset_name = subset.split('.')[0] + '.' + subset.split('.')[2]
        preprocessed_list_file = os.path.join(args.processed_file_list_dir,
                                              subset_name)
        utils.remove_file(preprocessed_list_file)
        processed_subset_list.append(preprocessed_list_file)

    for i, subset in enumerate(subset_list):
        lat_file_list = os.path.join(file_list_dir, subset)

        # Compile the list of lat.gz files to process
        lattice_list = []
        with open(os.path.abspath(lat_file_list), 'r') as file_in:
            for line in file_in:
                lattice_list.append(line.strip())

        with Pool(args.num_threads) as pool:
            pool.starmap(
                process_one_lattice,
                zip(lattice_list, repeat(dst_dir), repeat(wordvec),
                    repeat(subword_embedding), repeat(args.embed_apostrophe),
                    repeat(processed_subset_list[i])))
def get_x_from_path(model=None,
                    container_path=None,
                    classes=None,
                    save=False,
                    filename=None,
                    verbose=False):
    """Convert the images under `container_path` into a preprocessed tensor
    batch for `model`, optionally saving the result to an HDF5 file."""
    if model is None:
        model = config.model
    assert utils.is_keras_pretrained_model(model)
    if container_path is None:
        container_path = config.train_dir

    imagepaths = utils.images_under_subdirs(container_path, subdirs=classes)
    tensor_list = []
    target_size = config.target_size_dict[model]
    if verbose:
        print('Started: images -> tensors')
    for path in imagepaths:
        tensor = path_to_tensor(path, target_size=target_size)
        tensor_list.append(tensor)
    preprocess_fun = preprocess_input_wrapper(model)
    tensors = np.vstack(tensor_list)
    tensors = preprocess_fun(tensors)
    if verbose:
        print('Finished: images -> tensors')

    if save:
        if not filename:
            filename = 'x_{}.h5'.format(config.model)
        filepath = os.path.join(config.precomputed_dir, filename)
        utils.remove_file(filepath)
        if verbose:
            print('Started saving {}'.format(filename))
        with h5py.File(filepath, 'w') as hf:
            hf.create_dataset('data', data=tensors)
        if verbose:
            print('Finished saving {}'.format(filename))
    else:
        return tensors
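# A hedged usage sketch for get_x_from_path; the model name and class
# subdirectories are assumptions, not part of the original code.
x = get_x_from_path(
    model='resnet50',        # must satisfy utils.is_keras_pretrained_model
    classes=['cat', 'dog'],  # hypothetical subdirectories under config.train_dir
    save=False,
    verbose=True,
)
print(x.shape)  # (num_images, height, width, channels)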
def export_skin(self, filepath):
    '''Export skin.'''
    # Build temp config file (the original joined "/tmp/%s", leaving a
    # literal %s path component).
    config_filepath = os.path.join("/tmp", str(uuid.uuid4()))
    touch_file(config_filepath)
    self.save_skin(config_filepath)

    # Build skin package.
    with tarfile.open("%s.tar.gz" % filepath, "w:gz") as tar:
        # Add config file.
        tar.add(config_filepath, "config.ini", False)

        # Add background image file.
        tar.add(self.get_skin_file_path(self.image), self.image, False)

        # Copy theme files if theme is not a standard theme.
        if not self.theme_name in COLOR_SEQUENCE:
            tar.add(os.path.join(self.ui_theme_dir, self.theme_name),
                    os.path.join("ui_theme", self.theme_name))
            tar.add(os.path.join(self.app_theme_dir, self.theme_name),
                    os.path.join("app_theme", self.theme_name))

    # Remove temp config file.
    remove_file(config_filepath)
def next():
    if YoutubeHelper.music_thread is not None:
        if YoutubeHelper.queue_instance.is_empty():
            GM.gui.quick_gui(
                "The youtube queue is empty, so I can't go to the next song.",
                text_type='header',
                box_align='left')
            return
        GM.gui.quick_gui("Going to next available track.",
                         text_type='header',
                         box_align='left')
        GM.logger.info("The youtube audio queue moved to the next available track.")
        try:
            utils.remove_file(
                f"{YoutubeHelper.current_song_info['img_id']}.jpg",
                utils.get_temporary_img_dir())
        except FileNotFoundError:
            pass
        stop_audio()
        download_next()
        play_audio()
        return
def get(self):
    root = self.get_argument("root", None)
    relative_file = self.get_argument("file", None)
    if not self.is_auth(root):
        return
    logging.info("RemoveHandler root:{} file: {} ".format(root, relative_file))
    if root and relative_file:
        if utils.remove_file(root, relative_file):
            self.write(json.dumps(dict(status=0)))
        else:
            self.write(json.dumps(dict(status=1, message="file not remove")))
    else:
        self.write(json.dumps(dict(status=1, message="param is none")))
def main():
    generate_file(1024 * 100)

    elapsed_time: float = timeit.timeit(
        'compress_decompress()',
        setup='from __main__ import compress_decompress',
        number=100,
    )
    print(f'time = {elapsed_time} sec')

    remove_file('_test')
    remove_file('_test_decomp')
    remove_file('test.huf')
async def post(self):
    root = self.get_argument("root", None)
    relative_file = self.get_argument("file", None)
    if not self.is_auth(root):
        return
    logging.info("UploadHandler root:{} file: {} ".format(root, relative_file))
    abs_file = os.path.join(root, relative_file)
    if os.path.exists(abs_file) and not os.access(abs_file, os.W_OK):
        self.write(json.dumps(
            dict(status=1, message="no write permission:{}".format(abs_file))))
    else:
        if abs_file:
            logging.info("download file start:{}".format(abs_file))
            cache_file = os.path.join(
                root, utils.cache_dir,
                os.path.basename(relative_file) + "_" + str(uuid.uuid4()))
            async with aiofiles.open(cache_file, 'wb') as f:
                await f.write(self.request.body)
            logging.info("download file end:{}".format(abs_file))
            if utils.remove_file(root, relative_file):
                if utils.move_file(cache_file, abs_file):
                    self.write(json.dumps(dict(status=0)))
                else:
                    logging.warning("move file error {} -> {}".format(
                        cache_file, abs_file))
                    self.write(json.dumps(
                        dict(status=1, message="cache to file move error")))
            else:
                logging.warning("remove file error {}".format(abs_file))
                self.write(json.dumps(dict(status=1, message="file not remove")))
            if os.path.exists(cache_file):
                os.remove(cache_file)
        else:
            self.write(json.dumps(dict(status=1, message="file is none")))
def main():
    global already_sent_sms, already_sent, already_recording, t1_stop, t1

    # catch signals (the original registered SIGINT twice)
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)

    if IS_ERROR:
        log.error("Unable to start PIR!")
        remove_file(os.path.join(os.path.dirname(__file__), 'pir.pid'))
        sys.exit(1)

    # Use BCM GPIO references instead of physical pin numbers
    GPIO.setmode(GPIO.BCM)
    # Alerts OFF
    GPIO.setwarnings(False)

    # Define GPIO to use on Pi
    GPIO_PIR = 7
    log.info("PIR Module Test (CTRL-C to exit)")

    # Set pin as input
    GPIO.setup(GPIO_PIR, GPIO.IN)

    Current_State = 0
    Previous_State = 0

    try:
        log.warn("Waiting for PIR to settle ...")

        # Loop until PIR output is 0
        while GPIO.input(GPIO_PIR) == 1:
            Current_State = 0

        log.info(" Ready")

        # Loop until user quits with CTRL-C
        while True:
            # Read PIR state
            Current_State = GPIO.input(GPIO_PIR)

            if Current_State == 1 and Previous_State == 0:
                # PIR is triggered
                log.info(" Motion detected!")

                # sms
                if not already_sent_sms and CONFIG['send_sms']:
                    log.info(" Sending sms through email...")
                    sendEmail(CONFIG['email_sms'], CONFIG['email_from_addr'],
                              'Motion detected',
                              'There\'s been movement detected in the house.',
                              CONFIG['email_smtp'], CONFIG['email_smtp_port'],
                              CONFIG['email_user'], CONFIG['email_passwd'])
                    already_sent_sms = 1

                # GV sms
                if not already_sent_sms and CONFIG['send_gv_sms']:
                    log.info(" Sending sms through GV...")
                    sendGVSMS(CONFIG['gv_user'], CONFIG['gv_passwd'],
                              CONFIG['sms_num'],
                              'There\'s been movement detected in the house.')
                    already_sent_sms = 1

                # email
                if not already_sent and CONFIG['email_on_motion']:
                    log.info(" Sending email...")
                    sendEmail(CONFIG['email_send_to'], CONFIG['email_from_addr'],
                              'Motion detected',
                              'There\'s been movement detected in the house.',
                              CONFIG['email_smtp'], CONFIG['email_smtp_port'],
                              CONFIG['email_user'], CONFIG['email_passwd'])
                    already_sent = 1

                # record
                if not already_recording and CONFIG['record_on_motion']:
                    log.info(" Starting recording!")
                    t1_stop = threading.Event()
                    t1 = threading.Thread(target=startRecord, args=(1, t1_stop,))
                    t1.start()
                    already_recording = 1

                # arm camera
                if CONFIG['arm_camera']:
                    armCamera(1)

                # Record previous state
                Previous_State = 1
            elif Current_State == 0 and Previous_State == 1:
                # PIR has returned to ready state
                log.info(" Ready")
                Previous_State = 0

            # Wait for 10 milliseconds
            time.sleep(0.01)
    except KeyboardInterrupt:
        log.warn(" Quit")
        # Reset GPIO settings
        GPIO.cleanup()
def text_recognition(path, config):
    root, ext = os.path.splitext(path)
    txt_path = root + ".txt"

    if os.path.exists(txt_path):
        with open(txt_path) as f:
            out = json.loads(f.read())
        return out

    # google-cloud-speech < 2.0 API (the enums/types modules were removed in 2.0)
    from google.cloud import speech
    from google.cloud.speech import enums
    from google.cloud.speech import types

    out = {}
    error_count = 0

    tmp_path = os.path.splitext(path)[0] + ".tmp.wav"
    while True:
        try:
            client = speech.SpeechClient()

            content = load_audio(
                path,
                pre_silence_length=config.pre_silence_length,
                post_silence_length=config.post_silence_length)

            max_duration = config.max_duration - \
                config.pre_silence_length - config.post_silence_length
            audio_duration = get_duration(content)

            if audio_duration >= max_duration:
                print(" [!] Skip {} because of duration: {} > {}".
                      format(path, audio_duration, max_duration))
                return {}

            content = resample_audio(content, config.sample_rate)
            save_audio(content, tmp_path, config.sample_rate)

            with io.open(tmp_path, 'rb') as f:
                audio = types.RecognitionAudio(content=f.read())

            # use a separate name so the `config` argument is not shadowed
            # (the original rebound `config` here, breaking retries)
            recognition_config = types.RecognitionConfig(
                encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,
                sample_rate_hertz=config.sample_rate,
                language_code='ko-KR')

            response = client.recognize(recognition_config, audio)
            if len(response.results) > 0:
                alternatives = response.results[0].alternatives
                results = [alternative.transcript for alternative in alternatives]
                assert len(results) == 1, "More than 1 results: {}".format(results)

                out = {path: "" if len(results) == 0 else results[0]}
                print(path, results[0])
                break
            break
        except Exception as err:
            # the original re-raised here, which made the retry logic below
            # unreachable; log and retry instead
            error_count += 1
            print("Skip warning for {} for {} times: {}".
                  format(path, error_count, err))

            if error_count > 5:
                break
            else:
                continue

    remove_file(tmp_path)

    with open(txt_path, 'w') as f:
        json.dump(out, f, indent=2, ensure_ascii=False)

    return out
    return results


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--audio_pattern', required=True)
    parser.add_argument('--recognition_filename', default="recognition.json")
    parser.add_argument('--sample_rate', default=16000, type=int)
    parser.add_argument('--pre_silence_length', default=1, type=int)
    parser.add_argument('--post_silence_length', default=1, type=int)
    parser.add_argument('--max_duration', default=60, type=int)

    config, unparsed = parser.parse_known_args()

    audio_dir = os.path.dirname(config.audio_pattern)

    for tmp_path in glob(os.path.join(audio_dir, "*.tmp.*")):
        remove_file(tmp_path)

    paths = glob(config.audio_pattern)
    paths.sort()
    results = text_recognition_batch(paths, config)

    base_dir = os.path.dirname(audio_dir)
    recognition_path = os.path.join(base_dir, config.recognition_filename)

    if os.path.exists(recognition_path):
        backup_file(recognition_path)

    write_json(recognition_path, results)
def run(self):
    _program = 'migasfree upload'

    parser = optparse.OptionParser(
        description=_program,
        prog=self.CMD,
        version=self.release,
        usage='%prog options'
    )

    print(_('%(program)s version: %(version)s') % {
        'program': _program,
        'version': self.release
    })

    # migasfree-upload {-f file [--regular-file] | -d dir [-n name]}
    # [[-u user] [-p pwd] [--main-project project] [-s store] [--no-create-repo]]

    parser.add_option(
        "--file", "-f", action="store",
        help=_('File to upload at server')
    )
    parser.add_option(
        "--regular-file", "-r", action="store_true",
        help=_('File is not a software package')
    )
    parser.add_option(
        "--dir", "-d", action="store",
        help=_('Directory with files to upload at server')
    )
    parser.add_option(
        "--name", "-n", action="store",
        help=_('Name of the directory at server')
    )
    parser.add_option(
        "--user", "-u", action="store",
        help=_('Authorized user to upload at server')
    )
    parser.add_option(
        "--pwd", "-p", action="store",
        help=_('User password')
    )
    parser.add_option(
        "--main-project", "-m", action="store",
        help=_('Project to upload files')
    )
    parser.add_option(
        "--store", "-s", action="store",
        help=_('Store to upload files at server')
    )
    parser.add_option(
        "--no-create-repo", "-c", action="store_true",
        help=_('No create repository after upload file at server')
    )

    options, arguments = parser.parse_args()

    # check restrictions
    if not options.file and not options.dir:
        self._usage_examples()
        parser.error(_('File or Dir options are mandatory!!!'))
    if options.file and options.dir:
        self._usage_examples()
        parser.error(_('File and Dir options are exclusive!!!'))
    if options.regular_file and options.dir:
        self._usage_examples()
        parser.error(_('This option does not apply with Dir option!!!'))
    if options.name and options.file:
        self._usage_examples()
        parser.error(_('This option does not apply with File option!!!'))

    utils.check_lock_file(self.CMD, self.LOCK_FILE)

    # assign config options
    if options.user:
        self.packager_user = options.user
    if options.pwd:
        self.packager_pwd = options.pwd
    if options.main_project:
        self.packager_project = options.main_project
    if options.store:
        self.packager_store = options.store
    if options.no_create_repo:
        self._create_repo = not options.no_create_repo

    # actions dispatcher
    if options.file:
        self._file = options.file
        self._is_regular_file = (options.regular_file is True)
        if self._is_regular_file:
            self._create_repo = False
    elif options.dir:
        self._directory = options.dir
        if options.name:
            self._server_directory = options.name
        else:
            self._server_directory = options.dir
    else:
        parser.print_help()
        self._usage_examples()

    self._left_parameters()
    self.auto_register_user = self.packager_user
    self.auto_register_password = self.packager_pwd
    self.auto_register_command = 'get_key_packager'

    self._show_running_options()

    if self._file:
        self._upload_file()
    else:
        self._upload_set()

    utils.remove_file(self.LOCK_FILE)

    sys.exit(os.EX_OK)  # no error
def run(self):
    _program = 'migasfree client'

    parser = optparse.OptionParser(
        description=_program,
        prog=self.CMD,
        version=__version__,
        usage='%prog options'
    )

    print(_('%(program)s version: %(version)s') % {
        'program': _program,
        'version': __version__
    })

    parser.add_option("--register", "-g", action="store_true",
                      help=_('Register computer at server'))
    parser.add_option("--update", "-u", action="store_true",
                      help=_('Update system from repositories'))
    parser.add_option("--search", "-s", action="store",
                      help=_('Search package in repositories'))
    parser.add_option(
        "--install", "-i",
        action="store_true",
        #help=_('Install package or device')
        help=_('Install package')
    )
    parser.add_option(
        "--remove", "-r",
        action="store_true",
        #help=_('Remove package or device')
        help=_('Remove package')
    )
    parser.add_option("--package", "-p", action="store",
                      help=_('Package to install or remove'))
    #parser.add_option("--device", "-d", action="store",
    #                  help=_('Device to install or remove'))

    options, arguments = parser.parse_args()

    # the --device option is currently disabled above; default to None so
    # the checks below do not raise AttributeError
    device = getattr(options, 'device', None)

    # check restrictions
    if options.register and \
            (options.install or options.remove or options.update or options.search):
        self._usage_examples()
        parser.error(_('Register option is exclusive!!!'))
    if options.update and \
            (options.install or options.remove or options.search):
        self._usage_examples()
        parser.error(_('Update option is exclusive!!!'))
    if options.search and (options.install or options.remove):
        self._usage_examples()
        parser.error(_('Search option is exclusive!!!'))
    if options.install and options.remove:
        parser.print_help()
        self._usage_examples()
        parser.error(_('Install and remove are exclusive!!!'))
    if options.install and not (options.package or device):
        self._usage_examples()
        #parser.error(_('Install needs package or device!!!'))
        parser.error(_('Install needs a package!!!'))
    if options.remove and not (options.package or device):
        self._usage_examples()
        #parser.error(_('Remove needs package or device!!!'))
        parser.error(_('Remove needs a package!!!'))

    utils.check_lock_file(self.CMD, self.LOCK_FILE)

    self._show_running_options()

    # actions dispatcher
    if options.update:
        self._update_system()
    elif options.register:
        self._register_computer()
    elif options.search:
        self._search(options.search)
    elif options.install and options.package:
        self._install_package(options.package)
    elif options.install and device:
        self._install_device(device)
    elif options.remove and options.package:
        self._remove_package(options.package)
    elif options.remove and device:
        self._remove_device(device)
    else:
        parser.print_help()
        self._usage_examples()

    utils.remove_file(self.LOCK_FILE)

    sys.exit(os.EX_OK)  # no error