def get_files_packed(self):
    """Build validation and main file lists from pre-packed .npz batches.

    Each .npz file holds one pre-packed batch, so the file count equals
    the step count. Populates ``val_set``/``val_steps``/``val_size``
    (when a validation source is configured) and ``main_set``/
    ``epoch_steps``/``epoch_size``/``max_steps``/``num_epochs``.
    """
    packed = listdir_files(self.dataset, recursive=True, filter_ext=['.npz'])
    if self.shuffle:
        random.shuffle(packed)
    # carve out the validation set: either a dedicated directory,
    # or the head of the (possibly shuffled) training list
    if self.val_dir is not None:
        val_files = listdir_files(self.val_dir, recursive=True, filter_ext=['.npz'])
        self.val_steps = len(val_files)
        self.val_size = self.val_steps * self.batch_size
        self.val_set = val_files[:self.val_steps]
        eprint('validation set: {}'.format(self.val_size))
    elif self.val_size is not None:
        self.val_steps = self.val_size // self.batch_size
        assert self.val_steps < len(packed)
        self.val_size = self.val_steps * self.batch_size
        self.val_set = packed[:self.val_steps]
        packed = packed[self.val_steps:]
        eprint('validation set: {}'.format(self.val_size))
    # whatever remains is the main (training) set; one file per step
    self.epoch_steps = len(packed)
    self.epoch_size = self.epoch_steps * self.batch_size
    if self.max_steps is None:
        self.max_steps = self.epoch_steps * self.num_epochs
    else:
        # derive epoch count from the step budget (ceiling division)
        self.num_epochs = (self.max_steps + self.epoch_steps - 1) // self.epoch_steps
    self.main_set = packed
def get_files(self):
    """Build validation and main file lists from individual .npz samples.

    Unlike the packed variant, every batch consumes ``batch_size``
    files, so splits are sized in files (steps * batch_size).
    """
    files = listdir_files(self.dataset, filter_ext=['.npz'])
    # optional validation split taken from the head of the list
    if self.val_size is not None:
        assert self.val_size < len(files)
        self.val_steps = self.val_size // self.batch_size
        # round the requested size down to a whole number of batches
        self.val_size = self.val_steps * self.batch_size
        self.val_set = files[:self.val_size]
        files = files[self.val_size:]
        eprint('validation set: {}'.format(self.val_size))
    # main set: truncate to a whole number of batches
    assert self.batch_size <= len(files)
    self.epoch_steps = len(files) // self.batch_size
    self.epoch_size = self.epoch_steps * self.batch_size
    if self.max_steps is None:
        self.max_steps = self.epoch_steps * self.num_epochs
    else:
        # derive epoch count from the step budget (ceiling division)
        self.num_epochs = (self.max_steps + self.epoch_steps - 1) // self.epoch_steps
    self.main_set = files[:self.epoch_size]
    # print
    eprint(
        'main set: {}\nepoch steps: {}\nnum epochs: {}\nmax steps: {}\n'.
        format(len(self.main_set), self.epoch_steps, self.num_epochs,
               self.max_steps))
def get_files_origin(self):
    """Return every .npz file under the dataset root, optionally shuffled."""
    files = listdir_files(self.dataset, recursive=True, filter_ext=['.npz'])
    if self.shuffle:
        random.shuffle(files)
    return files
def list_local(params):
    """List a local folder of NZB files as a Kodi plugin directory.

    A sub-folder holding exactly one NZB and no sub-folders whose name
    matches the NZB (per SAB naming) is shown as a playable item with
    NFO metadata; other sub-folders are listed as browsable. Loose NZB
    files at the top level are added as playable entries.

    params: dict with url-quoted "folder" and "type" entries.
    """
    log("list_local: params: %s" % params)
    top_folder = utils.unquote_plus(params.get("folder"))
    nzb_type = utils.unquote_plus(params.get("type"))
    # extensions recognized as downloadable NZB containers
    nzb_exts = ('.nzb', '.gz', '.zip')
    for folder in utils.listdir_dirs(top_folder):
        folder_path = utils.join(top_folder, folder)
        # Check if the folder contains a single nzb and no folders
        nzb_list = [utils.join(folder_path, name)
                    for name in utils.listdir_files(folder_path)
                    if os.path.splitext(name)[1] in nzb_exts]
        folder_list = list(utils.listdir_dirs(folder_path))
        # If single nzb allow the folder to be playable and show info
        if len(nzb_list) == 1 and len(folder_list) == 0:
            # Fixing the naming of nzb according to SAB rules
            nzb_name = m_nzb.Nzbname(os.path.basename(nzb_list[0])).final_name
            if folder.lower() == nzb_name.lower():
                # NOTE(review): the original also had the no-op
                # info.info_labels['title'] = info.info_labels['title']; removed.
                info = nfo.ReadNfoLabels(folder_path)
                url = "&nzbname=" + utils.quote_plus(nzb_name) +\
                    "&nzb=" + utils.quote_plus(nzb_list[0]) + "&type=" + nzb_type
                add_posts(info.info_labels, url, MODE_LOCAL_FILE_IN_DIR,
                          info.thumbnail, info.fanart, False)
            else:
                url = "&type=" + nzb_type + "&folder=" + utils.quote_plus(
                    folder_path)
                add_posts({'title': folder}, url, MODE_LOCAL_LIST, '', '')
        else:
            url = "&type=" + nzb_type + "&folder=" + utils.quote_plus(folder_path)
            add_posts({'title': folder}, url, MODE_LOCAL_LIST, '', '')
    # loose NZB files directly under the top folder
    for name in utils.listdir_files(top_folder):
        if os.path.splitext(name)[1] in nzb_exts:
            file_path = utils.join(top_folder, name)
            url = "&nzbname=" + utils.quote_plus(m_nzb.Nzbname(name).final_name) +\
                "&nzb=" + utils.quote_plus(file_path) + "&type=" + nzb_type
            add_posts({'title': name}, url, MODE_LOCAL_FILE, '', '', False)
    xbmcplugin.setContent(HANDLE, 'movies')
    xbmcplugin.endOfDirectory(HANDLE, succeeded=True, cacheToDisc=True)
def list_local(params):
    """List a local folder of NZB files as a Kodi plugin directory.

    A sub-folder holding exactly one NZB and no sub-folders whose name
    matches the NZB (per SAB naming) is shown as a directly playable
    item (MODE_PLAY) with NFO metadata; other sub-folders are listed as
    browsable. Loose NZB files at the top level are playable as well.

    params: dict with url-quoted "folder" and "type" entries.
    """
    log("list_local: params: %s" % params)
    top_folder = utils.unquote_plus(params.get("folder"))
    nzb_type = utils.unquote_plus(params.get("type"))
    # extensions recognized as downloadable NZB containers
    nzb_exts = ('.nzb', '.gz', '.zip')
    for folder in utils.listdir_dirs(top_folder):
        folder_path = utils.join(top_folder, folder)
        # Check if the folder contains a single nzb and no folders
        nzb_list = [utils.join(folder_path, name)
                    for name in utils.listdir_files(folder_path)
                    if os.path.splitext(name)[1] in nzb_exts]
        folder_list = list(utils.listdir_dirs(folder_path))
        # If single nzb allow the folder to be playable and show info
        if len(nzb_list) == 1 and len(folder_list) == 0:
            # Fixing the naming of nzb according to SAB rules
            nzb_name = m_nzb.Nzbname(os.path.basename(nzb_list[0])).final_name
            if folder.lower() == nzb_name.lower():
                # NOTE(review): the original also had the no-op
                # info.info_labels['title'] = info.info_labels['title']; removed.
                info = nfo.ReadNfoLabels(folder_path)
                url = "&nzbname=" + utils.quote_plus(nzb_name) +\
                    "&nzb=" + utils.quote_plus(nzb_list[0]) + "&type=" + nzb_type
                add_posts(info.info_labels, url, MODE_PLAY,
                          info.thumbnail, info.fanart, False)
            else:
                url = "&type=" + nzb_type + "&folder=" + utils.quote_plus(folder_path)
                add_posts({'title': folder}, url, MODE_LOCAL_LIST, '', '')
        else:
            url = "&type=" + nzb_type + "&folder=" + utils.quote_plus(folder_path)
            add_posts({'title': folder}, url, MODE_LOCAL_LIST, '', '')
    # loose NZB files directly under the top folder
    for name in utils.listdir_files(top_folder):
        if os.path.splitext(name)[1] in nzb_exts:
            file_path = utils.join(top_folder, name)
            url = "&nzbname=" + utils.quote_plus(m_nzb.Nzbname(name).final_name) +\
                "&nzb=" + utils.quote_plus(file_path) + "&type=" + nzb_type
            add_posts({'title': name}, url, MODE_PLAY, '', '', False)
    xbmcplugin.setContent(HANDLE, 'movies')
    xbmcplugin.endOfDirectory(HANDLE, succeeded=True, cacheToDisc=True)
def del_local_file_in_dir(params):
    """Delete every file in the directory containing the given NZB and
    remove the directory itself, after a yes/no confirmation dialog.

    params: dict with a url-quoted "nzb" entry (path to a file inside
    the directory to remove).
    """
    log("del_local_file_in_dir: params: %s" % params)
    local_file = utils.unquote_plus(params.get("nzb"))
    local_path = os.path.dirname(local_file)
    if xbmcgui.Dialog().yesno("Pneumatic", "Delete:", "%s" % local_path):
        for name in utils.listdir_files(local_path):
            target = utils.join(local_path, name)
            log("del_local_file_in_dir: delete: %s" % target)
            utils.delete(target)
        log("del_local_file_in_dir: rmdir: %s" % local_path)
        utils.rmdir(local_path)
        # refresh the listing so the removed folder disappears
        xbmc.executebuiltin("Container.Refresh")
def get_files_origin(self):
    """Index audio files by speech id parsed from the file name.

    File names are expected to look like '<person><sep><speech>.<ext>'
    with '-' or '_' as the separator. Sets ``self.num_labels`` and
    returns a list of (file_path, speech_label) pairs, grouped and
    shuffled for batching via ``group_shuffle``.
    """
    import re
    # get all the audio files
    files = listdir_files(self.dataset, filter_ext=['.wav', '.m4a', '.mp3'])
    # capture groups: 1=dir, 2=person, 3=speech, 4=extension
    regex = re.compile(r'^(.*[/\\])?(.+?)[-_](.+?)(\..+?)$')
    matches = [re.findall(regex, f)[0][1:3] for f in files]
    # the original also bound the person ids but never used them;
    # the underscore keeps the ordered_ids call order intact
    _, speech_ids = [self.ordered_ids(list(group)) for group in zip(*matches)]
    # labels are dense indices, so the count is max index + 1
    self.num_labels = max(speech_ids) + 1
    # data list of (file, label) pairs
    data_list = [(files[i], speech_ids[i]) for i in range(len(files))]
    self.group_shuffle(data_list, self.batch_size, self.shuffle, self.group_size)
    # return
    return data_list
def subfunc(ipath, opath, d, ffmpeg='ffmpeg', iext='.m4a', oext='.wav'):
    """Transcode every `iext` audio file under `d` to 16-bit PCM `oext`
    with ffmpeg, mirroring the tree rooted at `ipath` into `opath`.

    Outputs that already exist are skipped. ffmpeg's stdout/stderr are
    captured (discarded) to keep the console quiet.
    """
    ifiles = listdir_files(d, filter_ext=[iext])
    for ifile in ifiles:
        # map input path into the output tree and swap the extension
        ofile = ifile.replace(ipath, opath, 1)
        ofile = os.path.splitext(ofile)[0] + oext
        save_path = os.path.split(ofile)[0]
        # skip files that were already converted
        if os.path.exists(ofile):
            continue
        # exist_ok avoids the check-then-create race of the original
        os.makedirs(save_path, exist_ok=True)
        # BUGFIX: the original built a command string and .split() it,
        # which broke on any path containing whitespace; pass an
        # argument list so paths are delivered verbatim (shell=False)
        args = [ffmpeg, '-y', '-i', ifile, '-vn', '-c:a', 'pcm_s16le', ofile]
        subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def get_files_origin(self):
    """Enumerate audio files grouped per identity sub-directory.

    Each immediate sub-directory of the dataset root is one identity.
    Sets ``self.num_ids`` and returns a list of (identity_index,
    file_path) pairs, grouped/shuffled for batching.
    """
    id_names = os.listdir(self.dataset)
    self.num_ids = len(id_names)
    id_dirs = [os.path.join(self.dataset, name) for name in id_names]
    # data list: flatten every identity's audio files into (index, file)
    data_list = [
        (index, path)
        for index, id_dir in enumerate(id_dirs)
        for path in listdir_files(id_dir, filter_ext=['.wav', '.m4a', '.mp3'])
    ]
    self.group_shuffle(data_list, self.batch_size, self.shuffle, self.group_size)
    return data_list
def run_steps(self, sess):
    """Evaluate every saved checkpoint on the test set and plot the
    test error against training progress.

    sess: an active session (TensorFlow-style, given sess.run and the
    'inputs:0'/'labels:0' feed names) -- TODO confirm framework.
    Writes 'stats.npy' and 'stats.png' into ``self.test_dir``.
    """
    import re
    prefix = 'model_'
    # get checkpoints of every few steps
    # (each checkpoint leaves a '<name>.index' file; strip the extension
    # to recover the checkpoint path the saver expects)
    ckpts = listdir_files(self.train_dir, recursive=False, filter_ext=['.index'])
    ckpts = [os.path.splitext(f)[0] for f in ckpts if prefix in f]
    ckpts.sort()
    stats = []
    # test all the checkpoints
    for ckpt in ckpts:
        self.saver.restore(sess, ckpt)
        # to be fetched
        fetch = self.losses
        losses_sum = [0 for _ in range(len(self.losses))]
        # run session
        for step in range(self.epoch_steps):
            feed_dict = {'inputs:0': self.test_inputs[step], 'labels:0': self.test_labels[step]}
            ret = sess.run(fetch, feed_dict)
            ret_losses = ret
            # sum of losses
            for i in range(len(self.losses)):
                losses_sum[i] += ret_losses[i]
        # summary: mean of each loss over the test epoch
        losses_mean = [l / self.epoch_steps for l in losses_sum]
        # stats row: [checkpoint_step, loss_0, loss_1, ...]
        ckpt_num = re.findall(prefix + r'(\d+)', ckpt)[0]
        stats.append(np.array([float(ckpt_num)] + losses_mean))
    # save stats
    import matplotlib.pyplot as plt
    stats = np.stack(stats)
    np.save(os.path.join(self.test_dir, 'stats.npy'), stats)
    # save plot
    fig, ax = plt.subplots()
    ax.set_title('Test Error with Training Progress')
    ax.set_xlabel('training steps')
    ax.set_ylabel('MAD (RGB)')
    ax.set_xscale('linear')
    ax.set_yscale('log')
    # drop the first checkpoint and plot the third column (loss index 1)
    # against checkpoint steps -- presumably the MAD loss; verify order
    stats = stats[1:]
    ax.plot(stats[:, 0], stats[:, 2])
    plt.tight_layout()
    plt.savefig(os.path.join(self.test_dir, 'stats.png'))
    plt.close()
def get_files_origin(self):
    """Pair each song with its accompaniment/instrumental counterpart.

    Files whose names match the bgm pattern ('伴奏'/'instru'/'vocal')
    form one list; everything else is a song. Sorting the combined list
    first is what aligns the two lists position-by-position -- TODO
    confirm the naming convention guarantees this pairing.
    Returns a list of (song_path, bgm_path) pairs, replicated so a
    small dataset still yields at least ~1024 pairs.
    """
    import re
    # get all the audio files
    filter_ext = ['.wav', '.flac', '.m4a', '.mp3']
    data_list = listdir_files(self.dataset, filter_ext=filter_ext)
    # filter files by length
    data_list = self.filter_files(data_list, 2.0, None)
    data_list.sort()
    # list of pairs
    regex = re.compile(r'伴奏|instru|vocal')
    bgm_list = [f for f in data_list if re.findall(regex, f)]
    # PERF: membership against a set is O(1); the original scanned the
    # bgm list for every file, which was O(n^2) overall
    bgm_set = set(bgm_list)
    song_list = [f for f in data_list if f not in bgm_set]
    data_list = list(zip(song_list, bgm_list))
    # replicate small datasets up to ~1024 pairs (ceiling factor)
    if len(data_list) < 1024:
        data_list *= (1024 + len(data_list) - 1) // len(data_list)
    # return
    if self.shuffle:
        random.shuffle(data_list)
    return data_list
def get_dataset(self):
    """Collect and shuffle the image files, then size the epoch.

    Sets ``epoch_steps``/``epoch_size``/``num_epochs``/``max_steps``
    (also mirrored into ``self.config.num_epochs``) and stores the
    truncated file list in ``self.files``.
    """
    import random
    files = listdir_files(self.dataset, filter_ext=['.jpeg', '.jpg', '.png'])
    # random shuffle
    random.shuffle(files)
    # size of dataset: whole batches only
    self.epoch_steps = len(files) // self.batch_size
    self.epoch_size = self.epoch_steps * self.batch_size
    if not self.training:
        # evaluation runs exactly one pass over the data
        self.num_epochs = 1
        self.max_steps = self.epoch_steps
    if self.max_steps is None:
        self.max_steps = self.epoch_steps * self.num_epochs
    else:
        # derive epoch count from the step budget (ceiling division)
        self.num_epochs = (self.max_steps + self.epoch_steps - 1) // self.epoch_steps
    self.config.num_epochs = self.num_epochs
    self.files = files[:self.epoch_size]
    eprint(
        'data set: {}\nepoch steps: {}\nnum epochs: {}\nmax steps: {}\n'.
        format(len(self.files), self.epoch_steps, self.num_epochs,
               self.max_steps))
def get_dataset(cls, config):
    """Recursively collect all supported image files under
    ``config.input_dir`` and return them as the dataset list."""
    image_exts = ['.bmp', '.png', '.jpg', '.jpeg', '.webp', '.jp2', '.tiff']
    return listdir_files(config.input_dir, recursive=True, filter_ext=image_exts)