def test_benchmark_mp():
    dut = DATASETS['DIV2K']
    epochs = 4
    config = Config(batch=8, scale=4, depth=1, patch_size=196,
                    steps_per_epoch=100, convert_to='RGB', crop='random')
    loader = QuickLoader(dut, 'train', config, True, n_threads=8)
    for _ in range(epochs):
        r = loader.make_one_shot_iterator()
        list(r)
def test_memory_usage():
    dut = DATASETS['GOPRO']
    epochs = 4
    config = Config(batch=16, scale=4, depth=1, patch_size=196,
                    steps_per_epoch=100, convert_to='RGB', crop='random')
    loader = QuickLoader(dut, 'train', config, True, n_threads=8)
    for _ in range(epochs):
        it = loader.make_one_shot_iterator('1GB', True)
        loader.prefetch('1GB')
        list(it)
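# The '1GB' string above caps the iterator's in-memory patch cache. Below is
# a minimal sketch of how such a size string could be parsed into a byte
# count; `parse_memory_limit` is a hypothetical helper for illustration only,
# NOT the parser QuickLoader actually uses.
import re

def parse_memory_limit(limit):
    """Convert strings like '1024MB' or '8GB' into a number of bytes."""
    units = {'B': 1, 'KB': 1 << 10, 'MB': 1 << 20, 'GB': 1 << 30}
    m = re.fullmatch(r'(\d+(?:\.\d+)?)\s*([KMG]?B)', limit.strip().upper())
    if not m:
        raise ValueError(f'unrecognized memory limit: {limit!r}')
    value, unit = m.groups()
    return int(float(value) * units[unit])

assert parse_memory_limit('1GB') == 1 << 30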
def test_quickloader_iter():
    dut = DATASETS['DIV2K']
    config = Config(batch=16, scale=4, depth=1, steps_per_epoch=200,
                    convert_to='RGB', crop='random')
    config.patch_size = 48
    r = QuickLoader(dut, 'train', config, True, n_threads=8)
    it = r.make_one_shot_iterator('8GB')
    for hr, lr, name in it:
        print(name, flush=True)
    # a second pass over the same loader must also work
    it = r.make_one_shot_iterator('8GB')
    for hr, lr, name in it:
        print(name, flush=True)
def predict(self, files, mode='pil-image1', depth=1, **kwargs):
    r"""Predict output for frames.

    Args:
        files: a list of frames as inputs
        mode: specify the file format. `pil-image1` for PIL-supported
          images, or `NV12/YV12/RGB` for raw data
        depth: specify the length of the image sequence. 1 for images,
          >1 for videos
    """
    sess = tf.get_default_session()
    ckpt_last = self._restore_model(sess)
    files = [Path(file) for file in to_list(files)]
    data = Dataset(test=files, mode=mode, depth=depth, modcrop=False,
                   **kwargs)
    loader = QuickLoader(1, data, 'test', self.model.scale, -1,
                         crop=None, **kwargs)
    it = loader.make_one_shot_iterator()
    if len(it):
        print('===================================')
        print(f'Predicting model: {self.model.name} by {ckpt_last}')
        print('===================================')
    else:
        return
    for img in tqdm.tqdm(it, 'Infer', ascii=True):
        feature, label, name = img[self.fi], img[self.li], img[-1]
        tf.logging.debug('output: ' + str(name))
        for fn in self.feature_callbacks:
            feature = fn(feature, name=name)
        outputs = self.model.test_batch(feature, None)
        for fn in self.output_callbacks:
            outputs = fn(outputs, input=img[self.fi], label=img[self.li],
                         mode=loader.color_format, name=name)
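# A minimal usage sketch for `predict`, assuming an already constructed
# trainer object that exposes this method; the variable `trainer` and the
# frame paths below are placeholders, not names defined in this file.
with tf.Session():
    # depth=1 treats each file as an independent image; depth > 1 groups
    # consecutive frames into one video sequence.
    trainer.predict(['frames/0001.png', 'frames/0002.png'],
                    mode='pil-image1', depth=1)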
def main(*args):
    for name in FLAGS.dataset:
        name = name.upper()
        d = DATASETS.get(name)
        if not d:
            tf.logging.error('Could not find ' + name)
            return
        # calc mean [R G B]
        loader = QuickLoader(1, d, 'train', 1, convert_to='RGB')
        colors = []
        for img, _, _ in loader.make_one_shot_iterator(shard=8):
            rgb = np.reshape(img, [-1, 3])
            colors.append(rgb)
        colors = np.concatenate(colors)
        mean_colors = colors.mean(axis=0, keepdims=True)
        SAVE[f'{name}_MEAN'] = mean_colors
        if FLAGS.std:
            std_colors = colors.std(axis=0, keepdims=True)
            SAVE[f'{name}_STD'] = std_colors
        if FLAGS.fid:
            # activation of pool 3
            inception_pb = FID.check_or_download_inception(FLAGS.model_path)
            FID.create_inception_graph(inception_pb)
            imgs = []
            for img, _, _ in loader.make_one_shot_iterator(shard=8):
                imgs += [
                    imresize(array_to_img(img[0], 'RGB'), 0, size=[299, 299])
                ]
            imgs = np.stack(imgs)
            with tf.Session() as sess:
                acts = FID.get_activations(imgs, sess)
                mu = acts.mean(axis=0)
                sigma = np.cov(acts, rowvar=False)
                SAVE[f'{name}_FID_MU'] = mu
                SAVE[f'{name}_FID_SIGMA'] = sigma
    # dump all collected statistics once every dataset is processed
    np.savez_compressed(FLAGS.output, **SAVE)
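# The loop above only stores per-dataset inception statistics (mu, sigma).
# For reference, the Frechet Inception Distance between two such Gaussians is
#   FID = ||mu1 - mu2||^2 + Tr(sigma1 + sigma2 - 2*sqrt(sigma1 @ sigma2)).
# A minimal NumPy/SciPy sketch of that formula, assuming the stats were
# loaded back from the .npz written above; the in-repo FID module may differ
# in numerical details.
from scipy import linalg

def frechet_distance(mu1, sigma1, mu2, sigma2):
    covmean, _ = linalg.sqrtm(sigma1 @ sigma2, disp=False)
    if np.iscomplexobj(covmean):
        covmean = covmean.real  # drop tiny imaginary parts from sqrtm
    diff = mu1 - mu2
    return diff @ diff + np.trace(sigma1 + sigma2 - 2.0 * covmean)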
def test(self, dataset, **kwargs):
    r"""Test the model with the test sets in `dataset`.

    Args:
        dataset: an instance of Dataset with a valid `dataset.test` split
    """
    sess = tf.get_default_session()
    ckpt_last = self._restore_model(sess)
    loader = QuickLoader(1, dataset, 'test', self.model.scale, -1,
                         crop=None, **kwargs)
    it = loader.make_one_shot_iterator()
    if len(it):
        print('===================================')
        print(f'Testing model: {self.model.name} by {ckpt_last}')
        print('===================================')
    else:
        return
    for img in tqdm.tqdm(it, 'Test', ascii=True):
        feature, label, name = img[self.fi], img[self.li], img[-1]
        tf.logging.debug('output: ' + str(name))
        for fn in self.feature_callbacks:
            feature = fn(feature, name=name)
        for fn in self.label_callbacks:
            label = fn(label, name=name)
        outputs = self.model.test_batch(feature, None)
        for fn in self.output_callbacks:
            outputs = fn(outputs, input=img[self.fi], label=img[self.li],
                         mode=loader.color_format, name=name,
                         subdir=dataset.name)
def main(*args):
    if not opt.input_dir:
        raise ValueError("--input_dir is required")
    if not opt.dataset.upper() in DATASETS.keys():
        raise ValueError("--dataset is missing, or can't be found")
    data_ref = DATASETS.get(opt.dataset.upper())
    data_ref.setattr(depth=opt.clip)
    data = load_folder(opt.input_dir)
    data.setattr(depth=opt.clip)
    skip = opt.offset
    loader = QuickLoader(1, data, 'test', 1, convert_to='RGB', crop='not')
    ref_loader = QuickLoader(1, data_ref, 'test', 1, convert_to='RGB',
                             crop='not')
    # make sure len(ref_loader) == len(loader)
    loader_iter = loader.make_one_shot_iterator()
    ref_iter = ref_loader.make_one_shot_iterator()
    for ref, _, name in ref_iter:
        name = str(name)
        img, _, _ = next(loader_iter)
        # reduce the batch dimension for video clips
        if img.ndim == 5:
            img = img[0]
        if ref.ndim == 5:
            ref = ref[0]
        if opt.shave:
            img = shave(img, opt.shave)
            ref = shave(ref, opt.shave)
        if opt.l_only:
            img = rgb_to_yuv(img, max_val=255,
                             standard=opt.l_standard)[..., 0:1]
            ref = rgb_to_yuv(ref, max_val=255,
                             standard=opt.l_standard)[..., 0:1]
        if ref.shape[0] - skip != img.shape[0]:
            b_min = np.minimum(ref.shape[0] - skip, img.shape[0])
            ref = ref[:b_min + skip, ...]
            img = img[:b_min, ...]
        img = tf.constant(img.astype(np.float32))
        ref = tf.constant(ref.astype(np.float32))
        psnr = tf.reduce_mean(tf.image.psnr(
            ref[skip:], img, 255)).eval() if not opt.no_psnr else 0
        ssim = tf.reduce_mean(tf.image.ssim(
            ref[skip:], img, 255)).eval() if not opt.no_ssim else 0
        tf.logging.info(f'[{name}] PSNR = {psnr}, SSIM = {ssim}')
        tf.add_to_collection('PSNR', psnr)
        tf.add_to_collection('SSIM', ssim)
    for key in ('PSNR', 'SSIM'):
        mp = np.mean(tf.get_collection(key))
        tf.logging.info(f'Mean {key}: {mp}')
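# For reference, tf.image.psnr above implements the standard definition
# PSNR = 10 * log10(MAX^2 / MSE). A minimal NumPy equivalent, assuming
# uint8-range inputs (MAX = 255):
def psnr_np(ref, img, max_val=255.0):
    mse = np.mean((ref.astype(np.float64) - img.astype(np.float64)) ** 2)
    if mse == 0:
        return float('inf')  # identical images
    return 10.0 * np.log10(max_val ** 2 / mse)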
def test_read_flow():
    from VSR.Framework.Callbacks import _viz_flow
    dut = DATASETS['MINICHAIRS']
    config = Config(batch=8, scale=1, depth=2, patch_size=96,
                    steps_per_epoch=100, convert_to='RGB', crop='random')
    loader = QuickLoader(dut, 'train', config, True, n_threads=8)
    r = loader.make_one_shot_iterator('1GB', shuffle=True)
    loader.prefetch('1GB')
    list(r)
    r = loader.make_one_shot_iterator('8GB', shuffle=True)
    img, flow, name = list(r)[0]
    ref0 = img[0, 0, ...]
    ref1 = img[0, 1, ...]
    u = flow[0, 0, ..., 0]
    v = flow[0, 0, ..., 1]
    ImageProcess.array_to_img(ref0, 'RGB').show()
    ImageProcess.array_to_img(ref1, 'RGB').show()
    ImageProcess.array_to_img(_viz_flow(u, v), 'RGB').show()
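# `_viz_flow` renders a flow field (u, v) as a color image. A common scheme,
# sketched below as an assumption about (not a copy of) that callback, maps
# flow direction to hue and flow magnitude to saturation via HSV:
import colorsys

def flow_to_rgb(u, v):
    mag = np.sqrt(u ** 2 + v ** 2)
    hue = (np.arctan2(v, u) + np.pi) / (2 * np.pi)  # direction -> [0, 1]
    sat = np.clip(mag / (mag.max() + 1e-8), 0.0, 1.0)  # magnitude -> [0, 1]
    rgb = np.empty(u.shape + (3,), dtype=np.float32)
    for idx in np.ndindex(u.shape):
        rgb[idx] = colorsys.hsv_to_rgb(hue[idx], sat[idx], 1.0)
    return (rgb * 255).astype(np.uint8)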
def main():
    flags, args = parser.parse_known_args()
    opt = Config()
    for pair in flags._get_kwargs():
        opt.setdefault(*pair)
    data_config_file = Path(flags.data_config)
    if not data_config_file.exists():
        raise RuntimeError("dataset config file doesn't exist!")
    for _ext in ('json', 'yaml', 'yml'):  # for compat
        # apply a 2-stage (or master-slave) configuration; the master can be
        # overridden by the slave
        model_config_root = Path('Parameters/root.{}'.format(_ext))
        if opt.p:
            model_config_file = Path(opt.p)
        else:
            model_config_file = Path('Parameters/{}.{}'.format(opt.model,
                                                               _ext))
        if model_config_root.exists():
            opt.update(Config(str(model_config_root)))
        if model_config_file.exists():
            opt.update(Config(str(model_config_file)))
    model_params = opt.get(opt.model, {})
    opt.update(model_params)
    suppress_opt_by_args(model_params, *args)
    model = get_model(flags.model)(**model_params)
    if flags.cuda:
        model.cuda()
    root = f'{flags.save_dir}/{flags.model}'
    if flags.comment:
        root += '_' + flags.comment
    verbosity = logging.DEBUG if flags.verbose else logging.INFO
    trainer = model.trainer
    datasets = load_datasets(data_config_file)
    dataset = datasets[flags.dataset.upper()]
    train_config = Config(crop=opt.train_data_crop,
                          feature_callbacks=[],
                          label_callbacks=[],
                          convert_to='rgb',
                          **opt)
    if opt.channel == 1:
        train_config.convert_to = 'gray'
    if opt.lr_decay:
        train_config.lr_schedule = lr_decay(lr=opt.lr, **opt.lr_decay)
    train_config.random_val = not opt.traced_val
    train_config.cuda = flags.cuda
    if opt.verbose:
        dump(opt)
    with trainer(model, root, verbosity, opt.pth) as t:
        if opt.seed is not None:
            t.set_seed(opt.seed)
        tloader = QuickLoader(dataset, 'train', train_config, True,
                              flags.thread)
        vloader = QuickLoader(dataset, 'val', train_config, False,
                              flags.thread, batch=1, crop=opt.val_data_crop,
                              steps_per_epoch=opt.val_num)
        t.fit([tloader, vloader], train_config)
        if opt.export:
            t.export(opt.export)
def fit(self,
        batch=32,
        epochs=1,
        steps_per_epoch=200,
        dataset=None,
        learning_rate=1e-4,
        learning_rate_schedule=None,
        restart=False,
        validate_numbers=1,
        validate_every_n_epoch=1,
        augmentation=False,
        parallel=1,
        memory_usage=None,
        **kwargs):
    """Train the model.

    Args:
        batch: the size of a mini-batch during training
        epochs: the total number of training epochs
        steps_per_epoch: the number of training steps per epoch
        dataset: the Dataset object, used to get training and validation
          frames
        learning_rate: the initial learning rate
        learning_rate_schedule: a callable to adjust the learning rate.
          Its signature is `fn(learning_rate, epochs, steps, loss)`
        restart: if True, train from scratch, regardless of saved
          checkpoints
        validate_numbers: the number of patches used in validation
        validate_every_n_epoch: run validation every n epochs
        augmentation: whether to conduct image augmentation (random flip
          and rotation)
        parallel: the number of threads used to load the dataset
        memory_usage: a string or integer limiting the maximum CPU memory
          usage (e.g. '1024MB', '8GB')
    """
    sess = tf.get_default_session()
    if sess is None:
        raise RuntimeError('No session initialized')
    if not self.model.compiled:
        tf.logging.warning('model not compiled, compiling now...')
        self.model.compile()
        sess.run(tf.global_variables_initializer())
    init_epoch = 1 if restart else self._restore_model(sess) + 1
    if init_epoch > epochs:
        return
    print('===================================')
    print(f'Training model: {self.model.name.upper()}')
    print('===================================')
    self.model.display()
    summary_writer = tf.summary.FileWriter(str(self.logdir),
                                           graph=tf.get_default_graph())
    lr = learning_rate
    global_step = self.model.global_steps.eval()
    if learning_rate_schedule and callable(learning_rate_schedule):
        lr = learning_rate_schedule(lr, epochs=init_epoch, steps=global_step)
    if parallel == 1:
        train_loader = QuickLoader(batch, dataset, 'train', self.model.scale,
                                   steps_per_epoch, crop='random',
                                   augmentation=augmentation, **kwargs)
    else:
        train_loader = MpLoader(batch, dataset, 'train', self.model.scale,
                                steps_per_epoch, crop='random',
                                augmentation=augmentation, **kwargs)
    val_loader = QuickLoader(batch, dataset, 'val', self.model.scale,
                             validate_numbers, crop='center', **kwargs)
    for epoch in range(init_epoch, epochs + 1):
        train_iter = train_loader.make_one_shot_iterator(memory_usage,
                                                         shard=parallel,
                                                         shuffle=True)
        date = time.strftime('%Y-%m-%d %T', time.localtime())
        print(f'| {date} | Epoch: {epoch}/{epochs} | LR: {lr} |')
        avg_meas = {}
        with tqdm.tqdm(train_iter, unit='batch', ascii=True) as r:
            for img in r:
                feature, label, name = img[self.fi], img[self.li], img[-1]
                for fn in self.feature_callbacks:
                    feature = fn(feature, name=name)
                for fn in self.label_callbacks:
                    label = fn(label, name=name)
                loss = self.model.train_batch(feature=feature, label=label,
                                              learning_rate=lr, epochs=epoch)
                global_step = self.model.global_steps.eval()
                if learning_rate_schedule and callable(
                        learning_rate_schedule):
                    lr = learning_rate_schedule(lr, epochs=epoch,
                                                steps=global_step)
                for k, v in loss.items():
                    avg_meas[k] = avg_meas[k] + [v] if avg_meas.get(
                        k) else [v]
                    loss[k] = '{:08.5f}'.format(v)
                r.set_postfix(loss)
        for k, v in avg_meas.items():
            print(f'| Epoch average {k} = {np.mean(v):.6f} |')
        if epoch % validate_every_n_epoch:
            continue
        val_metrics = {}
        val_iter = val_loader.make_one_shot_iterator(memory_usage,
                                                     shard=parallel,
                                                     shuffle=False)
        for img in val_iter:
            feature, label, name = img[self.fi], img[self.li], img[-1]
            for fn in self.feature_callbacks:
                feature = fn(feature, name=name)
            for fn in self.label_callbacks:
                label = fn(label, name=name)
            metrics, val_summary_op, _ = self.model.validate_batch(
                feature=feature, label=label, epochs=epoch)
            for k, v in metrics.items():
                if k not in val_metrics:
                    val_metrics[k] = []
                val_metrics[k] += [v]
            summary_writer.add_summary(val_summary_op, global_step)
        for k, v in val_metrics.items():
            print(f'{k}: {np.mean(v):.6f}', end=', ')
        print('')
        self._save_model(sess, epoch)
    # flush all pending summaries to disk
    summary_writer.close()
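# A minimal `learning_rate_schedule` callable matching the signature
# documented in `fit` (`fn(learning_rate, epochs, steps, loss)`). Note that
# `fit` feeds the *current* rate back in on every step, so a robust schedule
# derives the new rate from `steps` or `epochs` rather than compounding on
# `learning_rate`. The initial rate and decay interval are arbitrary
# illustration values.
def exp_decay_schedule(learning_rate, epochs=1, steps=0, loss=None):
    initial_lr = 1e-4
    return initial_lr * (0.99 ** (steps // 1000))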
def main(*args, **kwargs):
    flags = tf.flags.FLAGS
    check_args(flags)
    opt = Config()
    for key in flags:
        opt.setdefault(key, flags.get_flag_value(key, None))
    opt.steps_per_epoch = opt.num
    # set random seed at first
    np.random.seed(opt.seed)
    # check output dir
    output_dir = Path(flags.save_dir)
    output_dir.mkdir(exist_ok=True, parents=True)
    writer = tf.io.TFRecordWriter(
        str(output_dir / "{}.tfrecords".format(opt.dataset)))
    data_config_file = Path(opt.data_config)
    if not data_config_file.exists():
        raise RuntimeError("dataset config file doesn't exist!")
    crf_matrix = np.load(opt.crf) if opt.crf else None
    # init loader config
    train_data, _, _ = Run.fetch_datasets(data_config_file, opt)
    train_config, _, _ = Run.init_loader_config(opt)
    loader = QuickLoader(train_data, opt.method, train_config,
                         n_threads=opt.threads, augmentation=opt.augment)
    it = loader.make_one_shot_iterator(opt.memory_limit, shuffle=True)
    with tqdm.tqdm(it, unit='batch', ascii=True) as r:
        for items in r:
            label, feature, names = items[:3]
            # label is usually the HR image, feature is usually the LR image
            batch_label = np.split(label, label.shape[0])
            batch_feature = np.split(feature, feature.shape[0])
            batch_name = np.split(names, names.shape[0])
            for hr, lr, name in zip(batch_label, batch_feature, batch_name):
                hr = np.squeeze(hr)
                lr = np.squeeze(lr)
                name = np.squeeze(name)
                with io.BytesIO() as fp:
                    Image.fromarray(hr, 'RGB').save(fp, format='png')
                    fp.seek(0)
                    hr_png = fp.read()
                with io.BytesIO() as fp:
                    Image.fromarray(lr, 'RGB').save(fp, format='png')
                    fp.seek(0)
                    lr_png = fp.read()
                lr_post = process(lr, crf_matrix,
                                  (opt.sigma[0], opt.sigma[1]))
                with io.BytesIO() as fp:
                    if opt.jpeg_quality:
                        Image.fromarray(lr_post, 'RGB').save(
                            fp, format='jpeg', quality=opt.jpeg_quality)
                    else:
                        Image.fromarray(lr_post, 'RGB').save(fp,
                                                             format='png')
                    fp.seek(0)
                    post_png = fp.read()
                label = "{}_{}_{}".format(*name).encode()
                make_tensor_label_records(
                    [hr_png, lr_png, label, post_png],
                    ["image/hr", "image/lr", "name", "image/post"],
                    writer)
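# A minimal sketch of reading these records back, assuming each example
# stores the four byte strings under the keys written above ("image/hr",
# "image/lr", "name", "image/post"); the file name below is a placeholder.
def parse_record(record):
    features = tf.io.parse_single_example(record, {
        'image/hr': tf.io.FixedLenFeature([], tf.string),
        'image/lr': tf.io.FixedLenFeature([], tf.string),
        'image/post': tf.io.FixedLenFeature([], tf.string),
        'name': tf.io.FixedLenFeature([], tf.string),
    })
    hr = tf.image.decode_png(features['image/hr'], channels=3)
    lr = tf.image.decode_png(features['image/lr'], channels=3)
    # the degraded image may be JPEG if --jpeg_quality was set
    post = tf.image.decode_image(features['image/post'], channels=3)
    return hr, lr, post, features['name']

records = tf.data.TFRecordDataset(['div2k.tfrecords']).map(parse_record)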
def main():
    flags, args = parser.parse_known_args()
    opt = Config()
    for pair in flags._get_kwargs():
        opt.setdefault(*pair)
    data_config_file = Path(flags.data_config)
    if not data_config_file.exists():
        raise RuntimeError("dataset config file doesn't exist!")
    for _ext in ('json', 'yaml', 'yml'):  # for compat
        # apply a 2-stage (or master-slave) configuration; the master can be
        # overridden by the slave
        model_config_root = Path('Parameters/root.{}'.format(_ext))
        if opt.p:
            model_config_file = Path(opt.p)
        else:
            model_config_file = Path('Parameters/{}.{}'.format(opt.model,
                                                               _ext))
        if model_config_root.exists():
            opt.update(Config(str(model_config_root)))
        if model_config_file.exists():
            opt.update(Config(str(model_config_file)))
    model_params = opt.get(opt.model, {})
    suppress_opt_by_args(model_params, *args)
    opt.update(model_params)
    model = get_model(flags.model)(**model_params)
    if flags.cuda:
        model.cuda()
    root = f'{flags.save_dir}/{flags.model}'
    if flags.comment:
        root += '_' + flags.comment
    verbosity = logging.DEBUG if flags.verbose else logging.INFO
    trainer = model.trainer
    datasets = load_datasets(data_config_file)
    try:
        test_datas = [datasets[t.upper()] for t in flags.test]
        run_benchmark = True
    except KeyError:
        test_datas = []
        for pattern in flags.test:
            test_data = Dataset(test=_glob_absolute_pattern(pattern),
                                mode='pil-image1', modcrop=False)
            # walk up to the nearest existing directory to name the dataset
            father = Path(pattern)
            while not father.is_dir():
                if father.parent == father:
                    break
                father = father.parent
            test_data.name = father.stem
            test_datas.append(test_data)
        run_benchmark = False
    if opt.verbose:
        dump(opt)
    for test_data in test_datas:
        loader_config = Config(convert_to='rgb',
                               feature_callbacks=[],
                               label_callbacks=[],
                               output_callbacks=[],
                               **opt)
        loader_config.batch = 1
        loader_config.subdir = test_data.name
        loader_config.output_callbacks += [
            save_image(root, flags.output_index, flags.auto_rename)]
        if opt.channel == 1:
            loader_config.convert_to = 'gray'
        with trainer(model, root, verbosity, flags.pth) as t:
            if flags.seed is not None:
                t.set_seed(flags.seed)
            loader = QuickLoader(test_data, 'test', loader_config,
                                 n_threads=flags.thread)
            loader_config.epoch = flags.epoch
            if run_benchmark:
                t.benchmark(loader, loader_config)
            else:
                t.infer(loader, loader_config)