def eval_sam(conf): """ Evaluate against the entire test set of images. Args: conf: configuration dictionary """ path_te = conf['path_eval'] iw = conf['iw'] sr = conf['sr'] cw = conf['cw'] fns_te = preproc._get_filenames(path_te) n = len(fns_te) with tf.Graph().as_default(), tf.device('/cpu:0' if FLAGS.dev_assign else None): # Placeholders Xs = [tf.placeholder(tf.float32, [None, iw, iw, 1], name='X_%02d' % i) \ for i in range(FLAGS.num_gpus)] y_splits = [] for i in range(FLAGS.num_gpus): with tf.device(('/gpu:%d' % i) if FLAGS.dev_assign else None): with tf.name_scope('%s_%02d' % (FLAGS.tower_name, i)) as scope: y_split, _ = model.inference(Xs[i], conf) y_splits.append(y_split) tf.get_variable_scope().reuse_variables() y = tf.concat(0, y_splits, name='y') # Restore saver = tf.train.Saver(tf.trainable_variables()) sess = tf.Session(config=tf.ConfigProto( allow_soft_placement=True, log_device_placement=FLAGS.log_device_placement)) ckpt = tf.train.get_checkpoint_state(conf['path_tmp']) if ckpt: ckpt = ckpt.model_checkpoint_path print('checkpoint found: %s' % ckpt) saver.restore(sess, ckpt) else: print('checkpoint not found!') time.sleep(2) # Iterate over each image, and calculate error for fn in fns_te: lr = preproc.imresize(sm.imread(fn), float(sr)) lr = preproc.shave(lr, sr) # border = sr fn_ = fn.split('/')[-1].split('.')[0] out_name = os.path.join('tmp', fn_ + '_HR.bmp') infer(lr, Xs, y, sess, conf, out_name);
def eval_sam(conf): """ Evaluate against the entire test set of images. Args: conf: configuration dictionary """ path_te = conf["path_eval"] iw = conf["iw"] sr = conf["sr"] cw = conf["cw"] fns_te = preproc._get_filenames(path_te) n = len(fns_te) with tf.Graph().as_default(), tf.device("/cpu:0" if FLAGS.dev_assign else None): # Placeholders Xs = [tf.placeholder(tf.float32, [None, iw, iw, 1], name="X_%02d" % i) for i in range(FLAGS.num_gpus)] y_splits = [] for i in range(FLAGS.num_gpus): with tf.device(("/gpu:%d" % i) if FLAGS.dev_assign else None): with tf.name_scope("%s_%02d" % (FLAGS.tower_name, i)) as scope: y_split, _ = model.inference(Xs[i], conf) y_splits.append(y_split) tf.get_variable_scope().reuse_variables() y = tf.concat(0, y_splits, name="y") # Restore saver = tf.train.Saver(tf.trainable_variables()) sess = tf.Session( config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=FLAGS.log_device_placement) ) ckpt = tf.train.get_checkpoint_state(conf["path_tmp"]) if ckpt: ckpt = ckpt.model_checkpoint_path print("checkpoint found: %s" % ckpt) saver.restore(sess, ckpt) else: print("checkpoint not found!") time.sleep(2) # Iterate over each image, and calculate error for fn in fns_te: lr = preproc.imresize(sm.imread(fn), float(sr)) lr = preproc.shave(lr, sr) # border = sr fn_ = fn.split("/")[-1].split(".")[0] out_name = os.path.join("tmp", fn_ + "_HR.bmp") infer(lr, Xs, y, sess, conf, out_name)
def eval_te(conf): """ Evaluate against the entire test set of images. Args: conf: configuration dictionary Returns: psnr: psnr of entire test set """ path_te = conf['path_va'] iw = conf['iw'] sr = conf['sr'] cw = conf['cw'] save = conf['save_sr_imgs'] fns_te = preproc._get_filenames(path_te) n = len(fns_te) with tf.Graph().as_default(), tf.device('/cpu:0' if FLAGS.dev_assign else None): # Placeholders Xs = [tf.placeholder(tf.float32, [None, iw, iw, 1], name='X_%02d' % i) \ for i in range(FLAGS.num_gpus)] y_splits = [] for i in range(FLAGS.num_gpus): with tf.device(('/gpu:%d' % i) if FLAGS.dev_assign else None): with tf.name_scope('%s_%02d' % (FLAGS.tower_name, i)) as scope: y_split, _ = model.inference(Xs[i], conf) y_splits.append(y_split) tf.get_variable_scope().reuse_variables() y = tf.concat(0, y_splits, name='y') # Restore saver = tf.train.Saver(tf.trainable_variables()) sess = tf.Session(config=tf.ConfigProto( allow_soft_placement=True, log_device_placement=FLAGS.log_device_placement)) ckpt = tf.train.get_checkpoint_state(conf['path_tmp']) if ckpt: ckpt = ckpt.model_checkpoint_path print('checkpoint found: %s' % ckpt) saver.restore(sess, ckpt) else: print('checkpoint not found!') time.sleep(2) # Iterate over each image, and calculate error tmse = 0 bl_tmse = 0 for fn in fns_te: lr, gt = preproc.lr_hr(sm.imread(fn), sr) fn_ = fn.split('/')[-1].split('.')[0] out_name = os.path.join('tmp', fn_ + '_HR.png') if save else None hr = infer(lr, Xs, y, sess, conf, out_name) # Evaluate gt = gt[cw:, cw:] gt = gt[:hr.shape[0], :hr.shape[1]] diff = gt.astype(np.float32) - hr.astype(np.float32) mse = np.mean(diff ** 2) tmse += mse psnr = 20 * np.log10(255.0 / np.sqrt(mse)) lr = lr[cw:, cw:] lr = lr[:hr.shape[0], :hr.shape[1]] bl_diff = gt.astype(np.float32) - lr.astype(np.float32) bl_mse = np.mean(bl_diff ** 2) bl_tmse += bl_mse bl_psnr = 20 * np.log10(255.0 / np.sqrt(bl_mse)) print('hr PSNR: %.3f, lr PSNR % .3f for %s' % \ (psnr, bl_psnr, fn.split('/')[-1])) rmse = np.sqrt(tmse / n) psnr = 20 * np.log10(255. / rmse) bl_rmse = np.sqrt(bl_tmse / n) bl_psnr = 20 * np.log10(255. / bl_rmse) print('total test PSNR: %.3f' % psnr) print('total baseline PSNR: %.3f' % bl_psnr) return psnr, bl_psnr
def eval_te(conf): """ Evaluate against the entire test set of images. Args: conf: configuration dictionary Returns: psnr: psnr of entire test set """ path_te = conf["path_eval"] iw = conf["iw"] sr = conf["sr"] cw = conf["cw"] save = conf["save_sr_imgs"] fns_te = preproc._get_filenames(path_te) n = len(fns_te) with tf.Graph().as_default(), tf.device("/cpu:0" if FLAGS.dev_assign else None): # Placeholders Xs = [tf.placeholder(tf.float32, [None, iw, iw, 1], name="X_%02d" % i) for i in range(FLAGS.num_gpus)] y_splits = [] for i in range(FLAGS.num_gpus): with tf.device(("/gpu:%d" % i) if FLAGS.dev_assign else None): with tf.name_scope("%s_%02d" % (FLAGS.tower_name, i)) as scope: y_split, _ = model.inference(Xs[i], conf) y_splits.append(y_split) tf.get_variable_scope().reuse_variables() y = tf.concat(0, y_splits, name="y") # Restore saver = tf.train.Saver(tf.trainable_variables()) sess = tf.Session( config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=FLAGS.log_device_placement) ) ckpt = tf.train.get_checkpoint_state(conf["path_tmp"]) if ckpt: ckpt = ckpt.model_checkpoint_path print("checkpoint found: %s" % ckpt) saver.restore(sess, ckpt) else: print("checkpoint not found!") time.sleep(2) # Iterate over each image, and calculate error avg_psnr, avg_bl_psnr = 0.0, 0.0 for fn in fns_te: lr, gt = preproc.lr_hr(sm.imread(fn), sr) fn_ = fn.split("/")[-1].split(".")[0] out_name = os.path.join("tmp", fn_ + "_HR.png") if save else None hr = infer(lr, Xs, y, sess, conf, out_name) # Evaluate gt = gt[cw:, cw:] gt = gt[: hr.shape[0], : hr.shape[1]] diff = gt.astype(np.float32) - hr.astype(np.float32) mse = np.mean(diff ** 2) psnr = 20 * np.log10(255.0 / np.sqrt(mse)) avg_psnr += psnr lr = lr[cw:, cw:] lr = lr[: hr.shape[0], : hr.shape[1]] bl_diff = gt.astype(np.float32) - lr.astype(np.float32) bl_mse = np.mean(bl_diff ** 2) bl_psnr = 20 * np.log10(255.0 / np.sqrt(bl_mse)) avg_bl_psnr += bl_psnr print("hr PSNR: %.3f, lr PSNR % .3f for %s" % (psnr, bl_psnr, fn.split("/")[-1])) avg_psnr /= len(fns_te) avg_bl_psnr /= len(fns_te) print("average test PSNR: %.3f" % avg_psnr) print("average baseline PSNR: %.3f" % avg_bl_psnr) return avg_psnr, avg_bl_psnr
def eval_te(conf): """ Evaluate against the entire test set of images. Args: conf: configuration dictionary Returns: psnr: psnr of entire test set """ path_te = conf['path_eval'] iw = conf['iw'] sr = conf['sr'] cw = conf['cw'] save = conf['save_sr_imgs'] fns_te = preproc._get_filenames(path_te) n = len(fns_te) with tf.Graph().as_default(), tf.device('/cpu:0' if FLAGS.dev_assign else None): # Placeholders Xs = [tf.placeholder(tf.float32, [None, iw, iw, 1], name='X_%02d' % i) \ for i in range(FLAGS.num_gpus)] y_splits = [] for i in range(FLAGS.num_gpus): with tf.device(('/gpu:%d' % i) if FLAGS.dev_assign else None): with tf.name_scope('%s_%02d' % (FLAGS.tower_name, i)) as scope: y_split, _ = model.inference(Xs[i], conf) y_splits.append(y_split) tf.get_variable_scope().reuse_variables() y = tf.concat(0, y_splits, name='y') # Restore saver = tf.train.Saver(tf.trainable_variables()) sess = tf.Session(config=tf.ConfigProto( allow_soft_placement=True, log_device_placement=FLAGS.log_device_placement)) ckpt = tf.train.get_checkpoint_state(conf['path_tmp']) if ckpt: ckpt = ckpt.model_checkpoint_path print('checkpoint found: %s' % ckpt) saver.restore(sess, ckpt) else: print('checkpoint not found!') time.sleep(2) # Iterate over each image, and calculate error avg_psnr, avg_bl_psnr = 0., 0. for fn in fns_te: lr, gt = preproc.lr_hr(sm.imread(fn), sr) fn_ = fn.split('/')[-1].split('.')[0] out_name = os.path.join('tmp', fn_ + '_HR.png') if save else None hr = infer(lr, Xs, y, sess, conf, out_name) # Evaluate gt = gt[cw:, cw:] gt = gt[:hr.shape[0], :hr.shape[1]] diff = gt.astype(np.float32) - hr.astype(np.float32) mse = np.mean(diff ** 2) psnr = 20 * np.log10(255.0 / np.sqrt(mse)) avg_psnr += psnr lr = lr[cw:, cw:] lr = lr[:hr.shape[0], :hr.shape[1]] bl_diff = gt.astype(np.float32) - lr.astype(np.float32) bl_mse = np.mean(bl_diff ** 2) bl_psnr = 20 * np.log10(255.0 / np.sqrt(bl_mse)) avg_bl_psnr += bl_psnr print('hr PSNR: %.3f, lr PSNR % .3f for %s' % \ (psnr, bl_psnr, fn.split('/')[-1])) avg_psnr /= len(fns_te) avg_bl_psnr /= len(fns_te) print('average test PSNR: %.3f' % avg_psnr) print('average baseline PSNR: %.3f' % avg_bl_psnr) return avg_psnr, avg_bl_psnr