Example #1
# NOTE: distort_image, psnr, and DEVICE are helpers/globals defined elsewhere in the source project.
import os

import numpy as np
import torch
from PIL import Image

def evaluate_model(path, model, pixel_mean, pixel_std, SR_FACTOR=3, sigma=1):
    """
        Computes average Peak Signal to Noise Ratio over a set of images and
        the versions super resolved by a srcnn or variant.

        Args:
            path (string): relative path to directory containing images for evaluation
            model (PyTorch model): the model to be evaluated
            pixel_mean (float): mean luminance value to be used for standardization
            pixel_std (float): std. dev. of luminance value to be used for standardization
            SR_FACTOR (int): super resolution factor
            sigma (int): the std. dev. to use for the gaussian blur
    """

    img_names = [im for im in os.listdir(path) if im.endswith(('.bmp', '.jpg'))]
    blurred_img_psnrs = []
    out_img_psnrs = []
    for test_im in img_names:

        blurred_test_im = distort_image(path=os.path.join(path, test_im), factor=SR_FACTOR, sigma=sigma)
        image_file = Image.open(os.path.join(path, test_im))
        im = np.array(image_file.convert('YCbCr'))

        # normalize luminance to [0, 1]
        model_input = blurred_test_im[:, :, 0] / 255.0
        # standardize with the dataset statistics
        model_input -= pixel_mean
        model_input /= pixel_std

        # add batch and channel dims, run the model, then strip them off again
        im_out_Y = model(torch.tensor(model_input,
                                      dtype=torch.float).unsqueeze(0).unsqueeze(0).to(DEVICE))
        im_out_Y = im_out_Y.detach().squeeze().cpu().numpy().astype(np.float64)
        im_out_viz = np.zeros((im_out_Y.shape[0], im_out_Y.shape[1], 3))

        #unstandardize
        im_out_Y = (im_out_Y * pixel_std) + pixel_mean

        #un-normalize
        im_out_Y *= 255.0

        im_out_viz[:, :, 0] = im_out_Y
        im_out_viz[:, :, 1] = im[:, :, 1]
        im_out_viz[:, :, 2] = im[:, :, 2]

        im_out_viz[:, :, 0] = np.around(im_out_viz[:, :, 0])
        # PSNR is computed on the Y channel only
        blur_psnr = psnr(im[:, :, 0], blurred_test_im[:, :, 0])
        sr_psnr = psnr(im[:, :, 0], im_out_viz[:, :, 0])

        blurred_img_psnrs.append(blur_psnr)
        out_img_psnrs.append(sr_psnr)

    mean_blur_psnr = np.mean(np.array(blurred_img_psnrs))
    mean_sr_psnr = np.mean(np.array(out_img_psnrs))
    return mean_blur_psnr, mean_sr_psnr
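
The psnr helper that evaluate_model calls is not shown here. A minimal NumPy sketch consistent with the call sites above (reference image first, single-channel arrays in [0, 255]) could look like the following; the name and argument order come from the calls, while the peak value and everything else are assumptions:

import numpy as np

def psnr(reference, estimate, peak=255.0):
    """Peak Signal-to-Noise Ratio between two single-channel images."""
    # mean squared error in double precision to avoid uint8 overflow
    mse = np.mean((np.asarray(reference, dtype=np.float64) -
                   np.asarray(estimate, dtype=np.float64)) ** 2)
    if mse == 0:
        return float('inf')  # identical images
    return 10.0 * np.log10((peak ** 2) / mse)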
Example #2
  def build_valid_rl(self, shuffle=False):
    print("-" * 80)
    print("Build valid graph on shuffled data")
    with tf.device("/cpu:0"):
      # shuffled valid data: for choosing validation model
      self.iterators['valid_shuffle'] = self.datasets['valid'].make_one_shot_iterator()
      x_valid_shuffle, y_valid_shuffle = self.iterators['valid_shuffle'].get_next()

    x_bicubic = tf.image.resize_bicubic(
      x_valid_shuffle, [128, 128])
    output = self._model(x_valid_shuffle, is_training=True, reuse=True)
    # score the model by its PSNR gain over a plain bicubic upscale
    self.valid_shuffle_psnr = psnr(y_valid_shuffle, output) - psnr(y_valid_shuffle, x_bicubic)
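
In Examples #2 through #6, psnr must be a TensorFlow 1.x graph op rather than a NumPy function, since it is applied to tensors. The project's actual helper is not shown; a minimal sketch, assuming float tensors in the [0, 255] range, might be:

def psnr(y_true, y_pred, max_val=255.0):
    # mean squared error over batch, spatial dims, and channels
    mse = tf.reduce_mean(tf.squared_difference(y_true, y_pred))
    # 10 * log10(max_val^2 / mse), via natural logs since TF 1.x has no log10 op
    return 10.0 * tf.log(max_val ** 2 / mse) / tf.log(10.0)

Recent TF 1.x releases also ship tf.image.psnr, which computes per-image PSNR directly.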
Example #3
  def _build_valid(self):
    if self.x_valid is not None:
      print("-" * 80)
      print("Build valid graph")
      output = self._model(self.x_valid, False, reuse=True)

      self.valid_psnr = psnr(self.y_valid, output)
Example #4
  def _build_train(self):
    print("-" * 80)
    print("Build train graph")
    output = self._model(self.x_train, is_training=True)
    # map the [0, 255] target into roughly [-1, 1] to match the model's output range
    target = (self.y_train - 127) / 127
    # L1 (mean absolute error) loss on the normalized values
    self.loss = tf.reduce_mean(
      tf.losses.absolute_difference(target, output))
    train_loss = self.loss

    self.train_psnr = psnr(self.y_train, output)

    tf.summary.scalar('loss', self.loss)
    # map network output and input back to [0, 255] for the image summaries
    output = output * 127 + 127
    output = tf.clip_by_value(output, 0, 255)
    input_img = self.x_train * 127 + 127
    bicubic_img = tf.image.resize_bicubic(input_img, [128, 128])
    tf.summary.image("output", tf.cast(output, tf.uint8))
    tf.summary.image("target", tf.cast(self.y_train, tf.uint8))
    tf.summary.image("input", tf.cast(input_img, tf.uint8))
    tf.summary.image("bicubic", tf.cast(bicubic_img, tf.uint8))

    tf_variables = [
      var for var in tf.trainable_variables() if (
        var.name.startswith(self.name) and "aux_head" not in var.name)]
    self.num_vars = count_model_params(tf_variables)
    print("Model has {0} params".format(self.num_vars))

    self.train_op, self.lr, self.grad_norm, self.optimizer = get_train_ops(
      train_loss,
      tf_variables,
      self.global_step,
      clip_mode=self.clip_mode,
      grad_bound=self.grad_bound,
      l2_reg=self.l2_reg,
      lr_init=self.lr_init,
      lr_dec_start=self.lr_dec_start,
      lr_dec_every=self.lr_dec_every,
      lr_dec_rate=self.lr_dec_rate,
      lr_cosine=self.lr_cosine,
      lr_max=self.lr_max,
      lr_min=self.lr_min,
      lr_T_0=self.lr_T_0,
      lr_T_mul=self.lr_T_mul,
      num_train_batches=self.num_train_batches,
      optim_algo=self.optim_algo,
      sync_replicas=self.sync_replicas,
      num_aggregate=self.num_aggregate,
      num_replicas=self.num_replicas)

    tf.summary.scalar('lr', self.lr)
    self.summaries = tf.summary.merge_all()
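
count_model_params is another helper whose implementation is not included here. A plausible sketch, assuming numpy is imported as np, simply multiplies out each variable's static shape:

def count_model_params(tf_variables):
    # total element count across the model's trainable variables
    num_params = 0
    for var in tf_variables:
        num_params += np.prod([int(dim) for dim in var.get_shape()])
    return int(num_params)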
Example #5
    def build_valid_rl(self, shuffle=False):
        print("-" * 80)
        print("Build valid graph on shuffled data")
        with tf.device("/cpu:0"):
            # shuffled valid data: for choosing the validation model
            if not shuffle and self.data_format == "NCHW":
                # move channels ahead of height/width to match the NCHW layout
                self.inputs["valid_original"] = np.transpose(
                    self.inputs["valid_original"], [0, 3, 1, 2])
            self.iterators['valid_shuffle'] = self.datasets[
                'valid'].make_one_shot_iterator()
            x_valid_shuffle, y_valid_shuffle = self.iterators[
                'valid_shuffle'].get_next()

        output = self._model(x_valid_shuffle, is_training=True, reuse=True)
        self.valid_shuffle_psnr = psnr(y_valid_shuffle, output)
Example #6
  def _build_test(self):
    print("-" * 80)
    print("Build test graph")
    output = self._model(self.x_test, False, reuse=True)
    self.test_psnr = psnr(self.y_test, output)