Code example #1
	def evaluate(self, test_filenames):

		total_mse = total_psnr = 0
		if len(test_filenames) == 0:
			return 0, 0

		for filename in test_filenames:
			mse = self.do_for_evaluate(filename, print_console=False)
			total_mse += mse
			total_psnr += util.get_psnr(mse, max_value=self.max_value)

		return total_mse / len(test_filenames), total_psnr / len(test_filenames)
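
Every snippet below converts a mean squared error into PSNR via util.get_psnr(mse, max_value=...). For reference, PSNR is conventionally computed as 20 * log10(max_value) - 10 * log10(MSE). The following is only a minimal sketch of such a helper; the actual util.get_psnr in these projects may differ in details such as its default max_value or how it treats a zero MSE.

import math

def get_psnr(mse, max_value=255.0):
    # Peak signal-to-noise ratio in dB for a given mean squared error.
    if mse <= 0:
        return 100.0  # arbitrary cap for a perfect (zero-error) reconstruction
    return 20.0 * math.log10(max_value) - 10.0 * math.log10(mse)
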
Code example #2
    def do_for_evaluate_with_output(self, file_path, output_directory, print_console=False):

        filename, extension = os.path.splitext(file_path)
        output_directory += "/" + self.name + "/"
        util.make_dir(output_directory)

        true_image = util.set_image_alignment(util.load_image(file_path, print_console=False), self.scale)

        if true_image.shape[2] == 3 and self.channels == 1:

            # for color images
            input_y_image = loader.build_input_image(true_image, channels=self.channels, scale=self.scale,
                                                     alignment=self.scale, convert_ycbcr=True)
            input_bicubic_y_image = util.resize_image_by_pil(input_y_image, self.scale,
                                                             resampling_method=self.resampling_method)

            true_ycbcr_image = util.convert_rgb_to_ycbcr(true_image)

            output_y_image = self.do(input_y_image, input_bicubic_y_image)
            mse = util.compute_mse(true_ycbcr_image[:, :, 0:1], output_y_image,
                                   border_size=self.psnr_calc_border_size)
            loss_image = util.get_loss_image(true_ycbcr_image[:, :, 0:1], output_y_image,
                                             border_size=self.psnr_calc_border_size)

            output_color_image = util.convert_y_and_cbcr_to_rgb(output_y_image, true_ycbcr_image[:, :, 1:3])

            util.save_image(output_directory + file_path, true_image)
            util.save_image(output_directory + filename + "_input" + extension, input_y_image)
            util.save_image(output_directory + filename + "_input_bicubic" + extension, input_bicubic_y_image)
            util.save_image(output_directory + filename + "_true_y" + extension, true_ycbcr_image[:, :, 0:1])
            util.save_image(output_directory + filename + "_result" + extension, output_y_image)
            util.save_image(output_directory + filename + "_result_c" + extension, output_color_image)
            util.save_image(output_directory + filename + "_loss" + extension, loss_image)

        elif true_image.shape[2] == 1 and self.channels == 1:

            # for monochrome images
            input_image = loader.build_input_image(true_image, channels=self.channels, scale=self.scale,
                                                   alignment=self.scale)
            input_bicubic_y_image = util.resize_image_by_pil(input_image, self.scale,
                                                             resampling_method=self.resampling_method)
            output_image = self.do(input_image, input_bicubic_y_image)
            mse = util.compute_mse(true_image, output_image, border_size=self.psnr_calc_border_size)
            util.save_image(output_directory + file_path, true_image)
            util.save_image(output_directory + filename + "_result" + extension, output_image)
        else:
            mse = 0

        if print_console:
            print("[%s] MSE:%f, PSNR:%f" % (filename, mse, util.get_psnr(mse)))

        return mse
Code example #3
def main(not_parsed_args):
    if len(not_parsed_args) > 1:
        print("Unknown args:%s" % not_parsed_args)
        exit()

    model = DCSCN.SuperResolution(FLAGS, model_name=FLAGS.model_name)

    model.load_datasets("training", FLAGS.data_dir + "/" + FLAGS.dataset,
                        FLAGS.batch_dir + "/" + FLAGS.dataset,
                        FLAGS.batch_image_size, FLAGS.stride_size)
    model.load_datasets("test", FLAGS.data_dir + "/" + FLAGS.test_dataset,
                        FLAGS.batch_dir + "/" + FLAGS.test_dataset,
                        FLAGS.batch_image_size, FLAGS.stride_size)

    model.build_graph()
    model.build_optimizer()
    model.build_summary_saver()
    logging.info("\n" + str(sys.argv))
    logging.info("Test Data:" + FLAGS.test_dataset + " Training Data:" +
                 FLAGS.dataset)

    final_mse = final_psnr = 0
    test_filenames = util.get_files_in_directory(FLAGS.data_dir + "/" +
                                                 FLAGS.test_dataset)

    for i in range(FLAGS.tests):

        train(model, FLAGS, i)

        total_psnr = total_mse = 0
        for filename in test_filenames:
            mse = model.do_for_evaluate(filename,
                                        FLAGS.output_dir,
                                        output=(i == FLAGS.tests - 1))
            total_mse += mse
            total_psnr += util.get_psnr(mse, max_value=FLAGS.max_value)

        logging.info("\nTrial(%d) %s" % (i, util.get_now_date()))
        model.print_steps_completed(output_to_logging=True)
        logging.info("MSE:%f, PSNR:%f\n" % (total_mse / len(test_filenames),
                                            total_psnr / len(test_filenames)))

        final_mse += total_mse
        final_psnr += total_psnr

    logging.info("=== summary [%d] %s [%s] ===" %
                 (FLAGS.tests, model.name, util.get_now_date()))
    util.print_num_of_total_parameters(output_to_logging=True)
    n = len(test_filenames) * FLAGS.tests
    logging.info("\n=== Average [%s] MSE:%f, PSNR:%f ===" %
                 (FLAGS.test_dataset, final_mse / n, final_psnr / n))
Code example #4
def test(model, test_data):
    test_filenames = util.get_files_in_directory(FLAGS.data_dir + "/" +
                                                 test_data)
    total_psnr = total_mse = 0

    for filename in test_filenames:
        mse = model.do_for_evaluate_with_output(
            filename, output_directory=FLAGS.output_dir, print_console=False)
        total_mse += mse
        total_psnr += util.get_psnr(mse)

    logging.info("\n=== [%s] MSE:%f, PSNR:%f ===" %
                 (test_data, total_mse / len(test_filenames),
                  total_psnr / len(test_filenames)))
Code example #5
    def train_batch(self):

        _, mse = self.sess.run(
            [self.training_optimizer, self.mse],
            feed_dict={
                self.x: self.batch_input,
                self.x2: self.batch_input_quad,
                self.y: self.batch_true_quad,
                self.lr_input: self.lr,
                self.dropout_input: self.dropout
            })
        self.training_psnr_sum += util.get_psnr(mse, max_value=self.max_value)
        self.training_step += 1
        self.step += 1
Code example #6
def test(model, test_data):
    test_filenames = util.get_files_in_directory(FLAGS.data_dir + "/" +
                                                 test_data)
    total_psnr = total_mse = 0

    for filename in test_filenames:
        mse = model.do_for_evaluate(filename,
                                    output_directory=FLAGS.output_dir,
                                    output=FLAGS.save_results)
        total_mse += mse
        total_psnr += util.get_psnr(mse, max_value=FLAGS.max_value)

    logging.info("\n=== Average [%s] MSE:%f, PSNR:%f ===" %
                 (test_data, total_mse / len(test_filenames),
                  total_psnr / len(test_filenames)))
Code example #7
    def train_batch(self):

        feed_dict = {self.x: self.batch_input, self.x2: self.batch_input_bicubic, self.y: self.batch_true,
                     self.lr_input: self.lr, self.dropout: self.dropout_rate, self.is_training: 1}

        if self.use_l1_loss:
            _, loss = self.sess.run([self.training_optimizer, self.image_loss], feed_dict=feed_dict)
            self.training_loss_sum += loss
        else:
            _, mse = self.sess.run([self.training_optimizer, self.mse], feed_dict=feed_dict)
            self.training_loss_sum += mse
            self.training_psnr_sum += util.get_psnr(mse, max_value=self.max_value)

        self.training_step += 1
        self.step += 1
Code example #8
    def do_for_evaluate(self, file_path, output_directory="output", output=True, print_console=True):

        filename, extension = os.path.splitext(file_path)
        output_directory += "/"
        true_image = util.set_image_alignment(util.load_image(file_path), self.scale)

        if true_image.shape[2] == 3 and self.channels == 1:
            input_y_image = loader.build_input_image(true_image, channels=self.channels, scale=self.scale,
                                                     alignment=self.scale, convert_ycbcr=True, jpeg_mode=self.jpeg_mode)
            # for color images
            if output:
                input_bicubic_y_image = util.resize_image_by_pil(input_y_image, self.scale)
                true_ycbcr_image = util.convert_rgb_to_ycbcr(true_image, jpeg_mode=self.jpeg_mode)

                output_y_image = self.do(input_y_image, input_bicubic_y_image)
                mse = util.compute_mse(true_ycbcr_image[:, :, 0:1], output_y_image, border_size=self.scale)
                loss_image = util.get_loss_image(true_ycbcr_image[:, :, 0:1], output_y_image, border_size=self.scale)

                output_color_image = util.convert_y_and_cbcr_to_rgb(output_y_image, true_ycbcr_image[:, :, 1:3],
                                                                    jpeg_mode=self.jpeg_mode)

                util.save_image(output_directory + file_path, true_image)
                util.save_image(output_directory + filename + "_input" + extension, input_y_image)
                util.save_image(output_directory + filename + "_input_bicubic" + extension, input_bicubic_y_image)
                util.save_image(output_directory + filename + "_true_y" + extension, true_ycbcr_image[:, :, 0:1])
                util.save_image(output_directory + filename + "_result" + extension, output_y_image)
                util.save_image(output_directory + filename + "_result_c" + extension, output_color_image)
                util.save_image(output_directory + filename + "_loss" + extension, loss_image)
            else:
                true_y_image = util.convert_rgb_to_y(true_image, jpeg_mode=self.jpeg_mode)
                output_y_image = self.do(input_y_image)
                mse = util.compute_mse(true_y_image, output_y_image, border_size=self.scale)

        elif true_image.shape[2] == 1 and self.channels == 1:

            # for monochrome images
            input_image = loader.build_input_image(true_image, channels=self.channels, scale=self.scale, alignment=self.scale)
            output_image = self.do(input_image)
            mse = util.compute_mse(true_image, output_image, border_size=self.scale)
            if output:
                util.save_image(output_directory + file_path, true_image)
                util.save_image(output_directory + filename + "_result" + extension, output_image)
        else:
            mse = 0

        if print_console:
            print("MSE:%f PSNR:%f" % (mse, util.get_psnr(mse)))
        return mse
Code example #9
    def do_for_evaluate(self, file_path, print_console=False):

        true_image = util.set_image_alignment(
            util.load_image(file_path, print_console=False), self.scale)

        if true_image.shape[2] == 3 and self.channels == 1:

            # for color images
            input_y_image = loader.build_input_image(true_image,
                                                     channels=self.channels,
                                                     scale=self.scale,
                                                     alignment=self.scale,
                                                     convert_ycbcr=True)
            true_y_image = util.convert_rgb_to_y(true_image)
            input_bicubic_y_image = util.resize_image_by_pil(
                input_y_image,
                self.scale,
                resampling_method=self.resampling_method)
            output_y_image = self.do(input_y_image, input_bicubic_y_image)
            mse = util.compute_mse(true_y_image,
                                   output_y_image,
                                   border_size=self.psnr_calc_border_size)

        elif true_image.shape[2] == 1 and self.channels == 1:

            # for monochrome images
            input_image = loader.build_input_image(true_image,
                                                   channels=self.channels,
                                                   scale=self.scale,
                                                   alignment=self.scale)
            input_bicubic_y_image = util.resize_image_by_pil(
                input_image,
                self.scale,
                resampling_method=self.resampling_method)
            output_image = self.do(input_image, input_bicubic_y_image)
            mse = util.compute_mse(true_image,
                                   output_image,
                                   border_size=self.psnr_calc_border_size)
        else:
            mse = 0

        if print_console:
            print("MSE:%f, PSNR:%f" % (mse, util.get_psnr(mse)))

        return mse
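
Several of these examples pass a border_size (self.psnr_calc_border_size or self.scale) to util.compute_mse, reflecting the common super-resolution practice of ignoring a few border pixels when scoring. Below is a minimal sketch of such a helper, assuming NumPy image arrays; the real util.compute_mse may differ in dtype handling and border semantics.

import numpy as np

def compute_mse(image_a, image_b, border_size=0):
    # Mean squared error between two images, optionally cropping a border first.
    if border_size > 0:
        image_a = image_a[border_size:-border_size, border_size:-border_size]
        image_b = image_b[border_size:-border_size, border_size:-border_size]
    diff = image_a.astype(np.float64) - image_b.astype(np.float64)
    return float(np.mean(diff * diff))
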
Code example #10
def main(not_parsed_args):
    if len(not_parsed_args) > 1:
        print("Unknown args:%s" % not_parsed_args)
        exit()

    model = DCSCN.SuperResolution(FLAGS, model_name=FLAGS.model_name)

    if FLAGS.build_batch:
        model.load_datasets(FLAGS.data_dir + "/" + FLAGS.dataset,
                            FLAGS.batch_dir + "/" + FLAGS.dataset,
                            FLAGS.batch_image_size, FLAGS.stride_size)
    else:
        model.load_dynamic_datasets(FLAGS.data_dir + "/" + FLAGS.dataset,
                                    FLAGS.batch_image_size)
    model.build_graph()
    model.build_optimizer()
    model.build_summary_saver()

    logging.info("\n" + str(sys.argv))
    logging.info("Test Data:" + FLAGS.test_dataset + " Training Data:" +
                 FLAGS.dataset)
    util.print_num_of_total_parameters(output_to_logging=True)

    total_psnr = total_mse = 0

    for i in range(FLAGS.tests):

        mse = train(model, FLAGS, i)
        psnr = util.get_psnr(mse, max_value=FLAGS.max_value)
        total_mse += mse
        total_psnr += psnr

        logging.info("\nTrial(%d) %s" % (i, util.get_now_date()))
        model.print_steps_completed(output_to_logging=True)
        logging.info("MSE:%f, PSNR:%f\n" % (mse, psnr))

    if FLAGS.tests > 1:
        logging.info("\n=== Final Average [%s] MSE:%f, PSNR:%f ===" %
                     (FLAGS.test_dataset, total_mse / FLAGS.tests,
                      total_psnr / FLAGS.tests))

    model.copy_log_to_archive("archive")
Code example #11
File: LFFN.py  Project: qibao77/LFFN
    def train_batch(self, loader):

        for batch in enumerate(loader.loader_train):
            lr, hr = batch[1][0], batch[1][1]

            feed_dict = {
                self.x: lr.numpy(),
                self.y: hr.numpy(),
                self.lr_input: self.lr,
                self.is_training: 1
            }

            _, mse = self.sess.run([self.training_optimizer, self.mae],
                                   feed_dict=feed_dict)
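            # note: the fetched loss tensor here is self.mae, although the local
            # variable name and the get_psnr call below treat the value as an MSE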

            self.training_mse_sum += mse
            self.training_psnr_sum += util.get_psnr(mse,
                                                    max_value=self.max_value)
            self.training_step += 1
            self.step += 1
Code example #12
    def update_epoch_and_lr(self, mse):
        lr_updated = False

        if self.min_validation_mse < 0 or self.min_validation_mse > mse:
            # update new mse
            self.min_validation_epoch = self.epochs_completed
            self.min_validation_mse = mse
        else:
            if self.epochs_completed > self.min_validation_epoch + self.lr_decay_epoch:
                # set new learning rate
                self.min_validation_epoch = self.epochs_completed
                self.lr *= self.lr_decay
                lr_updated = True

        psnr = util.get_psnr(mse, max_value=self.max_value)
        self.csv_epochs.append(self.epochs_completed)
        self.csv_psnr.append(psnr)
        self.csv_training_psnr.append(self.training_psnr_sum / self.training_step)

        return lr_updated
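
The helper above implements a simple reduce-on-plateau schedule: it remembers the best validation MSE seen so far and, once no improvement has been recorded for lr_decay_epoch epochs, multiplies the learning rate by lr_decay. A hypothetical calling pattern is sketched below; the driver loop, run_validation, total_epochs and batches_per_epoch are illustrative assumptions rather than part of the source, and the model is assumed to advance epochs_completed in its own bookkeeping.

# Hypothetical per-epoch driver; run_validation(), total_epochs and batches_per_epoch are placeholders.
for epoch in range(total_epochs):
    for _ in range(batches_per_epoch):
        model.train_batch()
    validation_mse = run_validation(model)  # e.g. average MSE over a held-out set
    if model.update_epoch_and_lr(validation_mse):
        logging.info("learning rate decayed to %f" % model.lr)
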
Code example #13
File: evaluate.py  Project: doctorwgd/DCDRN
def test(model, test_data):

    test_filenames = util.get_files_in_directory(FLAGS.data_dir + "/" +
                                                 test_data)
    total_psnr = total_time = total_mse = 0
    i = 0
    for filename in test_filenames:
        mse, time1 = model.do_for_evaluate(filename,
                                           output_directory=FLAGS.output_dir,
                                           output=FLAGS.save_results)
        total_mse += mse
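        # the time for the first image (i == 0) is excluded from the total and from ave_time below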
        if i != 0:
            total_time += time1
        total_psnr += util.get_psnr(mse, max_value=FLAGS.max_value)
        i = i + 1
    logging.info("\n=== Average [%s] MSE:%f, PSNR:%f ===" %
                 (test_data, total_mse / len(test_filenames),
                  total_psnr / len(test_filenames)))
    ave_time = total_time / (len(test_filenames) - 1)
    #print ("end_t",end_t,"start_t",start_t)

    print("total_time: %4.4f   ave_time: %4.4f   " % (total_time, ave_time))
Code example #14
    def evaluate_test_batch(self,
                            save_meta_data=False,
                            trial=0,
                            log_profile=True):

        save_meta_data = save_meta_data and self.save_meta_data and (trial == 0)
        feed_dict = {
            self.x: self.test.input.images,
            self.x2: self.test.input.hr_images,
            self.y: self.test.true.hr_images,
            self.dropout: 1.0,
            self.is_training: 0
        }

        if log_profile and (self.save_loss or self.save_weights
                            or save_meta_data):

            if save_meta_data:
                # profiler = tf.profiler.Profile(self.sess.graph)

                run_metadata = tf.RunMetadata()
                run_options = tf.RunOptions(
                    trace_level=tf.RunOptions.FULL_TRACE)
                summary_str, mse = self.sess.run([self.summary_op, self.mse],
                                                 feed_dict=feed_dict,
                                                 options=run_options,
                                                 run_metadata=run_metadata)
                self.test_writer.add_run_metadata(
                    run_metadata, "step%d" % self.epochs_completed)

                filename = self.checkpoint_dir + "/" + self.name + "_metadata.txt"
                with open(filename, "w") as out:
                    out.write(str(run_metadata))

                # filename = self.checkpoint_dir + "/" + self.name + "_memory.txt"
                # tf.profiler.write_op_log(
                # 	tf.get_default_graph(),
                # 	log_dir=self.checkpoint_dir,
                # 	#op_log=op_log,
                # 	run_meta=run_metadata)

                tf.contrib.tfprof.model_analyzer.print_model_analysis(
                    tf.get_default_graph(),
                    run_meta=run_metadata,
                    tfprof_options=tf.contrib.tfprof.model_analyzer.PRINT_ALL_TIMING_MEMORY)

            else:
                summary_str, mse = self.sess.run([self.summary_op, self.mse],
                                                 feed_dict=feed_dict)

            self.train_writer.add_summary(summary_str, self.epochs_completed)
            util.log_scalar_value(self.train_writer, 'training_PSNR',
                                  self.training_psnr_sum / self.training_step,
                                  self.epochs_completed)
            util.log_scalar_value(self.train_writer, 'LR', self.lr,
                                  self.epochs_completed)
            self.train_writer.flush()

            util.log_scalar_value(self.test_writer, 'PSNR', util.get_psnr(mse),
                                  self.epochs_completed)
            self.test_writer.flush()
        else:
            mse = self.sess.run(self.mse, feed_dict=feed_dict)

        return mse