Ejemplo n.º 1
0
    def load_datasets(self,
                      target,
                      data_dir,
                      batch_dir,
                      batch_image_size,
                      stride_size=0):
        """Build (if needed) and load the patch dataset for *target*.

        target: "training" stores the dataset on self.train, anything else
            stores it on self.test.
        stride_size: patch stride; 0 selects half the patch size
            (half-overlapping patches).
        """
        print("Loading datasets for [%s]..." % target)
        util.make_dir(batch_dir)

        # default stride: half-overlapping patches
        if not stride_size:
            stride_size = batch_image_size // 2

        resampling_method = "bicubic" if self.bicubic_init else "nearest"

        datasets = loader.DataSets(self.scale,
                                   batch_image_size,
                                   stride_size,
                                   channels=self.channels,
                                   jpeg_mode=self.jpeg_mode,
                                   max_value=self.max_value,
                                   resampling_method=resampling_method)

        # the on-disk batch cache is built only on the first run
        if not datasets.is_batch_exist(batch_dir):
            datasets.build_batch(data_dir, batch_dir)
        datasets.load_batch(batch_dir)

        if target == "training":
            self.train = datasets
        else:
            self.test = datasets
Ejemplo n.º 2
0
    def __init__(self, flags, model_name=""):
        """Build a DCSCN model instance from parsed hyper-parameter *flags*.

        flags: namespace of command-line hyper-parameters (scale, layers,
            filters, ...) read once here and copied onto the instance.
        model_name: optional explicit model name; when empty the name is
            derived from the hyper-parameters by get_model_name().
        """

        super().__init__(flags)

        # Model Parameters
        self.scale = flags.scale
        self.layers = flags.layers
        self.filters = flags.filters
        # clamp: min_filters may never exceed the initial filter count
        self.min_filters = min(flags.filters, flags.min_filters)
        self.filters_decay_gamma = flags.filters_decay_gamma
        self.use_nin = flags.use_nin
        self.nin_filters = flags.nin_filters
        self.nin_filters2 = flags.nin_filters2
        # at least one reconstruction layer is always used
        self.reconstruct_layers = max(flags.reconstruct_layers, 1)
        self.reconstruct_filters = flags.reconstruct_filters
        self.resampling_method = BICUBIC_METHOD_STRING
        self.pixel_shuffler = flags.pixel_shuffler
        self.pixel_shuffler_filters = flags.pixel_shuffler_filters
        self.self_ensemble = flags.self_ensemble

        # Image Processing Parameters
        self.max_value = flags.max_value
        self.channels = flags.channels
        # the model reconstructs a single output channel
        self.output_channels = 1
        self.psnr_calc_border_size = flags.psnr_calc_border_size
        # negative border size means "derive it from the scale factor"
        if self.psnr_calc_border_size < 0:
            self.psnr_calc_border_size = 2 + self.scale

        # initialize variables
        self.name = self.get_model_name(model_name)
        self.total_epochs = 0

        util.make_dir(self.checkpoint_dir)
        logging.info("\nDCSCN v2-------------------------------------")
        logging.info("%s [%s]" % (util.get_now_date(), self.name))
Ejemplo n.º 3
0
    def load_datasets(self,
                      data_dir,
                      batch_dir,
                      batch_image_size,
                      stride_size=0):
        """Build input patch images (if needed) and load them as a DataSets.

        Opens an image directory as a dataset: each image is split into patch
        images and converted into the model's input form. Loading images
        (especially from PNG/JPG) and building the LR inputs is expensive in
        the training phase, so the pre-processed patches are cached on disk
        under a per-scale sub-directory; the trade-off is that patches are
        limited to a fixed grid.
        """

        # cache batches per scale factor so different scales don't collide
        batch_dir += "/scale%d" % self.scale
        print("Loading datasets for [%s]..." % batch_dir)
        util.make_dir(batch_dir)

        datasets = loader.DataSets(self.scale,
                                   batch_image_size,
                                   stride_size,
                                   channels=self.channels,
                                   jpeg_mode=self.jpeg_mode,
                                   max_value=self.max_value,
                                   resampling_method=self.resampling_method)

        # the on-disk patch cache is built only on the first run
        if not datasets.is_batch_exist(batch_dir):
            datasets.build_batch(data_dir, batch_dir)
        datasets.load_batch(batch_dir)

        return datasets
Ejemplo n.º 4
0
def main(not_parsed_args):
    """Create bicubic-downscaled LR copies (x3 and x4) of every DIV2K HR image.

    Each output file is written into the scale's own directory and named
    "<stem>x<scale><ext>" to match the DIV2K LR naming convention.
    """
    if len(not_parsed_args) > 1:
        print("Unknown args:%s" % not_parsed_args)
        exit()

    training_filenames = util.get_files_in_directory(
        "/media/data1/ww/sr_data/DIV2K_aug2/DIV2K_train_HR")

    # one LR output directory per scale factor (removed dead/commented x2 path)
    target_dirs = {
        3: "/media/data1/ww/sr_data/DIV2K_aug2/DIV2K_train_LR_bicubic_X3/DIV2K_train_LR_bicubic/X3",
        4: "/media/data1/ww/sr_data/DIV2K_aug2/DIV2K_train_LR_bicubic_X4/DIV2K_train_LR_bicubic/X4",
    }
    for target_dir in target_dirs.values():
        util.make_dir(target_dir)

    for file_path in training_filenames:
        org_image = util.load_image(file_path)
        filename, extension = os.path.splitext(os.path.basename(file_path))
        for scale, target_dir in target_dirs.items():
            # bicubic downscale to 1/scale of the original size
            lr_image = util.resize_image_by_pil(org_image, 1 / scale)
            util.save_image(
                target_dir + '/' + filename + 'x{}'.format(scale) + extension,
                lr_image)
Ejemplo n.º 5
0
    def do_for_evaluate_with_output(self, file_path, output_directory, print_console=False):
        """Super-resolve one image, save all intermediate images, and score it.

        Color images (3-channel input with a 1-channel model) are evaluated on
        the Y channel only; monochrome images are evaluated directly. Returns
        (psnr, ssim), or (None, None) for unsupported channel combinations.
        """

        filename, extension = os.path.splitext(file_path)
        output_directory += "/" + self.name + "/"
        util.make_dir(output_directory)

        # build the LR input and its plain upscaled version for reference
        true_image = util.set_image_alignment(util.load_image(file_path, print_console=False), self.scale)
        input_image = util.resize_image_by_pil(true_image, 1.0/ self.scale, resampling_method=self.resampling_method)
        input_bicubic_image = util.resize_image_by_pil(input_image, self.scale, resampling_method=self.resampling_method)
        util.save_image(output_directory + filename + "_input_bicubic" + extension, input_bicubic_image)

        if true_image.shape[2] == 3 and self.channels == 1:

            # for color images
            input_y_image = loader.build_input_image(true_image, channels=self.channels, scale=self.scale,
                                                     alignment=self.scale, convert_ycbcr=True)
            input_bicubic_y_image = util.resize_image_by_pil(input_y_image, self.scale,
                                                             resampling_method=self.resampling_method)

            true_ycbcr_image = util.convert_rgb_to_ycbcr(true_image)

            # reconstruct the Y channel, then score it against the true Y
            output_y_image = self.do(input_y_image, input_bicubic_y_image)
            psnr, ssim = util.compute_psnr_and_ssim(true_ycbcr_image[:, :, 0:1], output_y_image,
                                                    border_size=self.psnr_calc_border_size)
            loss_image = util.get_loss_image(true_ycbcr_image[:, :, 0:1], output_y_image,
                                             border_size=self.psnr_calc_border_size)

            # recombine the SR luminance with the original chroma channels
            output_color_image = util.convert_y_and_cbcr_to_rgb(output_y_image, true_ycbcr_image[:, :, 1:3])

            util.save_image(output_directory + file_path, true_image)
            util.save_image(output_directory + filename + "_input" + extension, input_y_image)
            util.save_image(output_directory + filename + "_input_bicubic_y" + extension, input_bicubic_y_image)
            util.save_image(output_directory + filename + "_true_y" + extension, true_ycbcr_image[:, :, 0:1])
            util.save_image(output_directory + filename + "_result" + extension, output_y_image)
            util.save_image(output_directory + filename + "_result_c" + extension, output_color_image)
            util.save_image(output_directory + filename + "_loss" + extension, loss_image)

        elif true_image.shape[2] == 1 and self.channels == 1:

            # for monochrome images
            input_image = loader.build_input_image(true_image, channels=self.channels, scale=self.scale,
                                                   alignment=self.scale)
            input_bicubic_y_image = util.resize_image_by_pil(input_image, self.scale,
                                                             resampling_method=self.resampling_method)
            output_image = self.do(input_image, input_bicubic_y_image)
            psnr, ssim = util.compute_psnr_and_ssim(true_image, output_image, border_size=self.psnr_calc_border_size)
            util.save_image(output_directory + file_path, true_image)
            util.save_image(output_directory + filename + "_result" + extension, output_image)
        else:
            # unsupported image/model channel combination
            return None, None

        if print_console:
            print("[%s] PSNR:%f, SSIM:%f" % (filename, psnr, ssim))

        return psnr, ssim
Ejemplo n.º 6
0
 def copy_log_to_archive(self, archive_name):
     """Copy this model's tensorboard log directory into an archive directory.

     archive_name: suffix appended to the log directory name to form the
         archive root. Any previous archive for the same model is removed
         first; copy failures are reported to stdout but not re-raised.
     """
     archive_directory = self.tf_log_dir + '_' + archive_name
     model_archive_directory = archive_directory + '/' + self.name
     util.make_dir(archive_directory)
     # remove a stale archive so copytree does not fail on an existing target
     util.delete_dir(model_archive_directory)
     try:
         shutil.copytree(self.tf_log_dir, model_archive_directory)
         print("tensorboard log archived to [%s]." % model_archive_directory)
     except OSError as e:
         print(e)
         print("NG: tensorboard log archived to [%s]." % model_archive_directory)
Ejemplo n.º 7
0
def main(not_parsed_args):
    """Write flipped/rotated augmented copies of every dataset image.

    FLAGS.augment_level (2..8) selects cumulatively which variants are
    produced next to a plain copy of each original image, all written into
    "<dataset>_<level>/". Suffixes: _v/_h/_hv flips, _r1/_r2 rotations,
    _r1_v/_r2_v rotation+flip.
    """
    if len(not_parsed_args) > 1:
        print("Unknown args:%s" % not_parsed_args)
        exit()

    print("Building x%d augmented data." % FLAGS.augment_level)

    training_filenames = util.get_files_in_directory(FLAGS.data_dir + "/" +
                                                     FLAGS.dataset + "/")
    target_dir = FLAGS.data_dir + "/" + FLAGS.dataset + ("_%d/" %
                                                         FLAGS.augment_level)
    util.make_dir(target_dir)

    # (minimum augment_level, filename suffix, transform) per variant.
    # Table replaces the copy/pasted if-blocks and avoids recomputing the
    # same rot90/fliplr intermediate arrays several times per image.
    transforms = [
        (2, "_v", np.flipud),
        (3, "_h", np.fliplr),
        (4, "_hv", lambda img: np.flipud(np.fliplr(img))),
        (5, "_r1", np.rot90),
        (6, "_r2", lambda img: np.rot90(img, -1)),
        (7, "_r1_v", lambda img: np.flipud(np.rot90(img))),
        (8, "_r2_v", lambda img: np.flipud(np.rot90(img, -1))),
    ]

    for file_path in training_filenames:
        org_image = util.load_image(file_path)

        filename, extension = os.path.splitext(os.path.basename(file_path))
        new_filename = target_dir + filename

        # always keep an unmodified copy of the original
        util.save_image(new_filename + extension, org_image)

        for min_level, suffix, transform in transforms:
            if FLAGS.augment_level >= min_level:
                util.save_image(new_filename + suffix + extension,
                                transform(org_image))
Ejemplo n.º 8
0
    def build_batch(self, data_dir):
        """Build and save mini-batch patch images from every image in *data_dir*.

        Each source image is split into aligned input / interpolated / true
        patch triples written under self.batch_dir, and the batch parameters
        are persisted to batch_images.ini so a later run can reuse the cache.
        """

        print("Building batch images for %s..." % self.batch_dir)
        filenames = util.get_files_in_directory(data_dir)
        images_count = 0

        # start from a clean directory tree with one sub-dir per image kind
        util.make_dir(self.batch_dir)
        util.clean_dir(self.batch_dir)
        util.make_dir(self.batch_dir + "/" + INPUT_IMAGE_DIR)
        util.make_dir(self.batch_dir + "/" + INTERPOLATED_IMAGE_DIR)
        util.make_dir(self.batch_dir + "/" + TRUE_IMAGE_DIR)

        processed_images = 0
        for filename in filenames:
            # HR-side window/stride are the LR-side values scaled up
            output_window_size = self.batch_image_size * self.scale
            output_window_stride = self.stride * self.scale

            input_image, input_interpolated_image, true_image = \
                build_image_set(filename, channels=self.channels, resampling_method=self.resampling_method,
                                scale=self.scale, print_console=False)

            # split into batch images
            input_batch_images = util.get_split_images(input_image, self.batch_image_size, stride=self.stride)
            input_interpolated_batch_images = util.get_split_images(input_interpolated_image, output_window_size,
                                                                    stride=output_window_stride)

            if input_batch_images is None or input_interpolated_batch_images is None:
                # if the original image size * scale is less than batch image size
                continue
            input_count = input_batch_images.shape[0]

            true_batch_images = util.get_split_images(true_image, output_window_size, stride=output_window_stride)

            for i in range(input_count):
                self.save_input_batch_image(images_count, input_batch_images[i])
                self.save_interpolated_batch_image(images_count, input_interpolated_batch_images[i])
                self.save_true_batch_image(images_count, true_batch_images[i])
                images_count += 1
            processed_images += 1
            # lightweight progress indicator, one dot per 10 images
            if processed_images % 10 == 0:
                print('.', end='', flush=True)

        print("Finished")
        self.count = images_count

        print("%d mini-batch images are built(saved)." % images_count)

        # persist batch metadata so a later run can validate / reuse the cache
        config = configparser.ConfigParser()
        config.add_section("batch")
        config.set("batch", "count", str(images_count))
        config.set("batch", "scale", str(self.scale))
        config.set("batch", "batch_image_size", str(self.batch_image_size))
        config.set("batch", "stride", str(self.stride))
        config.set("batch", "channels", str(self.channels))

        with open(self.batch_dir + "/batch_images.ini", "w") as configfile:
            config.write(configfile)
Ejemplo n.º 9
0
	def build_batch(self, data_dir, batch_dir):
		"""Load input images and save patch batches on disk to reduce memory use.

		Each image in *data_dir* is split into aligned input / interpolated /
		true patch triples written under *batch_dir*, and the batch
		parameters are persisted to batch_images.ini.
		"""

		print("Building batch images for %s..." % batch_dir)
		filenames = util.get_files_in_directory(data_dir)
		images_count = 0

		# start from a clean directory tree with one sub-dir per image kind
		util.make_dir(batch_dir)
		util.clean_dir(batch_dir)
		util.make_dir(batch_dir + "/" + INPUT_IMAGE_DIR)
		util.make_dir(batch_dir + "/" + INTERPOLATED_IMAGE_DIR)
		util.make_dir(batch_dir + "/" + TRUE_IMAGE_DIR)

		for filename in filenames:
			# HR-side window/stride are the LR-side values scaled up
			output_window_size = self.batch_image_size * self.scale
			output_window_stride = self.stride * self.scale

			input_image, input_interpolated_image = self.input.load_input_image(filename, rescale=True,
			                                                                    resampling_method=self.resampling_method)
			test_image = self.true.load_test_image(filename)

			# split into batch images
			input_batch_images = util.get_split_images(input_image, self.batch_image_size, stride=self.stride)
			input_interpolated_batch_images = util.get_split_images(input_interpolated_image, output_window_size,
			                                                        stride=output_window_stride)
			# skip images too small to yield a single patch
			if input_batch_images is None or input_interpolated_batch_images is None:
				continue
			input_count = input_batch_images.shape[0]

			test_batch_images = util.get_split_images(test_image, output_window_size, stride=output_window_stride)

			for i in range(input_count):
				save_input_batch_image(batch_dir, images_count, input_batch_images[i])
				save_interpolated_batch_image(batch_dir, images_count, input_interpolated_batch_images[i])
				save_true_batch_image(batch_dir, images_count, test_batch_images[i])
				images_count += 1

		print("%d mini-batch images are built(saved)." % images_count)

		# persist batch metadata so a later run can validate / reuse the cache
		config = configparser.ConfigParser()
		config.add_section("batch")
		config.set("batch", "count", str(images_count))
		config.set("batch", "scale", str(self.scale))
		config.set("batch", "batch_image_size", str(self.batch_image_size))
		config.set("batch", "stride", str(self.stride))
		config.set("batch", "channels", str(self.channels))
		config.set("batch", "jpeg_mode", str(self.jpeg_mode))
		config.set("batch", "max_value", str(self.max_value))

		with open(batch_dir + "/batch_images.ini", "w") as configfile:
			config.write(configfile)
Ejemplo n.º 10
0
Archivo: LFFN.py Proyecto: qibao77/LFFN
    def __init__(self, flags, model_name=""):
        """Build an LFFN model instance from parsed hyper-parameter *flags*.

        flags: namespace of command-line hyper-parameters, read once here
            and copied onto the instance.
        model_name: optional explicit model name; when empty the name is
            derived from the hyper-parameters by get_model_name().
        """

        super().__init__(flags)

        # Model Parameters
        self.scale = flags.scale
        self.layers = flags.layers
        self.depth_wise_convolution = flags.depth_wise_convolution
        self.resampling_method = BICUBIC_METHOD_STRING
        self.self_ensemble = flags.self_ensemble

        # Training Parameters
        self.optimizer = flags.optimizer
        self.beta1 = flags.beta1
        self.beta2 = flags.beta2
        self.momentum = flags.momentum
        self.batch_num = flags.batch_num
        self.batch_image_size = flags.batch_image_size
        self.clipping_norm = flags.clipping_norm

        # Learning Rate Control for Training
        self.initial_lr = flags.initial_lr
        self.lr_decay = flags.lr_decay
        self.lr_decay_epoch = flags.lr_decay_epoch

        # Dataset or Others
        # round the training image count up to a whole number of batches
        self.training_images = int(
            math.ceil(flags.training_images / flags.batch_num) *
            flags.batch_num)

        # Image Processing Parameters
        self.max_value = flags.max_value
        self.channels = flags.channels
        self.output_channels = flags.channels
        self.psnr_calc_border_size = flags.psnr_calc_border_size
        # negative border size means "derive it from the scale factor"
        if self.psnr_calc_border_size < 0:
            self.psnr_calc_border_size = 2 + self.scale

        # initialize variables
        self.name = self.get_model_name(model_name)
        self.total_epochs = 0
        # total epochs = decay steps needed for the LR to fall below end_lr
        lr = self.initial_lr
        while lr > flags.end_lr:
            self.total_epochs += self.lr_decay_epoch
            lr *= self.lr_decay

        # initialize environment
        util.make_dir(self.checkpoint_dir)
        util.make_dir(flags.graph_dir)
        util.make_dir(self.tf_log_dir)
        if flags.initialize_tf_log:
            util.clean_dir(self.tf_log_dir)
        util.set_logging(flags.log_filename,
                         stream_log_level=logging.INFO,
                         file_log_level=logging.INFO,
                         tf_log_level=tf.logging.WARN)
        logging.info("\nLFFN-------------------------------------")
        logging.info("%s [%s]" % (util.get_now_date(), self.name))

        self.init_train_step()
Ejemplo n.º 11
0
    def build_training_datasets(self,
                                data_dir,
                                batch_dir,
                                batch_image_size,
                                stride_size=0):
        """Create the training DataSets object and build its batch images.

        The on-disk batch is only built when it does not already exist.
        stride_size of 0 selects half the patch size (half-overlapping
        patches).
        """
        print("Building datasets for [%s]..." % "train")
        util.make_dir(batch_dir)

        # half-overlapping patches by default
        stride = batch_image_size // 2 if stride_size == 0 else stride_size

        datasets = loader.DataSets(self.scale,
                                   batch_image_size,
                                   stride,
                                   channels=self.channels,
                                   jpeg_mode=self.jpeg_mode,
                                   max_value=self.max_value,
                                   resampling_method=self.resampling_method)
        self.train = datasets

        if not datasets.is_batch_exist(batch_dir):
            datasets.build_batch(data_dir, batch_dir)
Ejemplo n.º 12
0
def main(not_parsed_args):
    """Convert every dataset image to its Y (luminance) channel as BMP.

    Reads FLAGS.data_dir/FLAGS.dataset/, converts 3-channel images to Y,
    and writes the results into a sibling "<dataset>_y/" directory. Output
    is always saved with a .bmp extension regardless of the input format.
    """
    if len(not_parsed_args) > 1:
        print("Unknown args:%s" % not_parsed_args)
        exit()

    print("Building Y channel data...")

    training_filenames = util.get_files_in_directory(FLAGS.data_dir + "/" +
                                                     FLAGS.dataset + "/")
    target_dir = FLAGS.data_dir + "/" + FLAGS.dataset + "_y/"
    util.make_dir(target_dir)

    for file_path in training_filenames:
        org_image = util.load_image(file_path)
        # color inputs are reduced to the single luminance channel
        if org_image.shape[2] == 3:
            org_image = util.convert_rgb_to_y(org_image)

        # original extension is dropped (removed unused `extension` local)
        filename = os.path.splitext(os.path.basename(file_path))[0]
        util.save_image(target_dir + filename + ".bmp", org_image)
Ejemplo n.º 13
0
def main(not_parsed_args):
    """Serialize DIV2K images and their flipped copies into TFRecord files.

    Writes each original image's raw bytes to DIV2K_org.tfrecords and a
    vertically flipped copy to DIV2K_aug.tfrecords.
    """
    if len(not_parsed_args) > 1:
        print("Unknown args:%s" % not_parsed_args)
        exit()

    print("Building x%d augmented data." % FLAGS.augment_level)

    training_filenames = util.get_files_in_directory(
        "/media/data3/ww/sr_data/DIV2K_train_HR/")
    target_dir = "/media/data3/ww/sr_data/DIV2K_train_HR" + (
        "_%d/" % FLAGS.augment_level)
    util.make_dir(target_dir)

    def _serialized_example(image):
        """Pack one image's raw bytes into a serialized tf.train.Example."""
        raw = image.tobytes()  # convert image to bytes
        example = tf.train.Example(features=tf.train.Features(
            feature={
                'org_raw':
                tf.train.Feature(bytes_list=tf.train.BytesList(value=[raw]))
            }))
        return example.SerializeToString()

    # BUG FIX: writer2 was never closed (only writer.close() was called),
    # risking a truncated DIV2K_aug.tfrecords. Context managers close both.
    with tf.python_io.TFRecordWriter("DIV2K_org.tfrecords") as writer, \
            tf.python_io.TFRecordWriter("DIV2K_aug.tfrecords") as writer2:
        for file_path in training_filenames:
            org_image = util.load_image(file_path)
            writer.write(_serialized_example(org_image))
            # augmentation: vertically flipped copy
            writer2.write(_serialized_example(np.flipud(org_image)))
Ejemplo n.º 14
0
    def load_datasets(self, data_dir, batch_dir, batch_image_size, stride_size=0):
        """Build (once) and load the pre-processed training patch dataset.

        Opens an image directory as a dataset: each image is split into patch
        images and converted to the model input. Pre-building the patches on
        disk avoids re-decoding and re-scaling images every epoch; the
        trade-off is that patches are limited to a fixed grid.
        """

        # cache batches per scale factor so different scales don't collide
        batch_dir += "/scale%d" % self.scale

        self.train = loader.BatchDataSets(self.scale, batch_dir, batch_image_size, stride_size, channels=self.channels,
                                          resampling_method=self.resampling_method)
        if self.train.is_batch_exist():
            self.train.load_batch_counts()
        else:
            # start from a clean directory tree, then build patches in parallel
            util.make_dir(batch_dir)
            util.clean_dir(batch_dir)
            for sub_dir in (INPUT_IMAGE_DIR, INTERPOLATED_IMAGE_DIR, TRUE_IMAGE_DIR):
                util.make_dir(batch_dir + "/" + sub_dir)
            self.train.build_batch_threaded(data_dir, batch_dir, self.threads)
        self.train.load_all_batch_images(self.threads)
Ejemplo n.º 15
0
def load_and_evaluate_tflite_graph(
    output_dir,
    data_dir,
    test_data,
    model_path=os.path.join(os.getcwd(),
                            'model_to_freeze/converted_model.tflite')):
    """Run a converted TFLite super-resolution model over a test image set.

    For every color image in data_dir/test_data, feeds the Y channel and its
    bicubic upscale through the TFLite interpreter, saves the intermediate,
    result and loss images under output_dir/tflite/, and prints the average
    PSNR / SSIM / inference time over the images actually evaluated.

    output_dir: root directory for the saved images.
    data_dir: root directory containing the test set folder.
    test_data: name of the test set folder.
    model_path: path of the .tflite model file.
    """
    # https://stackoverflow.com/questions/50443411/how-to-load-a-tflite-model-in-script
    # https://www.tensorflow.org/lite/convert/python_api#tensorflow_lite_python_interpreter_
    output_directory = output_dir + "/" + "tflite" + "/"
    util.make_dir(output_directory)

    test_filepaths = util.get_files_in_directory(data_dir + "/" + test_data)
    total_psnr = total_ssim = total_time = 0
    evaluated_count = 0  # BUG FIX: only evaluated images may enter the average

    # Load TFLite model and allocate tensors.
    interpreter = tf.lite.Interpreter(model_path=model_path)
    interpreter.allocate_tensors()

    # Get input and output tensors.
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()

    for file_path in test_filepaths:
        # split filename from extension
        filename, extension = os.path.splitext(file_path)

        # prepare true image
        true_image = util.set_image_alignment(
            util.load_image(file_path, print_console=False), FLAGS.scale)

        # BUG FIX: images that were not (3-channel input, 1-channel model)
        # previously fell through the if-block and crashed on undefined
        # start/end/psnr/ssim (or silently re-counted the previous image's
        # scores). Skip them explicitly instead.
        if not (true_image.shape[2] == 3 and FLAGS.channels == 1):
            continue

        # prepare input and ground truth images
        input_y_image = loader.build_input_image(true_image,
                                                 channels=FLAGS.channels,
                                                 scale=FLAGS.scale,
                                                 alignment=FLAGS.scale,
                                                 convert_ycbcr=True)
        input_bicubic_y_image = util.resize_image_by_pil(
            input_y_image,
            FLAGS.scale,
            resampling_method=FLAGS.resampling_method)
        true_ycbcr_image = util.convert_rgb_to_ycbcr(true_image)

        # pass inputs through the model (need to recast and reshape inputs)
        input_y_image_reshaped = input_y_image.astype('float32').reshape(
            1, input_y_image.shape[0], input_y_image.shape[1],
            FLAGS.channels)
        input_bicubic_y_image_reshaped = input_bicubic_y_image.astype(
            'float32').reshape(1, input_bicubic_y_image.shape[0],
                               input_bicubic_y_image.shape[1],
                               FLAGS.channels)

        interpreter.set_tensor(input_details[0]['index'],
                               input_y_image_reshaped)  # pass x
        interpreter.set_tensor(input_details[1]['index'],
                               input_bicubic_y_image_reshaped)  # pass x2

        # time only the inference call itself
        start = time.time()
        interpreter.invoke()
        end = time.time()

        output_y_image = interpreter.get_tensor(
            output_details[0]['index'])  # get y
        # resize the output into an image
        output_y_image = output_y_image.reshape(output_y_image.shape[1],
                                                output_y_image.shape[2],
                                                FLAGS.channels)

        # calculate psnr and ssim for the output
        psnr, ssim = util.compute_psnr_and_ssim(
            true_ycbcr_image[:, :, 0:1],
            output_y_image,
            border_size=FLAGS.psnr_calc_border_size)

        # get the loss image
        loss_image = util.get_loss_image(
            true_ycbcr_image[:, :, 0:1],
            output_y_image,
            border_size=FLAGS.psnr_calc_border_size)

        # get output color image
        output_color_image = util.convert_y_and_cbcr_to_rgb(
            output_y_image, true_ycbcr_image[:, :, 1:3])

        # save all images
        util.save_image(output_directory + file_path, true_image)
        util.save_image(output_directory + filename + "_input" + extension,
                        input_y_image)
        util.save_image(
            output_directory + filename + "_input_bicubic" + extension,
            input_bicubic_y_image)
        util.save_image(
            output_directory + filename + "_true_y" + extension,
            true_ycbcr_image[:, :, 0:1])
        util.save_image(
            output_directory + filename + "_result" + extension,
            output_y_image)
        util.save_image(
            output_directory + filename + "_result_c" + extension,
            output_color_image)
        util.save_image(output_directory + filename + "_loss" + extension,
                        loss_image)

        total_psnr += psnr
        total_ssim += ssim
        total_time += end - start
        evaluated_count += 1

    # BUG FIX: guard against an empty or fully-skipped test set, which
    # previously raised ZeroDivisionError (or averaged over skipped files).
    if evaluated_count == 0:
        print("No evaluable images found in [%s]." % test_data)
        return
    print("Model Average [%s] PSNR:%f, SSIM:%f, Elapsed Time:%f" %
          (test_data, total_psnr / evaluated_count,
           total_ssim / evaluated_count, total_time / evaluated_count))
Ejemplo n.º 16
0
    def __init__(self, flags, model_name=""):
        """Build a DCSCN v2 model instance from parsed hyper-parameter *flags*.

        flags: namespace of command-line hyper-parameters, read once here
            and copied onto the instance.
        model_name: optional explicit model name; when empty the name is
            derived from the hyper-parameters (plus flags.name_postfix) by
            get_model_name().
        """

        super().__init__(flags)

        # Model Parameters
        self.scale = flags.scale
        self.layers = flags.layers
        self.filters = flags.filters
        # clamp: min_filters may never exceed the initial filter count
        self.min_filters = min(flags.filters, flags.min_filters)
        self.filters_decay_gamma = flags.filters_decay_gamma
        self.use_nin = flags.use_nin
        self.nin_filters = flags.nin_filters
        self.nin_filters2 = flags.nin_filters2
        # at least one reconstruction layer is always used
        self.reconstruct_layers = max(flags.reconstruct_layers, 1)
        self.reconstruct_filters = flags.reconstruct_filters
        self.resampling_method = flags.resampling_method
        self.pixel_shuffler = flags.pixel_shuffler
        self.pixel_shuffler_filters = flags.pixel_shuffler_filters
        self.self_ensemble = flags.self_ensemble
        self.depthwise_seperable = flags.depthwise_seperable
        self.bottleneck = flags.bottleneck

        # Training Parameters
        self.l2_decay = flags.l2_decay
        self.optimizer = flags.optimizer
        self.beta1 = flags.beta1
        self.beta2 = flags.beta2
        self.epsilon = flags.epsilon
        self.momentum = flags.momentum
        self.batch_num = flags.batch_num
        self.batch_image_size = flags.batch_image_size
        # stride of 0 means "half the patch size" (half-overlapping patches)
        if flags.stride_size == 0:
            self.stride_size = flags.batch_image_size // 2
        else:
            self.stride_size = flags.stride_size
        self.clipping_norm = flags.clipping_norm
        self.use_l1_loss = flags.use_l1_loss

        # Learning Rate Control for Training
        self.initial_lr = flags.initial_lr
        self.lr_decay = flags.lr_decay
        self.lr_decay_epoch = flags.lr_decay_epoch

        # Dataset or Others
        # round the training image count up to a whole number of batches
        self.training_images = int(
            math.ceil(flags.training_images / flags.batch_num) *
            flags.batch_num)
        self.train = None
        self.test = None
        self.gpu_device_id = flags.gpu_device_id

        # Image Processing Parameters
        self.max_value = flags.max_value
        self.channels = flags.channels
        # the model reconstructs a single output channel
        self.output_channels = 1
        self.psnr_calc_border_size = flags.psnr_calc_border_size
        # negative border size means "derive it from the scale factor"
        if self.psnr_calc_border_size < 0:
            self.psnr_calc_border_size = self.scale
        self.input_image_width = flags.input_image_width
        self.input_image_height = flags.input_image_height

        # Environment (all directory name should not contain tailing '/'  )
        self.batch_dir = flags.batch_dir

        # initialize variables
        self.name = self.get_model_name(model_name,
                                        name_postfix=flags.name_postfix)
        self.total_epochs = 0
        # total epochs = decay steps needed for the LR to fall below end_lr
        lr = self.initial_lr
        while lr > flags.end_lr:
            self.total_epochs += self.lr_decay_epoch
            lr *= self.lr_decay

        # initialize environment
        util.make_dir(self.checkpoint_dir)
        util.make_dir(flags.graph_dir)
        util.make_dir(self.tf_log_dir)
        if flags.initialize_tf_log:
            util.clean_dir(self.tf_log_dir)
        util.set_logging(flags.log_filename,
                         stream_log_level=logging.INFO,
                         file_log_level=logging.INFO,
                         tf_log_level=tf.logging.WARN)
        logging.info("\nDCSCN v2-------------------------------------")
        logging.info("%s [%s]" % (util.get_now_date(), self.name))

        self.init_train_step()
Ejemplo n.º 17
0
Archivo: LFFN.py Proyecto: qibao77/LFFN
    def do_for_evaluate_with_output(self,
                                    file_path,
                                    output_directory,
                                    print_console=False):
        """Super-resolve one image, write result/diagnostic images and
        return quality metrics.

        Args:
            file_path: path of the ground-truth (HR) image.
            output_directory: root output directory; images are written
                into a per-model sub-directory under it.
            print_console: when True, print the metrics to stdout.

        Returns:
            Tuple ``(mse, psnr, ssim, elapsed_time)``; all zeros when the
            image's channel count does not match the model's.
        """
        filename, extension = os.path.splitext(file_path)
        output_directory += "/" + self.name + "/"
        util.make_dir(output_directory)

        # Ground truth, aligned so width/height are multiples of scale.
        true_image = util.set_image_alignment(
            util.load_image(file_path, print_console=False), self.scale)

        image_channels = true_image.shape[2]

        if image_channels == 3 and self.channels == 3:
            # --- color (RGB) path ---
            input_image = util.build_input_image(true_image,
                                                 scale=self.scale,
                                                 alignment=self.scale)
            input_bicubic_image = util.resize_image_by_pil(
                input_image,
                self.scale,
                resampling_method=self.resampling_method)

            output_image, spend_time = self.do(input_image)  # SR

            # Metrics are computed on the luminance (Y) channel only.
            sr_y = eva.convert_rgb_to_y(output_image)
            hr_y = eva.convert_rgb_to_y(true_image)
            psnr_predicted = eva.PSNR(np.uint8(hr_y),
                                      np.uint8(sr_y),
                                      shave_border=self.psnr_calc_border_size)
            ssim_predicted = eva.compute_ssim(np.squeeze(hr_y),
                                              np.squeeze(sr_y))

            mse = util.compute_mse(hr_y,
                                   sr_y,
                                   border_size=self.psnr_calc_border_size)
            loss_image = util.get_loss_image(
                hr_y, sr_y, border_size=self.psnr_calc_border_size)

            # NOTE(review): the [29:]/[28:] slices appear to strip a
            # hard-coded dataset path prefix -- fragile; verify against
            # the caller's path layout.
            util.save_image(output_directory + file_path[29:], true_image)
            for suffix, image in (("_input", input_image),
                                  ("_input_bicubic", input_bicubic_image),
                                  ("_sr", output_image),
                                  ("_loss", loss_image)):
                util.save_image(
                    output_directory + filename[28:] + suffix + extension,
                    image)

        elif image_channels == 1 and self.channels == 1:
            # --- monochrome path ---
            input_image = util.build_input_image(true_image,
                                                 scale=self.scale,
                                                 alignment=self.scale)
            output_image, spend_time = self.do(input_image)

            psnr_predicted = eva.PSNR(np.uint8(true_image),
                                      np.uint8(output_image),
                                      shave_border=self.psnr_calc_border_size)
            ssim_predicted = eva.compute_ssim(np.squeeze(true_image),
                                              np.squeeze(output_image))
            mse = util.compute_mse(true_image,
                                   output_image,
                                   border_size=self.psnr_calc_border_size)

            util.save_image(output_directory + file_path, true_image)
            util.save_image(output_directory + filename + "_sr" + extension,
                            output_image)
        else:
            # Channel mismatch: nothing to evaluate.
            psnr_predicted = 0.0
            ssim_predicted = 0.0
            mse = 0.0
            spend_time = 0.0

        if print_console:
            print("[%s] psnr:%f, ssim:%f, time:%f" %
                  (filename, psnr_predicted, ssim_predicted, spend_time))

        return mse, psnr_predicted, ssim_predicted, spend_time
Ejemplo n.º 18
0
    def __init__(self, flags, model_name=""):
        """Configure the model from parsed command-line *flags*, prepare
        working directories and logging, and open a TF session.

        Args:
            flags: parsed flag/argument object holding all hyper-parameters.
            model_name: optional explicit model name; derived from the
                hyper-parameters when empty.
        """

        # Model parameters
        self.filters = flags.filters
        self.min_filters = flags.min_filters
        self.nin_filters = flags.nin_filters
        # Second NIN layer defaults to half of the first when not given.
        self.nin_filters2 = (flags.nin_filters2 if flags.nin_filters2 != 0
                             else flags.nin_filters // 2)
        self.cnn_size = flags.cnn_size
        self.last_cnn_size = flags.last_cnn_size
        self.cnn_stride = 1
        self.layers = flags.layers
        self.nin = flags.nin
        self.bicubic_init = flags.bicubic_init
        self.dropout = flags.dropout
        self.activator = flags.activator
        self.filters_decay_gamma = flags.filters_decay_gamma

        # Training parameters
        self.initializer = flags.initializer
        self.weight_dev = flags.weight_dev
        self.l2_decay = flags.l2_decay
        self.optimizer = flags.optimizer
        self.beta1 = flags.beta1
        self.beta2 = flags.beta2
        self.momentum = flags.momentum
        self.batch_num = flags.batch_num
        self.batch_image_size = flags.batch_image_size
        # A stride of 0 means "half of the patch size".
        self.stride_size = (flags.stride_size if flags.stride_size != 0
                            else flags.batch_image_size // 2)

        # Learning-rate schedule
        self.initial_lr = flags.initial_lr
        self.lr_decay = flags.lr_decay
        self.lr_decay_epoch = flags.lr_decay_epoch

        # Datasets
        self.dataset = flags.dataset
        self.test_dataset = flags.test_dataset

        # Image-processing parameters
        self.scale = flags.scale
        self.max_value = flags.max_value
        self.channels = flags.channels
        self.jpeg_mode = flags.jpeg_mode
        self.output_channels = self.scale * self.scale

        # Environment (directory names must not end with '/')
        self.checkpoint_dir = flags.checkpoint_dir
        self.tf_log_dir = flags.tf_log_dir

        # Debugging / logging options
        self.debug = flags.debug
        self.save_loss = flags.save_loss
        self.save_weights = flags.save_weights
        self.save_images = flags.save_images
        self.save_images_num = flags.save_images_num
        self.log_weight_image_num = 32

        # Derived state
        self.name = self.get_model_name(model_name)
        self.batch_input = [None] * self.batch_num
        self.batch_input_quad = [None] * self.batch_num
        self.batch_true_quad = [None] * self.batch_num
        self.receptive_fields = 2 * self.layers + self.cnn_size - 2
        self.complexity = 0

        # Prepare directories, logging and the TF session.
        util.make_dir(self.checkpoint_dir)
        util.make_dir(flags.graph_dir)
        util.make_dir(self.tf_log_dir)
        if flags.initialise_tf_log:
            util.clean_dir(self.tf_log_dir)
        util.set_logging(flags.log_filename,
                         stream_log_level=logging.INFO,
                         file_log_level=logging.INFO,
                         tf_log_level=tf.logging.WARN)
        session_config = tf.ConfigProto()
        # Allocate GPU memory on demand rather than grabbing it all.
        session_config.gpu_options.allow_growth = True
        self.sess = tf.InteractiveSession(config=session_config)
        self.init_train_step()

        logging.info("\nDCSCN -------------------------------------")
        logging.info("%s [%s]" % (util.get_now_date(), self.name))
Ejemplo n.º 19
0
    def __init__(self, flags, model_name=""):
        """Initialize model hyper-parameters from *flags*, derive the
        training schedule, and prepare directories and logging.

        Args:
            flags: parsed flag/argument object holding all hyper-parameters.
            model_name: optional explicit model name; derived from the
                hyper-parameters when empty.
        """

        super().__init__(flags)

        # Model parameters
        self.layers = flags.layers
        self.filters = flags.filters
        self.min_filters = min(flags.filters, flags.min_filters)
        self.filters_decay_gamma = flags.filters_decay_gamma
        self.use_nin = flags.use_nin
        self.nin_filters = flags.nin_filters
        self.nin_filters2 = flags.nin_filters2
        self.reconstruct_layers = max(flags.reconstruct_layers, 1)
        self.reconstruct_filters = flags.reconstruct_filters
        self.resampling_method = BICUBIC_METHOD_STRING
        self.pixel_shuffler = flags.pixel_shuffler
        self.self_ensemble = flags.self_ensemble

        # Training parameters
        self.l2_decay = flags.l2_decay
        self.optimizer = flags.optimizer
        self.beta1 = flags.beta1
        self.beta2 = flags.beta2
        self.momentum = flags.momentum
        self.batch_num = flags.batch_num
        self.batch_image_size = flags.batch_image_size
        # A stride of 0 means "half of the patch size".
        self.stride_size = (flags.stride_size if flags.stride_size != 0
                            else flags.batch_image_size // 2)
        self.clipping_norm = flags.clipping_norm

        # Learning-rate schedule
        self.initial_lr = flags.initial_lr
        self.lr_decay = flags.lr_decay
        self.lr_decay_epoch = flags.lr_decay_epoch

        # Datasets and others
        self.dataset = flags.dataset
        self.test_dataset = flags.test_dataset
        # Round the training image count down to a whole number of
        # batches (at least one full batch).
        full_batches = max(1, flags.training_images // flags.batch_num)
        self.training_image_count = full_batches * flags.batch_num
        self.train = None
        self.test = None

        # Image-processing parameters
        self.scale = flags.scale
        self.max_value = flags.max_value
        self.channels = flags.channels
        self.jpeg_mode = flags.jpeg_mode
        self.output_channels = 1

        # Environment (directory names must not end with '/')
        self.batch_dir = flags.batch_dir

        # Derived state: total epochs = number of decay periods needed for
        # the learning rate to fall from initial_lr to end_lr or below.
        self.name = self.get_model_name(model_name)
        self.total_epochs = 0
        rate = self.initial_lr
        while rate > flags.end_lr:
            self.total_epochs += self.lr_decay_epoch
            rate *= self.lr_decay

        # Prepare directories and logging.
        util.make_dir(self.checkpoint_dir)
        util.make_dir(flags.graph_dir)
        util.make_dir(self.tf_log_dir)
        if flags.initialise_tf_log:
            util.clean_dir(self.tf_log_dir)
        util.set_logging(flags.log_filename,
                         stream_log_level=logging.INFO,
                         file_log_level=logging.INFO,
                         tf_log_level=tf.logging.WARN)
        logging.info("\nDCSCN v2-------------------------------------")
        logging.info("%s [%s]" % (util.get_now_date(), self.name))

        self.init_train_step()
Ejemplo n.º 20
0
    def do_for_evaluate(self,
                        file_path,
                        output_directory="output",
                        output=True,
                        print_console=False):
        """Evaluate the model on one image and return the MSE of the
        reconstructed luminance channel.

        Args:
            file_path: path of the ground-truth image.
            output_directory: root output directory; results go into a
                per-model sub-directory.
            output: when True, also save input/bicubic/result/loss images.
            print_console: when True, print MSE and PSNR to stdout.

        Returns:
            The MSE (0 when the channel combination is unsupported).
        """
        filename, extension = os.path.splitext(file_path)
        output_directory += "/" + self.name + "/"
        util.make_dir(output_directory)

        true_image = util.set_image_alignment(
            util.load_image(file_path, print_console=False), self.scale)

        if true_image.shape[2] == 3 and self.channels == 1:
            # Color image with a luminance-only model: process the Y channel.
            input_y_image = loader.build_input_image(true_image,
                                                     channels=self.channels,
                                                     scale=self.scale,
                                                     alignment=self.scale,
                                                     convert_ycbcr=True,
                                                     jpeg_mode=self.jpeg_mode)
            if output:
                input_bicubic_y_image = util.resize_image_by_pil(
                    input_y_image,
                    self.scale,
                    resampling_method=self.resampling_method)
                true_ycbcr_image = util.convert_rgb_to_ycbcr(
                    true_image, jpeg_mode=self.jpeg_mode)

                output_y_image = self.do(input_y_image, input_bicubic_y_image)
                true_y = true_ycbcr_image[:, :, 0:1]
                mse = util.compute_mse(true_y,
                                       output_y_image,
                                       border_size=6 + self.scale)
                loss_image = util.get_loss_image(true_y,
                                                 output_y_image,
                                                 border_size=self.scale)

                # Re-attach the ground truth's chroma so the result can be
                # viewed as a color image.
                output_color_image = util.convert_y_and_cbcr_to_rgb(
                    output_y_image,
                    true_ycbcr_image[:, :, 1:3],
                    jpeg_mode=self.jpeg_mode)

                util.save_image(output_directory + file_path, true_image)
                for suffix, image in (
                        ("_input", input_y_image),
                        ("_input_bicubic", input_bicubic_y_image),
                        ("_true_y", true_y),
                        ("_result", output_y_image),
                        ("_result_c", output_color_image),
                        ("_loss", loss_image)):
                    util.save_image(
                        output_directory + filename + suffix + extension,
                        image)
            else:
                # Metrics only -- skip the image dumps.
                true_y_image = util.convert_rgb_to_y(true_image,
                                                     jpeg_mode=self.jpeg_mode)
                input_bicubic_y_image = util.resize_image_by_pil(
                    input_y_image,
                    self.scale,
                    resampling_method=self.resampling_method)
                output_y_image = self.do(input_y_image, input_bicubic_y_image)
                mse = util.compute_mse(true_y_image,
                                       output_y_image,
                                       border_size=6 + self.scale)

        elif true_image.shape[2] == 1 and self.channels == 1:
            # Monochrome image: no color-space conversion needed.
            input_image = loader.build_input_image(true_image,
                                                   channels=self.channels,
                                                   scale=self.scale,
                                                   alignment=self.scale)
            input_bicubic_y_image = util.resize_image_by_pil(
                input_image,
                self.scale,
                resampling_method=self.resampling_method)
            output_image = self.do(input_image, input_bicubic_y_image)
            mse = util.compute_mse(true_image,
                                   output_image,
                                   border_size=6 + self.scale)
            if output:
                util.save_image(output_directory + file_path, true_image)
                util.save_image(
                    output_directory + filename + "_result" + extension,
                    output_image)
        else:
            # Unsupported channel combination: nothing to evaluate.
            mse = 0

        if print_console:
            print("MSE:%f, PSNR:%f" % (mse, util.get_psnr(mse)))

        return mse
Ejemplo n.º 21
0
    def do_for_evaluate_with_output(self,
                                    file_path,
                                    output_directory=None,
                                    print_console=False):
        """Super-resolve one color image and return (psnr, ssim) measured
        on the luminance channel.

        Args:
            file_path: path of the ground-truth image. Must be 3-channel,
                and the model must be configured with channels == 1.
            output_directory: when given, input/result/diagnostic images
                are written under ``output_directory/<model name>/``.
            print_console: when True, print the metrics to stdout.

        Returns:
            Tuple ``(psnr, ssim)`` computed with
            ``self.psnr_calc_border_size`` pixels shaved off the border.

        Raises:
            AssertionError: if the channel combination is unsupported.
        """
        # BUG FIX: filename/extension were previously computed only inside
        # the `if output_directory:` branch, so calling with
        # print_console=True and no output directory raised NameError at
        # the final print. Compute them up front instead.
        filename, extension = os.path.splitext(file_path)

        true_image = util.set_image_alignment(
            util.load_image(file_path, print_console=False), self.scale)

        # Only the "3-channel image + luminance-only model" combination is
        # supported by this evaluation path.
        assert true_image.shape[
            2] == 3 and self.channels == 1, "Only 3-channel images are supported"

        input_image = loader.build_input_image(true_image,
                                               scale=self.scale,
                                               alignment=self.scale)

        input_y_image = util.convert_rgb_to_y(input_image)
        true_y_image = util.convert_rgb_to_y(true_image)
        input_bicubic_y_image = util.resize_image_by_pil(
            input_y_image,
            self.scale,
            resampling_method=self.resampling_method)

        output_y_image = self.do(input_y_image, input_bicubic_y_image)

        psnr, ssim = util.compute_psnr_and_ssim(
            true_y_image,
            output_y_image,
            border_size=self.psnr_calc_border_size)

        if output_directory:
            # Rebuild a viewable color result by combining the SR luminance
            # with the ground truth's chroma channels.
            true_ycbcr_image = util.convert_rgb_to_ycbcr(true_image)
            _, true_cbcr = util.convert_ycbcr_to_y_cbcr(true_ycbcr_image)
            output_color_image = util.convert_y_and_cbcr_to_rgb(
                output_y_image, true_cbcr)

            loss_image = util.get_loss_image(
                true_y_image,
                output_y_image,
                border_size=self.psnr_calc_border_size)

            output_directory += "/" + self.name + "/"
            util.make_dir(output_directory)

            util.save_image(output_directory + file_path, true_image)
            util.save_image(output_directory + filename + "_input" + extension,
                            input_y_image)
            util.save_image(
                output_directory + filename + "_input_bicubic" + extension,
                input_bicubic_y_image)
            util.save_image(
                output_directory + filename + "_true_y" + extension,
                true_ycbcr_image[:, :, 0:1])
            util.save_image(
                output_directory + filename + "_result" + extension,
                output_y_image)
            util.save_image(
                output_directory + filename + "_result_c" + extension,
                output_color_image)
            util.save_image(output_directory + filename + "_loss" + extension,
                            loss_image)

        if print_console:
            print("[%s] PSNR:%f, SSIM:%f" % (filename, psnr, ssim))

        return psnr, ssim