Example #1
File: LFFN.py Project: qibao77/LFFN
    def __init__(self, flags, model_name=""):

        super().__init__(flags)

        # Model Parameters
        self.scale = flags.scale
        self.layers = flags.layers
        self.depth_wise_convolution = flags.depth_wise_convolution
        self.resampling_method = BICUBIC_METHOD_STRING
        self.self_ensemble = flags.self_ensemble

        # Training Parameters
        self.optimizer = flags.optimizer
        self.beta1 = flags.beta1
        self.beta2 = flags.beta2
        self.momentum = flags.momentum
        self.batch_num = flags.batch_num
        self.batch_image_size = flags.batch_image_size
        self.clipping_norm = flags.clipping_norm

        # Learning Rate Control for Training
        self.initial_lr = flags.initial_lr
        self.lr_decay = flags.lr_decay
        self.lr_decay_epoch = flags.lr_decay_epoch

        # Dataset or Others
        self.training_images = int(
            math.ceil(flags.training_images / flags.batch_num) *
            flags.batch_num)

        # Image Processing Parameters
        self.max_value = flags.max_value
        self.channels = flags.channels
        self.output_channels = flags.channels
        self.psnr_calc_border_size = flags.psnr_calc_border_size
        if self.psnr_calc_border_size < 0:
            self.psnr_calc_border_size = 2 + self.scale

        # initialize variables
        self.name = self.get_model_name(model_name)
        self.total_epochs = 0
        lr = self.initial_lr
        while lr > flags.end_lr:
            self.total_epochs += self.lr_decay_epoch
            lr *= self.lr_decay

        # initialize environment
        util.make_dir(self.checkpoint_dir)
        util.make_dir(flags.graph_dir)
        util.make_dir(self.tf_log_dir)
        if flags.initialize_tf_log:
            util.clean_dir(self.tf_log_dir)
        util.set_logging(flags.log_filename,
                         stream_log_level=logging.INFO,
                         file_log_level=logging.INFO,
                         tf_log_level=tf.logging.WARN)
        logging.info("\nLFFN-------------------------------------")
        logging.info("%s [%s]" % (util.get_now_date(), self.name))

        self.init_train_step()

    def build_batch(self, data_dir):
        """ Build batch images and save them to files. """

        print("Building batch images for %s..." % self.batch_dir)
        filenames = util.get_files_in_directory(data_dir)
        images_count = 0

        util.make_dir(self.batch_dir)
        util.clean_dir(self.batch_dir)
        util.make_dir(self.batch_dir + "/" + INPUT_IMAGE_DIR)
        util.make_dir(self.batch_dir + "/" + INTERPOLATED_IMAGE_DIR)
        util.make_dir(self.batch_dir + "/" + TRUE_IMAGE_DIR)

        processed_images = 0
        for filename in filenames:
            output_window_size = self.batch_image_size * self.scale
            output_window_stride = self.stride * self.scale

            input_image, input_interpolated_image, true_image = \
                build_image_set(filename, channels=self.channels, resampling_method=self.resampling_method,
                                scale=self.scale, print_console=False)

            # split into batch images
            input_batch_images = util.get_split_images(input_image, self.batch_image_size, stride=self.stride)
            input_interpolated_batch_images = util.get_split_images(input_interpolated_image, output_window_size,
                                                                    stride=output_window_stride)

            if input_batch_images is None or input_interpolated_batch_images is None:
                # skip images that are too small to yield even one patch
                continue
            input_count = input_batch_images.shape[0]

            true_batch_images = util.get_split_images(true_image, output_window_size, stride=output_window_stride)

            for i in range(input_count):
                self.save_input_batch_image(images_count, input_batch_images[i])
                self.save_interpolated_batch_image(images_count, input_interpolated_batch_images[i])
                self.save_true_batch_image(images_count, true_batch_images[i])
                images_count += 1
            processed_images += 1
            if processed_images % 10 == 0:
                print('.', end='', flush=True)

        print("Finished")
        self.count = images_count

        print("%d mini-batch images are built(saved)." % images_count)

        config = configparser.ConfigParser()
        config.add_section("batch")
        config.set("batch", "count", str(images_count))
        config.set("batch", "scale", str(self.scale))
        config.set("batch", "batch_image_size", str(self.batch_image_size))
        config.set("batch", "stride", str(self.stride))
        config.set("batch", "channels", str(self.channels))

        with open(self.batch_dir + "/batch_images.ini", "w") as configfile:
            config.write(configfile)
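
The build_batch method above records the patch count and split parameters in a batch_images.ini file via configparser. A minimal sketch of reading that metadata back, using the same section and key names written above (the project's own loader may read it differently):

import configparser

def read_batch_info(batch_dir):
    # Parse the INI written by build_batch and return the stored metadata.
    config = configparser.ConfigParser()
    config.read(batch_dir + "/batch_images.ini")
    count = config.getint("batch", "count")
    scale = config.getint("batch", "scale")
    batch_image_size = config.getint("batch", "batch_image_size")
    stride = config.getint("batch", "stride")
    return count, scale, batch_image_size, stride

# With, say, 20000 saved patches and batch_num = 20, one epoch
# corresponds to 20000 // 20 = 1000 training steps.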
	def build_batch(self, data_dir, batch_dir):
		""" load from input files. Then save batch images on file to reduce memory consumption. """

		print("Building batch images for %s..." % batch_dir)
		filenames = util.get_files_in_directory(data_dir)
		images_count = 0

		util.make_dir(batch_dir)
		util.clean_dir(batch_dir)
		util.make_dir(batch_dir + "/" + INPUT_IMAGE_DIR)
		util.make_dir(batch_dir + "/" + INTERPOLATED_IMAGE_DIR)
		util.make_dir(batch_dir + "/" + TRUE_IMAGE_DIR)

		for filename in filenames:
			output_window_size = self.batch_image_size * self.scale
			output_window_stride = self.stride * self.scale

			input_image, input_interpolated_image = self.input.load_input_image(filename, rescale=True,
			                                                                    resampling_method=self.resampling_method)
			test_image = self.true.load_test_image(filename)

			# split into batch images
			input_batch_images = util.get_split_images(input_image, self.batch_image_size, stride=self.stride)
			input_interpolated_batch_images = util.get_split_images(input_interpolated_image, output_window_size,
			                                                        stride=output_window_stride)
			if input_batch_images is None or input_interpolated_batch_images is None:
				continue
			input_count = input_batch_images.shape[0]

			test_batch_images = util.get_split_images(test_image, output_window_size, stride=output_window_stride)

			for i in range(input_count):
				save_input_batch_image(batch_dir, images_count, input_batch_images[i])
				save_interpolated_batch_image(batch_dir, images_count, input_interpolated_batch_images[i])
				save_true_batch_image(batch_dir, images_count, test_batch_images[i])
				images_count += 1

		print("%d mini-batch images are built(saved)." % images_count)

		config = configparser.ConfigParser()
		config.add_section("batch")
		config.set("batch", "count", str(images_count))
		config.set("batch", "scale", str(self.scale))
		config.set("batch", "batch_image_size", str(self.batch_image_size))
		config.set("batch", "stride", str(self.stride))
		config.set("batch", "channels", str(self.channels))
		config.set("batch", "jpeg_mode", str(self.jpeg_mode))
		config.set("batch", "max_value", str(self.max_value))

		with open(batch_dir + "/batch_images.ini", "w") as configfile:
			config.write(configfile)
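
In both build_batch variants above, the low-resolution input is split with window batch_image_size and stride self.stride, while the interpolated and true (high-resolution) images use the same window and stride multiplied by scale, so the i-th LR patch and the i-th HR patch cover the same region. A quick sanity check of that correspondence, using hypothetical values rather than the project's actual flags:

# Hypothetical values, for illustration only.
scale = 2
batch_image_size = 32                            # LR window
stride = 16                                      # LR stride
output_window_size = batch_image_size * scale    # 64, HR window
output_window_stride = stride * scale            # 32, HR stride

def patches_along(length, window, step):
    # Number of full windows that fit along one image axis.
    return 0 if length < window else (length - window) // step + 1

# A 120x160 LR image yields the same patch grid as its 240x320 HR counterpart.
assert patches_along(120, batch_image_size, stride) == patches_along(240, output_window_size, output_window_stride)
assert patches_along(160, batch_image_size, stride) == patches_along(320, output_window_size, output_window_stride)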
Example #4
    def load_datasets(self, data_dir, batch_dir, batch_image_size, stride_size=0):
        """ build input patch images and loads as a datasets
        Opens image directory as a datasets.
        Each images are splitted into patch images and converted to input image. Since loading
        (especially from PNG/JPG) and building input-LR images needs much computation in the
        training phase, building pre-processed images makes training much faster. However, images
        are limited by divided grids.
        """

        batch_dir += "/scale%d" % self.scale

        self.train = loader.BatchDataSets(self.scale, batch_dir, batch_image_size, stride_size, channels=self.channels,
                                          resampling_method=self.resampling_method)
        if not self.train.is_batch_exist():
            util.make_dir(batch_dir)
            util.clean_dir(batch_dir)
            util.make_dir(batch_dir + "/" + INPUT_IMAGE_DIR)
            util.make_dir(batch_dir + "/" + INTERPOLATED_IMAGE_DIR)
            util.make_dir(batch_dir + "/" + TRUE_IMAGE_DIR)
            self.train.build_batch_threaded(data_dir, batch_dir, self.threads)
        else:
            self.train.load_batch_counts()
        self.train.load_all_batch_images(self.threads)
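
util.get_split_images itself is not shown in these listings. A minimal numpy sketch of the kind of grid split the docstring above describes, assuming it extracts window x window patches at a fixed stride and stacks them, returning None when the image is smaller than the window (as the callers above expect); the real helper may differ:

import numpy as np

def split_into_patches(image, window, stride):
    # image: (H, W, C) array. Returns (N, window, window, C), or None if the image is too small.
    h, w = image.shape[:2]
    if h < window or w < window:
        return None
    patches = [image[y:y + window, x:x + window]
               for y in range(0, h - window + 1, stride)
               for x in range(0, w - window + 1, stride)]
    return np.stack(patches)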
Example #5
    def __init__(self, flags, model_name=""):

        super().__init__(flags)

        # Model Parameters
        self.layers = flags.layers
        self.filters = flags.filters
        self.min_filters = min(flags.filters, flags.min_filters)
        self.filters_decay_gamma = flags.filters_decay_gamma
        self.use_nin = flags.use_nin
        self.nin_filters = flags.nin_filters
        self.nin_filters2 = flags.nin_filters2
        self.reconstruct_layers = max(flags.reconstruct_layers, 1)
        self.reconstruct_filters = flags.reconstruct_filters
        self.resampling_method = BICUBIC_METHOD_STRING
        self.pixel_shuffler = flags.pixel_shuffler
        self.self_ensemble = flags.self_ensemble

        # Training Parameters
        self.l2_decay = flags.l2_decay
        self.optimizer = flags.optimizer
        self.beta1 = flags.beta1
        self.beta2 = flags.beta2
        self.momentum = flags.momentum
        self.batch_num = flags.batch_num
        self.batch_image_size = flags.batch_image_size
        if flags.stride_size == 0:
            self.stride_size = flags.batch_image_size // 2
        else:
            self.stride_size = flags.stride_size
        self.clipping_norm = flags.clipping_norm

        # Learning Rate Control for Training
        self.initial_lr = flags.initial_lr
        self.lr_decay = flags.lr_decay
        self.lr_decay_epoch = flags.lr_decay_epoch

        # Dataset or Others
        self.dataset = flags.dataset
        self.test_dataset = flags.test_dataset
        self.training_image_count = max(
            1, (flags.training_images // flags.batch_num)) * flags.batch_num
        self.train = None
        self.test = None

        # Image Processing Parameters
        self.scale = flags.scale
        self.max_value = flags.max_value
        self.channels = flags.channels
        self.jpeg_mode = flags.jpeg_mode
        self.output_channels = 1

        # Environment (directory names should not contain a trailing '/')
        self.batch_dir = flags.batch_dir

        # initialize variables
        self.name = self.get_model_name(model_name)
        self.total_epochs = 0
        lr = self.initial_lr
        while lr > flags.end_lr:
            self.total_epochs += self.lr_decay_epoch
            lr *= self.lr_decay

        # initialize environment
        util.make_dir(self.checkpoint_dir)
        util.make_dir(flags.graph_dir)
        util.make_dir(self.tf_log_dir)
        if flags.initialise_tf_log:
            util.clean_dir(self.tf_log_dir)
        util.set_logging(flags.log_filename,
                         stream_log_level=logging.INFO,
                         file_log_level=logging.INFO,
                         tf_log_level=tf.logging.WARN)
        logging.info("\nDCSCN v2-------------------------------------")
        logging.info("%s [%s]" % (util.get_now_date(), self.name))

        self.init_train_step()
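
The short while-loop in these constructors derives total_epochs from a step-decay schedule: the learning rate is multiplied by lr_decay every lr_decay_epoch epochs until it falls to end_lr. The same number can be obtained in closed form; a sketch with illustrative values (not the project's defaults):

import math

# Hypothetical schedule values, for illustration only.
initial_lr, end_lr, lr_decay, lr_decay_epoch = 2e-3, 1e-5, 0.5, 10

# Loop form, as in the constructors above.
total_epochs, lr = 0, initial_lr
while lr > end_lr:
    total_epochs += lr_decay_epoch
    lr *= lr_decay

# Closed form: the number of decay steps needed to reach end_lr.
decay_steps = math.ceil(math.log(end_lr / initial_lr) / math.log(lr_decay))
assert total_epochs == decay_steps * lr_decay_epoch  # 80 epochs with these values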
Example #6
    def __init__(self, flags, model_name=""):

        super().__init__(flags)

        # Model Parameters
        self.scale = flags.scale
        self.layers = flags.layers
        self.filters = flags.filters
        self.min_filters = min(flags.filters, flags.min_filters)
        self.filters_decay_gamma = flags.filters_decay_gamma
        self.use_nin = flags.use_nin
        self.nin_filters = flags.nin_filters
        self.nin_filters2 = flags.nin_filters2
        self.reconstruct_layers = max(flags.reconstruct_layers, 1)
        self.reconstruct_filters = flags.reconstruct_filters
        self.resampling_method = flags.resampling_method
        self.pixel_shuffler = flags.pixel_shuffler
        self.pixel_shuffler_filters = flags.pixel_shuffler_filters
        self.self_ensemble = flags.self_ensemble
        self.depthwise_seperable = flags.depthwise_seperable
        self.bottleneck = flags.bottleneck

        # Training Parameters
        self.l2_decay = flags.l2_decay
        self.optimizer = flags.optimizer
        self.beta1 = flags.beta1
        self.beta2 = flags.beta2
        self.epsilon = flags.epsilon
        self.momentum = flags.momentum
        self.batch_num = flags.batch_num
        self.batch_image_size = flags.batch_image_size
        if flags.stride_size == 0:
            self.stride_size = flags.batch_image_size // 2
        else:
            self.stride_size = flags.stride_size
        self.clipping_norm = flags.clipping_norm
        self.use_l1_loss = flags.use_l1_loss

        # Learning Rate Control for Training
        self.initial_lr = flags.initial_lr
        self.lr_decay = flags.lr_decay
        self.lr_decay_epoch = flags.lr_decay_epoch

        # Dataset or Others
        self.training_images = int(
            math.ceil(flags.training_images / flags.batch_num) *
            flags.batch_num)
        self.train = None
        self.test = None
        self.gpu_device_id = flags.gpu_device_id

        # Image Processing Parameters
        self.max_value = flags.max_value
        self.channels = flags.channels
        self.output_channels = 1
        self.psnr_calc_border_size = flags.psnr_calc_border_size
        if self.psnr_calc_border_size < 0:
            self.psnr_calc_border_size = self.scale
        self.input_image_width = flags.input_image_width
        self.input_image_height = flags.input_image_height

        # Environment (directory names should not contain a trailing '/')
        self.batch_dir = flags.batch_dir

        # initialize variables
        self.name = self.get_model_name(model_name,
                                        name_postfix=flags.name_postfix)
        self.total_epochs = 0
        lr = self.initial_lr
        while lr > flags.end_lr:
            self.total_epochs += self.lr_decay_epoch
            lr *= self.lr_decay

        # initialize environment
        util.make_dir(self.checkpoint_dir)
        util.make_dir(flags.graph_dir)
        util.make_dir(self.tf_log_dir)
        if flags.initialize_tf_log:
            util.clean_dir(self.tf_log_dir)
        util.set_logging(flags.log_filename,
                         stream_log_level=logging.INFO,
                         file_log_level=logging.INFO,
                         tf_log_level=tf.logging.WARN)
        logging.info("\nDCSCN v2-------------------------------------")
        logging.info("%s [%s]" % (util.get_now_date(), self.name))

        self.init_train_step()
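
psnr_calc_border_size above controls how many border pixels are cropped from each side before PSNR is measured (defaulting to a scale-dependent value when negative), which is the usual convention in super-resolution evaluation. A minimal numpy sketch of PSNR with such a border crop, assuming images in the [0, max_value] range; the project's own evaluation code may differ in detail:

import numpy as np

def psnr_with_border(true_img, output_img, border, max_value=255.0):
    # Crop `border` pixels on every side, then compute PSNR in dB.
    if border > 0:
        true_img = true_img[border:-border, border:-border]
        output_img = output_img[border:-border, border:-border]
    mse = np.mean((true_img.astype(np.float64) - output_img.astype(np.float64)) ** 2)
    if mse == 0:
        return float("inf")
    return 10.0 * np.log10((max_value ** 2) / mse)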
Example #7
    def __init__(self, flags, model_name=""):

        # Model Parameters
        self.filters = flags.filters
        self.min_filters = flags.min_filters
        self.nin_filters = flags.nin_filters
        self.nin_filters2 = flags.nin_filters2 if flags.nin_filters2 != 0 else flags.nin_filters // 2
        self.cnn_size = flags.cnn_size
        self.last_cnn_size = flags.last_cnn_size
        self.cnn_stride = 1
        self.layers = flags.layers
        self.nin = flags.nin
        self.bicubic_init = flags.bicubic_init
        self.dropout = flags.dropout
        self.activator = flags.activator
        self.filters_decay_gamma = flags.filters_decay_gamma

        # Training Parameters
        self.initializer = flags.initializer
        self.weight_dev = flags.weight_dev
        self.l2_decay = flags.l2_decay
        self.optimizer = flags.optimizer
        self.beta1 = flags.beta1
        self.beta2 = flags.beta2
        self.momentum = flags.momentum
        self.batch_num = flags.batch_num
        self.batch_image_size = flags.batch_image_size
        if flags.stride_size == 0:
            self.stride_size = flags.batch_image_size // 2
        else:
            self.stride_size = flags.stride_size

        # Learning Rate Control for Training
        self.initial_lr = flags.initial_lr
        self.lr_decay = flags.lr_decay
        self.lr_decay_epoch = flags.lr_decay_epoch

        # Dataset or Others
        self.dataset = flags.dataset
        self.test_dataset = flags.test_dataset

        # Image Processing Parameters
        self.scale = flags.scale
        self.max_value = flags.max_value
        self.channels = flags.channels
        self.jpeg_mode = flags.jpeg_mode
        self.output_channels = self.scale * self.scale

        # Environment (directory names should not contain a trailing '/')
        self.checkpoint_dir = flags.checkpoint_dir
        self.tf_log_dir = flags.tf_log_dir

        # Debugging or Logging
        self.debug = flags.debug
        self.save_loss = flags.save_loss
        self.save_weights = flags.save_weights
        self.save_images = flags.save_images
        self.save_images_num = flags.save_images_num
        self.log_weight_image_num = 32

        # initialize variables
        self.name = self.get_model_name(model_name)
        self.batch_input = self.batch_num * [None]
        self.batch_input_quad = self.batch_num * [None]
        self.batch_true_quad = self.batch_num * [None]
        self.receptive_fields = 2 * self.layers + self.cnn_size - 2
        self.complexity = 0

        # initialize environment
        util.make_dir(self.checkpoint_dir)
        util.make_dir(flags.graph_dir)
        util.make_dir(self.tf_log_dir)
        if flags.initialise_tf_log:
            util.clean_dir(self.tf_log_dir)
        util.set_logging(flags.log_filename,
                         stream_log_level=logging.INFO,
                         file_log_level=logging.INFO,
                         tf_log_level=tf.logging.WARN)
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        self.sess = tf.InteractiveSession(config=config)
        self.init_train_step()

        logging.info("\nDCSCN -------------------------------------")
        logging.info("%s [%s]" % (util.get_now_date(), self.name))