def print_status(self, mse, psnr, log=False):
    """Report validation MSE/PSNR, training-loss progress and an ETA.

    Args:
        mse: validation mean squared error.
        psnr: validation PSNR.
        log: when True emit through logging.info, otherwise print to stdout.
    """
    if self.step == 0:
        logging.info("Initial MSE:%f PSNR:%f" % (mse, psnr))
        return

    processing_time = (time.time() - self.start_time) / self.step

    # The training metric shown depends on which loss the model optimizes.
    if self.use_l1_loss:
        line_a = "%s Step:%s MSE:%f PSNR:%f (Training Loss:%0.3f)" % (
            util.get_now_date(), "{:,}".format(self.step), mse, psnr,
            self.training_loss_sum / self.training_step)
    else:
        line_a = "%s Step:%s MSE:%f PSNR:%f (Training PSNR:%0.3f)" % (
            util.get_now_date(), "{:,}".format(self.step), mse, psnr,
            self.training_psnr_sum / self.training_step)

    # ETA = sec/step * remaining epochs * steps per epoch.
    steps_per_epoch = self.training_images // self.batch_num
    remaining = processing_time * (
        self.total_epochs - self.epochs_completed) * steps_per_epoch
    hours, remaining = divmod(remaining, 60 * 60)
    minutes, seconds = divmod(remaining, 60)
    line_b = "Epoch:%d LR:%f (%2.3fsec/step) Estimated:%d:%d:%d" % (
        self.epochs_completed, self.lr, processing_time,
        hours, minutes, seconds)

    emit = logging.info if log else print
    emit(line_a)
    emit(line_b)
def main(not_parsed_args):
    """Train the DCSCN model FLAGS.tests times and report per-trial and
    averaged MSE/PSNR over every image in the test dataset.

    Args:
        not_parsed_args: leftover argv entries from the flag parser;
            anything beyond the program name aborts the run.
    """
    if len(not_parsed_args) > 1:
        print("Unknown args:%s" % not_parsed_args)
        exit()

    model = DCSCN.SuperResolution(FLAGS, model_name=FLAGS.model_name)
    model.train = model.load_dynamic_datasets(
        FLAGS.data_dir + "/" + FLAGS.dataset,
        FLAGS.batch_image_size, FLAGS.stride_size)
    model.test = model.load_datasets(
        FLAGS.data_dir + "/" + FLAGS.test_dataset,
        FLAGS.batch_dir + "/" + FLAGS.test_dataset,
        FLAGS.batch_image_size, FLAGS.stride_size)
    model.build_graph()
    model.build_optimizer()
    model.build_summary_saver()

    logging.info("\n" + str(sys.argv))
    logging.info("Test Data:" + FLAGS.test_dataset +
                 " Training Data:" + FLAGS.dataset)

    final_mse = final_psnr = 0
    test_filenames = util.get_files_in_directory(
        FLAGS.data_dir + "/" + FLAGS.test_dataset)

    for i in range(FLAGS.tests):
        train(model, FLAGS, i)

        total_psnr = total_mse = 0
        for filename in test_filenames:
            # BUG FIX: compare with "==", not "is". The original used
            # identity ("i is (FLAGS.tests - 1)"), which only works for
            # small ints because of CPython's int caching and is wrong
            # in general. Output images only on the last trial.
            mse = model.do_for_evaluate(filename, FLAGS.output_dir,
                                        output=i == (FLAGS.tests - 1),
                                        print_console=False)
            total_mse += mse
            total_psnr += util.get_psnr(mse, max_value=FLAGS.max_value)

        logging.info("\nTrial(%d) %s" % (i, util.get_now_date()))
        model.print_steps_completed(output_to_logging=True)
        logging.info("MSE:%f, PSNR:%f\n" % (total_mse / len(test_filenames),
                                            total_psnr / len(test_filenames)))
        final_mse += total_mse
        final_psnr += total_psnr

    logging.info("=== summary [%d] %s [%s] ===" % (
        FLAGS.tests, model.name, util.get_now_date()))
    util.print_num_of_total_parameters(output_to_logging=True)

    n = len(test_filenames) * FLAGS.tests
    logging.info("\n=== Final Average [%s] MSE:%f, PSNR:%f ===" % (
        FLAGS.test_dataset, final_mse / n, final_psnr / n))

    model.copy_log_to_archive("archive")
def __init__(self, flags, model_name=""):
    """Configure the DCSCN v2 model from parsed command-line flags.

    Args:
        flags: namespace-like object holding the hyper-parameters.
        model_name: optional explicit model name; when empty the name is
            derived from the hyper-parameters by get_model_name().
    """
    super().__init__(flags)

    # Straight pass-through hyper-parameters copied from the flags object.
    for attr in ("scale", "layers", "filters", "filters_decay_gamma",
                 "use_nin", "nin_filters", "nin_filters2",
                 "reconstruct_filters", "pixel_shuffler",
                 "pixel_shuffler_filters", "self_ensemble",
                 "max_value", "channels", "psnr_calc_border_size"):
        setattr(self, attr, getattr(flags, attr))

    # Derived / clamped parameters.
    self.min_filters = min(flags.filters, flags.min_filters)  # never above filters
    self.reconstruct_layers = max(flags.reconstruct_layers, 1)  # at least one
    self.resampling_method = BICUBIC_METHOD_STRING
    self.output_channels = 1
    if self.psnr_calc_border_size < 0:
        # Negative means "auto": trim a (2 + scale)-pixel border for PSNR.
        self.psnr_calc_border_size = 2 + self.scale

    # Remaining state and working directory.
    self.name = self.get_model_name(model_name)
    self.total_epochs = 0
    util.make_dir(self.checkpoint_dir)

    logging.info("\nDCSCN v2-------------------------------------")
    logging.info("%s [%s]" % (util.get_now_date(), self.name))
def __init__(self, flags, model_name=""): super().__init__(flags) # Model Parameters self.scale = flags.scale self.layers = flags.layers self.depth_wise_convolution = flags.depth_wise_convolution self.resampling_method = BICUBIC_METHOD_STRING self.self_ensemble = flags.self_ensemble # Training Parameters self.optimizer = flags.optimizer self.beta1 = flags.beta1 self.beta2 = flags.beta2 self.momentum = flags.momentum self.batch_num = flags.batch_num self.batch_image_size = flags.batch_image_size self.clipping_norm = flags.clipping_norm # Learning Rate Control for Training self.initial_lr = flags.initial_lr self.lr_decay = flags.lr_decay self.lr_decay_epoch = flags.lr_decay_epoch # Dataset or Others self.training_images = int( math.ceil(flags.training_images / flags.batch_num) * flags.batch_num) # Image Processing Parameters self.max_value = flags.max_value self.channels = flags.channels self.output_channels = flags.channels self.psnr_calc_border_size = flags.psnr_calc_border_size if self.psnr_calc_border_size < 0: self.psnr_calc_border_size = 2 + self.scale # initialize variables self.name = self.get_model_name(model_name) self.total_epochs = 0 lr = self.initial_lr while lr > flags.end_lr: self.total_epochs += self.lr_decay_epoch lr *= self.lr_decay # initialize environment util.make_dir(self.checkpoint_dir) util.make_dir(flags.graph_dir) util.make_dir(self.tf_log_dir) if flags.initialize_tf_log: util.clean_dir(self.tf_log_dir) util.set_logging(flags.log_filename, stream_log_level=logging.INFO, file_log_level=logging.INFO, tf_log_level=tf.logging.WARN) logging.info("\nLFFN-------------------------------------") logging.info("%s [%s]" % (util.get_now_date(), self.name)) self.init_train_step()
def main(not_parsed_args):
    """Run FLAGS.tests LFFN training trials and log averaged MSE/PSNR."""
    if len(not_parsed_args) > 1:
        print("Unknown args:%s" % not_parsed_args)
        exit()

    model = LFFN.SuperResolution(FLAGS, model_name=FLAGS.model_name)
    model.build_graph()
    model.build_optimizer()
    model.build_summary_saver()

    logging.info("\n" + str(sys.argv))
    logging.info("Test Data:" + FLAGS.test_dataset +
                 " Training Data:" + FLAGS.train_dir + FLAGS.data_train)
    util.print_num_of_total_parameters(output_to_logging=True)

    total_psnr = total_mse = 0
    for trial in range(FLAGS.tests):
        mse = train(model, FLAGS, trial)  # begin train
        psnr = util.get_psnr(mse, max_value=FLAGS.max_value)
        total_mse += mse
        total_psnr += psnr

        logging.info("\nTrial(%d) %s" % (trial, util.get_now_date()))
        model.print_steps_completed(output_to_logging=True)
        logging.info("MSE:%f, PSNR:%f\n" % (mse, psnr))

    # Only print the cross-trial average when more than one trial ran.
    if FLAGS.tests > 1:
        logging.info("\n=== Final Average [%s] MSE:%f, PSNR:%f ===" % (
            FLAGS.test_dataset,
            total_mse / FLAGS.tests, total_psnr / FLAGS.tests))

    model.copy_log_to_archive("archive")
def print_status(self, mse):
    """Print validation MSE/PSNR; after step 0 also print training progress."""
    psnr = util.get_psnr(mse, max_value=self.max_value)
    if self.step == 0:
        print("Initial MSE:%f PSNR:%f" % (mse, psnr))
        return

    processing_time = (time.time() - self.start_time) / self.step
    print("%s Step:%d MSE:%f PSNR:%f (Training PSNR:%0.3f)" % (
        util.get_now_date(), self.step, mse, psnr,
        self.training_psnr_sum / self.training_step))
    # MinPSNR is the PSNR of the best (lowest) validation MSE seen so far.
    print("Epoch:%d (Step:%s) LR:%f (%2.3fsec/step) MinPSNR:%0.3f" % (
        self.epochs_completed, "{:,}".format(self.step), self.lr,
        processing_time, util.get_psnr(self.min_validation_mse)))
def main(not_parsed_args):
    """Run FLAGS.tests multi-image SR training trials and log averaged
    PSNR/SSIM.

    Args:
        not_parsed_args: leftover argv entries from the flag parser;
            anything beyond the program name aborts the run.
    """
    if len(not_parsed_args) > 1:
        print("Unknown args:%s" % not_parsed_args)
        exit()

    model = DCSCN.SuperResolution(FLAGS, model_name=FLAGS.model_name,
                                  is_module_training=True)

    # Removed the commented-out single-image dataset-loading code that this
    # branch superseded; the MISR loader below is the live path.
    if FLAGS.build_batch:
        # Pre-built batch files are not supported for MISR training.
        logging.error("'build_batch' not implemented for MISR")
        raise NotImplementedError
    else:
        model.load_dynamic_datasets_misr(
            data_dir=FLAGS.data_dir,
            batch_image_size=FLAGS.batch_image_size,
            dataset_name=FLAGS.dataset)

    model.build_graph()
    model.build_optimizer()
    model.build_summary_saver()

    logging.info("\n" + str(sys.argv))
    logging.info("Test Data:" + FLAGS.test_dataset +
                 " Training Data:" + FLAGS.dataset)
    util.print_num_of_total_parameters(output_to_logging=True)

    total_psnr = total_ssim = 0
    for i in range(FLAGS.tests):
        # train_misr returns a third value that this script does not use;
        # name it with a leading underscore to make that explicit.
        psnr, ssim, _cpsnr = train_misr(model, FLAGS, i)
        total_psnr += psnr
        total_ssim += ssim

        logging.info("\nTrial(%d) %s" % (i, util.get_now_date()))
        model.print_steps_completed(output_to_logging=True)
        logging.info("PSNR:%f, SSIM:%f\n" % (psnr, ssim))

    if FLAGS.tests > 1:
        logging.info("\n=== Final Average [%s] PSNR:%f, SSIM:%f ===" %
                     (FLAGS.test_dataset, total_psnr / FLAGS.tests,
                      total_ssim / FLAGS.tests))

    model.copy_log_to_archive("archive")
def main(not_parsed_args):
    """Train DCSCN FLAGS.tests times; log per-trial and averaged PSNR/SSIM."""
    if len(not_parsed_args) > 1:
        print("Unknown args:%s" % not_parsed_args)
        exit()

    model = DCSCN.SuperResolution(FLAGS, model_name=FLAGS.model_name)

    # script allows you to split training images into batches in advance.
    if FLAGS.build_batch:
        model.load_datasets(FLAGS.data_dir + "/" + FLAGS.dataset,
                            FLAGS.batch_dir + "/" + FLAGS.dataset,
                            FLAGS.batch_image_size, FLAGS.stride_size)
    else:
        model.load_dynamic_datasets(FLAGS.data_dir + "/" + FLAGS.dataset,
                                    FLAGS.batch_image_size)

    model.build_graph()
    model.build_optimizer()
    model.build_summary_saver()

    logging.info("\n" + str(sys.argv))
    logging.info("Test Data:" + FLAGS.test_dataset +
                 " Training Data:" + FLAGS.dataset)
    util.print_num_of_total_parameters(output_to_logging=True)

    total_psnr = total_ssim = 0
    for trial in range(FLAGS.tests):
        psnr, ssim = train(model, FLAGS, trial)
        total_psnr += psnr
        total_ssim += ssim

        logging.info("\nTrial(%d) %s" % (trial, util.get_now_date()))
        model.print_steps_completed(output_to_logging=True)
        logging.info("PSNR:%f, SSIM:%f\n" % (psnr, ssim))

    # Only report the cross-trial average when more than one trial ran.
    if FLAGS.tests > 1:
        logging.info("\n=== Final Average [%s] PSNR:%f, SSIM:%f ===" %
                     (FLAGS.test_dataset, total_psnr / FLAGS.tests,
                      total_ssim / FLAGS.tests))

    model.copy_log_to_archive("archive")
def __init__(self, flags, model_name=""):
    """Configure the DCSCN v2 model, datasets and environment from flags.

    Args:
        flags: parsed command-line flag object carrying hyper-parameters.
        model_name: optional explicit model name; when empty a name is
            derived from the hyper-parameters by get_model_name().
    """
    super().__init__(flags)

    # Model Parameters
    self.layers = flags.layers
    self.filters = flags.filters
    # min_filters is clamped so it can never exceed filters.
    self.min_filters = min(flags.filters, flags.min_filters)
    self.filters_decay_gamma = flags.filters_decay_gamma
    self.use_nin = flags.use_nin
    self.nin_filters = flags.nin_filters
    self.nin_filters2 = flags.nin_filters2
    # At least one reconstruction layer is always used.
    self.reconstruct_layers = max(flags.reconstruct_layers, 1)
    self.reconstruct_filters = flags.reconstruct_filters
    self.resampling_method = BICUBIC_METHOD_STRING
    self.pixel_shuffler = flags.pixel_shuffler
    self.self_ensemble = flags.self_ensemble

    # Training Parameters
    self.l2_decay = flags.l2_decay
    self.optimizer = flags.optimizer
    self.beta1 = flags.beta1
    self.beta2 = flags.beta2
    self.momentum = flags.momentum
    self.batch_num = flags.batch_num
    self.batch_image_size = flags.batch_image_size
    # stride_size == 0 means "auto": use half the batch image size.
    if flags.stride_size == 0:
        self.stride_size = flags.batch_image_size // 2
    else:
        self.stride_size = flags.stride_size
    self.clipping_norm = flags.clipping_norm

    # Learning Rate Control for Training
    self.initial_lr = flags.initial_lr
    self.lr_decay = flags.lr_decay
    self.lr_decay_epoch = flags.lr_decay_epoch

    # Dataset or Others
    self.dataset = flags.dataset
    self.test_dataset = flags.test_dataset
    # Round the training image count down to whole batches (minimum one).
    self.training_image_count = max(
        1, (flags.training_images // flags.batch_num)) * flags.batch_num
    self.train = None
    self.test = None

    # Image Processing Parameters
    self.scale = flags.scale
    self.max_value = flags.max_value
    self.channels = flags.channels
    self.jpeg_mode = flags.jpeg_mode
    self.output_channels = 1

    # Environment (all directory names should not contain a trailing '/')
    self.batch_dir = flags.batch_dir

    # initialize variables
    self.name = self.get_model_name(model_name)
    # total_epochs = number of lr_decay_epoch periods it takes for the
    # learning rate to decay from initial_lr down to end_lr.
    self.total_epochs = 0
    lr = self.initial_lr
    while lr > flags.end_lr:
        self.total_epochs += self.lr_decay_epoch
        lr *= self.lr_decay

    # initialize environment
    util.make_dir(self.checkpoint_dir)
    util.make_dir(flags.graph_dir)
    util.make_dir(self.tf_log_dir)
    # NOTE: this flag is spelled "initialise_tf_log" (British spelling)
    # in this variant — do not "fix" it without changing the flag definition.
    if flags.initialise_tf_log:
        util.clean_dir(self.tf_log_dir)
    util.set_logging(flags.log_filename, stream_log_level=logging.INFO,
                     file_log_level=logging.INFO,
                     tf_log_level=tf.logging.WARN)
    logging.info("\nDCSCN v2-------------------------------------")
    logging.info("%s [%s]" % (util.get_now_date(), self.name))
    self.init_train_step()
def __init__(self, flags, model_name=""):
    """Configure the DCSCN v2 model (depthwise/bottleneck variant) from flags.

    Args:
        flags: parsed command-line flag object carrying hyper-parameters.
        model_name: optional explicit model name; when empty a name is
            derived from the hyper-parameters plus flags.name_postfix.
    """
    super().__init__(flags)

    # Model Parameters
    self.scale = flags.scale
    self.layers = flags.layers
    self.filters = flags.filters
    # min_filters is clamped so it can never exceed filters.
    self.min_filters = min(flags.filters, flags.min_filters)
    self.filters_decay_gamma = flags.filters_decay_gamma
    self.use_nin = flags.use_nin
    self.nin_filters = flags.nin_filters
    self.nin_filters2 = flags.nin_filters2
    # At least one reconstruction layer is always used.
    self.reconstruct_layers = max(flags.reconstruct_layers, 1)
    self.reconstruct_filters = flags.reconstruct_filters
    self.resampling_method = flags.resampling_method
    self.pixel_shuffler = flags.pixel_shuffler
    self.pixel_shuffler_filters = flags.pixel_shuffler_filters
    self.self_ensemble = flags.self_ensemble
    self.depthwise_seperable = flags.depthwise_seperable
    self.bottleneck = flags.bottleneck

    # Training Parameters
    self.l2_decay = flags.l2_decay
    self.optimizer = flags.optimizer
    self.beta1 = flags.beta1
    self.beta2 = flags.beta2
    self.epsilon = flags.epsilon
    self.momentum = flags.momentum
    self.batch_num = flags.batch_num
    self.batch_image_size = flags.batch_image_size
    # stride_size == 0 means "auto": use half the batch image size.
    if flags.stride_size == 0:
        self.stride_size = flags.batch_image_size // 2
    else:
        self.stride_size = flags.stride_size
    self.clipping_norm = flags.clipping_norm
    self.use_l1_loss = flags.use_l1_loss

    # Learning Rate Control for Training
    self.initial_lr = flags.initial_lr
    self.lr_decay = flags.lr_decay
    self.lr_decay_epoch = flags.lr_decay_epoch

    # Dataset or Others
    # Round the training image count up to a whole number of batches.
    self.training_images = int(
        math.ceil(flags.training_images / flags.batch_num) * flags.batch_num)
    self.train = None
    self.test = None
    self.gpu_device_id = flags.gpu_device_id

    # Image Processing Parameters
    self.max_value = flags.max_value
    self.channels = flags.channels
    self.output_channels = 1
    self.psnr_calc_border_size = flags.psnr_calc_border_size
    if self.psnr_calc_border_size < 0:
        # Negative means "auto": ignore a scale-pixel border in PSNR.
        self.psnr_calc_border_size = self.scale
    self.input_image_width = flags.input_image_width
    self.input_image_height = flags.input_image_height

    # Environment (all directory names should not contain a trailing '/')
    self.batch_dir = flags.batch_dir

    # initialize variables
    self.name = self.get_model_name(model_name,
                                    name_postfix=flags.name_postfix)
    # total_epochs = number of lr_decay_epoch periods it takes for the
    # learning rate to decay from initial_lr down to end_lr.
    self.total_epochs = 0
    lr = self.initial_lr
    while lr > flags.end_lr:
        self.total_epochs += self.lr_decay_epoch
        lr *= self.lr_decay

    # initialize environment
    util.make_dir(self.checkpoint_dir)
    util.make_dir(flags.graph_dir)
    util.make_dir(self.tf_log_dir)
    if flags.initialize_tf_log:
        util.clean_dir(self.tf_log_dir)
    util.set_logging(flags.log_filename, stream_log_level=logging.INFO,
                     file_log_level=logging.INFO,
                     tf_log_level=tf.logging.WARN)
    logging.info("\nDCSCN v2-------------------------------------")
    logging.info("%s [%s]" % (util.get_now_date(), self.name))
    self.init_train_step()
def __init__(self, flags, model_name=""):
    """Configure the original DCSCN model and open a TensorFlow session.

    Args:
        flags: parsed command-line flag object carrying hyper-parameters.
        model_name: optional explicit model name; when empty a name is
            derived from the hyper-parameters by get_model_name().
    """
    # Model Parameters
    self.filters = flags.filters
    self.min_filters = flags.min_filters
    self.nin_filters = flags.nin_filters
    # nin_filters2 == 0 means "auto": default to half of nin_filters.
    self.nin_filters2 = flags.nin_filters2 if flags.nin_filters2 != 0 \
        else flags.nin_filters // 2
    self.cnn_size = flags.cnn_size
    self.last_cnn_size = flags.last_cnn_size
    self.cnn_stride = 1
    self.layers = flags.layers
    self.nin = flags.nin
    self.bicubic_init = flags.bicubic_init
    self.dropout = flags.dropout
    self.activator = flags.activator
    self.filters_decay_gamma = flags.filters_decay_gamma

    # Training Parameters
    self.initializer = flags.initializer
    self.weight_dev = flags.weight_dev
    self.l2_decay = flags.l2_decay
    self.optimizer = flags.optimizer
    self.beta1 = flags.beta1
    self.beta2 = flags.beta2
    self.momentum = flags.momentum
    self.batch_num = flags.batch_num
    self.batch_image_size = flags.batch_image_size
    # stride_size == 0 means "auto": use half the batch image size.
    if flags.stride_size == 0:
        self.stride_size = flags.batch_image_size // 2
    else:
        self.stride_size = flags.stride_size

    # Learning Rate Control for Training
    self.initial_lr = flags.initial_lr
    self.lr_decay = flags.lr_decay
    self.lr_decay_epoch = flags.lr_decay_epoch

    # Dataset or Others
    self.dataset = flags.dataset
    self.test_dataset = flags.test_dataset

    # Image Processing Parameters
    self.scale = flags.scale
    self.max_value = flags.max_value
    self.channels = flags.channels
    self.jpeg_mode = flags.jpeg_mode
    # One output channel per position in the scale x scale upsampling grid.
    self.output_channels = self.scale * self.scale

    # Environment (all directory names should not contain a trailing '/')
    self.checkpoint_dir = flags.checkpoint_dir
    self.tf_log_dir = flags.tf_log_dir

    # Debugging or Logging
    self.debug = flags.debug
    self.save_loss = flags.save_loss
    self.save_weights = flags.save_weights
    self.save_images = flags.save_images
    self.save_images_num = flags.save_images_num
    self.log_weight_image_num = 32

    # initialize variables
    self.name = self.get_model_name(model_name)
    # Per-batch placeholders, one slot per sample in the batch.
    self.batch_input = self.batch_num * [None]
    self.batch_input_quad = self.batch_num * [None]
    self.batch_true_quad = self.batch_num * [None]
    self.receptive_fields = 2 * self.layers + self.cnn_size - 2
    self.complexity = 0

    # initialize environment
    util.make_dir(self.checkpoint_dir)
    util.make_dir(flags.graph_dir)
    util.make_dir(self.tf_log_dir)
    # NOTE: this flag is spelled "initialise_tf_log" (British spelling)
    # in this variant — do not "fix" it without changing the flag definition.
    if flags.initialise_tf_log:
        util.clean_dir(self.tf_log_dir)
    util.set_logging(flags.log_filename, stream_log_level=logging.INFO,
                     file_log_level=logging.INFO,
                     tf_log_level=tf.logging.WARN)
    # Let TF grow GPU memory on demand instead of grabbing it all upfront.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    self.sess = tf.InteractiveSession(config=config)
    self.init_train_step()

    logging.info("\nDCSCN -------------------------------------")
    logging.info("%s [%s]" % (util.get_now_date(), self.name))