def build_optimizer(self):
    """Create the MSE loss, optional L2 weight decay, the TensorBoard
    summaries and the training op for this network."""
    self.lr_input = tf.placeholder(tf.float32, shape=[], name="LearningRate")

    residual = self.y_ - self.y
    mean_sq_error = tf.reduce_mean(tf.square(residual), name="mse")
    if self.debug:
        # Wrap the tensor so its value is printed on every evaluation.
        mean_sq_error = tf.Print(mean_sq_error, [mean_sq_error], message="MSE: ")

    total_loss = mean_sq_error
    if self.l2_decay > 0:
        decay_term = self.l2_decay * tf.add_n([tf.nn.l2_loss(w) for w in self.weights])
        total_loss += decay_term
        if self.save_loss:
            tf.summary.scalar("l2_loss/" + self.name, decay_term)

    if self.save_loss:
        tf.summary.scalar("test_PSNR/" + self.name, self.get_psnr_tensor(mean_sq_error))
        tf.summary.scalar("test_loss/" + self.name, total_loss)

    self.loss = total_loss
    self.mse = mean_sq_error
    self.training_optimizer = self.add_optimizer_op(total_loss, self.lr_input)
    util.print_num_of_total_parameters(output_detail=True)
def main(not_parsed_args):
    """Entry point: train the LFFN model FLAGS.tests times and log the
    per-trial and average MSE / PSNR.

    Args:
        not_parsed_args: leftover argv entries; anything beyond the program
            name aborts with a message.
    """
    if len(not_parsed_args) > 1:
        print("Unknown args:%s" % not_parsed_args)
        # Fix: builtin exit() is only guaranteed when the site module is
        # loaded; sys.exit() is the supported API (same SystemExit behavior).
        sys.exit()

    model = LFFN.SuperResolution(FLAGS, model_name=FLAGS.model_name)
    model.build_graph()
    model.build_optimizer()
    model.build_summary_saver()

    logging.info("\n" + str(sys.argv))
    logging.info("Test Data:" + FLAGS.test_dataset + " Training Data:" + FLAGS.train_dir + FLAGS.data_train)
    util.print_num_of_total_parameters(output_to_logging=True)

    total_psnr = total_mse = 0

    for i in range(FLAGS.tests):
        mse = train(model, FLAGS, i)  # begin train
        psnr = util.get_psnr(mse, max_value=FLAGS.max_value)
        total_mse += mse
        total_psnr += psnr

        logging.info("\nTrial(%d) %s" % (i, util.get_now_date()))
        model.print_steps_completed(output_to_logging=True)
        logging.info("MSE:%f, PSNR:%f\n" % (mse, psnr))

    if FLAGS.tests > 1:
        logging.info("\n=== Final Average [%s] MSE:%f, PSNR:%f ===" % (
            FLAGS.test_dataset, total_mse / FLAGS.tests, total_psnr / FLAGS.tests))

    model.copy_log_to_archive("archive")
def main(not_parsed_args):
    """Entry point: train the DCSCN model FLAGS.tests times, evaluate every
    image in the test set after each trial, and log per-trial and overall
    average MSE / PSNR.

    Args:
        not_parsed_args: leftover argv entries; anything beyond the program
            name aborts with a message.
    """
    if len(not_parsed_args) > 1:
        print("Unknown args:%s" % not_parsed_args)
        # Fix: sys.exit() is the supported API; builtin exit() exists only
        # when the site module is loaded.
        sys.exit()

    model = DCSCN.SuperResolution(FLAGS, model_name=FLAGS.model_name)
    model.train = model.load_dynamic_datasets(
        FLAGS.data_dir + "/" + FLAGS.dataset, FLAGS.batch_image_size, FLAGS.stride_size)
    model.test = model.load_datasets(
        FLAGS.data_dir + "/" + FLAGS.test_dataset,
        FLAGS.batch_dir + "/" + FLAGS.test_dataset,
        FLAGS.batch_image_size, FLAGS.stride_size)
    model.build_graph()
    model.build_optimizer()
    model.build_summary_saver()

    logging.info("\n" + str(sys.argv))
    logging.info("Test Data:" + FLAGS.test_dataset + " Training Data:" + FLAGS.dataset)

    final_mse = final_psnr = 0
    test_filenames = util.get_files_in_directory(FLAGS.data_dir + "/" + FLAGS.test_dataset)

    for i in range(FLAGS.tests):
        train(model, FLAGS, i)

        total_psnr = total_mse = 0
        for filename in test_filenames:
            # BUG FIX: the original wrote "output=i is (FLAGS.tests - 1)".
            # Identity ("is") comparison of ints only works by accident for
            # small interned ints; "==" is the correct test for "last trial"
            # (output images are only written on the final trial).
            mse = model.do_for_evaluate(
                filename, FLAGS.output_dir, output=i == (FLAGS.tests - 1), print_console=False)
            total_mse += mse
            total_psnr += util.get_psnr(mse, max_value=FLAGS.max_value)

        logging.info("\nTrial(%d) %s" % (i, util.get_now_date()))
        model.print_steps_completed(output_to_logging=True)
        logging.info("MSE:%f, PSNR:%f\n" % (
            total_mse / len(test_filenames), total_psnr / len(test_filenames)))

        final_mse += total_mse
        final_psnr += total_psnr

    logging.info("=== summary [%d] %s [%s] ===" % (FLAGS.tests, model.name, util.get_now_date()))
    util.print_num_of_total_parameters(output_to_logging=True)

    n = len(test_filenames) * FLAGS.tests
    logging.info("\n=== Final Average [%s] MSE:%f, PSNR:%f ===" % (
        FLAGS.test_dataset, final_mse / n, final_psnr / n))

    model.copy_log_to_archive("archive")
def main(not_parsed_args):
    """Entry point: train the multi-image (MISR) DCSCN model FLAGS.tests
    times and log per-trial and average PSNR / SSIM.

    Args:
        not_parsed_args: leftover argv entries; anything beyond the program
            name aborts with a message.

    Raises:
        NotImplementedError: if FLAGS.build_batch is set (pre-built batches
            are not supported for MISR training).
    """
    if len(not_parsed_args) > 1:
        print("Unknown args:%s" % not_parsed_args)
        # Fix: sys.exit() is the supported API; builtin exit() exists only
        # when the site module is loaded.
        sys.exit()

    model = DCSCN.SuperResolution(FLAGS, model_name=FLAGS.model_name, is_module_training=True)

    if FLAGS.build_batch:
        # Not implemented for MISR
        logging.error("'build_batch' not implemented for MISR")
        raise NotImplementedError
    else:
        model.load_dynamic_datasets_misr(
            data_dir=FLAGS.data_dir,
            batch_image_size=FLAGS.batch_image_size,
            dataset_name=FLAGS.dataset)

    model.build_graph()
    model.build_optimizer()
    model.build_summary_saver()

    logging.info("\n" + str(sys.argv))
    logging.info("Test Data:" + FLAGS.test_dataset + " Training Data:" + FLAGS.dataset)
    util.print_num_of_total_parameters(output_to_logging=True)

    total_psnr = total_ssim = 0

    for i in range(FLAGS.tests):
        # train_misr returns a third value (cpsnr) that is not used here;
        # bind it to _ to make that explicit.
        psnr, ssim, _ = train_misr(model, FLAGS, i)
        total_psnr += psnr
        total_ssim += ssim

        logging.info("\nTrial(%d) %s" % (i, util.get_now_date()))
        model.print_steps_completed(output_to_logging=True)
        logging.info("PSNR:%f, SSIM:%f\n" % (psnr, ssim))

    if FLAGS.tests > 1:
        logging.info("\n=== Final Average [%s] PSNR:%f, SSIM:%f ===" % (
            FLAGS.test_dataset, total_psnr / FLAGS.tests, total_ssim / FLAGS.tests))

    model.copy_log_to_archive("archive")
def build_optimizer(self):
    """Build the loss function and the training op.

    Original note: "We use 6+scale as a border and we don't calculate MSE
    on the border." NOTE(review): no cropping is performed in this method,
    so the border handling presumably happens elsewhere — confirm.
    """
    self.lr_input = tf.placeholder(tf.float32, shape=[], name="LearningRate")

    diff = tf.subtract(self.y_, self.y, "diff")

    # The MSE tensor is always built; only the training image loss switches
    # between L1 (mean absolute error) and L2 (the MSE itself). Hoisting
    # self.mse out of both branches removes the duplicated statement.
    self.mse = tf.reduce_mean(tf.square(diff, name="diff_square"), name="mse")
    if self.use_l1_loss:
        self.image_loss = tf.reduce_mean(tf.abs(diff, name="diff_abs"), name="image_loss")
    else:
        self.image_loss = tf.identity(self.mse, name="image_loss")

    if self.l2_decay > 0:
        l2_norm_losses = [tf.nn.l2_loss(w) for w in self.Weights]
        l2_norm_loss = self.l2_decay * tf.add_n(l2_norm_losses)
        if self.enable_log:
            tf.summary.scalar("L2WeightDecayLoss/" + self.name, l2_norm_loss)
        self.loss = self.image_loss + l2_norm_loss
    else:
        self.loss = self.image_loss

    if self.enable_log:
        tf.summary.scalar("Loss/" + self.name, self.loss)

    if self.batch_norm:
        # Batch-norm moving-average updates live in UPDATE_OPS; they must be
        # attached to the train op or they would never run.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            self.training_optimizer = self.add_optimizer_op(
                self.loss, self.lr_input)
    else:
        self.training_optimizer = self.add_optimizer_op(
            self.loss, self.lr_input)

    util.print_num_of_total_parameters(output_detail=True)
def build_optimizer(self):
    """Build the MSE loss (plus optional L2 weight decay) and the training op.

    NOTE(review): an earlier revision cropped a border off the diff before
    computing the MSE (the crop code was commented out here); as written,
    the MSE covers the full patch. The old docstring still claimed border
    cropping — corrected to match the code.
    """
    self.lr_input = tf.placeholder(tf.float32, shape=[], name="LearningRate")

    diff = self.y_ - self.y
    self.mse = tf.reduce_mean(tf.square(diff), name="mse")
    loss = self.mse

    if self.l2_decay > 0:
        l2_losses = [tf.nn.l2_loss(w) for w in self.Weights]
        l2_loss = self.l2_decay * tf.add_n(l2_losses)
        if self.save_loss:
            tf.summary.scalar("loss_l2/" + self.name, l2_loss)
        loss += l2_loss

    if self.save_loss:
        tf.summary.scalar("loss/" + self.name, loss)

    self.loss = loss

    if self.batch_norm:
        # Batch-norm moving-average updates live in UPDATE_OPS; they must be
        # attached to the train op or they would never run.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            self.training_optimizer = self.add_optimizer_op(
                loss, self.lr_input)
    else:
        self.training_optimizer = self.add_optimizer_op(
            loss, self.lr_input)

    util.print_num_of_total_parameters(output_detail=True)
def build_optimizer(self):
    """Build the mean-absolute-error loss and the training op."""
    self.lr_input = tf.placeholder(tf.float32, shape=[], name="LearningRate")

    diff = self.y_ - self.y
    # BUG FIX: this tensor is the mean absolute error but its op was named
    # "mse", which is misleading in graph dumps and TensorBoard. Renamed to
    # "mae". If any external code fetched the tensor by the old name
    # ("mse:0"), it must be updated to match.
    self.mae = tf.reduce_mean(tf.abs(diff), name="mae")
    self.loss = self.mae

    if self.save_loss:
        tf.summary.scalar("loss/" + self.name, self.loss)

    self.training_optimizer = self.add_optimizer_op(
        self.loss, self.lr_input)
    util.print_num_of_total_parameters(output_detail=True)
def main(not_parsed_args):
    """Entry point: train the DCSCN model FLAGS.tests times and log the
    per-trial and average PSNR / SSIM.

    Args:
        not_parsed_args: leftover argv entries; anything beyond the program
            name aborts with a message.
    """
    if len(not_parsed_args) > 1:
        print("Unknown args:%s" % not_parsed_args)
        # Fix: sys.exit() is the supported API; builtin exit() exists only
        # when the site module is loaded.
        sys.exit()

    model = DCSCN.SuperResolution(FLAGS, model_name=FLAGS.model_name)

    # script allows you to split training images into batches in advance.
    if FLAGS.build_batch:
        model.load_datasets(FLAGS.data_dir + "/" + FLAGS.dataset,
                            FLAGS.batch_dir + "/" + FLAGS.dataset,
                            FLAGS.batch_image_size, FLAGS.stride_size)
    else:
        model.load_dynamic_datasets(FLAGS.data_dir + "/" + FLAGS.dataset, FLAGS.batch_image_size)

    model.build_graph()
    model.build_optimizer()
    model.build_summary_saver()

    logging.info("\n" + str(sys.argv))
    logging.info("Test Data:" + FLAGS.test_dataset + " Training Data:" + FLAGS.dataset)
    util.print_num_of_total_parameters(output_to_logging=True)

    total_psnr = total_ssim = 0

    for i in range(FLAGS.tests):
        psnr, ssim = train(model, FLAGS, i)
        total_psnr += psnr
        total_ssim += ssim

        logging.info("\nTrial(%d) %s" % (i, util.get_now_date()))
        model.print_steps_completed(output_to_logging=True)
        logging.info("PSNR:%f, SSIM:%f\n" % (psnr, ssim))

    if FLAGS.tests > 1:
        logging.info("\n=== Final Average [%s] PSNR:%f, SSIM:%f ===" % (
            FLAGS.test_dataset, total_psnr / FLAGS.tests, total_ssim / FLAGS.tests))

    model.copy_log_to_archive("archive")
def build_optimizer(self):
    """Build the loss function and the training op.

    Original note: "We use 6+scale as a border and we don't calculate MSE
    on the border." NOTE(review): no cropping is performed in this method,
    so the border handling presumably happens elsewhere — confirm.
    """
    self.lr_input = tf.placeholder(tf.float32, shape=[], name="LearningRate")

    diff = self.y_ - self.y

    # The MSE tensor is always built; only the training loss switches
    # between L1 (mean absolute error) and L2 (the MSE itself). Hoisting
    # self.mse out of both branches removes the duplicated statement.
    self.mse = tf.reduce_mean(tf.square(diff), name="mse")
    if self.use_l1_loss:
        self.loss = tf.reduce_mean(tf.abs(diff), name="loss")
    else:
        self.loss = self.mse

    self.total_loss = self.loss
    if self.l2_decay > 0:
        l2_losses = [tf.nn.l2_loss(w) for w in self.Weights]
        l2_loss = self.l2_decay * tf.add_n(l2_losses)
        if self.save_loss:
            tf.summary.scalar("loss_l2/" + self.name, l2_loss)
        self.total_loss += l2_loss

    if self.save_loss:
        tf.summary.scalar("loss/" + self.name, self.total_loss)

    if self.batch_norm:
        # Batch-norm moving-average updates live in UPDATE_OPS; they must be
        # attached to the train op or they would never run.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            self.training_optimizer = self.add_optimizer_op(self.total_loss, self.lr_input)
    else:
        self.training_optimizer = self.add_optimizer_op(self.total_loss, self.lr_input)

    util.print_num_of_total_parameters(output_detail=True)