swaps = np.reshape(np.random.randint(0, 2, batch_size), [batch_size, 1])
phone_images = test_data[be:en]
dslr_images = test_answ[be:en]

[enhanced_crops, accuracy_disc, lg, lcon, lcol, ltex, ltv, lpsnr] = sess.run(
    [enhanced, discim_accuracy, loss_generator, loss_content, loss_color,
     loss_texture, loss_tv, loss_psnr],
    feed_dict={phone_: phone_images, dslr_: dslr_images, adv_: swaps})

losses = np.asarray([lg, lcon, lcol, ltex, ltv, lpsnr])
test_losses_gen += losses / num_test_batches
test_accuracy_disc += accuracy_disc / num_test_batches

loss_ssim += MultiScaleSSIM(
    np.reshape(dslr_images * 255, [batch_size, PATCH_HEIGHT, PATCH_WIDTH, 3]),
    enhanced_crops * 255) / num_test_batches

logs_disc = "step %d, %s | discriminator accuracy | train: %.4g, test: %.4g" % \
    (i, phone, train_acc_discrim, test_accuracy_disc)
logs_gen = ("generator losses | train: %.4g, test: %.4g | content: %.4g, "
            "color: %.4g, texture: %.4g, tv: %.4g | psnr: %.4g, ssim: %.4g\n") % \
    (train_loss_gen, test_losses_gen[0][0], test_losses_gen[0][1],
     test_losses_gen[0][2], test_losses_gen[0][3], test_losses_gen[0][4],
     test_losses_gen[0][5], loss_ssim)

print(logs_disc)
print(logs_gen)

# save the results to a log file
logs = open('models/' + phone + '.txt', "a")
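# `MultiScaleSSIM` is not defined in these excerpts; it is assumed to be the
# NumPy MS-SSIM implementation bundled with DPED-style repos, taking batched
# NHWC arrays on the [0, 255] scale. If it is unavailable, a rough
# single-scale SSIM stand-in (not true MS-SSIM) can be sketched with
# scikit-image (>= 0.19 for the `channel_axis` argument); `ssim_batch` is a
# hypothetical helper name, not part of the original code:
import numpy as np
from skimage.metrics import structural_similarity

def ssim_batch(batch_a, batch_b):
    # Mean single-scale SSIM over two [N, H, W, 3] arrays in [0, 255].
    return float(np.mean([
        structural_similarity(a, b, channel_axis=-1, data_range=255.0)
        for a, b in zip(batch_a, batch_b)]))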
def main(args):
    setproctitle.setproctitle('hdrnet_run')

    inputs = get_input_list(args.input)

    # -------- Load params ----------------------------------------------------
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    checkpoint_path = tf.train.latest_checkpoint(args.checkpoint_dir)
    if checkpoint_path is None:
        log.error('Could not find a checkpoint in {}'.format(args.checkpoint_dir))
        return

    # -------- Setup graph -----------------------------------------------------
    tf.reset_default_graph()
    t_fullres_input = tf.placeholder(tf.float32, (1, width, height, 3))
    target = tf.placeholder(tf.float32, (1, width, height, 3))
    t_lowres_input = utils.blur(5, t_fullres_input)
    img_low = tf.image.resize_images(
        t_lowres_input, [width // args.scale, height // args.scale],
        method=tf.image.ResizeMethod.BICUBIC)
    img_high = utils.Getfilter(5, t_fullres_input)

    with tf.variable_scope('inference'):
        prediction = models.Resnet(img_low, img_high, t_fullres_input)
    ssim = MultiScaleSSIM(target, prediction)
    psnr = metrics.psnr(target, prediction)

    saver = tf.train.Saver()
    start = time.time()

    with tf.Session(config=config) as sess:
        log.info('Restoring weights from {}'.format(checkpoint_path))
        saver.restore(sess, checkpoint_path)

        SSIM = 0
        PSNR = 0
        count = 0
        for idx, input_path in enumerate(inputs):
            target_path = args.target + input_path.split('/')[2]
            log.info("Processing {} -> {}".format(input_path, target_path))

            im_input = cv2.imread(input_path, -1)  # -1 means read as is, no conversions
            im_target = cv2.imread(target_path, -1)
            if im_input.shape[2] == 4:
                log.info("Input {} has 4 channels, dropping alpha".format(input_path))
                im_input = im_input[:, :, :3]
                im_target = im_target[:, :, :3]

            im_input = np.flip(im_input, 2)  # OpenCV reads BGR, convert back to RGB
            im_target = np.flip(im_target, 2)

            im_input = skimage.img_as_float(im_input)[np.newaxis, :, :, :]
            im_target = skimage.img_as_float(im_target)[np.newaxis, :, :, :]

            feed_dict = {t_fullres_input: im_input, target: im_target}
            ssim1, psnr1 = sess.run([ssim, psnr], feed_dict=feed_dict)
            SSIM += ssim1
            PSNR += psnr1

            count = idx + 1
            if count >= 1000:  # evaluate at most 1000 images
                break

        print("SSIM: %s, PSNR: %s" % (SSIM / count, PSNR / count))

    end = time.time()
    print("Elapsed: %s seconds" % str(end - start))
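# `get_input_list` is not shown in this excerpt. A minimal sketch, assuming it
# expands a directory into its image files and passes a single path through;
# the extension set and sorting are assumptions:
import os
import re

def get_input_list(path):
    regex = re.compile(r".*\.(png|jpeg|jpg|tif|tiff)$", re.IGNORECASE)
    if os.path.isdir(path):
        return sorted(os.path.join(path, f)
                      for f in os.listdir(path) if regex.match(f))
    return [path]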
k_imgs.append(k_img)

k_imgs = np.array(k_imgs)
k_imgs = np.expand_dims(k_imgs, axis=3)
k_imgs = np.concatenate((k_imgs, k_imgs, k_imgs), axis=-1)  # grayscale -> 3 channels
k_imgs = np.float16(k_imgs) / 255

enhanced4d = []  # collects one batch of enhanced slices per iteration
with tf.Session() as sess:
    saver = tf.train.Saver()
    saver.restore(sess, "saved_parameter/artery/blackberry_iteration_9000.ckpt")

    real_count = len(k_imgs) / batch_size
    count = int(np.ceil(real_count))
    for i in range(count):
        if 0 < (real_count - i) < 1:  # last, partial batch
            batch_data = k_imgs[i * batch_size:k_imgs.shape[0]]
        else:
            batch_data = k_imgs[i * batch_size:(i + 1) * batch_size]
        enhanced3d = sess.run(enhanced, feed_dict={x: batch_data})
        enhanced4d.append(enhanced3d)

enhanced4d = np.concatenate(enhanced4d, axis=0)  # works for any number of batches

ssim = MultiScaleSSIM(originals, enhanced4d * 255)
# PSNR on the first volume only, on the [0, 255] scale
loss_mse = np.mean(np.power(originals[0].flatten() - enhanced4d[0].flatten() * 255, 2))
loss_psnr = 10 * np.log10(255.0 ** 2 / loss_mse)
print(loss_psnr)
print(ssim)
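# The PSNR above is the standard 10 * log10(MAX^2 / MSE) with MAX = 255.
# A small reusable version of the same computation (a sketch, not part of the
# original script; `psnr_255` is a hypothetical name):
def psnr_255(reference, estimate):
    # Both inputs are arrays on the [0, 255] scale.
    mse = np.mean((np.float64(reference) - np.float64(estimate)) ** 2)
    return 10 * np.log10(255.0 ** 2 / mse)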
image_phone = np.reshape(
    image_phone, [1, image_phone.shape[0], image_phone.shape[1], 3]) / 255
image_dslr = np.reshape(
    image_dslr, [1, image_dslr.shape[0], image_dslr.shape[1], 3]) / 255

[psnr, enhanced] = sess.run([psnr_, out_],
                            feed_dict={x_: image_phone, y_: image_dslr})

psnr_score += psnr / num_val_images
ssim_score += MultiScaleSSIM(image_dslr * 255, enhanced * 255) / num_val_images

print("\r\r\r")
print("Scores | PSNR: %.4g, MS-SSIM: %.4g" % (psnr_score, ssim_score))

PSNR_solution = psnr_score
SSIM_solution = ssim_score

print("\n-------------------------------------\n")
sess.close()

if compute_running_time:

    ##############################
    # 3. Computing running time  #
    ##############################

    print("Evaluating model speed")
def main(args, data_params):
    procname = os.path.basename(args.checkpoint_dir)
    # setproctitle.setproctitle('hdrnet_{}'.format(procname))

    log.info('Preparing summary and checkpoint directory {}'.format(
        args.checkpoint_dir))
    if not os.path.exists(args.checkpoint_dir):
        os.makedirs(args.checkpoint_dir)

    tf.set_random_seed(1234)  # Make experiments repeatable

    # Select an architecture

    # Add model parameters to the graph (so they are saved to disk at checkpoint)

    # --- Train/Test datasets ---------------------------------------------------
    data_pipe = getattr(dp, args.data_pipeline)
    with tf.variable_scope('train_data'):
        train_data_pipeline = data_pipe(
            args.data_dir,
            shuffle=True,
            batch_size=args.batch_size, nthreads=args.data_threads,
            fliplr=args.fliplr, flipud=args.flipud, rotate=args.rotate,
            random_crop=args.random_crop, params=data_params,
            output_resolution=args.output_resolution, scale=args.scale)
        train_samples = train_data_pipeline.samples
        train_samples['high_input'] = Getfilter(5, train_samples['image_input'])
        train_samples['lowres_input1'] = blur(5, train_samples['lowres_input'])
        train_samples['low_input'] = tf.image.resize_images(
            train_samples['lowres_input1'],
            [args.output_resolution[0] // args.scale,
             args.output_resolution[1] // args.scale],
            method=tf.image.ResizeMethod.BICUBIC)

    if args.eval_data_dir is not None:
        with tf.variable_scope('eval_data'):
            eval_data_pipeline = data_pipe(
                args.eval_data_dir,
                shuffle=False,
                batch_size=1, nthreads=1,
                fliplr=False, flipud=False, rotate=False,
                random_crop=False, params=data_params,
                output_resolution=args.output_resolution, scale=args.scale)
            eval_samples = eval_data_pipeline.samples
    # ---------------------------------------------------------------------------

    swaps = np.reshape(np.random.randint(0, 2, args.batch_size), [args.batch_size, 1])
    swaps = tf.cast(tf.convert_to_tensor(swaps), tf.float32)
    swaps1 = np.reshape(np.random.randint(0, 2, args.batch_size), [args.batch_size, 1])
    swaps1 = tf.cast(tf.convert_to_tensor(swaps1), tf.float32)

    # Training graph
    with tf.variable_scope('inference'):
        prediction = models.Resnet(train_samples['low_input'],
                                   train_samples['high_input'],
                                   train_samples['image_input'])
        loss, loss_content, loss_color, loss_filter, loss_texture, loss_tv, \
            discim_accuracy, discim_accuracy1 = metrics.l2_loss(
                train_samples['image_output'], prediction,
                swaps, swaps1, args.batch_size)
        psnr = metrics.psnr(train_samples['image_output'], prediction)
        loss_ssim = MultiScaleSSIM(train_samples['image_output'], prediction)

    # Evaluation graph
    if args.eval_data_dir is not None:
        with tf.name_scope('eval'):
            with tf.variable_scope('inference', reuse=True):
                eval_prediction = models.Resnet(
                    eval_samples['low_input'], eval_samples['high_input'],
                    eval_samples['image_input'])
            eval_psnr = metrics.psnr(eval_samples['image_output'], eval_prediction)

    # Optimizer: the generator trains everything except the two discriminators
    model_vars = [v for v in tf.global_variables()
                  if not (v.name.startswith("inference/l2_loss/discriminator") or
                          v.name.startswith("inference/l2_loss/discriminator1"))]
    discriminator_vars = [v for v in tf.global_variables()
                          if v.name.startswith("inference/l2_loss/discriminator")]
    discriminator_vars1 = [v for v in tf.global_variables()
                           if v.name.startswith("inference/l2_loss/discriminator1")]

    global_step = tf.contrib.framework.get_or_create_global_step()
    with tf.name_scope('optimizer'):
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        updates = tf.group(*update_ops, name='update_ops')
        log.info("Adding {} update ops".format(len(update_ops)))

        reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        if reg_losses and args.weight_decay is not None and args.weight_decay > 0:
            print("Regularization losses:")
            for rl in reg_losses:
                print(" ", rl.name)
            opt_loss = loss + args.weight_decay * sum(reg_losses)
        else:
            print("No regularization.")
            opt_loss = loss

        with tf.control_dependencies([updates]):
            opt = tf.train.AdamOptimizer(args.learning_rate)
            minimize = opt.minimize(opt_loss, name='optimizer',
                                    global_step=global_step, var_list=model_vars)
            # The discriminators maximize their losses (hence the minus signs)
            minimize1 = opt.minimize(-loss_filter, name='optimizer1',
                                     global_step=global_step,
                                     var_list=discriminator_vars)
            minimize2 = opt.minimize(-loss_texture, name='optimizer2',
                                     global_step=global_step,
                                     var_list=discriminator_vars1)

    # Average loss and psnr for display
    with tf.name_scope("moving_averages"):
        ema = tf.train.ExponentialMovingAverage(decay=0.99)
        update_ma = ema.apply([loss, loss_content, loss_color, loss_filter,
                               loss_texture, loss_tv, discim_accuracy,
                               discim_accuracy1, psnr, loss_ssim])
        loss = ema.average(loss)
        loss_content = ema.average(loss_content)
        loss_color = ema.average(loss_color)
        loss_filter = ema.average(loss_filter)
        loss_texture = ema.average(loss_texture)
        loss_tv = ema.average(loss_tv)
        discim_accuracy = ema.average(discim_accuracy)
        discim_accuracy1 = ema.average(discim_accuracy1)
        psnr = ema.average(psnr)
        loss_ssim = ema.average(loss_ssim)

    # Training stepper operation
    train_op = tf.group(minimize, minimize1, minimize2, update_ma)

    # Save a few graphs to tensorboard
    summaries = [
        tf.summary.scalar('loss', loss),
        tf.summary.scalar('loss_content', loss_content),
        tf.summary.scalar('loss_color', loss_color),
        tf.summary.scalar('loss_filter', loss_filter),
        tf.summary.scalar('loss_texture', loss_texture),
        tf.summary.scalar('loss_tv', loss_tv),
        tf.summary.scalar('discim_accuracy', discim_accuracy),
        tf.summary.scalar('discim_accuracy1', discim_accuracy1),
        tf.summary.scalar('psnr', psnr),
        tf.summary.scalar('ssim', loss_ssim),
        tf.summary.scalar('learning_rate', args.learning_rate),
        tf.summary.scalar('batch_size', args.batch_size),
    ]

    log_fetches = {
        "loss_content": loss_content,
        "loss_color": loss_color,
        "loss_filter": loss_filter,
        "loss_texture": loss_texture,
        "loss_tv": loss_tv,
        "discim_accuracy": discim_accuracy,
        "discim_accuracy1": discim_accuracy1,
        "step": global_step,
        "loss": loss,
        "psnr": psnr,
        "loss_ssim": loss_ssim}

    # Recompute the variable lists now that the EMA shadow variables exist
    model_vars = [v for v in tf.global_variables()
                  if not (v.name.startswith("inference/l2_loss/discriminator") or
                          v.name.startswith("inference/l2_loss/discriminator1"))]
    discriminator_vars = [v for v in tf.global_variables()
                          if v.name.startswith("inference/l2_loss/discriminator")]
    discriminator_vars1 = [v for v in tf.global_variables()
                           if v.name.startswith("inference/l2_loss/discriminator1")]

    # Train config
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True  # Do not cannibalize the entire GPU
    sv = tf.train.Supervisor(
        local_init_op=tf.initialize_variables(discriminator_vars),
        saver=tf.train.Saver(var_list=model_vars, max_to_keep=100),
        logdir=args.checkpoint_dir,
        save_summaries_secs=args.summary_interval,
        save_model_secs=args.checkpoint_interval)

    # Train loop
    with sv.managed_session(config=config) as sess:
        sv.loop(args.log_interval, log_hook, (sess, log_fetches))
        last_eval = time.time()
        while True:
            if sv.should_stop():
                log.info("stopping supervisor")
                break
            try:
                step, _ = sess.run([global_step, train_op])
                since_eval = time.time() - last_eval
                if args.eval_data_dir is not None and since_eval > args.eval_interval:
                    log.info("Evaluating on {} images at step {}".format(
                        eval_data_pipeline.nsamples, step))
                    p_ = 0
                    eval_data_pipeline.nsamples = 3  # hard-coded: only 3 eval samples
                    for it in range(eval_data_pipeline.nsamples):
                        p_ += sess.run(eval_psnr)
                    p_ /= eval_data_pipeline.nsamples

                    sv.summary_writer.add_summary(
                        tf.Summary(value=[tf.Summary.Value(
                            tag="psnr/eval", simple_value=p_)]),
                        global_step=step)
                    log.info("  Evaluation PSNR = {:.1f} dB".format(p_))
                    last_eval = time.time()
            except tf.errors.AbortedError:
                log.error("Aborted")
                break
            except KeyboardInterrupt:
                break

        chkpt_path = os.path.join(args.checkpoint_dir, 'on_stop.ckpt')
        log.info("Training complete, saving chkpt {}".format(chkpt_path))
        sv.saver.save(sess, chkpt_path)
        sv.request_stop()
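# `log_hook` is referenced by sv.loop() above but not defined in this excerpt.
# A minimal sketch, assuming it simply evaluates the fetches and logs the
# headline numbers at every log interval (the message layout is an assumption):
def log_hook(sess, log_fetches):
    data = sess.run(log_fetches)
    log.info("Step {} | loss = {:.4f} | psnr = {:.2f} dB | ssim = {:.4f}".format(
        data["step"], data["loss"], data["psnr"], data["loss_ssim"]))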
def test_generator(self, test_num_patch=200, test_num_image=5, load=False):
    if load:
        if self.load():
            print(" [*] Load SUCCESS")
        else:
            print(" [!] Load failed...")

    # test for patches
    start = time.time()
    test_list_phone = sorted(glob(self.config.test_path_phone_patch))

    PSNR_phone_reconstructed_list = np.zeros([test_num_patch])
    PSNR_phone_enhanced_list = np.zeros([test_num_patch])
    SSIM_phone_reconstructed_list = np.zeros([test_num_patch])
    SSIM_phone_enhanced_list = np.zeros([test_num_patch])
    indexes = []

    for i in range(test_num_patch):
        index = np.random.randint(len(test_list_phone))
        indexes.append(index)
        test_patch_phone = preprocess(
            scipy.misc.imread(test_list_phone[index], mode="RGB").astype("float32"))
        test_patch_enhanced, test_patch_reconstructed = self.sess.run(
            [self.enhanced_test, self.reconstructed_test],
            feed_dict={self.phone_test: [test_patch_phone]})

        if i % 50 == 0:
            imageio.imwrite(("./samples_DIV2K/%s/patch/phone_%d.png" %
                             (self.config.dataset_name, i)),
                            postprocess(test_patch_phone))
            imageio.imwrite(("./samples_DIV2K/%s/patch/enhanced_%d.png" %
                             (self.config.dataset_name, i)),
                            postprocess(test_patch_enhanced[0]))
            imageio.imwrite(("./samples_DIV2K/%s/patch/reconstructed_%d.png" %
                             (self.config.dataset_name, i)),
                            postprocess(test_patch_reconstructed[0]))

        PSNR = calc_PSNR(postprocess(test_patch_enhanced[0]),
                         postprocess(test_patch_phone))
        SSIM = MultiScaleSSIM(postprocess(test_patch_enhanced[0]),
                              postprocess(test_patch_phone))
        PSNR_phone_enhanced_list[i] = PSNR
        SSIM_phone_enhanced_list[i] = SSIM

        PSNR = calc_PSNR(postprocess(test_patch_reconstructed[0]),
                         postprocess(test_patch_phone))
        SSIM = MultiScaleSSIM(postprocess(test_patch_reconstructed[0]),
                              postprocess(test_patch_phone))
        PSNR_phone_reconstructed_list[i] = PSNR
        SSIM_phone_reconstructed_list[i] = SSIM

    print(("(runtime: %.3f s) Average test PSNR for %d random test image patches: "
           "phone-enhanced %.3f, phone-reconstructed %.3f") %
          (time.time() - start, test_num_patch,
           np.mean(PSNR_phone_enhanced_list),
           np.mean(PSNR_phone_reconstructed_list)))
    print(("Average test SSIM for %d random test image patches: "
           "phone-enhanced %.3f, phone-reconstructed %.3f") %
          (test_num_patch,
           np.mean(SSIM_phone_enhanced_list),
           np.mean(SSIM_phone_reconstructed_list)))

    # test for images
    start = time.time()
    test_list_phone = sorted(glob(self.config.test_path_phone_image))

    PSNR_phone_enhanced_list = np.zeros([test_num_image])
    PSNR_phone_reconstructed_list = np.zeros([test_num_image])
    SSIM_phone_reconstructed_list = np.zeros([test_num_image])
    SSIM_phone_enhanced_list = np.zeros([test_num_image])
    indexes = []

    for i in range(test_num_image):
        index = i  # deterministic: walk the sorted list instead of sampling
        indexes.append(index)
        test_image_phone = preprocess(
            scipy.misc.imread(test_list_phone[index], mode="RGB").astype("float32"))
        test_image_enhanced, test_image_reconstructed = self.sess.run(
            [self.enhanced_test_unknown, self.reconstructed_test_unknown],
            feed_dict={self.phone_test_unknown: [test_image_phone]})

        imageio.imwrite(("./samples_DIV2K/%s/image/phone_%d.png" %
                         (self.config.dataset_name, i)),
                        postprocess(test_image_phone))
        imageio.imwrite(("./samples_DIV2K/%s/image/enhanced_%d.png" %
                         (self.config.dataset_name, i)),
                        postprocess(test_image_enhanced[0]))
        imageio.imwrite(("./samples_DIV2K/%s/image/reconstructed_%d.png" %
                         (self.config.dataset_name, i)),
                        postprocess(test_image_reconstructed[0]))

        PSNR = calc_PSNR(postprocess(test_image_enhanced[0]),
                         postprocess(test_image_phone))
        SSIM = MultiScaleSSIM(postprocess(test_image_enhanced[0]),
                              postprocess(test_image_phone))
        PSNR_phone_enhanced_list[i] = PSNR
        SSIM_phone_enhanced_list[i] = SSIM

        PSNR = calc_PSNR(postprocess(test_image_reconstructed[0]),
                         postprocess(test_image_phone))
        SSIM = MultiScaleSSIM(postprocess(test_image_reconstructed[0]),
                              postprocess(test_image_phone))
        PSNR_phone_reconstructed_list[i] = PSNR
        SSIM_phone_reconstructed_list[i] = SSIM

    if test_num_image > 0:
        print(("(runtime: %.3f s) Average test PSNR for %d full test images: "
               "original-enhanced %.3f, original-reconstructed %.3f") %
              (time.time() - start, test_num_image,
               np.mean(PSNR_phone_enhanced_list),
               np.mean(PSNR_phone_reconstructed_list)))
        print(("Average test SSIM for %d full test images: "
               "original-enhanced %.3f, original-reconstructed %.3f") %
              (test_num_image,
               np.mean(SSIM_phone_enhanced_list),
               np.mean(SSIM_phone_reconstructed_list)))
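# `preprocess` / `postprocess` are not defined in this excerpt. A plausible
# pair is sketched below, assuming the usual GAN-style normalization to
# [-1, 1]; this scaling is an assumption, not taken from the original code:
def preprocess(img):
    # float32 HxWx3 in [0, 255] -> [-1, 1]
    return img / 127.5 - 1.0

def postprocess(img):
    # [-1, 1] -> uint8 in [0, 255], clipped
    return np.clip((img + 1.0) * 127.5, 0, 255).astype(np.uint8)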