tl.files.assign_params(sess, load_params, net)
print_("\tdone\n")

if not os.path.exists(FLAGS.out_dir):
    os.makedirs(FLAGS.out_dir)

print_("\nStarting prediction...\n\n")

k = 0
for i in range(len(frames)):
    print("Frame %d: '%s'" % (i, frames[i]))

    try:
        # Read frame
        print_("\tReading...")

        # Read the LDR image, resize it to (sy, sx), apply brightness scaling
        # and clipping, and return a 4-D tensor.
        x_buffer = img_io.readLDR(frames[i], (sy, sx), True, FLAGS.scaling)
        print_("\tdone")
        print_("\t(Saturation: %0.2f%%)\n" % (100.0 * (x_buffer >= 1).sum() / x_buffer.size), 'm')

        # Run prediction.
        # The gamma value is used to allow for boosting/reducing the intensity of
        # the reconstructed highlights. If y = f(x) is the reconstruction, the
        # gamma g alters this according to y = f(x^(1/g))^g.
        print_("\tInference...")
        feed_dict = {x: np.power(np.maximum(x_buffer, 0.0), 1.0 / FLAGS.gamma)}
        y_predict = sess.run([y], feed_dict=feed_dict)
        y_predict = np.power(np.maximum(y_predict, 0.0), FLAGS.gamma)
        print_("\tdone\n")

        # Gamma corrected output (gamma correction in place of tone mapping)
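        # The snippet above breaks off where the gamma-corrected output would
        # be written. A minimal sketch of that step (hypothetical continuation,
        # not the repository's own code): clamp the linear prediction, apply a
        # 1/2.2 display gamma as a stand-in for a tone-mapping operator, and
        # save it with img_io.writeEXR, the same helper the batch script below
        # uses. The output naming scheme here is an assumption.
        y_tonemapped = np.power(np.maximum(y_predict, 0.0), 1.0 / 2.2)
        out_name = os.path.join(FLAGS.out_dir, '%06d_out.exr' % i)
        img_io.writeEXR(np.squeeze(y_tonemapped), out_name)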
img_path_list.sort()
img_list_H = list()
img_list = list()

# Traverse the exposure stack in forward order when the model was trained for
# up-exposure, otherwise in reverse order.
if is_upexposure_trained:
    img_order = range(len(img_path_list))
else:
    img_order = range(len(img_path_list) - 1, -1, -1)

# Load the HDR reference frame and all LDR exposures of the sequence.
img_H_ = img_io.readHDR(img_path_list_H[0], (sy, sx))
img_list_H.append(np.squeeze(img_H_))
img_list_H = np.array(img_list_H)

for j in img_order:
    img_path = img_path_list[j]
    img = img_io.readLDR(img_path, (sy, sx), True, FLAGS.scaling)
    img_list.append(np.squeeze(img))
img_list = np.array(img_list)

# Build one sample per frame: the current LDR frame is the input; the HDR
# reference plus the LDR frames starting two positions ahead form the targets.
for input_frame_id in range(len(img_list) - 1):
    start_frame_id = input_frame_id + 2
    end_frame_id = min(start_frame_id + predicted_window_len, len(img_list))
    x_batch = np.array([img_list[input_frame_id, :, :, :]])
    y_batch_0 = img_list_H.reshape((1,) + x_batch.shape).astype(np.float32)
    y_batch_1 = np.array([img_list[start_frame_id:end_frame_id, :, :, :]])
    y_batch = np.concatenate([y_batch_0, y_batch_1], axis=1)
    dummy_len = predicted_window_len - y_batch.shape[1]
    zero_dummy = np.zeros(
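        # Hedged continuation of the truncated call above (an assumption, not
        # code from the repository): pad y_batch along the temporal axis with
        # zeros so every sample has exactly predicted_window_len target frames.
        # The exact shape and dtype are guesses from the surrounding code.
        (1, dummy_len) + y_batch.shape[2:], dtype=np.float32)
    y_batch = np.concatenate([y_batch, zero_dummy], axis=1)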
start_time = time.perf_counter()
dir_path = dir_path_list[i]

# frams = [glob.glob(dir_path + '/LDR/1.png')[0], glob.glob(dir_path + '/LDR/4.png')[0],
#          glob.glob(dir_path + '/LDR/7.png')[0]]  # used for the DML, Fairchild and NewHDR datasets
frams = [
    glob.glob(dir_path + '/LDR/002.png')[0],
    glob.glob(dir_path + '/LDR/005.png')[0],
    glob.glob(dir_path + '/LDR/007.png')[0]
]  # only for the CanonCamera dataset

filename_root = os.path.basename(dir_path)
print('filename', filename_root)

save_path = dir_outpath[0] + '/' + filename_root
if not os.path.exists(save_path):
    os.makedirs(save_path)

try:
    # Read the three bracketed LDR exposures of this scene.
    x_input_1 = img_io.readLDR(frams[0], (sy, sx), True, FLAGS.scaling)
    x_input_4 = img_io.readLDR(frams[1], (sy, sx), True, FLAGS.scaling)
    x_input_7 = img_io.readLDR(frams[2], (sy, sx), True, FLAGS.scaling)

    # Run inference once per exposure.
    y_predict_dm_1 = sess_dm.run(pred_placehoder_dm,
                                 feed_dict={
                                     x_dm: x_input_1 * 1,
                                     x_local_dm: x_input_1 * 1
                                 })
    y_predict_dm_4 = sess_dm.run(pred_placehoder_dm,
                                 feed_dict={
                                     x_dm: x_input_4 * 1,
                                     x_local_dm: x_input_4 * 1
                                 })
    y_predict_dm_7 = sess_dm.run(pred_placehoder_dm,
                                 feed_dict={
                                     x_dm: x_input_7 * 1,
                                     x_local_dm: x_input_7 * 1
                                 })
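    # The three calls above repeat the same pattern; a small helper would keep
    # them in one place. A sketch only, assuming the sess_dm, pred_placehoder_dm,
    # x_dm and x_local_dm objects used above; it is not part of the repository:
    #
    #     def run_dm(x_input):
    #         return sess_dm.run(pred_placehoder_dm,
    #                            feed_dict={x_dm: x_input, x_local_dm: x_input})
    #
    #     y_predict_dm_1, y_predict_dm_4, y_predict_dm_7 = (
    #         run_dm(x_input_1), run_dm(x_input_4), run_dm(x_input_7))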
# Collect the input frames: a single file, or every file in the directory.
frames = [FLAGS.im_dir]
if os.path.isdir(FLAGS.im_dir):
    frames = [
        os.path.join(FLAGS.im_dir, name)
        for name in sorted(os.listdir(FLAGS.im_dir))
        if os.path.isfile(os.path.join(FLAGS.im_dir, name))
    ]

# Build the graph and load the pretrained parameters.
x = tf.placeholder(tf.float32, shape=[1, sy, sx, 3])
net = ldr2hdr.model(x)
y = ldr2hdr.get_final(net, x)

sess = tf.InteractiveSession()
load_params = tl.files.load_npz(name=FLAGS.params)
tl.files.assign_params(sess, load_params, net)

if not os.path.exists(FLAGS.out_dir):
    os.makedirs(FLAGS.out_dir)

k = 0
for i in range(len(frames)):
    x_buffer = img_io.readLDR(frames[i], (sy, sx), True, 1.0)
    y_predict = sess.run([y], feed_dict={x: x_buffer})
    y_gamma = np.power(np.maximum(y_predict, 0.0), 0.5)  # gamma-compressed copy (not written out below)
    k += 1
    outname = './output' + frames[i][7:-4] + '.exr'
    img_io.writeEXR(np.squeeze(y_predict), outname)  # drop the list/batch dimensions before writing

sess.close()
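# A sketch of a less brittle way to build outname than the frames[i][7:-4]
# slice above: derive the stem from the input filename. The './output'
# directory and the '.exr' suffix follow the snippet above; the helper name
# is hypothetical and not part of the repository.
def make_out_name(frame_path, out_dir='./output'):
    stem = os.path.splitext(os.path.basename(frame_path))[0]
    return os.path.join(out_dir, stem + '.exr')

# e.g. outname = make_out_name(frames[i])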