# NOTE(review): fragment of a training loop — the matching `try:` / `for it ...:`
# headers lie above this chunk; indentation below is reconstructed.

# periodic checkpoint (presumably guarded by an iteration-count test just above)
save_path = saver.save(
    sess, '%s/Epoch_(%d)_(%dof%d).ckpt' %
    (ckpt_dir, epoch, it_epoch, batch_epoch))
print('Model saved in file: % s' % save_path)

# sample
if (it + 1) % 100 == 0:
    # pull one test batch per domain and run both cycles (a->b->a, b->a->b)
    a_real_ipt = a_test_pool.batch()
    b_real_ipt = b_test_pool.batch()
    [a2b_opt, a2b2a_opt, b2a_opt, b2a2b_opt] = sess.run(
        [a2b, a2b2a, b2a, b2a2b],
        feed_dict={
            a_real: a_real_ipt,
            b_real: b_real_ipt
        })
    # 6 images merged into a 2x3 grid: real_a, a2b, a2b2a / real_b, b2a, b2a2b
    sample_opt = np.concatenate((a_real_ipt, a2b_opt, a2b2a_opt, b_real_ipt,
                                 b2a_opt, b2a2b_opt), axis=0)
    save_dir = './outputs/sample_images_while_training/' + dataset
    utils.mkdir(save_dir)
    im.imwrite(
        im.immerge(sample_opt, 2, 3),
        '%s/Epoch_(%d)_(%dof%d).jpg' % (save_dir, epoch, it_epoch,
                                        batch_epoch))
# bare except: deliberately catches ANY interruption (including
# KeyboardInterrupt) so a final checkpoint is saved before exit
except:
    save_path = saver.save(
        sess, '%s/Epoch_(%d)_(%dof%d).ckpt' %
        (ckpt_dir, epoch, it_epoch, batch_epoch))
    print('Model saved in file: % s' % save_path)
sess.close()
epoch = it // batch_epoch it_epoch = it % batch_epoch + 1 # display if it % 1 == 0: print("Epoch: (%3d) (%5d/%5d)" % (epoch, it_epoch, batch_epoch)) # save if (it + 1) % 1000 == 0: save_path = saver.save(sess, '%s/Epoch_(%d)_(%dof%d).ckpt' % (ckpt_dir, epoch, it_epoch, batch_epoch)) print('Model saved in file: % s' % save_path) # sample if (it + 1) % 100 == 0: a_real_ipt = a_test_pool.batch() b_real_ipt = b_test_pool.batch() [a2b_opt, a2b2a_opt, b2a_opt, b2a2b_opt] = sess.run([a2b, a2b2a, b2a, b2a2b], feed_dict={a_real: a_real_ipt, b_real: b_real_ipt}) sample_opt = np.concatenate((a_real_ipt, a2b_opt, a2b2a_opt, b_real_ipt, b2a_opt, b2a2b_opt), axis=0) save_dir = './sample_images_while_training/' + dataset utils.mkdir(save_dir + '/') im.imwrite(im.immerge(sample_opt, 2, 3), '%s/Epoch_(%d)_(%dof%d).jpg' % (save_dir, epoch, it_epoch, batch_epoch)) except Exception, e: coord.request_stop(e) finally: print("Stop threads and close session!") coord.request_stop() coord.join(threads) sess.close()
assert direction == 'a2b' or direction == 'b2a', 'Direction should be a2b or b2a!'

""" run """
frames = []
# FIX: the decoded gif frames were bound to `a_reals_ipt_ori`, but every later
# use said `a_real_ipt_ori`, which would raise NameError at runtime; one
# consistent name is used throughout.
a_real_ipt_ori = gif_frames(Image.open(gif_path))
size_ori = a_real_ipt_ori.shape[0:3]

with tf.Session() as sess:
    # NOTE(review): the placeholder is rank 5 but the array built below is
    # rank 6 (1, n_frames, crop, crop, crop, 3) — confirm the intended feed
    # shape against models.generator.
    a_real = tf.placeholder(
        tf.float32, shape=[None, crop_size, crop_size, crop_size, 3])
    a2b = models.generator(a_real, direction)

    # restore
    saver = tf.train.Saver()
    ckpt_path = utils.load_checkpoint('./checkpoints/' + dataset, sess, saver)
    if ckpt_path is None:
        raise Exception('No checkpoint!')
    else:
        print('Copy variables from % s' % ckpt_path)

    # resize all frames to the crop size and run the generator once
    a_real_ipt = np.zeros(
        (1, len(a_real_ipt_ori), crop_size, crop_size, crop_size, 3))
    a_real_ipt[0, ...] = im.imresize(a_real_ipt_ori,
                                     [crop_size, crop_size, crop_size])
    a2b_opt = sess.run(a2b, feed_dict={a_real: a_real_ipt})
    # upscale the translated frames back to the source gif resolution
    a2b_opt_ori = im.imresize(a2b_opt[0, ..., 0].squeeze(), size_ori)

    # write source and translation side by side as an animated gif
    img_opt_ori = np.array([a_real_ipt_ori, a2b_opt_ori])
    img_opt_ori = im.im2uint(im.immerge(img_opt_ori, 1, 1, 2))
    writeGif(save_path, img_opt_ori, duration)
    print('save in %s' % save_path)
print('Copy variables from % s' % ckpt_path)

# test: translate every held-out image in both directions and save the
# (input, translated, cycle-reconstruction) strip under the original name
a_list = glob('./datasets/' + dataset + '/testA/*.jpg')
b_list = glob('./datasets/' + dataset + '/testB/*.jpg')
a_save_dir = './test_predictions/' + dataset + '/testA/'
b_save_dir = './test_predictions/' + dataset + '/testB/'
utils.mkdir([a_save_dir, b_save_dir])

for a_path in a_list:
    a_real_ipt = im.imresize(im.imread(a_path), [crop_size, crop_size])
    a_real_ipt.shape = 1, crop_size, crop_size, 3
    a2b_opt, a2b2a_opt = sess.run([a2b, a2b2a],
                                  feed_dict={a_real: a_real_ipt})
    a_img_opt = np.concatenate((a_real_ipt, a2b_opt, a2b2a_opt), axis=0)
    img_name = os.path.basename(a_path)
    im.imwrite(im.immerge(a_img_opt, 1, 3), a_save_dir + img_name)
    print('Save %s' % (a_save_dir + img_name))

for b_path in b_list:
    b_real_ipt = im.imresize(im.imread(b_path), [crop_size, crop_size])
    b_real_ipt.shape = 1, crop_size, crop_size, 3
    b2a_opt, b2a2b_opt = sess.run([b2a, b2a2b],
                                  feed_dict={b_real: b_real_ipt})
    b_img_opt = np.concatenate((b_real_ipt, b2a_opt, b2a2b_opt), axis=0)
    img_name = os.path.basename(b_path)
    im.imwrite(im.immerge(b_img_opt, 1, 3), b_save_dir + img_name)
    print('Save %s' % (b_save_dir + img_name))
# NOTE(review): fragment — the outer loop over model variables (which defines
# `shape` and `total_parameters`) is above this chunk; indentation reconstructed.
print(len(shape))
variable_parameters = 1
# multiply out all dimensions of this variable's shape (TF1 `Dimension.value`)
for dim in shape:
    print(dim)
    variable_parameters *= dim.value
print(variable_parameters)
total_parameters += variable_parameters
print("\nTotal parameters:\n", total_parameters)

# start = time.time()
# Inference
for i in range(len(a_list)):
    # Define shapes for images fed to the graph
    a_feed = im.imresize(im.imread(a_list[i]), [crop_size, crop_size])
    a_feed.shape = 1, crop_size, crop_size, 3

    # Feed in images to the graph
    a2b_result = sess.run(a2b, feed_dict={a_input: a_feed})

    # Create and save the output image (input | translation, side by side)
    a_img_opt = np.concatenate((a_feed, a2b_result), axis=0)
    img_name = os.path.basename(a_list[i])
    im.imwrite(im.immerge(a_img_opt, 1, 2), a_save_dir + '/' + img_name)
    print('Save %s' % (a_save_dir + '/' + img_name))
    # if i == 100:
    #     end = time.time()
# end2 = time.time()
# print("Time to process first 100 images:", end - start)
# print("Time to process all %d images: %f" % (i + 1, end2 - start))
# NOTE(review): fragment — this chunk opens mid-way through a sess.run(...)
# feed_dict whose call site is above; indentation reconstructed.
        a_real: a_ipt,
    })
summary_writer.add_summary(eval_summary_opt, it)
#
# run the a->b generator on the current input batch
predict = sess.run(a2b, feed_dict={a_real: a_ipt})
print('predict: ', predict.shape)
# label = np.zeros_like(a_real_ipt)
# label[:,:,:,0] = a2b_opt[:,:,:,0]
# print(a2b_opt)
# sample_opt = np.concatenate((a_real_ipt, label), axis=0)

# visualization: blend channel 0 of the prediction into channel 0 of the
# input (70/30 mix); note this mutates a_ipt in place
sample_opt = a_ipt
sample_opt[:, :, :, 0] = 0.7 * sample_opt[:, :, :, 0] + 0.3 * predict[:, :, :, 0]
save_dir = './outputs/' + dataset + saveidx + '/sample_images_while_training/'
utils.mkdir(save_dir)
im.imwrite(
    im.immerge(sample_opt, len(sample_opt), 1),
    '%s/Epoch_(%d)_(%dof%d).jpg' % (save_dir, epoch, it_epoch, batch_epoch))
# except:
#     print("ERROR")
#     save_path = saver.save(sess, '%s/Epoch_(%d)_(%dof%d).ckpt' % (ckpt_dir, epoch, it_epoch, batch_epoch))
#     print('Model saved in file: % s' % save_path)
#     sess.close()
def train():
    """Train a WGAN-GP-style CycleGAN between two tar-archived image sets.

    Builds the full TF1 graph (two generators, two discriminators,
    Wasserstein losses with gradient penalty and cycle-consistency terms),
    then runs the training loop with periodic summaries, checkpoints and
    sample images. No parameters; no return value. Side effects: writes
    under ./outputs/ (summaries, checkpoints, sample images).
    """
    # hyper-parameters
    epoch = 200
    batch_size = 1
    lr = 0.0002
    crop_size = 128
    load_size = 128

    # data: images come out of two tar archives, split 80/20 train/test
    tar_db_a = "cameron_images.tgz"
    tar_db_b = "teresa_images.tgz"
    db_a_i = importer_tar.Importer(tar_db_a)
    db_b_i = importer_tar.Importer(tar_db_b)
    image_a_names = db_a_i.get_sorted_image_name()
    image_b_names = db_b_i.get_sorted_image_name()
    train_a_size = int(len(image_a_names) * 0.8)
    train_b_size = int(len(image_b_names) * 0.8)
    image_a_train_names = image_a_names[0:train_a_size]
    image_b_train_names = image_b_names[0:train_b_size]
    image_a_test_names = image_a_names[train_a_size:]
    image_b_test_names = image_b_names[train_b_size:]
    print("A train size:{},test size:{}".format(len(image_a_train_names),
                                                len(image_a_test_names)))
    print("B train size:{},test size:{}".format(len(image_b_train_names),
                                                len(image_b_test_names)))

    """ graph """
    # models
    generator_a2b = partial(models.generator, scope='a2b')
    generator_b2a = partial(models.generator, scope='b2a')
    discriminator_a = partial(models.discriminator, scope='a')
    discriminator_b = partial(models.discriminator, scope='b')

    # operations
    a_real_in = tf.placeholder(tf.float32,
                               shape=[None, load_size, load_size, 3],
                               name="a_real")
    b_real_in = tf.placeholder(tf.float32,
                               shape=[None, load_size, load_size, 3],
                               name="b_real")
    a_real = utils.preprocess_image(a_real_in, crop_size=crop_size)
    b_real = utils.preprocess_image(b_real_in, crop_size=crop_size)
    a2b = generator_a2b(a_real)
    b2a = generator_b2a(b_real)
    b2a2b = generator_a2b(b2a)
    a2b2a = generator_b2a(a2b)
    a_logit = discriminator_a(a_real)
    b2a_logit = discriminator_a(b2a)
    b_logit = discriminator_b(b_real)
    a2b_logit = discriminator_b(a2b)

    # losses: WGAN generator losses + L1 cycle-consistency (weight 10)
    g_loss_a2b = -tf.reduce_mean(a2b_logit)
    g_loss_b2a = -tf.reduce_mean(b2a_logit)
    cyc_loss_a = tf.losses.absolute_difference(a_real, a2b2a)
    cyc_loss_b = tf.losses.absolute_difference(b_real, b2a2b)
    g_loss = g_loss_a2b + g_loss_b2a + cyc_loss_a * 10.0 + cyc_loss_b * 10.0

    wd_a = tf.reduce_mean(a_logit) - tf.reduce_mean(b2a_logit)
    # BUG FIX: the second term of wd_b sat on its own line without a
    # continuation, so wd_b was only tf.reduce_mean(b_logit) and the
    # "- tf.reduce_mean(a2b_logit)" line was a dead expression. Parenthesize
    # so the Wasserstein distance for domain B includes both terms.
    wd_b = (tf.reduce_mean(b_logit)
            - tf.reduce_mean(a2b_logit))
    gp_a = gradient_penalty(a_real, b2a, discriminator_a)
    gp_b = gradient_penalty(b_real, a2b, discriminator_b)
    d_loss_a = -wd_a + 10.0 * gp_a
    d_loss_b = -wd_b + 10.0 * gp_b

    # summaries
    utils.summary({
        g_loss_a2b: 'g_loss_a2b',
        g_loss_b2a: 'g_loss_b2a',
        cyc_loss_a: 'cyc_loss_a',
        cyc_loss_b: 'cyc_loss_b'
    })
    utils.summary({d_loss_a: 'd_loss_a'})
    utils.summary({d_loss_b: 'd_loss_b'})
    for variable in slim.get_model_variables():
        tf.summary.histogram(variable.op.name, variable)
    merged = tf.summary.merge_all()
    im1_op = tf.summary.image("real_a", a_real_in)
    im2_op = tf.summary.image("a2b", a2b)
    # BUG FIX: this slot was tf.summary.image("b2a2b", b2a2b) — both a
    # duplicate of im6_op and inconsistent with the sample grid below, whose
    # third image is a2b2a.
    im3_op = tf.summary.image("a2b2a", a2b2a)
    im4_op = tf.summary.image("real_b", b_real_in)
    im5_op = tf.summary.image("b2a", b2a)
    im6_op = tf.summary.image("b2a2b", b2a2b)

    # optim: each Adam optimizer only touches its own sub-network's variables
    t_var = tf.trainable_variables()
    d_a_var = [var for var in t_var if 'a_discriminator' in var.name]
    d_b_var = [var for var in t_var if 'b_discriminator' in var.name]
    g_var = [
        var for var in t_var
        if 'a2b_generator' in var.name or 'b2a_generator' in var.name
    ]
    d_a_train_op = tf.train.AdamOptimizer(lr, beta1=0.5).minimize(
        d_loss_a, var_list=d_a_var)
    d_b_train_op = tf.train.AdamOptimizer(lr, beta1=0.5).minimize(
        d_loss_b, var_list=d_b_var)
    g_train_op = tf.train.AdamOptimizer(lr, beta1=0.5).minimize(
        g_loss, var_list=g_var)

    """ train """
    ''' init '''
    # session
    config = tf.ConfigProto(allow_soft_placement=True)
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    # counter (persisted iteration counter, so training resumes where it left off)
    it_cnt, update_cnt = utils.counter()

    ''' summary '''
    summary_writer = tf.summary.FileWriter('./outputs/summaries/', sess.graph)

    ''' saver '''
    saver = tf.train.Saver(max_to_keep=5)

    ''' restore '''
    ckpt_dir = './outputs/checkpoints/'
    utils.mkdir(ckpt_dir)
    try:
        utils.load_checkpoint(ckpt_dir, sess)
    except Exception:
        # FIX: narrowed from a bare `except:`; no checkpoint yet -> fresh init
        sess.run(tf.global_variables_initializer())

    '''train'''
    # FIX: pre-define it_epoch so the `finally` save below cannot hit a
    # NameError if an exception fires before the first loop iteration.
    it_epoch = 0
    try:
        batch_epoch = min(train_a_size, train_b_size) // batch_size
        max_it = epoch * batch_epoch
        for it in range(sess.run(it_cnt), max_it):
            sess.run(update_cnt)
            epoch = it // batch_epoch
            it_epoch = it % batch_epoch + 1

            # read data (one image per domain, resized to load_size)
            a_real_np = cv2.resize(
                db_a_i.get_image(image_a_train_names[it_epoch % batch_epoch]),
                (load_size, load_size))
            b_real_np = cv2.resize(
                db_b_i.get_image(image_b_train_names[it_epoch % batch_epoch]),
                (load_size, load_size))

            # train G
            sess.run(g_train_op,
                     feed_dict={
                         a_real_in: [a_real_np],
                         b_real_in: [b_real_np]
                     })
            # train discriminators
            sess.run([d_a_train_op, d_b_train_op],
                     feed_dict={
                         a_real_in: [a_real_np],
                         b_real_in: [b_real_np]
                     })

            # display + scalar/histogram summaries every 100 iterations
            if it % 100 == 0:
                summary = sess.run(merged,
                                   feed_dict={
                                       a_real_in: [a_real_np],
                                       b_real_in: [b_real_np]
                                   })
                summary_writer.add_summary(summary, it)
                print("Epoch: (%3d) (%5d/%5d)" % (epoch, it_epoch,
                                                  batch_epoch))

            # save a checkpoint every 1000 iterations
            if (it + 1) % 1000 == 0:
                save_path = saver.save(
                    sess, '{}/epoch_{}_{}.ckpt'.format(ckpt_dir, epoch,
                                                       it_epoch))
                print('###Model saved in file: {}'.format(save_path))

            # sample every 1000 iterations: random test image per domain,
            # image summaries plus a 2x3 jpg grid on disk
            if (it + 1) % 1000 == 0:
                a_test_index = int(
                    np.random.uniform(high=len(image_a_test_names)))
                b_test_index = int(
                    np.random.uniform(high=len(image_b_test_names)))
                a_real_np = cv2.resize(
                    db_a_i.get_image(image_a_test_names[a_test_index]),
                    (load_size, load_size))
                b_real_np = cv2.resize(
                    db_b_i.get_image(image_b_test_names[b_test_index]),
                    (load_size, load_size))
                [a_opt, a2b_opt, a2b2a_opt, b_opt, b2a_opt, b2a2b_opt
                 ] = sess.run([a_real, a2b, a2b2a, b_real, b2a, b2a2b],
                              feed_dict={
                                  a_real_in: [a_real_np],
                                  b_real_in: [b_real_np]
                              })
                sample_opt = np.concatenate(
                    (a_opt, a2b_opt, a2b2a_opt, b_opt, b2a_opt, b2a2b_opt),
                    axis=0)

                [im1_sum, im2_sum, im3_sum, im4_sum, im5_sum, im6_sum] = \
                    sess.run([im1_op, im2_op, im3_op, im4_op, im5_op, im6_op],
                             feed_dict={a_real_in: [a_real_np],
                                        b_real_in: [b_real_np]})
                summary_writer.add_summary(im1_sum, it)
                summary_writer.add_summary(im2_sum, it)
                summary_writer.add_summary(im3_sum, it)
                summary_writer.add_summary(im4_sum, it)
                summary_writer.add_summary(im5_sum, it)
                summary_writer.add_summary(im6_sum, it)

                save_dir = './outputs/sample_images_while_training/'
                utils.mkdir(save_dir)
                im.imwrite(
                    im.immerge(sample_opt, 2, 3),
                    '{}/epoch_{}_it_{}.jpg'.format(save_dir, epoch, it_epoch))
    finally:
        # always persist a final checkpoint, whether training finished or was
        # interrupted (the previous `except: raise` was redundant and removed)
        save_path = saver.save(
            sess, '{}/epoch_{}_{}.ckpt'.format(ckpt_dir, epoch, it_epoch))
        print('###Model saved in file: {}'.format(save_path))
        sess.close()
# NOTE(review): fragment — enclosing `try:` / training-loop headers are above;
# indentation reconstructed. f-string variant of the checkpoint/sample tail.

# checkpoint every 1000 iterations
if (it + 1) % 1000 == 0:
    save_path = saver.save(
        sess,
        f'{ckpt_dir}/Epoch_({epoch})_({it_epoch}of{batch_epoch}).ckpt')
    print(f'Model saved in file: {save_path}')

# sample every 100 iterations: 2x3 grid real_a, a2b, a2b2a / real_b, b2a, b2a2b
if (it + 1) % 100 == 0:
    a_real_ipt = a_test_pool.batch()
    b_real_ipt = b_test_pool.batch()
    [a2b_opt, a2b2a_opt, b2a_opt, b2a2b_opt] = sess.run(
        [a2b, a2b2a, b2a, b2a2b],
        feed_dict={
            a_real: a_real_ipt,
            b_real: b_real_ipt
        })
    sample_opt = np.concatenate((a_real_ipt, a2b_opt, a2b2a_opt, b_real_ipt,
                                 b2a_opt, b2a2b_opt), axis=0)
    save_dir = './outputs/sample_images_while_training/' + dataset
    utils.mkdir(save_dir)
    im.imwrite(
        im.immerge(sample_opt, 2, 3),
        f'{save_dir}/Epoch_({epoch})_({it_epoch}of{batch_epoch}).jpg')
# bare except: deliberately catches ANY interruption (including
# KeyboardInterrupt) to save a final checkpoint before closing
except:
    save_path = saver.save(
        sess,
        f'{ckpt_dir}/Epoch_({epoch})_({it_epoch}of{batch_epoch}).ckpt')
    print(f'Model saved in file: {save_path}')
sess.close()
start = time.time() # Inference for i in range(len(a_list)): # Define shapes for images fed to the graph inputShape = (150, 150, 256) inputTensor = np.zeros(inputShape) a_feed = inputTensor # Feed in images to the graph a2b_result = sess.run(a_output, feed_dict={a_input: a_feed}) print(type(a2b_result)) print(a2b_result.shape) # Create and save the output image a_img_opt = a2b_result img_name = os.path.basename(a_list[i]) output = im.immerge(a_img_opt, 1, 1) im.imwriteShow(output, a_save_dir + '/' + img_name) print('Save %s' % (a_save_dir + '/' + img_name)) if i == 100: end = time.time() end2 = time.time() # print("Time to process first 100 images:", end - start) print("Time to process all %d images: %f" % (i + 1, end2 - start))
# NOTE(review): fragment — `graph`, `sess`, `a_input`, `a_list`, `a_save_dir`
# and `crop_size` are defined above this chunk; indentation reconstructed.
a_output = graph.get_tensor_by_name(
    'a2b_generator/output_image:0')  # Output Tensor

# Initialize_all_variables
# FIX: tf.global_variables_initializer() only *creates* the init op; the
# original never ran it, so it was dead code. Run it through the session
# (a no-op if the imported graph contains no variables).
sess.run(tf.global_variables_initializer())

start = time.time()
# Inference
for i in range(len(a_list)):
    # Define shapes for images fed to the graph
    a_feed = im.imresize(im.imread(a_list[i]), [crop_size, crop_size])
    a_feed.shape = 1, crop_size, crop_size, 3

    # Feed in images to the graph
    a2b_result = sess.run(a_output, feed_dict={a_input: a_feed})

    # Create and save the output image (input | translation, side by side)
    a_img_opt = np.concatenate((a_feed, a2b_result), axis=0)
    img_name = os.path.basename(a_list[i])
    im.imwrite(im.immerge(a_img_opt, 1, 2), a_save_dir + '/' + img_name)
    print('Save %s' % (a_save_dir + '/' + img_name))

    if i == 100:
        end = time.time()
end2 = time.time()
# print("Time to process first 100 images:", end - start)
print("Time to process all %d images: %f" % (i + 1, end2 - start))
# display if it % 1 == 0: print("Epoch: (%3d) (%5d/%5d)" % (epoch, it_epoch, batch_epoch)) # save if (it + 1) % 1000 == 0: save_path = saver.save(sess, '%s/Epoch_(%d)_(%dof%d).ckpt' % (ckpt_dir, epoch, it_epoch, batch_epoch)) print('Model saved in file: % s' % save_path) # sample if (it + 1) % 100 == 0: a_real_ipt = a_test_pool.batch() b_real_ipt = b_test_pool.batch() [a2b_opt, a2b2a_opt, b2a_opt, b2a2b_opt] = sess.run([a2b, a2b2a, b2a, b2a2b], feed_dict={a_real: a_real_ipt, b_real: b_real_ipt}) sample_opt = np.concatenate((a_real_ipt, a2b_opt, a2b2a_opt, b_real_ipt, b2a_opt, b2a2b_opt), axis=0) save_dir = './sample_images_while_training/' + dataset utils.mkdir(save_dir + '/') im.imwrite(im.immerge(sample_opt, 2, 3), '%s/Epoch_(%d)_(%dof%d).jpg' % (save_dir, epoch, it_epoch, batch_epoch)) except Exception as e: coord.request_stop(e) finally: print("Stop threads and close session!") coord.request_stop() coord.join(threads) sess.close() sys.exit()
def save_single_img(a_real_ipt, b_real_ipt, save_dir, fname, forward_mapping=True):
    """Run one generator pass and write the single resulting image to disk.

    a_real_ipt / b_real_ipt: input batches fed to both placeholders (the
    graph needs both fed even though only one mapping is evaluated).
    save_dir / fname: destination directory (created if missing) and file name.
    forward_mapping: True -> evaluate a2b, False -> evaluate b2a.
    Uses sess, a2b, b2a, a_real, b_real from the enclosing scope.
    """
    if forward_mapping:
        mapping_op = a2b
    else:
        mapping_op = b2a
    feeds = {a_real: a_real_ipt, b_real: b_real_ipt}
    [mapped] = sess.run([mapping_op], feed_dict=feeds)
    sample_opt = np.array(mapped)
    utils.mkdir(save_dir + '/')
    targetDir = '%s/%s' % (save_dir, fname)
    im.imwrite(im.immerge(sample_opt, 1, 1), targetDir)
# Sample images for external evaluation (i.e. just raw single images). Note: For triplet=true, there are 2x steps involved. if args.samplingcycle > 0 and (it % args.samplingcycle == 0) and it > 0: print("Create samples for the external evaluator (aux batch {} with size {})".format(int(it/args.samplingcycle), args.online_sampling_batch_size)) for c_i in range(args.online_sampling_batch_size): fname = 'Transformed_from_%s_(%dof%d)_once.png' % (args.stage2, c_i, args.singletestN) save_single_img(a_real_ipt = b_test_pool.batch(), b_real_ipt = c_test_pool.batch(), save_dir = './aux_samples/' + args.dataset + subnet_ext_maybe+'/'+args.stage2+'/'+str(int(it)), fname=fname) # Create sample images with a-b-a structure if (it + 1) % 100 == 0: a_real_ipt = b_test_pool.batch() b_real_ipt = c_test_pool.batch() [a2b_opt, a2b2a_opt, b2a_opt, b2a2b_opt] = sess.run([a2b, a2b2a, b2a, b2a2b], feed_dict={a_real: a_real_ipt, b_real: b_real_ipt}) sample_opt = np.concatenate((a_real_ipt, a2b_opt, a2b2a_opt, b_real_ipt, b2a_opt, b2a2b_opt), axis=0) im.imwrite(im.immerge(sample_opt, 2, 3), '%s/Epoch_(%d)_(%dof%d).png' % (online_samples_dir, epoch, it_epoch, batch_epoch)) if args.double_cycle: [a2b_opt, a2b2a_opt, b2a_opt, b2a2b_opt] = sess.run([a2b, a2b2a, b2a, b2a2b], feed_dict={a_real: a2b_opt, b_real: b2a_opt}) sample_opt = np.concatenate((a_real_ipt, a2b_opt, a2b2a_opt, b_real_ipt, b2a_opt, b2a2b_opt), axis=0) im.imwrite(im.immerge(sample_opt, 2, 3), '%s/Epoch_(%d)_(%dof%d)_double_cycle.png' % (online_samples_dir, epoch, it_epoch, batch_epoch)) if do_save and last_it != -1: save_path = saver.save(sess, '%s/Epoch_(%d)_(%dof%d)_step_(%d).ckpt' % (ckpt_dir, epoch, it_epoch, batch_epoch,last_it)) print('Final model saved in file: % s' % save_path) elif args.chaintestdir: chaintests_N = 20 print("Run chain test on dir {} for {} times".format(args.chaintestdir, chaintests_N)) for c_i in range(chaintests_N): a_real_ipt = b_test_pool.batch()
raise Exception('No checkpoint!') else: print('Copy variables from % s' % ckpt_path) # test a_list = glob('./datasets/' + dataset + '/testA/*.jpg') b_list = glob('./datasets/' + dataset + '/testB/*.jpg') a_save_dir = './test_predictions/' + dataset + '/testA/' b_save_dir = './test_predictions/' + dataset + '/testB/' utils.mkdir([a_save_dir, b_save_dir]) for i in range(len(a_list)): a_real_ipt = im.imresize(im.imread(a_list[i]), [crop_size, crop_size]) a_real_ipt.shape = 1, crop_size, crop_size, 3 a2b_opt, a2b2a_opt = sess.run([a2b, a2b2a], feed_dict={a_real: a_real_ipt}) a_img_opt = np.concatenate((a_real_ipt, a2b_opt, a2b2a_opt), axis=0) img_name = os.path.basename(a_list[i]) im.imwrite(im.immerge(a_img_opt, 1, 3), a_save_dir + img_name) print('Save %s' % (a_save_dir + img_name)) for i in range(len(b_list)): b_real_ipt = im.imresize(im.imread(b_list[i]), [crop_size, crop_size]) b_real_ipt.shape = 1, crop_size, crop_size, 3 b2a_opt, b2a2b_opt = sess.run([b2a, b2a2b], feed_dict={b_real: b_real_ipt}) b_img_opt = np.concatenate((b_real_ipt, b2a_opt, b2a2b_opt), axis=0) img_name = os.path.basename(b_list[i]) im.imwrite(im.immerge(b_img_opt, 1, 3), b_save_dir + img_name) print('Save %s' % (b_save_dir + img_name))
# NOTE(review): fragment — session/graph setup and `predict_op` are defined
# above this chunk; indentation reconstructed.
ab_pair_test_pool = data.ImageDataPair(sess, a_test_img_paths, batch_size=3,
                                       load_size=load_size, shuffle=False,
                                       crop_size=crop_size)
ab_pair_ipt = ab_pair_test_pool.batch_match()
# the pool returns paired images stacked along channels:
# presumably channels 0-2 = image "a", channels 3-5 = its pair — TODO confirm
a_ipt = ab_pair_ipt[:, :, :, :3]
b_ipt = ab_pair_ipt[:, :, :, 3:]
predict = sess.run(predict_op, feed_dict={a_real: a_ipt})
print('predict: ', predict.shape)

# visualization: blend channel 0 of the prediction into channel 0 of the
# input (70/30 mix); note this mutates a_ipt in place
sample_opt = a_ipt
sample_opt[:, :, :, 0] = 0.7 * sample_opt[:, :, :, 0] + 0.3 * predict[:, :, :, 0]
im.imwrite(im.immerge(sample_opt, len(sample_opt), 1), './result.jpg')

# test_img_paths = glob(input_path + '*')
# for path in test_img_paths:
#     img = tf.read_file(path)
#     img = tf.image.decode_jpeg(img, channels=3)
#     img = tf.image.resize_images(img, load_size)
#     img = (img - tf.reduce_min(img)) / (tf.reduce_max(img) - tf.reduce_min(img))
#     img = tf.expand_dims(img, axis=0)
#     img = sess.run(img)
# predict_op = tf.image.resize_images(predict_op, [376, 1242])
# predict = sess.run(predict_op, feed_dict={a_real: img})