def main():
    """Split side-by-side paired images (A|B, 256x512) into separate A/ and B/ folders.

    Reads every image from ``args.input`` one at a time and writes the left
    half to ``<output>/A`` and the right half to ``<output>/B`` as numbered
    PNG files (000001.png, 000002.png, ...).
    """
    args = parser.parse_args()
    input_dir = args.input
    output_dir = args.output
    output_dir_A = output_dir + '/A'
    output_dir_B = output_dir + '/B'
    # exist_ok avoids the check-then-create race of `if not exists: makedirs`.
    os.makedirs(output_dir_A, exist_ok=True)
    os.makedirs(output_dir_B, exist_ok=True)

    print('Reading the database...')
    database = db.DBreader(input_dir, batch_size=1, labeled=False, shuffle=False)
    print('Database load complete!!')
    total_batch = database.total_batch

    print('Generating...')
    for step in range(total_batch):
        print(str(step) + '/' + str(total_batch))
        img = database.next_batch()
        # Each batch holds one 256x512 image: left half = domain A, right half = domain B.
        img_A = img[:, :, 0:256, :]
        img_B = img[:, :, 256:, :]
        img_A = img_A.reshape(256, 256, 3)
        img_B = img_B.reshape(256, 256, 3)
        # NOTE: scipy.misc.imsave was removed in SciPy 1.2; this script needs an older SciPy.
        scipy.misc.imsave(output_dir_A + '/' + str(step + 1).zfill(6) + '.png', img_A)
        scipy.misc.imsave(output_dir_B + '/' + str(step + 1).zfill(6) + '.png', img_B)
    print('finished!!')
def main2():
    """Train Pix2Pix on 256x512 paired images, resuming from a checkpoint when present."""
    # Epoch counter kept inside the graph (and therefore inside the checkpoint)
    # so training resumes at the correct epoch.  Note the graph name is
    # 'global_step' even though it counts epochs.
    global_epoch = tf.Variable(0, trainable=False, name='global_step')
    global_epoch_increase = tf.assign(global_epoch, tf.add(global_epoch, 1))

    args = parser.parse_args()
    direction = args.direction  # selects which half of the pair is input vs target
    filelist_train = args.train
    result_dir = args.out_dir + '/result'
    ckpt_dir = args.out_dir + '/checkpoint'
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)
    if not os.path.exists(ckpt_dir):
        os.makedirs(ckpt_dir)

    total_epoch = args.epochs
    batch_size = args.batch_size

    # Each sample is one 256x512 image holding both domains side by side.
    database = db.DBreader(filelist_train, batch_size=batch_size, labeled=False, resize=[256, 512])

    sess = tf.Session()
    model = Pix2Pix(sess, batch_size)
    saver = tf.train.Saver(tf.global_variables())
    ckpt = tf.train.get_checkpoint_state(ckpt_dir)
    # Resume from the latest checkpoint when available, otherwise initialize fresh.
    if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
        saver.restore(sess, ckpt.model_checkpoint_path)
    else:
        sess.run(tf.global_variables_initializer())

    total_batch = database.total_batch

    epoch = sess.run(global_epoch)
    while True:
        if epoch == total_epoch:
            break
        for step in range(total_batch):
            # split_images divides the 256x512 batch into (input, target) per `direction`.
            img_input, img_target = split_images(database.next_batch(), direction)
            img_target = normalize(img_target)
            img_input = normalize(img_input)

            loss_D = model.train_discrim(img_input, img_target)  # Train Discriminator and get the loss value
            loss_GAN, loss_L1 = model.train_gen(img_input, img_target)  # Train Generator and get the loss value

            if step % 100 == 0:
                print('Epoch: [', epoch, '/', total_epoch, '], ',
                      'Step: [', step, '/', total_batch, '], D_loss: ', loss_D,
                      ', G_loss_GAN: ', loss_GAN, ', G_loss_L1: ', loss_L1)

            if step % 500 == 0:
                # Periodic visualization: input | generated | target, concatenated
                # along the width axis, saved as one grid image.
                generated_samples = denormalize(model.sample_generator(img_input, batch_size=batch_size))
                img_target = denormalize(img_target)
                img_input = denormalize(img_input)
                img_for_vis = np.concatenate([img_input, generated_samples, img_target], axis=2)
                savepath = result_dir + '/output_' + 'EP' + str(epoch).zfill(3) + "_Batch" + str(step).zfill(6) + '.jpg'
                save_visualization(img_for_vis, (batch_size, 1), save_path=savepath)

        # Bump the persisted epoch counter and checkpoint the model every epoch.
        epoch = sess.run(global_epoch_increase)
        saver.save(sess, ckpt_dir + '/model_epoch' + str(epoch).zfill(3))
def main():
    """Run a trained Pix2Pix generator over a test set and save visualizations.

    Loads the latest checkpoint from ``args.ckpt_dir`` (exits if none exists),
    then for every batch writes an input|output grid to ``<out_dir>/result``
    and the raw generated images to ``<out_dir>/back``.
    """
    args = parser.parse_args()
    filelist_test = args.test
    result_dir = args.out_dir + '/result'
    ckpt_dir = args.ckpt_dir
    back_dir = args.out_dir + '/back'
    # Fix: back_dir is written to below (save_visualization2) but was never
    # created, so saving crashed when the directory did not already exist.
    os.makedirs(result_dir, exist_ok=True)
    os.makedirs(back_dir, exist_ok=True)

    batch_size = args.visnum
    database = db.DBreader(filelist_test, batch_size=batch_size, labeled=False,
                           resize=[256, 256], shuffle=False)

    sess = tf.Session()
    model = Pix2Pix(sess, batch_size)
    saver = tf.train.Saver(tf.global_variables())
    ckpt = tf.train.get_checkpoint_state(ckpt_dir)
    if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
        saver.restore(sess, ckpt.model_checkpoint_path)
    else:
        sys.exit("There is no trained model")

    total_batch = database.total_batch
    print('Generating...')
    for step in range(total_batch):
        img_input = normalize(database.next_batch())
        generated_samples = denormalize(
            model.sample_generator(img_input, batch_size=batch_size))
        img_input = denormalize(img_input)
        # Input and generated output side by side (concatenated along width).
        img_for_vis = np.concatenate([img_input, generated_samples], axis=2)
        savepath = result_dir + '/output_' + "_Batch" + str(step).zfill(6) + '.png'
        savepath2 = back_dir + '/output_' + "_Batch" + str(step).zfill(6) + '(back).png'
        save_visualization(img_for_vis, (batch_size, 1), save_path=savepath)
        save_visualization2(generated_samples, (batch_size, 1), save_path=savepath2)
    print('finished!!')
def main():
    """Train a DCGAN and periodically save a 14x14 grid of generated samples.

    Reads images from ``args.filelist``, trains for ``args.epochs`` epochs,
    and writes snapshot grids to ``args.out_dir``.
    """
    args = parser.parse_args()
    filelist_dir = args.filelist
    output_dir = args.out_dir
    # exist_ok avoids the check-then-create race of `if not exists: makedirs`.
    os.makedirs(output_dir, exist_ok=True)

    total_epoch = args.epochs
    batch_size = args.batch_size
    n_noise = 100  # dimensionality of the generator's latent noise vector

    database = db.DBreader(filelist_dir, batch_size, resize=[64, 64, 3], labeled=False)

    sess = tf.Session()
    model = DCGAN(sess, batch_size)
    sess.run(tf.global_variables_initializer())

    total_batch = database.total_batch
    # Fixed test noise so progress snapshots are comparable across steps.
    visualization_num = 14 * 14
    noise_test = np.random.normal(size=(visualization_num, n_noise))

    loss_D = 0.0
    loss_G = 0.0
    for epoch in range(total_epoch):
        for step in range(total_batch):
            batch_xs = database.next_batch()          # get the next batch
            batch_xs = batch_xs * (2.0 / 255.0) - 1   # scale pixels to [-1, 1]
            noise_g = np.random.normal(size=(batch_size, n_noise))
            noise_d = np.random.normal(size=(batch_size, n_noise))

            # Train the generator twice per discriminator update for the first
            # 200 steps so the discriminator does not overpower the generator
            # early in training.  (Original misspelled this local 'adventage'.)
            if epoch == 0 and step < 200:
                advantage = 2
            else:
                advantage = 1

            if step % advantage == 0:
                loss_D = model.train_discrim(batch_xs, noise_d)  # train Discriminator
            loss_G = model.train_gen(noise_g)                    # train Generator

            print('Epoch: [', epoch, '/', total_epoch, '], ',
                  'Step: [', step, '/', total_batch, '], D_loss: ', loss_D,
                  ', G_loss: ', loss_G)

            if step == 0 or (step + 1) % 10 == 0:
                generated_samples = model.sample_generator(noise_test, batch_size=visualization_num)
                savepath = output_dir + '/output_' + 'EP' + str(epoch).zfill(3) + "_Batch" + str(step).zfill(6) + '.jpg'
                save_visualization(generated_samples, (14, 14), save_path=savepath)
def train():
    """Train CycleGAN (A<->B translation) with image pools and LR decay, saving PB/CKPT snapshots."""
    # --- Graph inputs -------------------------------------------------------
    input_A_place = tf.placeholder(tf.float32, shape=[None, image_height, image_width, 3], name="input_A")
    input_B_place = tf.placeholder(tf.float32, shape=[None, image_height, image_width, 3], name="input_B")
    # Placeholders for fakes drawn from the history pools; these are what the
    # discriminators see as "fake" inputs.
    fake_pool_A_place = tf.placeholder(
        tf.float32, shape=[None, image_height, image_width, 3], name="fake_pool_A")
    fake_pool_B_place = tf.placeholder(
        tf.float32, shape=[None, image_height, image_width, 3], name="fake_pool_B")
    is_training_place = tf.placeholder(tf.bool, shape=(), name="is_training")

    cycleGAN = CycleGAN(is_training_place, lambda_reconst)
    Gen_AB_loss, Gen_BA_loss, Dis_A_loss, Dis_B_loss, fake_A, fake_B = \
        cycleGAN.build_CycleGAN(input_A_place, input_B_place, fake_pool_A_place, fake_pool_B_place)
    gen_A2B_vars, gen_B2A_vars, dis_A_vars, dis_B_vars = cycleGAN.get_vars()

    # Starts at -1 so the first increase below yields step 0.
    global_step = tf.Variable(-1, trainable=False, name="global_step")
    global_step_increase = tf.assign(global_step, tf.add(global_step, 1))
    # Constant LR until start_decay_step, then linear (power=1) decay over
    # decay_steps down to end_learning_rate.
    learning_rate = (tf.where(
        tf.greater_equal(global_step, start_decay_step),
        tf.train.polynomial_decay(starter_learning_rate,
                                  global_step - start_decay_step,
                                  decay_steps,
                                  end_learning_rate,
                                  power=1.0),
        starter_learning_rate))
    train_op_G = tf.train.AdamOptimizer(learning_rate, beta1=0.5, ). \
        minimize(Gen_AB_loss + Gen_BA_loss, var_list=gen_A2B_vars + gen_B2A_vars)
    train_op_D = tf.train.AdamOptimizer(learning_rate, beta1=0.5, ). \
        minimize(Dis_A_loss + Dis_B_loss, var_list=dis_A_vars + dis_B_vars)

    # Named inference outputs so the frozen graph (PB) can be driven by name.
    A2B_out, ABA_out = cycleGAN.sample_generate(input_A_place, "A2B")
    A2B_output = tf.identity(A2B_out, name="A2B_output")
    B2A_out, BAB_out = cycleGAN.sample_generate(input_B_place, "B2A")
    B2A_output = tf.identity(B2A_out, name="B2A_output")

    fake_A_pool = ImagePool(pool_size)
    fake_B_pool = ImagePool(pool_size)

    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        ckpt = tf.train.get_checkpoint_state(ckpt_path)
        if ckpt and ckpt.model_checkpoint_path:
            ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
            saver.restore(sess, os.path.join(ckpt_path, ckpt_name))
        _global_step = sess.run(global_step_increase)

        # Separate reader instances for training vs visualization so sampling
        # test images does not consume training batches.
        database_A = db.DBreader(db_dir_A, batch_size=batch_size, labeled=False, resize=[image_height, image_width])
        db_for_vis_A = db.DBreader(db_dir_A, batch_size=batch_size, labeled=False, resize=[image_height, image_width])
        database_B = db.DBreader(db_dir_B, batch_size=batch_size, labeled=False, resize=[image_height, image_width])
        db_for_vis_B = db.DBreader(db_dir_B, batch_size=batch_size, labeled=False, resize=[image_height, image_width])

        while _global_step < Train_Step:
            images_A = database_A.next_batch()
            images_B = database_B.next_batch()
            # First pass: run the generators only, to obtain the current fakes.
            feed_dict_pool = {
                input_A_place: images_A,
                input_B_place: images_B,
                is_training_place: True
            }
            fake_A_vals, fake_B_vals = sess.run([fake_A, fake_B], feed_dict=feed_dict_pool)
            # Second pass: feed history-pool fakes to the discriminators.
            feed_dict_train = {
                input_A_place: images_A,
                input_B_place: images_B,
                is_training_place: True,
                fake_pool_A_place: fake_A_pool.query(fake_A_vals),
                fake_pool_B_place: fake_B_pool.query(fake_B_vals)
            }
            sess.run(train_op_D, feed_dict=feed_dict_train)
            # Generators are updated twice per discriminator update.
            sess.run(train_op_G, feed_dict=feed_dict_train)
            sess.run(train_op_G, feed_dict=feed_dict_train)
            _Gen_AB_loss, _Gen_BA_loss, _Dis_A_loss, _Dis_B_loss \
                = sess.run([Gen_AB_loss, Gen_BA_loss, Dis_A_loss, Dis_B_loss], feed_dict=feed_dict_train)
            print(
                "Step:{},Gen_AB_loss:{},Gen_BA_loss:{},Dis_A_loss:{},Dis_B_loss:{}"
                .format(
                    _global_step,
                    _Gen_AB_loss,
                    _Gen_BA_loss,
                    _Dis_A_loss,
                    _Dis_B_loss,
                ))

            if _global_step % 100 == 0:
                # NOTE(review): query() presumably mutates the pool; calling it
                # again here purely for visualization inserts extra copies into
                # the history pool -- verify against ImagePool's implementation.
                # Also assumes the 'pool_res' directory already exists.
                scipy.misc.imsave(
                    "pool_res/fake_A_pool_{}.jpg".format(_global_step),
                    (fake_A_pool.query(fake_A_vals)[0] + 1) / 2 * 255.0)
                scipy.misc.imsave(
                    "pool_res/fake_B_pool_{}.jpg".format(_global_step),
                    (fake_B_pool.query(fake_B_vals)[0] + 1) / 2 * 255.0)

                test_images_A = db_for_vis_A.next_batch()
                test_images_B = db_for_vis_B.next_batch()
                # save result from A to B
                _A2B_output, _ABA_out = sess.run([A2B_output, ABA_out],
                                                 feed_dict={
                                                     input_A_place: test_images_A,
                                                     is_training_place: False
                                                 })
                # Generator outputs are in [-1, 1]; rescale to [0, 255] for saving.
                _A2B_output = (_A2B_output + 1) / 2 * 255.0
                _ABA_out = (_ABA_out + 1) / 2 * 255.0
                for ind, trg_image in enumerate(_A2B_output[:sample_num]):
                    scipy.misc.imsave(
                        result_dir + "/{}_{}_A.jpg".format(_global_step, ind),
                        test_images_A[ind])
                    scipy.misc.imsave(
                        result_dir + "/{}_{}_A2B.jpg".format(_global_step, ind),
                        _A2B_output[ind])
                    scipy.misc.imsave(
                        result_dir + "/{}_{}_ABA.jpg".format(_global_step, ind),
                        _ABA_out[ind])
                # save result from B to A
                _B2A_output, _BAB_out = sess.run([B2A_output, BAB_out],
                                                 feed_dict={
                                                     input_B_place: test_images_B,
                                                     is_training_place: False
                                                 })
                _B2A_output = (_B2A_output + 1) / 2 * 255.0
                _BAB_out = (_BAB_out + 1) / 2 * 255.0
                for ind, trg_image in enumerate(_B2A_output[:sample_num]):
                    scipy.misc.imsave(
                        result_dir + "/{}_{}_B.jpg".format(_global_step, ind),
                        test_images_B[ind])
                    scipy.misc.imsave(
                        result_dir + "/{}_{}_B2A.jpg".format(_global_step, ind),
                        _B2A_output[ind])
                    scipy.misc.imsave(
                        result_dir + "/{}_{}_BAB.jpg".format(_global_step, ind),
                        _BAB_out[ind])

            if _global_step % 100000 == 0:
                # Save frozen graph (PB) with the named inference outputs.
                constant_graph = graph_util.convert_variables_to_constants(
                    sess, sess.graph_def, ["A2B_output", "B2A_output"])
                save_model_name = model_name + "-" + str(_global_step) + ".pb"
                with tf.gfile.FastGFile(pb_path + save_model_name, mode="wb") as fw:
                    fw.write(constant_graph.SerializeToString())
                # Save checkpoint (CKPT).
                saver.save(sess, ckpt_path + model_name + ".ckpt", global_step=_global_step)
                print("Successfully saved model {}".format(save_model_name))
                # return
            _global_step = sess.run(global_step_increase)
def main():
    """Train Pix2Pix on separate input/target file lists and plot per-epoch losses.

    Resumes from the latest checkpoint in ``<out_dir>/checkpoint`` when one
    exists; after the final epoch, plots the mean D/GAN/L1 losses per epoch
    and saves the figure.
    """
    # Epoch counter persisted in the checkpoint (graph name is 'global_step'
    # even though it counts epochs).
    global_epoch = tf.Variable(0, trainable=False, name='global_step')
    global_epoch_increase = tf.assign(global_epoch, tf.add(global_epoch, 1))

    args = parser.parse_args()
    filelist_train = args.train
    filelist_original = args.original
    result_dir = args.out_dir + '/result'
    back_dir = args.out_dir + '/back'  # reserved for the (currently disabled) visualization output
    ckpt_dir = args.out_dir + '/checkpoint'
    # exist_ok avoids the check-then-create race of `if not exists: makedirs`.
    os.makedirs(result_dir, exist_ok=True)
    os.makedirs(ckpt_dir, exist_ok=True)

    total_epoch = args.epochs
    batch_size = args.batch_size

    # 'train' supplies generator inputs, 'original' the matching targets;
    # both readers must yield samples in the same order.
    database = db.DBreader(filelist_train, batch_size=batch_size, labeled=False, resize=[256, 256])
    databaseo = db.DBreader(filelist_original, batch_size=batch_size, labeled=False, resize=[256, 256])

    sess = tf.Session()
    model = Pix2Pix(sess, batch_size)
    saver = tf.train.Saver(tf.global_variables())
    ckpt = tf.train.get_checkpoint_state(ckpt_dir)
    if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
        saver.restore(sess, ckpt.model_checkpoint_path)
    else:
        sess.run(tf.global_variables_initializer())

    total_batch = database.total_batch
    epoch = sess.run(global_epoch)
    print(total_batch)

    # Per-epoch mean losses, plotted once training finishes.
    lossd = []
    lossgan = []
    lossl1 = []
    while True:
        templossd = []
        templossgan = []
        templossl1 = []
        if epoch == total_epoch:
            # flg is presumably a matplotlib.pyplot alias -- TODO confirm at the import site.
            flg.plot(range(1, len(lossd) + 1), lossd, 'r', label='loss_Dis')
            flg.plot(range(1, len(lossgan) + 1), lossgan, 'g', label='loss_Gan')
            flg.plot(range(1, len(lossl1) + 1), lossl1, 'b', label='loss_L1')
            flg.legend(loc='upper right')
            flg.ylabel('loss')
            flg.xlabel('Number of epoch')
            flg.savefig('graph99(50%).png')
            break
        for step in range(total_batch):
            img_target = normalize(databaseo.next_batch())
            img_input = normalize(database.next_batch())
            loss_D = model.train_discrim(img_input, img_target)      # train Discriminator
            loss_GAN, loss_L1 = model.train_gen(img_input, img_target)  # train Generator
            templossd.append(loss_D)
            templossgan.append(loss_GAN)
            templossl1.append(loss_L1)
            print('Epoch: [', epoch, '/', total_epoch, '], ',
                  'Step: [', step, '/', total_batch, '], D_loss: ', loss_D,
                  ', G_loss_GAN: ', loss_GAN, ', G_loss_L1: ', loss_L1)
        lossd.append(np.mean(templossd))
        lossgan.append(np.mean(templossgan))
        lossl1.append(np.mean(templossl1))
        epoch = sess.run(global_epoch_increase)
        saver.save(sess, ckpt_dir + '/model_epoch' + str(epoch).zfill(3))
def train():
    """Train DualGAN (A<->B translation) with RMSProp, saving PB/CKPT snapshots periodically."""
    # --- Graph inputs -------------------------------------------------------
    input_A_place = tf.placeholder(tf.float32, shape=[None, image_height, image_width, 3], name="input_A")
    input_B_place = tf.placeholder(tf.float32, shape=[None, image_height, image_width, 3], name="input_B")
    is_training_place = tf.placeholder(tf.bool, shape=(), name="is_training")
    # Defaults to 1.0 (no dropout) so inference can omit it from the feed dict.
    keep_prob_place = tf.placeholder_with_default(1.0, shape=(), name="keep_prob")

    dualgan = DualGAN(is_training_place, keep_prob_place, lambda_reconst)
    G_loss, D_loss = dualgan.build_DualGAN(input_A_place, input_B_place)
    g_vars, d_vars = dualgan.get_vars()

    # Starts at -1 so the first increase below yields step 0.
    global_step = tf.Variable(-1, trainable=False, name="global_step")
    global_step_increase = tf.assign(global_step, tf.add(global_step, 1))

    # Do not wrap these in `with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS))`
    # to update batch-norm statistics: that collection contains the batch-norm
    # update ops of BOTH the generator and the discriminator.
    train_op_D = tf.train.RMSPropOptimizer(
        learning_rate, decay=decay).minimize(D_loss, var_list=d_vars)
    train_op_G = tf.train.RMSPropOptimizer(
        learning_rate, decay=decay).minimize(G_loss, var_list=g_vars)

    # Named inference outputs so the frozen graph (PB) can be driven by name.
    A2B_out, ABA_out = dualgan.sample_generate(input_A_place, "A2B")
    A2B_output = tf.identity(A2B_out, name="A2B_output")
    B2A_out, BAB_out = dualgan.sample_generate(input_B_place, "B2A")
    B2A_output = tf.identity(B2A_out, name="B2A_output")

    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        ckpt = tf.train.get_checkpoint_state(ckpt_path)
        if ckpt and ckpt.model_checkpoint_path:
            ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
            saver.restore(sess, os.path.join(ckpt_path, ckpt_name))
        _global_step = sess.run(global_step_increase)

        # Separate reader instances for training vs visualization so sampling
        # test images does not consume training batches.
        database_A = db.DBreader(db_dir_A, batch_size=batch_size, labeled=False, resize=[image_height, image_width])
        db_for_vis_A = db.DBreader(db_dir_A, batch_size=batch_size, labeled=False, resize=[image_height, image_width])
        database_B = db.DBreader(db_dir_B, batch_size=batch_size, labeled=False, resize=[image_height, image_width])
        db_for_vis_B = db.DBreader(db_dir_B, batch_size=batch_size, labeled=False, resize=[image_height, image_width])

        while _global_step < Train_Step:
            images_A = database_A.next_batch()
            images_B = database_B.next_batch()
            feed_dict = {
                input_A_place: images_A,
                input_B_place: images_B,
                is_training_place: True,
                keep_prob_place: 0.5
            }
            sess.run(train_op_D, feed_dict=feed_dict)
            # Generators are updated twice per discriminator update.
            sess.run(train_op_G, feed_dict=feed_dict)
            sess.run(train_op_G, feed_dict=feed_dict)
            _D_loss, _G_loss = sess.run([D_loss, G_loss], feed_dict=feed_dict)
            print("Step:{},D_loss:{},G_loss:{}".format(
                _global_step,
                _D_loss,
                _G_loss,
            ))

            if _global_step % 200 == 0:
                test_images_A = db_for_vis_A.next_batch()
                test_images_B = db_for_vis_B.next_batch()
                # save result from A to B
                _A2B_output, _ABA_out = sess.run(
                    [A2B_output, ABA_out],
                    feed_dict={
                        input_A_place: test_images_A,
                        is_training_place: False,
                        keep_prob_place: 1.0
                    })
                # Generator outputs are in [-1, 1]; rescale to [0, 255] for saving.
                _A2B_output = (_A2B_output + 1) / 2 * 255.0
                _ABA_out = (_ABA_out + 1) / 2 * 255.0
                for ind, trg_image in enumerate(_A2B_output[:sample_num]):
                    scipy.misc.imsave(
                        result_dir + "/{}_{}_A.jpg".format(_global_step, ind),
                        test_images_A[ind])
                    scipy.misc.imsave(
                        result_dir + "/{}_{}_A2B.jpg".format(_global_step, ind),
                        _A2B_output[ind])
                    scipy.misc.imsave(
                        result_dir + "/{}_{}_ABA.jpg".format(_global_step, ind),
                        _ABA_out[ind])
                # save result from B to A
                _B2A_output, _BAB_out = sess.run(
                    [B2A_output, BAB_out],
                    feed_dict={
                        input_B_place: test_images_B,
                        is_training_place: False,
                        keep_prob_place: 1.0
                    })
                _B2A_output = (_B2A_output + 1) / 2 * 255.0
                _BAB_out = (_BAB_out + 1) / 2 * 255.0
                for ind, trg_image in enumerate(_B2A_output[:sample_num]):
                    scipy.misc.imsave(
                        result_dir + "/{}_{}_B.jpg".format(_global_step, ind),
                        test_images_B[ind])
                    scipy.misc.imsave(
                        result_dir + "/{}_{}_B2A.jpg".format(_global_step, ind),
                        _B2A_output[ind])
                    scipy.misc.imsave(
                        result_dir + "/{}_{}_BAB.jpg".format(_global_step, ind),
                        _BAB_out[ind])

            if _global_step % 10000 == 0:
                # Save frozen graph (PB) with the named inference outputs.
                constant_graph = graph_util.convert_variables_to_constants(
                    sess, sess.graph_def, ["A2B_output", "B2A_output"])
                save_model_name = model_name + "-" + str(_global_step) + ".pb"
                with tf.gfile.FastGFile(pb_path + save_model_name, mode="wb") as fw:
                    fw.write(constant_graph.SerializeToString())
                # Save checkpoint (CKPT).
                saver.save(sess, ckpt_path + model_name + ".ckpt", global_step=_global_step)
                print("Successfully saved model {}".format(save_model_name))
                # return
            _global_step = sess.run(global_step_increase)
def main():
    """Train DiscoGAN on two unpaired datasets (A and B), resuming from checkpoints.

    Every 500 steps saves visualization grids of source | translated |
    cycle-reconstructed images for both directions.
    """
    # Epoch counter persisted in the checkpoint (graph name is 'global_step'
    # even though it counts epochs).
    global_epoch = tf.Variable(0, trainable=False, name='global_step')
    global_epoch_increase = tf.assign(global_epoch, tf.add(global_epoch, 1))

    args = parser.parse_args()
    db_dir_A = args.train_A
    db_dir_B = args.train_B
    result_dir_AtoB = args.out_dir + '/result/AtoB'
    result_dir_BtoA = args.out_dir + '/result/BtoA'
    ckpt_dir = args.out_dir + '/checkpoint'
    # exist_ok avoids the check-then-create race of `if not exists: makedirs`.
    os.makedirs(result_dir_AtoB, exist_ok=True)
    os.makedirs(result_dir_BtoA, exist_ok=True)
    os.makedirs(ckpt_dir, exist_ok=True)

    total_epoch = args.epochs
    batch_size = args.batch_size
    vis_num = 4  # samples per visualization grid

    # Separate reader instances for training vs visualization so sampling
    # does not consume training batches.
    database_A = db.DBreader(db_dir_A, batch_size=batch_size, labeled=False, resize=[64, 64])
    db_for_vis_A = db.DBreader(db_dir_A, batch_size=vis_num, labeled=False, resize=[64, 64])
    database_B = db.DBreader(db_dir_B, batch_size=batch_size, labeled=False, resize=[64, 64])
    db_for_vis_B = db.DBreader(db_dir_B, batch_size=vis_num, labeled=False, resize=[64, 64])

    sess = tf.Session()
    model = Discogan(sess, batch_size)
    saver = tf.train.Saver(tf.global_variables())
    ckpt = tf.train.get_checkpoint_state(ckpt_dir)
    if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
        saver.restore(sess, ckpt.model_checkpoint_path)
    else:
        sess.run(tf.global_variables_initializer())

    # An epoch can only cover as many batches as the smaller dataset provides.
    total_batch = min(database_A.total_batch, database_B.total_batch)

    epoch = sess.run(global_epoch)
    while True:
        if epoch == total_epoch:
            break
        for step in range(total_batch):
            input_A = normalize(database_A.next_batch())
            input_B = normalize(database_B.next_batch())

            # Discriminator trains on every other step only, so the generators
            # are updated twice as often as the discriminators.
            if step % 2 == 0:
                loss_D = model.train_discrim(input_A, input_B, epoch * total_batch + step)
            loss_G = model.train_gen(input_A, input_B, epoch * total_batch + step)

            if step % 100 == 0:
                print('Epoch: [', epoch, '/', total_epoch, '], ',
                      'Step: [', step, '/', total_batch, '], D_loss: ', loss_D,
                      ', G_loss: ', loss_G)

            if step % 500 == 0:
                for_vis_A = normalize(db_for_vis_A.next_batch())
                for_vis_B = normalize(db_for_vis_B.next_batch())
                generated_samples_AB = denormalize(model.sample_generate(for_vis_A, 'AB', batch_size=4))
                generated_samples_ABA = denormalize(model.sample_generate(for_vis_A, 'ABA', batch_size=4))
                generated_samples_BA = denormalize(model.sample_generate(for_vis_B, 'BA', batch_size=4))
                generated_samples_BAB = denormalize(model.sample_generate(for_vis_B, 'BAB', batch_size=4))
                # Each row: source | translated | cycle-reconstructed (width-concatenated).
                img_for_vis_AB = np.concatenate(
                    [denormalize(for_vis_A), generated_samples_AB, generated_samples_ABA], axis=2)
                img_for_vis_BA = np.concatenate(
                    [denormalize(for_vis_B), generated_samples_BA, generated_samples_BAB], axis=2)
                savepath_AB = result_dir_AtoB + '/output_' + 'EP' + str(epoch).zfill(3) + "_Batch" + str(step).zfill(6) + '.jpg'
                savepath_BA = result_dir_BtoA + '/output_' + 'EP' + str(epoch).zfill(3) + "_Batch" + str(step).zfill(6) + '.jpg'
                save_visualization(img_for_vis_AB, (vis_num, 1), save_path=savepath_AB)
                save_visualization(img_for_vis_BA, (vis_num, 1), save_path=savepath_BA)

        epoch = sess.run(global_epoch_increase)
        saver.save(sess, ckpt_dir + '/model_epoch' + str(epoch).zfill(3))
def main(args):
    """Train an adversarial autoencoder (AAE) on video clips.

    Per step: one reconstruction update (encoder+decoder), one discriminator
    update, and two generator (encoder) updates.
    """
    # --- Parameters ---
    batch_size = args.batch_size
    num_frames_per_clip = args.num_frames_per_clip
    dataset_shuffle = args.dataset_shuffle
    num_epochs = args.num_epochs
    initial_learning_rate = args.initial_learning_rate
    z_dim = args.z_dim
    image_crop_size = args.image_crop_size

    # --- Dataset reader ---
    reader = db.DBreader(batch_size=batch_size,
                         n_frames_clip=num_frames_per_clip,
                         resize=[image_crop_size, image_crop_size],
                         shuffle=dataset_shuffle)

    # --- Build graph ---
    # NOTE(review): x uses the module-level IMAGE_CROP_SIZE constant while
    # z_sample uses the image_crop_size argument -- these must agree; confirm.
    x = tf.placeholder(tf.float32,
                       shape=[batch_size, num_frames_per_clip, IMAGE_CROP_SIZE, IMAGE_CROP_SIZE, 1])
    # Prior sample fed to the discriminator as the "real" latent code.
    z_sample = tf.placeholder(tf.float32,
                              shape=[batch_size, 1, image_crop_size // 4, image_crop_size // 4, z_dim])

    model = AAE()
    y, z, neg_marginal_likelihood, D_loss, G_loss = model.adversarial_autoencoder(x, z_sample)

    # --- Optimization ---
    t_vars = tf.trainable_variables()
    d_vars = [var for var in t_vars if "Discriminator" in var.name]
    g_vars = [var for var in t_vars if "Encoder" in var.name]
    # Fix: the original condition ('"Encoder" or "Decoder" in var.name') was
    # always truthy, so ae_vars silently included EVERY trainable variable,
    # including the discriminator's -- the reconstruction step was updating
    # the discriminator too.
    ae_vars = [var for var in t_vars if "Encoder" in var.name or "Decoder" in var.name]

    train_op_ae = tf.train.AdamOptimizer(initial_learning_rate).minimize(
        neg_marginal_likelihood, var_list=ae_vars)
    # Discriminator learns at 1/5 of the base rate to keep the adversarial game balanced.
    train_op_d = tf.train.AdamOptimizer(initial_learning_rate / 5).minimize(
        D_loss, var_list=d_vars)
    train_op_g = tf.train.AdamOptimizer(initial_learning_rate).minimize(
        G_loss, var_list=g_vars)

    # --- Training ---
    total_batch = reader.n_train_clips // batch_size
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print("Variable Initialized")
        for epoch in range(num_epochs):
            reader.initialize(True)  # reshuffle the training set each epoch
            for i in range(total_batch):
                train_x = reader.next_batch() / 255.  # scale pixels to [0, 1]
                # Fresh prior sample (uniform over [0, 1)) shaped like z_sample.
                train_z_sample = np.random.random(z_sample.shape.as_list())

                # Reconstruction loss (encoder + decoder)
                _, loss_likelihood = sess.run(
                    [train_op_ae, neg_marginal_likelihood],
                    feed_dict={x: train_x, z_sample: train_z_sample})
                # Discriminator loss
                _, d_loss = sess.run(
                    [train_op_d, D_loss],
                    feed_dict={x: train_x, z_sample: train_z_sample})
                # Generator (encoder) loss -- updated twice per step
                for _ in range(2):
                    _, g_loss = sess.run(
                        [train_op_g, G_loss],
                        feed_dict={x: train_x, z_sample: train_z_sample})

                tot_loss = loss_likelihood + d_loss + g_loss
                print(" >> [%03d - %d/%d]: L_tot %03.2f, L_likelihood %03.2f, d_loss %03.2f, g_loss %03.2f"
                      % (epoch, i, total_batch, tot_loss, loss_likelihood, d_loss, g_loss))

            # print cost every epoch (values from the final step of the epoch)
            print("epoch %03d: L_tot %03.2f, L_likelihood %03.2f, d_loss %03.2f, g_loss %03.2f"
                  % (epoch, tot_loss, loss_likelihood, d_loss, g_loss))