def test(net, test_input_image_dir, direction):
    # prepare dataset
    test_dataset = helper.Dataset(test_input_image_dir, convert_to_lab_color=False,
                                  direction=direction, is_test=True)

    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, tf.train.latest_checkpoint('./checkpoints'))

        # build the generator op once, outside the loop, so each iteration does not
        # add new nodes to the graph; pix2pix conventionally keeps batch norm in
        # training mode at test time, hence is_training=True here
        gen_op = generator(net.gen_inputs, net.input_channel, reuse=True, is_training=True)

        for ii in range(test_dataset.n_images):
            test_image = test_dataset.get_image_by_index(ii)
            test_a = np.array([x for x, y in test_image])
            test_b = np.array([y for x, y in test_image])

            gen_image = sess.run(gen_op, feed_dict={net.gen_inputs: test_a})

            image_fn = './assets/test_result{:d}_tf.png'.format(ii)
            helper.save_result(image_fn, gen_image, input_image=test_a, target_image=test_b)
def train(net, epochs, batch_size, train_input_image_dir, test_image,
          direction, dataset_name, print_every=30):
    losses = []
    steps = 0

    # prepare saver for saving the trained model
    saver = tf.train.Saver()

    # prepare dataset
    train_dataset = helper.Dataset(train_input_image_dir, convert_to_lab_color=False,
                                   direction=direction, is_test=False)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        # build the test-time generator op once so the epoch loop does not keep
        # adding new nodes to the graph
        test_gen_op = generator(net.gen_inputs, net.input_channel, reuse=True, is_training=False)

        for e in range(epochs):
            for ii in range(train_dataset.n_images // batch_size):
                steps += 1

                # returns a list of tuples: [(inputs, targets), ..., (inputs, targets)]
                batch_images_tuple = train_dataset.get_next_batch(batch_size)

                a = np.array([x for x, y in batch_images_tuple])
                b = np.array([y for x, y in batch_images_tuple])

                fd = {
                    net.dis_inputs: a,
                    net.dis_targets: b,
                    net.gen_inputs: a
                }
                d_opt_out = sess.run(net.d_train_opt, feed_dict=fd)
                g_opt_out = sess.run(net.g_train_opt, feed_dict=fd)

                if steps % print_every == 0:
                    # every print_every steps, evaluate the losses and print them
                    train_loss_d = net.d_loss.eval(fd)
                    train_loss_g = net.g_loss.eval(fd)
                    train_loss_gan = net.gen_loss_GAN.eval(fd)
                    train_loss_l1 = net.gen_loss_L1.eval(fd)

                    print("Epoch {}/{}...".format(e + 1, epochs),
                          "Discriminator Loss: {:.4f}...".format(train_loss_d),
                          "Generator Loss GAN: {:.4f}".format(train_loss_gan),
                          "Generator Loss L1: {:.4f}".format(train_loss_l1),
                          "Generator Loss: {:.4f}".format(train_loss_g))

                    # save losses to view after training
                    losses.append((train_loss_d, train_loss_gan))

            # save generated images after every epoch
            test_a = np.array([x for x, y in test_image])
            gen_image = sess.run(test_gen_op, feed_dict={net.gen_inputs: test_a})

            image_fn = './assets/epoch_{:d}_tf.png'.format(e)
            helper.save_result(image_fn, gen_image)

            # save a checkpoint (overwritten each epoch)
            ckpt_fn = './checkpoints/pix2pix-{}.ckpt'.format(dataset_name)
            saver.save(sess, ckpt_fn)

    return losses
def train_celeb(epochs, batch_size, z_dim, learning_rate, beta1):
    """
    Train the GAN using the CelebA dataset
    :param epochs: number of epochs to train
    """
    celeba_dataset = helper.Dataset('celeba', glob(os.path.join(DATA_DIR, 'img_align_celeba/*.jpg')))
    with tf.Graph().as_default():
        train(epochs, batch_size, z_dim, learning_rate, beta1,
              celeba_dataset.get_batches, celeba_dataset.shape, celeba_dataset.image_mode)
def train_mnist(epochs, batch_size, z_dim, learning_rate, beta1):
    """
    Train the GAN using the MNIST dataset
    :param epochs: number of epochs to train
    """
    mnist_dataset = helper.Dataset('mnist', glob(os.path.join(DATA_DIR, 'mnist/*.jpg')))
    with tf.Graph().as_default():
        train(epochs, batch_size, z_dim, learning_rate, beta1,
              mnist_dataset.get_batches, mnist_dataset.shape, mnist_dataset.image_mode)
def main():
    # hyperparameters
    z_size = 100
    learning_rate = 0.0002
    batch_size = 64
    epochs = 20
    alpha = 0.2
    beta1 = 0.5
    smooth = 0.0
    # show_n_images = 25

    # get data
    celebA_dataset = helper.Dataset(
        glob(os.path.join(dataset_dir, 'img_align_celeba/*.jpg')))

    # create the network
    net = DCGAN(celebA_dataset.shape, z_size, learning_rate,
                alpha=alpha, beta1=beta1, smooth=smooth)

    assets_dir = './assets/'
    if not os.path.isdir(assets_dir):
        os.mkdir(assets_dir)

    # start training
    start_time = time.time()
    losses = train(net, epochs, batch_size, celebA_dataset.get_batches, celebA_dataset.shape)
    end_time = time.time()
    total_time = end_time - start_time
    print('Elapsed time: ', total_time)  # 20 epochs: 22986.99 s

    # plot losses
    fig, ax = plt.subplots()
    losses = np.array(losses)
    plt.plot(losses.T[0], label='Discriminator', alpha=0.5)
    plt.plot(losses.T[1], label='Generator', alpha=0.5)
    plt.title("Training Losses")
    plt.legend()
    plt.savefig('./assets/losses_tf.png')

    # create an animated gif from the per-epoch result images
    images = []
    for e in range(epochs):
        image_fn = './assets/epoch_{:d}_tf.png'.format(e)
        images.append(imageio.imread(image_fn))
    imageio.mimsave('./assets/by_epochs_tf.gif', images, fps=3)
def main(do_train=True):
    assets_dir = './assets/'
    if not os.path.isdir(assets_dir):
        os.mkdir(assets_dir)

    # hyperparameters
    learning_rate = 0.0002
    n_epochs = 200
    batch_size = 1

    pix2pix = Pix2Pix(learning_rate)

    # configure parameters
    dataset_name = 'cityscapes'
    train_name = 'train'
    test_name = 'val'
    direction = 'BtoA'
    train_input_image_dir = '../Data_sets/{}/{}/'.format(dataset_name, train_name)
    test_input_image_dir = '../Data_sets/{}/{}/'.format(dataset_name, test_name)

    if do_train:
        test_dataset = helper.Dataset(test_input_image_dir, convert_to_lab_color=False,
                                      direction=direction, is_test=True)
        test_single_image = test_dataset.get_image_by_index(0)

        start_time = time.time()
        losses = train(pix2pix, n_epochs, batch_size, train_input_image_dir,
                       test_single_image, direction, dataset_name)
        end_time = time.time()
        total_time = end_time - start_time
        print('Elapsed time: ', total_time)

        fig, ax = plt.subplots()
        losses = np.array(losses)
        plt.plot(losses.T[0], label='Discriminator', alpha=0.5)
        plt.plot(losses.T[1], label='Generator', alpha=0.5)
        plt.title("Training Losses")
        plt.legend()
        plt.savefig('./assets/losses_tf.png')
    else:
        test(pix2pix, test_input_image_dir, direction)

    return 0
                count = count + 1
                iterr = count * show_every

                # show example output for the generator
                images_grid = show_generator_output(sess, 25, input_z, data_shape[3], data_image_mode)
                dst = os.path.join("output", str(epoch_i), str(iterr) + ".png")
                pyplot.imsave(dst, images_grid)

            # save the model every 10 epochs
            if epoch_i % 10 == 0:
                if not os.path.exists('./model/'):
                    os.makedirs('./model')
                saver.save(sess, './model/' + str(epoch_i))


batch_size = 64
z_dim = 100
learning_rate = 0.00025
beta1 = 0.45

"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
epochs = 100

celeba_dataset = helper.Dataset('celeba', glob(os.path.join('img_align_celeba/*.jpg')))
with tf.Graph().as_default():
    train(epochs, batch_size, z_dim, learning_rate, beta1,
          celeba_dataset.get_batches, celeba_dataset.shape, celeba_dataset.image_mode)
# In[14]:

# size of input image for the discriminator
real_size = (128, 128, 3)

# size of latent vector to the generator
z_dim = 100
learning_rate_D = .00005  # thanks to Alexia Jolicoeur Martineau https://ajolicoeur.wordpress.com/cats/
learning_rate_G = 2e-4    # thanks to Alexia Jolicoeur Martineau https://ajolicoeur.wordpress.com/cats/
batch_size = 64
epochs = 215
alpha = 0.2
beta1 = 0.5

# Create the network
# model = DGAN(real_size, z_size, learning_rate, alpha, beta1)


# In[15]:

# load the data and train the network here
dataset = helper.Dataset(glob(os.path.join(data_resized_dir, '*.jpg')))
with tf.Graph().as_default():
    losses, samples = train(epochs, batch_size, z_dim, learning_rate_D, learning_rate_G, beta1,
                            dataset.get_batches, dataset.shape, dataset.image_mode, alpha)
def main():
    # entry point of the file
    # prepare directories for assets (images saved during training) and checkpoints
    assets_dir = './assets/'
    ckpt_dir = './checkpoints/'
    if not os.path.isdir(assets_dir):
        os.mkdir(assets_dir)
    if not os.path.isdir(ckpt_dir):
        os.mkdir(ckpt_dir)

    # parameters to run with
    with open('./training_parameters.json') as json_data:
        parameter_set = json.load(json_data)

    # start working
    for param in parameter_set:
        fn_ext = param['file_extension']
        dataset_name = param['dataset_name']
        dataset_base_dir = param['dataset_base_dir']
        epochs = param['epochs']
        batch_size = param['batch_size']
        im_size = param['im_size']
        im_channel = param['im_channel']
        do_flip = param['do_flip']
        is_test = param['is_test']

        # make a directory per dataset
        current_assets_dir = './assets/{}/'.format(dataset_name)
        if not os.path.isdir(current_assets_dir):
            os.mkdir(current_assets_dir)

        # set dataset folders according to the dataset being used
        train_dataset_dir_u = dataset_base_dir + '{:s}/train/A/'.format(dataset_name)
        train_dataset_dir_v = dataset_base_dir + '{:s}/train/B/'.format(dataset_name)
        val_dataset_dir_u = dataset_base_dir + '{:s}/val/A/'.format(dataset_name)
        val_dataset_dir_v = dataset_base_dir + '{:s}/val/B/'.format(dataset_name)

        # prepare network
        net = DualGAN(im_size=im_size, im_channel_u=im_channel, im_channel_v=im_channel)

        if not is_test:
            # load train & validation datasets
            print(30 * "-")
            print("Training")
            train_data_loader = helper.Dataset(train_dataset_dir_u, train_dataset_dir_v, fn_ext,
                                               im_size, im_channel, im_channel,
                                               do_flip=do_flip, do_shuffle=True)
            val_data_loader = helper.Dataset(val_dataset_dir_u, val_dataset_dir_v, fn_ext,
                                             im_size, im_channel, im_channel,
                                             do_flip=False, do_shuffle=False)

            # start training
            start_time = time.time()
            train(net, dataset_name, train_data_loader, val_data_loader, epochs, batch_size)
            end_time = time.time()
            total_time = end_time - start_time

            test_result_str = '[Training]: Data: {:s}, Epochs: {:3d}, Batch_size: {:2d}, Elapsed time: {:3f}\n'.format(
                dataset_name, epochs, batch_size, total_time)
            print(test_result_str)

            with open('./assets/test_summary.txt', 'a') as f:
                f.write(test_result_str)
        else:
            print(30 * "-")
            print("Validation")

            # load the validation dataset
            val_data_loader = helper.Dataset(val_dataset_dir_u, val_dataset_dir_v, fn_ext,
                                             im_size, im_channel, im_channel,
                                             do_flip=False, do_shuffle=False)
            # validation
            test(net, dataset_name, val_data_loader)
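# For reference, a minimal sketch of what './training_parameters.json' is expected
# to look like, inferred from the keys read in main() above. The concrete values
# (dataset name, paths, sizes) are illustrative assumptions only:
#
# [
#     {
#         "file_extension": "*.jpg",
#         "dataset_name": "sketch-photo",
#         "dataset_base_dir": "../Data_sets/",
#         "epochs": 100,
#         "batch_size": 1,
#         "im_size": 256,
#         "im_channel": 1,
#         "do_flip": true,
#         "is_test": false
#     }
# ]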
        # show generator output samples so we can see the progress during training
        if batch_counter % 10 == 0:
            z_dim = input_z.get_shape().as_list()[-1]
            Z_noise = np.random.uniform(-1, 1, size=[16, z_dim])
            samples = sess.run(generator(input_z, image_channels, False),
                               feed_dict={input_z: Z_noise})
            fig = plote(samples)
            plt.savefig('out_classic/{}.png'.format(str(i).zfill(3)), bbox_inches='tight')
            i += 1
            plt.close(fig)


batch_size = 32
z_dim = 150
learning_rate = 0.0002
beta1 = 0.5
n_images = 25
epochs = 20

celeba_dataset = helper.Dataset(DATASET_NAME, glob(os.path.join(data_dir, images_dir)))
with tf.Graph().as_default():
    train(epochs, batch_size, z_dim, learning_rate, beta1,
          celeba_dataset.get_batches, celeba_dataset.shape, celeba_dataset.image_mode)
parser.add_argument('--loss_each', help='log loss to tensorboard each (default=10)', default=10)
parser.add_argument('--image_each', help='log image to tensorboard each (default=100)', default=100)
parser.add_argument('--log_dir', help='set tensorboard log dir (default=log/mnist)', default='log/mnist')
parser.add_argument('--data_dir', help='set data dir (default=data/mnist)', default='data/mnist')
parser.add_argument('--out_dir', help='set output dir (default=output)', default='output')

args = parser.parse_args()

if args.train:
    # run training

    # create GAN
    gan = MnistGAN()

    # get data
    mnist_dataset = helper.Dataset('mnist', glob(args.data_dir + '/*.jpg'))
    print("number of files: {}".format(len(glob(args.data_dir + '/*.jpg'))))

    gan.train(int(args.epochs), args.batch_size, args.z_dim, args.lrate, args.beta1,
              mnist_dataset.get_batches, mnist_dataset.shape, mnist_dataset.image_mode,
              args.loss_each, args.image_each, args.log_dir, args.save, args.out_dir)
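# A hypothetical invocation, assuming the script is saved as gan_mnist.py and that
# the parser also defines --train, --epochs, --batch_size, --z_dim, --lrate, --beta1
# and --save (they are read from args above, but their add_argument calls are not
# shown in this excerpt):
#
#   python gan_mnist.py --train --epochs 2 --batch_size 64 --z_dim 100 \
#       --lrate 0.0002 --beta1 0.5 --data_dir data/mnist --out_dir output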
parser.add_argument('--out_dir', help='set output dir (default=output)', default='output')
parser.add_argument('--restore', help='restore file before training', default=None)

args = parser.parse_args()

if args.setup:
    # run setup
    setup()

if args.train:
    # run training

    # create GAN
    gan = FaceGAN()

    # get data
    celeba_dataset = helper.Dataset('celeba', glob(args.in_dir + '/*.jpg'))
    print("number of files: {}".format(len(glob(args.in_dir + '/*.jpg'))))

    gan.train(int(args.epochs), args.batch_size, args.z_dim, args.lrate, args.beta1,
              celeba_dataset.get_batches, args.loss_each, args.image_each,
              args.log_dir, args.save, args.out_dir, args.restore)
import pickle as pkl
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets('MNIST_data')

data_dir = './data'

import os
from glob import glob
import helper

helper.download_extract('mnist', data_dir)
helper.download_extract('celeba', data_dir)

mnist_dataset = helper.Dataset('mnist', glob(os.path.join(data_dir, 'mnist/*.jpg')))
N, h, w, d = mnist_dataset.shape

# Size of input image to discriminator (28x28 MNIST images, flattened)
input_size = 784
# Size of latent vector to generator
z_dim = 128
# Sizes of hidden layers in generator and discriminator
g_hidden_size = 128
d_hidden_size = 128
# Leak factor for leaky ReLU
alpha = 0.01
n_units = 128
# Label smoothing
smooth = 0.05


def model_inputs(real_dim, z_dim):
    # the original body was missing; a minimal sketch of the usual placeholder
    # setup for a fully connected GAN (real images and latent vectors)
    inputs_real = tf.placeholder(tf.float32, (None, real_dim), name='input_real')
    inputs_z = tf.placeholder(tf.float32, (None, z_dim), name='input_z')
    return inputs_real, inputs_z
    return losses


# hyperparameters
z_size = 100
learning_rate = 0.0002
batch_size = 128
epochs = 20
alpha = 0.2
beta1 = 0.5
smooth = 0.0
# show_n_images = 25

# get data
celebA_dataset = helper.Dataset(glob(os.path.join(dataset_dir, 'img_align_celeba/*.jpg')))

# create the network
net = DCGAN(celebA_dataset.shape, z_size, learning_rate,
            alpha=alpha, beta1=beta1, smooth=smooth)

assets_dir = './assets/'
if not os.path.isdir(assets_dir):
    os.mkdir(assets_dir)

# start training
start_time = time.time()
losses = train(net, epochs, batch_size, celebA_dataset.get_batches, celebA_dataset.shape)
end_time = time.time()
total_time = end_time - start_time
print('Elapsed time: ', total_time)
batch_size = 100

train_set = []
landmarks_frame = pd.read_csv('./face_landmarks_detects.csv')
file_list = landmarks_frame.image_name.values.tolist()

avgP_container = np.load('f_avgP_list.npz')
emb_container = np.load('f_emb_list.npz')

# npz keys are 'arr_0', 'arr_1', ...; sort them numerically. Note that
# x.strip('arr_') only works because the digits never contain a, r or _;
# int(x.split('_')[1]) would be a more robust way to extract the index.
for key in sorted(emb_container, key=lambda x: int(x.strip('arr_'))):
    batch = avgP_container[key], emb_container[key]
    if len(batch[0]) == batch_size:
        train_set.append(batch)

t_dataset = helper.Dataset('nf', file_list, 160)
print(len(train_set))

test_set = []
t_file_list = glob('./SNF_TESTSET/*')
t_avgP_container = np.load('f_avgP_list_wild.npz')
t_emb_container = np.load('f_emb_list_wild.npz')

for key in sorted(t_emb_container, key=lambda x: int(x.strip('arr_'))):
    batch = t_avgP_container[key], t_emb_container[key]
    test_set.append(batch)

t_t_dataset = helper.Dataset('nf', t_file_list, 160)
# ### MNIST
# Test your GAN model on MNIST. After 2 epochs, the GAN should be able to generate
# images that look like handwritten digits. Make sure the generator's loss is below
# the discriminator's loss, or close to 0.

# In[13]:

batch_size = 64
z_dim = 100
learning_rate = 0.0002
beta1 = 0.3

"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
epochs = 2

mnist_dataset = helper.Dataset('mnist', glob(os.path.join(data_dir, 'mnist/*.jpg')))
with tf.Graph().as_default():
    train(epochs, batch_size, z_dim, learning_rate, beta1,
          mnist_dataset.get_batches, mnist_dataset.shape, mnist_dataset.image_mode)


# ### CelebA
# Run your GAN model on CelebA. On a typical GPU, each epoch takes about 20 minutes.
# You can run all the epochs, or stop as soon as the GAN starts producing realistic
# face images.

# In[14]:

batch_size = 64
z_dim = 100
learning_rate = 0.0001
beta1 = 0.3

"""
    result = sess.run(
        generator(example_z, out_channel_dim, is_train=False, reuse=True))
        # is_train=False marks the non-training phase; reuse=True reuses existing variables
        # feed_dict={input_z: example_z})  # not needed: example_z is passed to generator directly

    images_grid = helper.images_square_grid(result, image_mode)
    pyplot.imshow(images_grid, cmap=cmap)
    pyplot.show()


# In[11]:

# Create the dataset; the Dataset class is already defined in helper.py
mnist_dataset = helper.Dataset(
    'mnist', glob(os.path.join('F:/uav+ai/data_for_neural_network/', 'mnist/*.jpg')))
print(mnist_dataset.shape)
print(mnist_dataset.image_mode)

for batch_images in mnist_dataset.get_batches(128):
    print(np.shape(batch_images))
    break


# ### Training
# Implement the `train` function to build and train the GAN model. Remember to use
# the following functions you have already completed:
# - `model_inputs(image_width, image_height, image_channels, z_dim)`
# - `model_loss(input_real, input_z, out_channel_dim)`
# - `model_opt(d_loss, g_loss, learning_rate, beta1)`
#
# Use the `show_generator_output` function to display the output of the `generator`
# during training.
#
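# A minimal sketch of how `train` could wire the pieces listed above. The function
# names and signatures (`model_inputs`, `model_loss`, `model_opt`,
# `show_generator_output`) come from the comments; the batch rescaling and the
# show cadence are illustrative assumptions, not the original implementation.
def train(epoch_count, batch_size, z_dim, learning_rate, beta1,
          get_batches, data_shape, data_image_mode):
    _, image_width, image_height, image_channels = data_shape
    input_real, input_z = model_inputs(image_width, image_height, image_channels, z_dim)
    d_loss, g_loss = model_loss(input_real, input_z, image_channels)
    d_opt, g_opt = model_opt(d_loss, g_loss, learning_rate, beta1)

    steps = 0
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for epoch_i in range(epoch_count):
            for batch_images in get_batches(batch_size):
                steps += 1
                # assuming get_batches yields images scaled to [-0.5, 0.5],
                # rescale them to [-1, 1] to match a tanh generator output
                batch_images = batch_images * 2
                batch_z = np.random.uniform(-1, 1, size=(len(batch_images), z_dim))

                # one discriminator step, then one generator step
                sess.run(d_opt, feed_dict={input_real: batch_images, input_z: batch_z})
                sess.run(g_opt, feed_dict={input_real: batch_images, input_z: batch_z})

                if steps % 100 == 0:
                    # visualize generator progress
                    show_generator_output(sess, 25, input_z, image_channels, data_image_mode)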