def main():
    # MNIST Dataset load
    mnist = input_data.read_data_sets('../data/MNIST_data', one_hot=True)

    # GPU configure
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as s:
        # GAN Model
        model = infogan.InfoGAN(s, is_train=False)

        s.run(tf.global_variables_initializer())

        # Restore trained weights
        saver = tf.train.Saver()
        ckpt = tf.train.get_checkpoint_state('./model/')
        if ckpt and ckpt.model_checkpoint_path:
            ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
            saver.restore(s, os.path.join('./model/', ckpt_name))
        else:
            print("Cannot restore checkpoint!")
            return False

        sample_z = np.random.uniform(-1., 1., [model.sample_num, model.z_dim]).astype(np.float32)

        # Create conditional one-hot vector, with index 5 = 1
        sample_ccat = np.zeros(shape=[model.sample_num, model.n_cat])
        sample_ccat[:, 5] = 1
        sample_ccont = np.random.uniform(-1., 1., [model.sample_num, model.n_cont])
        sample_c = np.concatenate((sample_ccat, sample_ccont), axis=1)
        print(sample_c[0])

        sample_x, _ = mnist.train.next_batch(model.sample_num)
        sample_x = np.reshape(sample_x, [-1, model.input_height, model.input_width, model.input_channel])

        samples = s.run(model.g,
                        feed_dict={
                            model.x: sample_x,
                            model.z: sample_z,
                            model.c: sample_c,
                        })
        samples = np.reshape(samples, [-1, model.output_height, model.output_width, model.input_channel])

        # Export image generated by model G
        sample_image_height = model.sample_size
        sample_image_width = model.sample_size
        sample_dir = results['output'] + 'test.png'

        # Generated image save
        iu.save_images(samples,
                       size=[sample_image_height, sample_image_width],
                       image_path=sample_dir)

    # Close tf.Session
    s.close()
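# ---------------------------------------------------------------------------
# A minimal sketch, not part of the original script: the categorical code above
# is hard-coded to index 5; the helper below sweeps every one of model.n_cat
# categories with the same z-noise and continuous code, so each saved grid shows
# only the effect of the categorical factor. It assumes the same model, mnist,
# iu, and results objects used in main(); the name export_per_category is
# hypothetical.
def export_per_category(s, model, mnist):
    sample_x, _ = mnist.train.next_batch(model.sample_num)
    sample_x = np.reshape(sample_x, [-1, model.input_height, model.input_width, model.input_channel])

    # One fixed z-noise and continuous code, so only the categorical part varies
    sample_z = np.random.uniform(-1., 1., [model.sample_num, model.z_dim]).astype(np.float32)
    sample_ccont = np.random.uniform(-1., 1., [model.sample_num, model.n_cont])

    for cat in range(model.n_cat):
        sample_ccat = np.zeros(shape=[model.sample_num, model.n_cat])
        sample_ccat[:, cat] = 1  # one-hot categorical code for this category
        sample_c = np.concatenate((sample_ccat, sample_ccont), axis=1)

        samples = s.run(model.g,
                        feed_dict={
                            model.x: sample_x,
                            model.z: sample_z,
                            model.c: sample_c,
                        })
        samples = np.reshape(samples, [-1, model.output_height, model.output_width, model.input_channel])

        iu.save_images(samples,
                       size=[model.sample_size, model.sample_size],
                       image_path=results['output'] + 'test_cat_{:02d}.png'.format(cat))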
def main():
    start_time = time.time()  # Clocking start

    # MNIST Dataset load
    mnist = DataSet().data

    # GPU configure
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as s:
        # InfoGAN Model
        model = infogan.InfoGAN(s)

        # Initializing
        s.run(tf.global_variables_initializer())

        # Fixed sample batch: image, noise, and latent code
        sample_x, sample_y = mnist.test.next_batch(model.sample_num)
        sample_x = np.reshape(sample_x, [-1] + model.image_shape[1:])
        sample_z = np.random.uniform(-1., 1., [model.sample_num, model.z_dim]).astype(np.float32)
        sample_c = np.concatenate((sample_y, np.zeros([model.sample_num, model.n_cont])), axis=1)

        d_overpowered = False
        for step in range(train_step['global_step']):
            batch_x, batch_y = mnist.train.next_batch(model.batch_size)
            batch_x = np.reshape(batch_x, [-1] + model.image_shape[1:])
            batch_z = np.random.uniform(-1., 1., [model.batch_size, model.z_dim]).astype(np.float32)
            batch_c = np.concatenate((batch_y, np.random.uniform(-1., 1., [model.batch_size, 2])), axis=1)

            # Update D network (skipped while D overpowers G)
            if not d_overpowered:
                _, d_loss = s.run([model.d_op, model.d_loss],
                                  feed_dict={
                                      model.x: batch_x,
                                      model.z: batch_z,
                                      model.c: batch_c,
                                  })

            # Update G network
            _, g_loss = s.run([model.g_op, model.g_loss],
                              feed_dict={
                                  model.x: batch_x,
                                  model.z: batch_z,
                                  model.c: batch_c,
                              })

            d_overpowered = d_loss < g_loss / 2

            # Logging
            if step % train_step['logging_interval'] == 0:
                batch_x, batch_y = mnist.test.next_batch(model.batch_size)
                batch_x = np.reshape(batch_x, [-1] + model.image_shape[1:])
                batch_z = np.random.uniform(-1., 1., [model.batch_size, model.z_dim]).astype(np.float32)
                batch_c = np.concatenate((batch_y, np.random.uniform(-1., 1., [model.batch_size, 2])), axis=1)

                d_loss, g_loss, summary = s.run([model.d_loss, model.g_loss, model.merged],
                                                feed_dict={
                                                    model.x: batch_x,
                                                    model.z: batch_z,
                                                    model.c: batch_c,
                                                })

                d_overpowered = d_loss < g_loss / 2

                # Print loss
                print("[+] Step %08d => " % step,
                      " D loss : {:.8f}".format(d_loss),
                      " G loss : {:.8f}".format(g_loss))

                # Generate samples with G from the fixed sample image, noise, and code
                samples = s.run(model.g,
                                feed_dict={
                                    model.x: sample_x,
                                    model.z: sample_z,
                                    model.c: sample_c,
                                })

                # Summary saver
                model.writer.add_summary(summary, step)

                # Export image generated by model G
                sample_image_height = model.sample_size
                sample_image_width = model.sample_size
                sample_dir = results['output'] + 'train_{:08d}.png'.format(step)

                # Generated image save
                iu.save_images(samples,
                               size=[sample_image_height, sample_image_width],
                               image_path=sample_dir)

                # Model save
                model.saver.save(s, results['model'], global_step=step)

    end_time = time.time() - start_time  # Clocking end

    # Elapsed time
    print("[+] Elapsed time {:.8f}s".format(end_time))

    # Close tf.Session
    s.close()
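# ---------------------------------------------------------------------------
# A minimal sketch, not in the original script: the fixed sample code above
# reuses real test labels with zeroed continuous factors. An alternative is to
# sweep every digit class explicitly, one row per category, the way the CelebA
# script below builds its sample code. This assumes model.sample_num == 100
# (a 10x10 grid) and the same 10-way categorical plus model.n_cont continuous
# layout; the name make_sweep_code is hypothetical.
def make_sweep_code(model):
    cat = np.zeros((model.sample_num, 10), dtype=np.float32)
    for i in range(10):
        cat[10 * i:10 * (i + 1), i] = 1  # row i shows digit class i
    cont = np.zeros((model.sample_num, model.n_cont), dtype=np.float32)
    return np.concatenate((cat, cont), axis=1)
# Usage (would replace the sample_c assignment in main()): sample_c = make_sweep_code(model)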
def main():
    start_time = time.time()  # Clocking start

    # Loading CelebA DataSet
    labels = [
        'Black_Hair', 'Blond_Hair', 'Blurry', 'Eyeglasses', 'Gray_Hair',
        'Male', 'Smiling', 'Wavy_Hair', 'Wearing_Hat', 'Young'
    ]
    ds = DataSet(height=64,
                 width=64,
                 channel=3,
                 ds_image_path="/home/zero/hdd/DataSet/CelebA/CelebA-64.h5",
                 ds_label_path="/home/zero/hdd/DataSet/CelebA/Anno/list_attr_celeba.txt",
                 attr_labels=labels,
                 # ds_image_path="D:\\DataSet/CelebA/Img/img_align_celeba/",
                 ds_type="CelebA",
                 use_save=False,
                 save_file_name="D:\\DataSet/CelebA/CelebA-64.h5",
                 save_type="to_h5",
                 use_img_scale=False,
                 # img_scale="-1,1"
                 )

    # Saving sample images
    test_images = np.reshape(iu.transform(ds.images[:16], inv_type='127'), (16, 64, 64, 3))
    iu.save_images(test_images,
                   size=[4, 4],
                   image_path=results['output'] + 'sample.png',
                   inv_type='127')

    ds_iter = DataIterator(x=ds.images,
                           y=ds.labels,
                           batch_size=train_step['batch_size'],
                           label_off=False)

    # GPU configure
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as s:
        # InfoGAN Model
        model = infogan.InfoGAN(s,
                                height=64,
                                width=64,
                                channel=3,
                                batch_size=train_step['batch_size'],
                                n_categories=len(ds.labels))

        # Fixed z-noise
        sample_z = np.random.uniform(-1., 1., [model.sample_num, model.z_dim]).astype(np.float32)

        # Initializing
        s.run(tf.global_variables_initializer())

        # Load model & Graph & Weights
        saved_global_step = 0
        ckpt = tf.train.get_checkpoint_state('./model/')
        if ckpt and ckpt.model_checkpoint_path:
            model.saver.restore(s, ckpt.model_checkpoint_path)
            saved_global_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
            print("[+] global step : %s" % saved_global_step, " successfully loaded")
        else:
            print('[-] No checkpoint file found')

        global_step = saved_global_step
        start_epoch = global_step // (ds.num_images // model.batch_size)    # recover n_epoch
        ds_iter.pointer = saved_global_step % (ds.num_images // model.batch_size)  # recover n_iter

        for epoch in range(start_epoch, train_step['epochs']):
            for batch_x, batch_y in ds_iter.iterate():
                batch_x = iu.transform(batch_x, inv_type='127')
                batch_x = np.reshape(batch_x, (model.batch_size, model.height, model.width, model.channel))

                batch_z = np.random.uniform(-1., 1., [model.batch_size, model.z_dim]).astype(np.float32)
                batch_z_con = gen_continuous(model.batch_size, model.n_continous_factor)
                batch_z_cat = gen_category(model.batch_size, model.n_categories)
                batch_c = np.concatenate((batch_z_con, batch_z_cat), axis=1)

                # Update D network
                _, d_loss = s.run([model.d_op, model.d_loss],
                                  feed_dict={
                                      model.c: batch_c,
                                      model.x: batch_x,
                                      model.z: batch_z,
                                  })

                # Update G network
                _, g_loss = s.run([model.g_op, model.g_loss],
                                  feed_dict={
                                      model.c: batch_c,
                                      model.x: batch_x,
                                      model.z: batch_z,
                                  })

                # Logging
                if global_step % train_step['logging_interval'] == 0:
                    summary = s.run(model.merged,
                                    feed_dict={
                                        model.c: batch_c,
                                        model.x: batch_x,
                                        model.z: batch_z,
                                    })

                    # Print loss
                    print("[+] Epoch %02d Step %08d => " % (epoch, global_step),
                          " D loss : {:.8f}".format(d_loss),
                          " G loss : {:.8f}".format(g_loss))

                    # Generate samples with G from the fixed z-noise and a structured code:
                    # row i sweeps the first continuous factor from -2 to 2 within category i
                    sample_z_con = np.zeros((model.sample_num, model.n_continous_factor))
                    for i in range(10):
                        sample_z_con[10 * i:10 * (i + 1), 0] = np.linspace(-2, 2, 10)
                    sample_z_cat = np.zeros((model.sample_num, model.n_categories))
                    for i in range(10):
                        sample_z_cat[10 * i:10 * (i + 1), i] = 1
                    sample_c = np.concatenate((sample_z_con, sample_z_cat), axis=1)

                    samples = s.run(model.g,
                                    feed_dict={
                                        model.c: sample_c,
                                        model.z: sample_z,
                                    })

                    # Summary saver
                    model.writer.add_summary(summary, global_step)

                    # Export image generated by model G
                    sample_image_height = model.sample_size
                    sample_image_width = model.sample_size
                    sample_dir = results['output'] + 'train_{:08d}.png'.format(global_step)

                    # Generated image save
                    iu.save_images(samples,
                                   size=[sample_image_height, sample_image_width],
                                   image_path=sample_dir,
                                   inv_type='127')

                    # Model save
                    model.saver.save(s, results['model'], global_step)

                global_step += 1

    end_time = time.time() - start_time  # Clocking end

    # Elapsed time
    print("[+] Elapsed time {:.8f}s".format(end_time))

    # Close tf.Session
    s.close()
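# ---------------------------------------------------------------------------
# gen_continuous() and gen_category() are called in the training loop above but
# are not defined in this excerpt. A minimal sketch of what they are assumed to
# do, based on how their outputs are concatenated into the latent code c:
# uniformly sampled continuous factors and one random one-hot category per sample.
def gen_continuous(batch_size, n_cont):
    # Continuous latent factors sampled uniformly from [-1, 1)
    return np.random.uniform(-1., 1., size=(batch_size, n_cont)).astype(np.float32)


def gen_category(batch_size, n_categories):
    # One-hot categorical latent factor with a uniformly random category per sample
    onehot = np.zeros((batch_size, n_categories), dtype=np.float32)
    onehot[np.arange(batch_size), np.random.randint(0, n_categories, size=batch_size)] = 1.
    return onehot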