def main():
    start_time = time.time()  # Clocking start

    # Loading CelebA DataSet
    ds = DataSet(
        height=64,
        width=64,
        channel=3,
        ds_image_path="/home/zero/hdd/DataSet/CelebA/CelebA-64.h5",
        ds_label_path="/home/zero/hdd/DataSet/CelebA/Anno/list_attr_celeba.txt",
        # ds_image_path="/home/zero/hdd/DataSet/CelebA/Img/img_align_celeba/",
        ds_type="CelebA",
        use_save=False,
        save_file_name="/home/zero/hdd/DataSet/CelebA/CelebA-64.h5",
        save_type="to_h5",
        use_img_scale=False,
        # img_scale="-1,1"
    )

    # Saving sample images
    test_images = np.reshape(iu.transform(ds.images[:16], inv_type='127'), (16, 64, 64, 3))
    iu.save_images(test_images,
                   size=[4, 4],
                   image_path=results['output'] + 'sample.png',
                   inv_type='127')

    ds_iter = DataIterator(x=ds.images,
                           y=None,
                           batch_size=train_step['batch_size'],
                           label_off=True)

    # GPU configure
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as s:
        # EBGAN model, using the pull-away loss (EBGAN-PT)
        model = ebgan.EBGAN(s, enable_pull_away=True)

        # Initializing
        s.run(tf.global_variables_initializer())

        # Load model & Graph & Weights
        saved_global_step = 0
        ckpt = tf.train.get_checkpoint_state('./model/')
        if ckpt and ckpt.model_checkpoint_path:
            model.saver.restore(s, ckpt.model_checkpoint_path)

            saved_global_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
            print("[+] global step : %d" % saved_global_step, " successfully loaded")
        else:
            print('[-] No checkpoint file found')

        global_step = saved_global_step
        start_epoch = global_step // (ds.num_images // model.batch_size)           # recover n_epoch
        ds_iter.pointer = saved_global_step % (ds.num_images // model.batch_size)  # recover n_iter

        for epoch in range(start_epoch, train_step['epochs']):
            for batch_x in ds_iter.iterate():
                batch_x = iu.transform(batch_x, inv_type='127')
                batch_x = np.reshape(batch_x, (model.batch_size, model.height, model.width, model.channel))
                batch_z = np.random.uniform(-1., 1., [model.batch_size, model.z_dim]).astype(np.float32)

                # Update D network
                _, d_loss = s.run([model.d_op, model.d_loss],
                                  feed_dict={
                                      model.x: batch_x,
                                      model.z: batch_z,
                                  })

                # Update G network
                _, g_loss = s.run([model.g_op, model.g_loss],
                                  feed_dict={
                                      model.z: batch_z,
                                  })

                # Logging
                if global_step % train_step['logging_interval'] == 0:
                    summary = s.run(model.merged,
                                    feed_dict={
                                        model.x: batch_x,
                                        model.z: batch_z,
                                    })

                    # Print loss
                    print("[+] Epoch %02d Step %08d => " % (epoch, global_step),
                          " D loss : {:.8f}".format(d_loss),
                          " G loss : {:.8f}".format(g_loss))

                    # Generating samples from G with sample noise
                    sample_z = np.random.uniform(-1., 1., [model.sample_num, model.z_dim]).astype(np.float32)
                    samples = s.run(model.g,
                                    feed_dict={
                                        model.z: sample_z,
                                    })

                    # Summary saver
                    model.writer.add_summary(summary, global_step)

                    # Export image generated by model G
                    sample_image_height = model.sample_size
                    sample_image_width = model.sample_size
                    sample_dir = results['output'] + 'train_{:08d}.png'.format(global_step)

                    # Generated image save
                    iu.save_images(samples,
                                   size=[sample_image_height, sample_image_width],
                                   image_path=sample_dir,
                                   inv_type='127')

                    # Model save
                    model.saver.save(s, results['model'], global_step)

                global_step += 1

    end_time = time.time() - start_time  # Clocking end

    # Elapsed time
    print("[+] Elapsed time {:.8f}s".format(end_time))

    # Close tf.Session
    s.close()
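# ---------------------------------------------------------------------------
# A minimal sketch of the module-level setup these training scripts assume.
# Only the names (time, np, tf, iu, DataSet, DataIterator, train_step,
# results, and the per-script model modules) come from the code itself; every
# concrete value and module path below is an illustrative assumption, not the
# repository's actual configuration.
#
# import time
# import numpy as np
# import tensorflow as tf
#
# import image_utils as iu                      # assumed module name
# from dataset import DataSet, DataIterator     # assumed module name
#
# train_step = {
#     'batch_size': 64,           # assumed
#     'epochs': 25,               # assumed
#     'logging_interval': 1000,   # assumed
# }
#
# results = {
#     'output': './gen_img/',         # assumed
#     'model': './model/model.ckpt',  # assumed
# }
# ---------------------------------------------------------------------------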
def main():
    start_time = time.time()  # Clocking start

    # MNIST DataSet load
    # mnist = DataSet(ds_path="D:/DataSet/mnist/").data

    # Loading Cifar-10 DataSet
    ds = DataSet2(height=32,
                  width=32,
                  channel=3,
                  ds_path="/media/shar/240A27640A2731EA/shared2/Awesome-GANs-master/BGAN/cifar/",
                  ds_name='cifar-10')

    ds_iter = DataIterator(x=iu.transform(ds.train_images, '127'),
                           y=ds.train_labels,
                           batch_size=train_step['batch_size'],
                           label_off=True)  # labels off # maybe someday, i'll change this param's name

    # Real image save
    test_images = iu.transform(ds.test_images[:100], inv_type='127')
    iu.save_images(test_images,
                   size=[10, 10],
                   image_path=results['output'] + 'sample.png',
                   inv_type='127')

    # GPU configure
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as s:
        # BGAN Model
        model = bgan.BGAN(s)

        # Initializing
        s.run(tf.global_variables_initializer())

        # Load model & Graph & Weights
        saved_global_step = 0
        ckpt = tf.train.get_checkpoint_state('./model/')
        if ckpt and ckpt.model_checkpoint_path:
            # Restores from checkpoint
            model.saver.restore(s, ckpt.model_checkpoint_path)

            saved_global_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
            print("[+] global step : %d" % saved_global_step, " successfully loaded")
        else:
            print('[-] No checkpoint file found')

        d_loss = 0.
        d_overpowered = False

        global_step = saved_global_step
        start_epoch = global_step // (len(ds.train_images) // model.batch_size)           # recover n_epoch
        ds_iter.pointer = saved_global_step % (len(ds.train_images) // model.batch_size)  # recover n_iter

        for epoch in range(start_epoch, train_step['epochs']):
            # batch_x, _ = mnist.train.next_batch(model.batch_size)
            batch_x, _ = ds_iter.next_batch()
            batch_x = batch_x.reshape(-1, model.n_input)
            batch_z = np.random.uniform(-1., 1., [model.batch_size, model.z_dim]).astype(np.float32)

            # Update D network, unless D currently overpowers G
            if not d_overpowered:
                _, d_loss = s.run([model.d_op, model.d_loss],
                                  feed_dict={
                                      model.x: batch_x,
                                      model.z: batch_z,
                                  })

            # Update G network
            _, g_loss = s.run([model.g_op, model.g_loss],
                              feed_dict={
                                  model.x: batch_x,
                                  model.z: batch_z,
                              })

            d_overpowered = d_loss < g_loss / 2.

            # Logging
            if global_step % train_step['logging_interval'] == 0:
                batch_x, _ = ds_iter.next_batch()
                batch_x = batch_x.reshape(-1, model.n_input)
                batch_z = np.random.uniform(-1., 1., [model.batch_size, model.z_dim]).astype(np.float32)

                d_loss, g_loss, summary = s.run([model.d_loss, model.g_loss, model.merged],
                                                feed_dict={
                                                    model.x: batch_x,
                                                    model.z: batch_z,
                                                })

                # Print loss
                print("[+] Step %08d => " % global_step,
                      " D loss : {:.8f}".format(d_loss),
                      " G loss : {:.8f}".format(g_loss))

                # Generating samples from G with sample noise
                sample_z = np.random.uniform(-1., 1., [model.sample_num, model.z_dim]).astype(np.float32)
                samples = s.run(model.g,
                                feed_dict={
                                    model.z: sample_z,
                                })
                samples = np.reshape(samples, [-1] + model.image_shape[1:])

                # Summary saver
                model.writer.add_summary(summary, global_step)

                # Export image generated by model G
                sample_image_height = model.sample_size
                sample_image_width = model.sample_size
                sample_dir = results['output'] + 'train_{:08d}.png'.format(global_step)

                # Generated image save
                iu.save_images(samples,
                               size=[sample_image_height, sample_image_width],
                               image_path=sample_dir,
                               inv_type='127')

                # Model save
                model.saver.save(s, results['model'], global_step=global_step)
                print(sample_dir)

            global_step += 1

    end_time = time.time() - start_time  # Clocking end

    # Elapsed time
    print("[+] Elapsed time {:.8f}s".format(end_time))

    # Close tf.Session
    s.close()
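# Note on the `d_overpowered` heuristic above (also used by the DCGAN script
# below): whenever d_loss drops below half of g_loss, the next D update is
# skipped. It is a crude balancing trick to keep the discriminator from
# overpowering the generator; the flag is re-evaluated after every step.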
def main():
    start_time = time.time()  # Clocking start

    # GPU configure
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as s:
        # BEGAN Model
        model = began.BEGAN(s)  # BEGAN

        # Initializing
        s.run(tf.global_variables_initializer())

        # Celeb-A DataSet images
        ds = DataSet(input_height=32,
                     input_width=32,
                     input_channel=3,
                     mode='r').images
        dataset_iter = DataIterator(ds, None, train_step['batch_size'], label_off=True)

        sample_x = ds[:model.sample_num]
        sample_x = np.reshape(sample_x, [-1] + model.image_shape[1:])
        sample_z = np.random.uniform(-1., 1., [model.sample_num, model.z_dim]).astype(np.float32)

        # Export real image
        valid_image_height = model.sample_size
        valid_image_width = model.sample_size
        sample_dir = results['output'] + 'valid.png'

        # Real image save
        iu.save_images(sample_x,
                       size=[valid_image_height, valid_image_width],
                       image_path=sample_dir)

        global_step = 0
        for epoch in range(train_step['epoch']):
            for batch_images in dataset_iter.iterate():
                batch_x = np.reshape(batch_images, [-1] + model.image_shape[1:])
                batch_z = np.random.uniform(-1., 1., [model.batch_size, model.z_dim]).astype(np.float32)

                # Update D network
                _, d_loss = s.run([model.d_op, model.d_loss],
                                  feed_dict={
                                      model.x: batch_x,
                                      model.z: batch_z,
                                  })

                # Update G network
                _, g_loss = s.run([model.g_op, model.g_loss],
                                  feed_dict={
                                      model.z: batch_z,
                                  })

                # Update k_t
                _, k, m_global = s.run([model.k_update, model.k, model.m_global],
                                       feed_dict={
                                           model.x: batch_x,
                                           model.z: batch_z,
                                       })

                if global_step % train_step['logging_step'] == 0:
                    batch_z = np.random.uniform(-1., 1., [model.batch_size, model.z_dim]).astype(np.float32)

                    # Summary
                    _, k, m_global, d_loss, g_loss, summary = s.run(
                        [model.k_update, model.k, model.m_global, model.d_loss, model.g_loss, model.merged],
                        feed_dict={
                            model.x: batch_x,
                            model.z: batch_z,
                        })

                    # Print loss
                    print("[+] Epoch %04d Step %07d =>" % (epoch, global_step),
                          " D loss : {:.8f}".format(d_loss),
                          " G loss : {:.8f}".format(g_loss),
                          " k : {:.8f}".format(k),
                          " M : {:.8f}".format(m_global))

                    # Summary saver
                    model.writer.add_summary(summary, epoch)

                    # Generating samples from G with sample image and noise
                    samples = s.run(model.g,
                                    feed_dict={
                                        model.x: sample_x,
                                        model.z: sample_z,
                                    })

                    # Export image generated by model G
                    sample_image_height = model.sample_size
                    sample_image_width = model.sample_size
                    sample_dir = results['output'] + 'train_{0}_{1}.png'.format(epoch, global_step)

                    # Generated image save
                    iu.save_images(samples,
                                   size=[sample_image_height, sample_image_width],
                                   image_path=sample_dir)

                    # Model save
                    model.saver.save(s, results['model'], global_step=global_step)

                global_step += 1

    end_time = time.time() - start_time  # Clocking end

    # Elapsed time
    print("[+] Elapsed time {:.8f}s".format(end_time))

    # Close tf.Session
    s.close()
def main():
    start_time = time.time()  # Clocking start

    # Training, test data set
    # Loading Cifar-10 DataSet
    ds = DataSet(height=32,
                 width=32,
                 channel=3,
                 ds_path='D:\\DataSet/cifar/cifar-10-batches-py/',
                 ds_name='cifar-10')

    # Saving sample images
    test_images = np.reshape(iu.transform(ds.test_images[:16], inv_type='127'), (16, 32, 32, 3))
    iu.save_images(test_images,
                   size=[4, 4],
                   image_path=results['output'] + 'sample.png',
                   inv_type='127')

    ds_iter = DataIterator(x=ds.train_images,
                           y=None,
                           batch_size=train_step['batch_size'],
                           label_off=True)

    # GPU configure
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as s:
        # LSGAN Model
        model = lsgan.LSGAN(s, train_step['batch_size'])

        # Initializing variables
        s.run(tf.global_variables_initializer())

        # Load model & Graph & Weights
        saved_global_step = 0
        ckpt = tf.train.get_checkpoint_state('./model/')
        if ckpt and ckpt.model_checkpoint_path:
            # Restores from checkpoint
            model.saver.restore(s, ckpt.model_checkpoint_path)

            saved_global_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
            print("[+] global step : %d" % saved_global_step, " successfully loaded")
        else:
            print('[-] No checkpoint file found')

        global_step = saved_global_step
        start_epoch = global_step // (len(ds.train_images) // model.batch_size)           # recover n_epoch
        ds_iter.pointer = saved_global_step % (len(ds.train_images) // model.batch_size)  # recover n_iter

        for epoch in range(start_epoch, train_step['epoch']):
            for batch_x in ds_iter.iterate():
                batch_x = iu.transform(batch_x, inv_type='127')
                batch_x = np.reshape(batch_x, [-1] + model.image_shape[1:])
                batch_z = np.random.uniform(-1., 1., [model.batch_size, model.z_dim]).astype(np.float32)

                # Update D network
                _, d_loss = s.run([model.d_op, model.d_loss],
                                  feed_dict={
                                      model.x: batch_x,
                                      model.z: batch_z,
                                  })

                # Update G network
                _, g_loss = s.run([model.g_op, model.g_loss],
                                  feed_dict={
                                      model.x: batch_x,
                                      model.z: batch_z,
                                  })

                # Logging
                if global_step % train_step['logging_interval'] == 0:
                    d_loss, g_loss, summary = s.run([model.d_loss, model.g_loss, model.merged],
                                                    feed_dict={
                                                        model.x: batch_x,
                                                        model.z: batch_z,
                                                    })

                    # Print loss
                    print("[+] Epoch %02d Step %08d => " % (epoch, global_step),
                          " D loss : {:.8f}".format(d_loss),
                          " G loss : {:.8f}".format(g_loss))

                    # Generating samples from G with sample noise
                    sample_z = np.random.uniform(-1., 1., [model.sample_num, model.z_dim]).astype(np.float32)
                    samples = s.run(model.g,
                                    feed_dict={
                                        model.z: sample_z,
                                    })

                    # Summary saver
                    model.writer.add_summary(summary, global_step)

                    # Export image generated by model G
                    sample_image_height = model.sample_size
                    sample_image_width = model.sample_size
                    sample_dir = results['output'] + 'train_{:08d}.png'.format(global_step)

                    # Generated image save
                    iu.save_images(samples,
                                   size=[sample_image_height, sample_image_width],
                                   image_path=sample_dir,
                                   inv_type='127')

                    # Model save
                    model.saver.save(s, results['model'], global_step)

                global_step += 1

    end_time = time.time() - start_time  # Clocking end

    # Elapsed time
    print("[+] Elapsed time {:.8f}s".format(end_time))

    # Close tf.Session
    s.close()
def main():
    start_time = time.time()  # Clocking start

    # GPU configure
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as s:
        # AnoGAN Model
        # anomaly detection off (just training the model) -> False
        # anomaly detection on (based on the trained model) -> True
        detection = os.path.exists('./model')

        model = anogan.AnoGAN(s, detect=detection)  # AnoGAN

        # Initializing (must run before restoring, or the restored weights
        # would be overwritten)
        s.run(tf.global_variables_initializer())

        global_step = 0
        if detection:
            # Load model & Graph & Weights
            ckpt = tf.train.get_checkpoint_state('./model/')
            if ckpt and ckpt.model_checkpoint_path:
                # Restores from checkpoint
                model.saver.restore(s, ckpt.model_checkpoint_path)

                global_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
                print("[+] global step : %d" % global_step, " successfully loaded")
            else:
                print('[-] No checkpoint file found')

        # Celeb-A DataSet images
        ds = DataSet(input_height=64,   # in the paper, 108
                     input_width=64,    # in the paper, 108
                     input_channel=3).images

        # To-Do
        # Getting anomaly data

        dataset_iter = DataIterator(ds, None, train_step['batch_size'], label_off=True)

        sample_x = ds[:model.sample_num]
        sample_x = np.reshape(sample_x, [-1] + model.image_shape[1:])
        sample_z = np.random.uniform(-1., 1., [model.sample_num, model.z_dim]).astype(np.float32)

        # Export real image
        valid_image_height = model.sample_size
        valid_image_width = model.sample_size
        sample_dir = results['output'] + 'valid.png'

        # Real image save
        iu.save_images(sample_x,
                       size=[valid_image_height, valid_image_width],
                       image_path=sample_dir)

        for epoch in range(train_step['epoch']):
            for batch_images in dataset_iter.iterate():
                batch_x = np.reshape(batch_images, [-1] + model.image_shape[1:])
                batch_z = np.random.uniform(-1., 1., [model.batch_size, model.z_dim]).astype(np.float32)

                # Update D network
                _, d_loss = s.run([model.d_op, model.d_loss],
                                  feed_dict={
                                      model.x: batch_x,
                                      model.z: batch_z,
                                  })

                # Update G network
                _, g_loss = s.run([model.g_op, model.g_loss],
                                  feed_dict={
                                      model.z: batch_z,
                                  })

                if global_step % train_step['logging_step'] == 0:
                    batch_z = np.random.uniform(-1., 1., [model.batch_size, model.z_dim]).astype(np.float32)

                    # Summary
                    d_loss, g_loss, summary = s.run([model.d_loss, model.g_loss, model.merged],
                                                    feed_dict={
                                                        model.x: batch_x,
                                                        model.z: batch_z,
                                                    })

                    # Print loss
                    print("[+] Epoch %04d Step %07d =>" % (epoch, global_step),
                          " D loss : {:.8f}".format(d_loss),
                          " G loss : {:.8f}".format(g_loss))

                    # Summary saver
                    model.writer.add_summary(summary, epoch)

                    # Generating samples from G with sample image and noise
                    samples = s.run(model.g,
                                    feed_dict={
                                        model.x: sample_x,
                                        model.z: sample_z,
                                    })

                    # Export image generated by model G
                    sample_image_height = model.sample_size
                    sample_image_width = model.sample_size
                    sample_dir = results['output'] + 'train_{0}_{1}.png'.format(epoch, global_step)

                    # Generated image save
                    iu.save_images(samples,
                                   size=[sample_image_height, sample_image_width],
                                   image_path=sample_dir)

                    # Model save
                    model.saver.save(s, results['model'], global_step=global_step)

                global_step += 1

    end_time = time.time() - start_time  # Clocking end

    # Elapsed time
    print("[+] Elapsed time {:.8f}s".format(end_time))

    # Close tf.Session
    s.close()
def main():
    start_time = time.time()  # Clocking start

    # Loading CelebA DataSet
    ds = DataSet(
        height=64,
        width=64,
        channel=3,
        ds_image_path="D:/DataSet/CelebA/CelebA-64.h5",
        ds_label_path="D:/DataSet/CelebA/Anno/list_attr_celeba.txt",
        # ds_image_path="D:/DataSet/CelebA/Img/img_align_celeba/",
        ds_type="CelebA",
        use_save=False,
        save_file_name="D:/DataSet/CelebA/CelebA-64.h5",
        save_type="to_h5",
        use_img_scale=False,
        img_scale="-1,1")

    # Saving sample images
    test_images = np.reshape(iu.transform(ds.images[:100], inv_type='127'), (100, 64, 64, 3))
    iu.save_images(test_images,
                   size=[10, 10],
                   image_path=results['output'] + 'sample.png',
                   inv_type='127')

    ds_iter = DataIterator(x=ds.images,
                           y=None,
                           batch_size=train_step['batch_size'],
                           label_off=True)

    # GPU configure
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as s:
        # MAGAN Model
        model = magan.MAGAN(s)

        # Initializing
        s.run(tf.global_variables_initializer())

        # Load model & Graph & Weights
        saved_global_step = 0
        ckpt = tf.train.get_checkpoint_state('./model/')
        if ckpt and ckpt.model_checkpoint_path:
            model.saver.restore(s, ckpt.model_checkpoint_path)

            saved_global_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
            print("[+] global step : %d" % saved_global_step, " successfully loaded")
        else:
            print('[-] No checkpoint file found')

        n_steps = ds.num_images // model.batch_size  # iterations per epoch

        # Pre-Train
        print("[*] pre-training - getting proper Margin")
        margin = 0  # 3.0585415484215974
        if margin == 0:
            sum_d_loss = 0.
            for i in range(2):
                for batch_x in ds_iter.iterate():
                    batch_x = np.reshape(iu.transform(batch_x, inv_type='127'),
                                         (model.batch_size, model.height, model.width, model.channel))
                    batch_z = np.random.uniform(-1., 1., [model.batch_size, model.z_dim]).astype(np.float32)

                    _, d_real_loss = s.run([model.d_op, model.d_real_loss],
                                           feed_dict={
                                               model.x: batch_x,
                                               model.z: batch_z,
                                               model.m: 0.,
                                           })
                    sum_d_loss += d_real_loss

                print("[*] Epoch {:1d} Sum of d_real_loss : {:.8f}".format(i + 1, sum_d_loss))

            # Initial margin value
            margin = (sum_d_loss / n_steps)
            print("[+] Margin : {0}".format(margin))

        old_margin = margin
        s_g_0 = np.inf  # Sg_0 = infinity

        global_step = saved_global_step
        start_epoch = global_step // (ds.num_images // model.batch_size)           # recover n_epoch
        ds_iter.pointer = saved_global_step % (ds.num_images // model.batch_size)  # recover n_iter

        for epoch in range(start_epoch, train_step['epochs']):
            s_d, s_g = 0., 0.

            for batch_x in ds_iter.iterate():
                batch_x = iu.transform(batch_x, inv_type='127')
                batch_x = np.reshape(batch_x, (model.batch_size, model.height, model.width, model.channel))
                batch_z = np.random.uniform(-1., 1., [model.batch_size, model.z_dim]).astype(np.float32)

                # Update D network
                _, d_loss, d_real_loss = s.run([model.d_op, model.d_loss, model.d_real_loss],
                                               feed_dict={
                                                   model.x: batch_x,
                                                   model.z: batch_z,
                                                   model.m: margin,
                                               })

                # Accumulate D loss on real samples
                s_d += np.sum(d_real_loss)

                # Update G network
                _, g_loss, d_fake_loss = s.run([model.g_op, model.g_loss, model.d_fake_loss],
                                               feed_dict={
                                                   model.x: batch_x,
                                                   model.z: batch_z,
                                                   model.m: margin,
                                               })

                # Accumulate D loss on fake samples
                s_g += np.sum(d_fake_loss)

                # Logging
                if global_step % train_step['logging_interval'] == 0:
                    summary = s.run(model.merged,
                                    feed_dict={
                                        model.x: batch_x,
                                        model.z: batch_z,
                                        model.m: margin,
                                    })

                    # Print loss
                    print("[+] Epoch %03d Global Step %05d => " % (epoch, global_step),
                          " D loss : {:.8f}".format(d_loss),
                          " G loss : {:.8f}".format(g_loss))

                    # Generating samples from G with sample noise
                    sample_z = np.random.uniform(-1., 1., [model.sample_num, model.z_dim]).astype(np.float32)
                    samples = s.run(model.g,
                                    feed_dict={
                                        model.z: sample_z,
                                        model.m: margin,
                                    })

                    # Summary saver
                    model.writer.add_summary(summary, global_step)

                    # Export image generated by model G
                    sample_image_height = model.sample_size
                    sample_image_width = model.sample_size
                    sample_dir = results['output'] + 'train_{:08d}.png'.format(global_step)

                    # Generated image save
                    iu.save_images(samples,
                                   size=[sample_image_height, sample_image_width],
                                   image_path=sample_dir,
                                   inv_type='127')

                    # Model save
                    model.saver.save(s, results['model'], global_step)

                global_step += 1

            # Update margin
            if s_d / n_steps < margin and s_d < s_g and s_g_0 <= s_g:
                margin = s_d / n_steps
                print("[*] Margin updated from {:8f} to {:8f}".format(old_margin, margin))
                old_margin = margin

            s_g_0 = s_g

            # Convergence Measure
            e_d = s_d / n_steps
            e_g = s_g / n_steps
            l_ = e_d + np.abs(e_d - e_g)
            print("[+] Epoch %03d " % epoch, " L : {:.8f}".format(l_))

    end_time = time.time() - start_time  # Clocking end

    # Elapsed time
    print("[+] Elapsed time {:.8f}s".format(end_time))

    # Close tf.Session
    s.close()
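# Note on the MAGAN schedule above: the margin m is lowered to the running
# mean of D's real-sample loss, E_d = S_d / n_steps, only when three
# conditions hold: E_d < m, S_d < S_g, and S_g has stopped decreasing
# (S_g_0 <= S_g). The per-epoch convergence measure printed at the end is
# L = E_d + |E_d - E_g|.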
def main():
    start_time = time.time()  # Clocking start

    # GPU configure
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as s:
        # DCGAN model
        model = dcgan.DCGAN(s, batch_size=train_step['batch_size'])

        # Initializing variables (must run before restoring, or the restored
        # weights would be overwritten)
        s.run(tf.global_variables_initializer())

        # Load model & Graph & Weights
        global_step = 0
        ckpt = tf.train.get_checkpoint_state('./model/')
        if ckpt and ckpt.model_checkpoint_path:
            # Restores from checkpoint
            model.saver.restore(s, ckpt.model_checkpoint_path)

            global_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
            print("[+] global step : %d" % global_step, " successfully loaded")
        else:
            print('[-] No checkpoint file found')

        # Training, test data set
        dataset = DataSet(input_height=32, input_width=32, input_channel=3, name='cifar-100')
        dataset_iter = DataIterator(dataset.train_images, dataset.train_labels, train_step['batch_size'])

        sample_x = dataset.valid_images[:model.sample_num].astype(np.float32) / 255.
        sample_z = np.random.uniform(-1., 1., [model.sample_num, model.z_dim])

        d_overpowered = False  # G loss > D loss * 2

        step = global_step
        cont = int(step / 750)
        for epoch in range(cont, cont + train_step['epoch']):
            for batch_images, _ in dataset_iter.iterate():
                batch_x = batch_images.astype(np.float32) / 255.
                batch_z = np.random.uniform(-1., 1., [train_step['batch_size'], model.z_dim]).astype(np.float32)

                # Update D network, unless D currently overpowers G
                if not d_overpowered:
                    _, d_loss = s.run([model.d_op, model.d_loss],
                                      feed_dict={
                                          model.x: batch_x,
                                          model.z: batch_z,
                                      })

                # Update G network
                _, g_loss = s.run([model.g_op, model.g_loss],
                                  feed_dict={model.z: batch_z})

                d_overpowered = d_loss < g_loss / 2.

                if step % train_step['logging_interval'] == 0:
                    batch_z = np.random.uniform(-1., 1., [train_step['batch_size'], model.z_dim]).astype(np.float32)

                    d_loss, g_loss, summary = s.run([model.d_loss, model.g_loss, model.merged],
                                                    feed_dict={
                                                        model.x: batch_x,
                                                        model.z: batch_z,
                                                    })

                    d_overpowered = d_loss < g_loss / 2.

                    # Print loss
                    print("[+] Epoch %03d Step %05d => " % (epoch, step),
                          " D loss : {:.8f}".format(d_loss),
                          " G loss : {:.8f}".format(g_loss))

                    # Generating samples from G with sample image and noise
                    samples = s.run(model.g,
                                    feed_dict={
                                        model.x: sample_x,
                                        model.z: sample_z,
                                    })

                    # Summary saver
                    model.writer.add_summary(summary, step)

                    # Export image generated by model G
                    sample_image_height = model.sample_size
                    sample_image_width = model.sample_size
                    sample_dir = results['output'] + 'train_{0}_{1}.png'.format(epoch, step)

                    # Generated image save
                    iu.save_images(samples,
                                   size=[sample_image_height, sample_image_width],
                                   image_path=sample_dir)

                    # Model save
                    model.saver.save(s, results['model'], global_step=step)

                step += 1

    end_time = time.time() - start_time  # Clocking end

    # Elapsed time
    print("[+] Elapsed time {:.8f}s".format(end_time))

    # Close tf.Session
    s.close()
ds_type="CelebA", use_save=False, save_file_name="/media/shar/240A27640A2731EA/shared2/Awesome-GANs-master/BGAN/CelebA/CelebA-64.h5", save_type="to_h5", use_img_scale=False #img_scale="-1,1") # saving sample images test_images2 = np.reshape(iu.transform(ds.images[:16], inv_type='127'), (16, 64, 64, 3)) iu2.save_images(test_images2, size=[4, 4], image_path=results['output'] + 'sample.png', inv_type='127') ds_iter = DataIterator(x=ds.images, y=None, batch_size=train_step['batch_size'], label_off=True) d_loss = 0. d_overpowered = False global_step = saved_global_step start_epoch = global_step // (len(ds.train_images) // model.batch_size) # recover n_epoch ds_iter.pointer = saved_global_step % (len(ds.train_images) // model.batch_size) # recover n_iter for epoch in range(start_epoch, train_step['epochs']): for batch_x in range(ds_iter.iterate()): #batch_x, _ = mnist.train.next_batch(model.batch_size) batch_x, _ = ds_iter.next_batch() #batch_x = batch_x.reshape(-1, model.n_input) batch_x = np.reshape(iu.transform(batch_x, inv_type='127'), (model.batch_size, model.height, model.width, model.channel))
def main():
    start_time = time.time()  # Clocking start

    # Loading Cifar-10 DataSet
    ds = DataSet(height=32,
                 width=32,
                 channel=3,
                 ds_path="D:/DataSet/cifar/cifar-10-batches-py/",
                 ds_name='cifar-10')

    ds_iter = DataIterator(x=iu.transform(ds.train_images, '127'),
                           y=ds.train_labels,
                           batch_size=train_step['batch_size'],
                           label_off=False)  # using labels # maybe someday, i'll change this param's name

    # Real image save
    test_images = iu.transform(ds.test_images[:100], inv_type='127')
    iu.save_images(test_images,
                   size=[10, 10],
                   image_path=results['output'] + 'sample.png',
                   inv_type='127')

    # GPU configure
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as s:
        # ACGAN Model
        model = acgan.ACGAN(s,
                            batch_size=train_step['batch_size'],
                            n_classes=ds.n_classes)

        # Initializing
        s.run(tf.global_variables_initializer())

        # Fixed one-hot labels for sampling: ten examples per class
        sample_y = np.zeros(shape=[model.sample_num, model.n_classes])
        for i in range(10):
            sample_y[10 * i:10 * (i + 1), i] = 1

        # Load model & Graph & Weights
        saved_global_step = 0
        ckpt = tf.train.get_checkpoint_state('./model/')
        if ckpt and ckpt.model_checkpoint_path:
            # Restores from checkpoint
            model.saver.restore(s, ckpt.model_checkpoint_path)

            saved_global_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
            print("[+] global step : %d" % saved_global_step, " successfully loaded")
        else:
            print('[-] No checkpoint file found')

        global_step = saved_global_step
        start_epoch = global_step // (len(ds.train_images) // model.batch_size)           # recover n_epoch
        ds_iter.pointer = saved_global_step % (len(ds.train_images) // model.batch_size)  # recover n_iter

        for epoch in range(start_epoch, train_step['epochs']):
            for batch_x, batch_y in ds_iter.iterate():
                batch_z = np.random.uniform(-1., 1., [model.batch_size, model.z_dim]).astype(np.float32)

                # Update D network
                _, d_loss = s.run([model.d_op, model.d_loss],
                                  feed_dict={
                                      model.x: batch_x,
                                      model.y: batch_y,
                                      model.z: batch_z,
                                  })

                # Update G/C networks
                _, g_loss, _, c_loss = s.run([model.g_op, model.g_loss, model.c_op, model.c_loss],
                                             feed_dict={
                                                 model.x: batch_x,
                                                 model.y: batch_y,
                                                 model.z: batch_z,
                                             })

                # Logging
                if global_step % train_step['logging_interval'] == 0:
                    batch_z = np.random.uniform(-1., 1., [model.batch_size, model.z_dim]).astype(np.float32)

                    d_loss, g_loss, c_loss, summary = s.run(
                        [model.d_loss, model.g_loss, model.c_loss, model.merged],
                        feed_dict={
                            model.x: batch_x,
                            model.y: batch_y,
                            model.z: batch_z,
                        })

                    # Print loss
                    print("[+] Epoch %04d Step %08d => " % (epoch, global_step),
                          " D loss : {:.8f}".format(d_loss),
                          " G loss : {:.8f}".format(g_loss),
                          " C loss : {:.8f}".format(c_loss))

                    # Generating samples from G with sample noise and labels
                    sample_z = np.random.uniform(-1., 1., [model.sample_num, model.z_dim]).astype(np.float32)
                    samples = s.run(model.g,
                                    feed_dict={
                                        model.y: sample_y,
                                        model.z: sample_z,
                                    })

                    # Summary saver
                    model.writer.add_summary(summary, global_step)

                    # Export image generated by model G
                    sample_image_height = model.sample_size
                    sample_image_width = model.sample_size
                    sample_dir = results['output'] + 'train_{:08d}.png'.format(global_step)

                    # Generated image save
                    iu.save_images(samples,
                                   size=[sample_image_height, sample_image_width],
                                   image_path=sample_dir,
                                   inv_type='127')

                    # Model save
                    model.saver.save(s, results['model'], global_step)

                global_step += 1

    end_time = time.time() - start_time  # Clocking end

    # Elapsed time
    print("[+] Elapsed time {:.8f}s".format(end_time))

    # Close tf.Session
    s.close()
def main():
    start_time = time.time()  # Clocking start

    # GPU configure
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as s:
        # A pre-trained model has to exist before anomaly detection
        detect = os.path.exists("./orig-model/")

        # AnoGAN Model
        model = anogan.AnoGAN(s, detect=detect, use_label=False)  # AnoGAN

        # Initializing
        s.run(tf.global_variables_initializer())

        # Loading CelebA DataSet
        ds = DataSet(height=64,
                     width=64,
                     channel=3,
                     ds_image_path="D:\\DataSet/CelebA/CelebA-64.h5",
                     ds_label_path="D:\\DataSet/CelebA/Anno/list_attr_celeba.txt",
                     # ds_image_path="D:\\DataSet/CelebA/Img/img_align_celeba/",
                     ds_type="CelebA",
                     use_save=False,
                     save_file_name="D:\\DataSet/CelebA/CelebA-128.h5",
                     save_type="to_h5",
                     use_img_scale=False,
                     # img_scale="-1,1"
                     )

        # Saving sample images
        test_images = np.reshape(iu.transform(ds.images[:16], inv_type='127'), (16, 64, 64, 3))
        iu.save_images(test_images,
                       size=[4, 4],
                       image_path=results['output'] + 'sample.png',
                       inv_type='127')

        ds_iter = DataIterator(x=ds.images,
                               y=None,
                               batch_size=train_step['batch_size'],
                               label_off=True)

        # To-Do
        # Getting anomaly data

        # Load model & Graph & Weights
        if not detect or not os.path.exists("./ano-model/"):
            ckpt = tf.train.get_checkpoint_state('./orig-model/')
        else:
            ckpt = tf.train.get_checkpoint_state('./ano-model/')

        saved_global_step = 0
        if ckpt and ckpt.model_checkpoint_path:
            # Restores from checkpoint
            model.saver.restore(s, ckpt.model_checkpoint_path)

            saved_global_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
            print("[+] global step : %d" % saved_global_step, " successfully loaded")
        else:
            print('[-] No checkpoint file found')

        global_step = saved_global_step
        start_epoch = global_step // (ds.num_images // model.batch_size)           # recover n_epoch
        ds_iter.pointer = saved_global_step % (ds.num_images // model.batch_size)  # recover n_iter

        for epoch in range(start_epoch, train_step['epoch']):
            for batch_images in ds_iter.iterate():
                batch_x = np.reshape(batch_images, [-1] + model.image_shape[1:])
                batch_z = np.random.uniform(-1., 1., [model.batch_size, model.z_dim]).astype(np.float32)

                # Update D network
                _, d_loss = s.run([model.d_op, model.d_loss],
                                  feed_dict={
                                      model.x: batch_x,
                                      model.z: batch_z,
                                  })

                # Update G network
                _, g_loss = s.run([model.g_op, model.g_loss],
                                  feed_dict={
                                      model.z: batch_z,
                                  })

                if global_step % train_step['logging_step'] == 0:
                    batch_z = np.random.uniform(-1., 1., [model.batch_size, model.z_dim]).astype(np.float32)

                    # Summary
                    d_loss, g_loss, summary = s.run([model.d_loss, model.g_loss, model.merged],
                                                    feed_dict={
                                                        model.x: batch_x,
                                                        model.z: batch_z,
                                                    })

                    # Print loss
                    print("[+] Epoch %04d Step %07d =>" % (epoch, global_step),
                          " D loss : {:.8f}".format(d_loss),
                          " G loss : {:.8f}".format(g_loss))

                    # Summary saver
                    model.writer.add_summary(summary, epoch)

                    # Generating samples from G with sample noise
                    sample_z = np.random.uniform(-1., 1., [model.sample_num, model.z_dim]).astype(np.float32)
                    samples = s.run(model.g_test,
                                    feed_dict={
                                        model.z: sample_z,
                                    })

                    # Export image generated by model G
                    sample_image_height = model.sample_size
                    sample_image_width = model.sample_size
                    sample_dir = results['output'] + 'train_{0}_{1}.png'.format(epoch, global_step)

                    # Generated image save
                    iu.save_images(samples,
                                   size=[sample_image_height, sample_image_width],
                                   image_path=sample_dir)

                    # Model save
                    if not detect:
                        model.saver.save(s, results['orig-model'], global_step=global_step)
                    else:
                        model.saver.save(s, results['ano-model'], global_step=global_step)

                global_step += 1

    end_time = time.time() - start_time  # Clocking end

    # Elapsed time
    print("[+] Elapsed time {:.8f}s".format(end_time))

    # Close tf.Session
    s.close()
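# Note on the checkpoint layout above: a fresh run (no ./orig-model/ yet)
# trains from scratch and saves under results['orig-model']; once
# ./orig-model/ exists, `detect` is True and the script restores from it (or
# from ./ano-model/ when present) and saves under results['ano-model'].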
def main():
    start_time = time.time()  # Clocking start

    # GPU configure
    gpu_config = tf.GPUOptions(allow_growth=True)
    config = tf.ConfigProto(allow_soft_placement=True, gpu_options=gpu_config)

    with tf.Session(config=config) as s:
        # BEGAN Model
        model = began.BEGAN(s)  # BEGAN

        # Initializing
        s.run(tf.global_variables_initializer())

        # Celeb-A DataSet images
        ds = DataSet(height=64,
                     width=64,
                     channel=3,
                     ds_path="/home/zero/hdd/DataSet/CelebA/",
                     ds_type="CelebA").images
        ds_iter = DataIterator(ds, None, train_step['batch_size'], label_off=True)

        global_step = 0
        for epoch in range(train_step['epoch']):
            for batch_images in ds_iter.iterate():
                batch_x = np.reshape(batch_images, [-1] + model.image_shape[1:])
                batch_z = np.random.uniform(-1., 1., [model.batch_size, model.z_dim]).astype(np.float32)

                # Update D network
                _, d_loss = s.run([model.d_op, model.d_loss],
                                  feed_dict={
                                      model.x: batch_x,
                                      model.z: batch_z,
                                  })

                # Update G network
                _, g_loss = s.run([model.g_op, model.g_loss],
                                  feed_dict={
                                      model.z: batch_z,
                                  })

                # Update k_t
                _, k, m_global = s.run([model.k_update, model.k, model.m_global],
                                       feed_dict={
                                           model.x: batch_x,
                                           model.z: batch_z,
                                       })

                if global_step % train_step['logging_step'] == 0:
                    _, k, m_global, d_loss, g_loss, summary = s.run(
                        [model.k_update, model.k, model.m_global, model.d_loss, model.g_loss, model.merged],
                        feed_dict={
                            model.x: batch_x,
                            model.z: batch_z,
                        })

                    # Print loss
                    print("[+] Epoch %03d Step %07d =>" % (epoch, global_step),
                          " D loss : {:.6f}".format(d_loss),
                          " G loss : {:.6f}".format(g_loss),
                          " k : {:.6f}".format(k),
                          " M : {:.6f}".format(m_global))

                    # Summary saver
                    model.writer.add_summary(summary, global_step)

                    # Generating samples from G with sample noise
                    sample_z = np.random.uniform(-1., 1., [model.sample_num, model.z_dim]).astype(np.float32)
                    samples = s.run(model.g,
                                    feed_dict={
                                        model.z: sample_z,
                                    })

                    # Export image generated by model G
                    sample_image_height = model.sample_size
                    sample_image_width = model.sample_size
                    sample_dir = results['output'] + 'train_{0}.png'.format(global_step)

                    # Generated image save
                    iu.save_images(samples,
                                   size=[sample_image_height, sample_image_width],
                                   image_path=sample_dir,
                                   inv_type='127')

                    # Model save
                    model.saver.save(s, results['model'], global_step=global_step)

                # Learning Rate update
                if global_step and global_step % model.lr_update_step == 0:
                    s.run([model.g_lr_update, model.d_lr_update])

                global_step += 1

    end_time = time.time() - start_time  # Clocking end

    # Elapsed time
    print("[+] Elapsed time {:.8f}s".format(end_time))

    # Close tf.Session
    s.close()
def main():
    start_time = time.time()  # Clocking start

    # Training, test data set
    ds = DataSet(height=32,
                 width=32,
                 channel=3,
                 ds_path='D:\\DataSet/cifar/cifar-10-batches-py/',
                 ds_name='cifar-10')
    ds_iter = DataIterator(ds.train_images, ds.train_labels, train_step['batch_size'])

    # GPU configure
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as s:
        # LAPGAN model
        model = lapgan.LAPGAN(s, batch_size=train_step['batch_size'])

        # Initializing variables
        s.run(tf.global_variables_initializer())

        # Load model & Graph & Weights
        saved_global_step = 0
        ckpt = tf.train.get_checkpoint_state('./model/')
        if ckpt and ckpt.model_checkpoint_path:
            model.saver.restore(s, ckpt.model_checkpoint_path)

            saved_global_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
            print("[+] global step : %d" % saved_global_step, " successfully loaded")
        else:
            print('[-] No checkpoint file found')

        # Fixed one-hot labels for sampling: ten examples per class
        sample_y = np.zeros(shape=[model.sample_num, model.n_classes])
        for i in range(10):
            sample_y[10 * i:10 * (i + 1), i] = 1

        global_step = saved_global_step
        start_epoch = global_step // (len(ds.train_images) // model.batch_size)           # recover n_epoch
        ds_iter.pointer = saved_global_step % (len(ds.train_images) // model.batch_size)  # recover n_iter

        for epoch in range(start_epoch, train_step['epoch']):
            for batch_images, batch_labels in ds_iter.iterate():
                batch_x = iu.transform(batch_images, inv_type='127')

                z = []
                for i in range(3):
                    z.append(np.random.uniform(-1., 1., [train_step['batch_size'], model.z_noises[i]]))

                # Update D/G networks
                img_fake, img_coarse, d_loss_1, g_loss_1, \
                    _, _, _, d_loss_2, g_loss_2, \
                    _, _, d_loss_3, g_loss_3, \
                    _, _, _, _, _, _ = s.run([
                        model.g[0], model.x1_coarse, model.d_loss[0], model.g_loss[0],
                        model.x2_fine, model.g[1], model.x2_coarse, model.d_loss[1], model.g_loss[1],
                        model.x3_fine, model.g[2], model.d_loss[2], model.g_loss[2],
                        model.d_op[0], model.g_op[0],
                        model.d_op[1], model.g_op[1],
                        model.d_op[2], model.g_op[2],
                    ],
                        feed_dict={
                            model.x1_fine: batch_x,                                # images
                            model.y: batch_labels,                                 # classes
                            model.z[0]: z[0], model.z[1]: z[1], model.z[2]: z[2],  # z-noises
                            model.do_rate: 0.5,
                        })

                # Logging
                if global_step % train_step['logging_interval'] == 0:
                    batch_x = ds.test_images[np.random.randint(0, len(ds.test_images), model.sample_num)]
                    batch_x = iu.transform(batch_x, inv_type='127')

                    z = []
                    for i in range(3):
                        z.append(np.random.uniform(-1., 1., [model.sample_num, model.z_noises[i]]))

                    # Update D/G networks
                    img_fake, img_coarse, d_loss_1, g_loss_1, \
                        _, _, _, d_loss_2, g_loss_2, \
                        _, _, d_loss_3, g_loss_3, \
                        _, _, _, _, _, _, summary = s.run([
                            model.g[0], model.x1_coarse, model.d_loss[0], model.g_loss[0],
                            model.x2_fine, model.g[1], model.x2_coarse, model.d_loss[1], model.g_loss[1],
                            model.x3_fine, model.g[2], model.d_loss[2], model.g_loss[2],
                            model.d_op[0], model.g_op[0],
                            model.d_op[1], model.g_op[1],
                            model.d_op[2], model.g_op[2],
                            model.merged,
                        ],
                            feed_dict={
                                model.x1_fine: batch_x,                                # images
                                model.y: sample_y,                                     # classes
                                model.z[0]: z[0], model.z[1]: z[1], model.z[2]: z[2],  # z-noises
                                model.do_rate: 0.,
                            })

                    # Print loss
                    d_loss = (d_loss_1 + d_loss_2 + d_loss_3) / 3.
                    g_loss = (g_loss_1 + g_loss_2 + g_loss_3) / 3.
                    print("[+] Epoch %03d Step %05d => " % (epoch, global_step),
                          " Avg D loss : {:.8f}".format(d_loss),
                          " Avg G loss : {:.8f}".format(g_loss))

                    # Reconstruction: coarse image plus G's predicted residual
                    samples = img_fake + img_coarse

                    # Summary saver
                    model.writer.add_summary(summary, global_step)  # time saving

                    # Export image generated by model G
                    sample_image_height = model.sample_size
                    sample_image_width = model.sample_size
                    sample_dir = results['output'] + 'train_{0}.png'.format(global_step)

                    # Generated image save
                    iu.save_images(samples,
                                   size=[sample_image_height, sample_image_width],
                                   image_path=sample_dir,
                                   inv_type='127')

                    # Model save
                    model.saver.save(s, results['model'], global_step)

                global_step += 1

    end_time = time.time() - start_time  # Clocking end

    # Elapsed time
    print("[+] Elapsed time {:.8f}s".format(end_time))

    # Close tf.Session
    s.close()
def main():
    start_time = time.time()  # Clocking start

    # GPU configure
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as s:
        # A pre-trained model has to exist before anomaly detection
        detect = os.path.exists("./orig-model/")

        # AnoGAN Model
        model = anogan.AnoGAN(s, detect=detect, use_label=False)  # AnoGAN

        # Initializing (must run before restoring, or the restored weights
        # would be overwritten)
        s.run(tf.global_variables_initializer())

        # Load model & Graph & Weights
        global_step = 0
        if not detect or not os.path.exists("./ano-model/"):
            ckpt = tf.train.get_checkpoint_state('./orig-model/')
        else:
            ckpt = tf.train.get_checkpoint_state('./ano-model/')

        if ckpt and ckpt.model_checkpoint_path:
            # Restores from checkpoint
            model.saver.restore(s, ckpt.model_checkpoint_path)

            global_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
            print("[+] global step : %d" % global_step, " successfully loaded")
        else:
            print('[-] No checkpoint file found')

        # Celeb-A DataSet images
        ds = DataSet(height=64,
                     width=64,
                     channel=3,
                     ds_path="/home/zero/hdd/DataSet/CelebA/",
                     ds_type="CelebA").images

        # To-Do
        # Getting anomaly data

        ds_iter = DataIterator(ds, None, train_step['batch_size'], label_off=True)

        for epoch in range(train_step['epoch']):
            for batch_images in ds_iter.iterate():
                batch_x = np.reshape(batch_images, [-1] + model.image_shape[1:])
                batch_z = np.random.uniform(-1., 1., [model.batch_size, model.z_dim]).astype(np.float32)

                # Update D network
                _, d_loss = s.run([model.d_op, model.d_loss],
                                  feed_dict={
                                      model.x: batch_x,
                                      model.z: batch_z,
                                  })

                # Update G network
                _, g_loss = s.run([model.g_op, model.g_loss],
                                  feed_dict={
                                      model.z: batch_z,
                                  })

                if global_step % train_step['logging_step'] == 0:
                    batch_z = np.random.uniform(-1., 1., [model.batch_size, model.z_dim]).astype(np.float32)

                    # Summary
                    d_loss, g_loss, summary = s.run([model.d_loss, model.g_loss, model.merged],
                                                    feed_dict={
                                                        model.x: batch_x,
                                                        model.z: batch_z,
                                                    })

                    # Print loss
                    print("[+] Epoch %04d Step %07d =>" % (epoch, global_step),
                          " D loss : {:.8f}".format(d_loss),
                          " G loss : {:.8f}".format(g_loss))

                    # Summary saver
                    model.writer.add_summary(summary, epoch)

                    # Generating samples from G with sample noise
                    sample_z = np.random.uniform(-1., 1., [model.sample_num, model.z_dim]).astype(np.float32)
                    samples = s.run(model.g_test,
                                    feed_dict={
                                        model.z: sample_z,
                                    })

                    # Export image generated by model G
                    sample_image_height = model.sample_size
                    sample_image_width = model.sample_size
                    sample_dir = results['output'] + 'train_{0}_{1}.png'.format(epoch, global_step)

                    # Generated image save
                    iu.save_images(samples,
                                   size=[sample_image_height, sample_image_width],
                                   image_path=sample_dir)

                    # Model save
                    if not detect:
                        model.saver.save(s, results['orig-model'], global_step=global_step)
                    else:
                        model.saver.save(s, results['ano-model'], global_step=global_step)

                global_step += 1

    end_time = time.time() - start_time  # Clocking end

    # Elapsed time
    print("[+] Elapsed time {:.8f}s".format(end_time))

    # Close tf.Session
    s.close()
def main():
    start_time = time.time()  # Clocking start

    # Celeb-A-HQ DataSet images
    ds = DataSet(input_height=1024,
                 input_width=1024,
                 input_channel=3,
                 ds_type="CelebA-HQ",
                 ds_path="/home/zero/hdd/DataSet/CelebA-HQ").images
    n_ds = 30000

    dataset_iter = DataIterator(ds, None, train_step['batch_size'], label_off=True)

    # Export a random real image
    rnd = random.randint(0, n_ds)
    sample_x = ds[rnd]
    sample_x = np.reshape(sample_x, [-1, 1024, 1024, 3])

    valid_image_height = 1
    valid_image_width = 1
    sample_dir = results['output'] + 'valid.png'

    # Real image save
    iu.save_images(sample_x,
                   size=[valid_image_height, valid_image_width],
                   image_path=sample_dir,
                   inv_type='127')

    print("[+] sample image saved!")
    print("[+] pre-processing took {:.8f}s".format(time.time() - start_time))

    # GPU configure
    gpu_config = tf.GPUOptions(allow_growth=True)
    config = tf.ConfigProto(allow_soft_placement=True, gpu_options=gpu_config)

    for idx, n_pg in enumerate(pg):
        with tf.Session(config=config) as s:
            pg_t = False if idx % 2 == 0 else True

            # PGGAN Model
            model = pggan.PGGAN(s, pg=n_pg, pg_t=pg_t)  # PGGAN

            # Initializing
            s.run(tf.global_variables_initializer())

            if not n_pg == 1 and not n_pg == 7:
                if pg_t:
                    model.r_saver.restore(s, results['model'] + '%d-%d.ckpt' % (idx, r_pg[idx]))
                    model.out_saver.restore(s, results['model'] + '%d-%d.ckpt' % (idx, r_pg[idx]))
                else:
                    model.saver.restore(s, results['model'] + '%d-%d.ckpt' % (idx, r_pg[idx]))

            global_step = 0
            for epoch in range(train_step['epoch']):
                # Later, adding n_critic for optimizing D net
                for batch_images in dataset_iter.iterate():
                    batch_x = np.reshape(batch_images, (-1, 128, 128, 3))
                    batch_x = (batch_x + 1.) * 127.5  # re-scaling to (0, 255)
                    batch_x = image_resize(batch_x, s=model.output_size)
                    batch_x = (batch_x / 127.5) - 1.  # re-scaling to (-1, 1)

                    batch_z = np.random.uniform(-1., 1., [model.batch_size, model.z_dim]).astype(np.float32)

                    # During a transition stage, blend the current-resolution
                    # batch with its 2x-downsampled-then-upsampled version
                    if pg_t and not n_pg == 0:
                        alpha = global_step / 32000.
                        low_batch_x = zoom(batch_x, zoom=[1., .5, .5, 1.])
                        low_batch_x = zoom(low_batch_x, zoom=[1., 2., 2., 1.])
                        batch_x = alpha * batch_x + (1. - alpha) * low_batch_x

                    # Update D network
                    _, d_loss = s.run([model.d_op, model.d_loss],
                                      feed_dict={
                                          model.x: batch_x,
                                          model.z: batch_z,
                                      })

                    # Update G network
                    _, g_loss = s.run([model.g_op, model.g_loss],
                                      feed_dict={
                                          model.z: batch_z,
                                      })

                    # Update alpha_trans
                    s.run(model.alpha_trans_update, feed_dict={model.step_pl: global_step})

                    if global_step % train_step['logging_step'] == 0:
                        gp, d_loss, g_loss, summary = s.run(
                            [model.gp, model.d_loss, model.g_loss, model.merged],
                            feed_dict={
                                model.x: batch_x,
                                model.z: batch_z,
                            })

                        # Print loss
                        print("[+] PG %d Epoch %03d Step %07d =>" % (n_pg, epoch, global_step),
                              " D loss : {:.6f}".format(d_loss),
                              " G loss : {:.6f}".format(g_loss),
                              " GP : {:.6f}".format(gp))

                        # Summary saver
                        model.writer.add_summary(summary, global_step)

                        # Generating samples from G with sample noise
                        sample_z = np.random.uniform(-1., 1., [model.sample_num, model.z_dim]).astype(np.float32)
                        samples = s.run(model.g,
                                        feed_dict={
                                            model.z: sample_z,
                                        })
                        samples = np.clip(samples, -1, 1)

                        # Export image generated by model G
                        sample_image_height = 1
                        sample_image_width = 1
                        sample_dir = results['output'] + 'train_{0}.png'.format(global_step)

                        # Generated image save
                        iu.save_images(samples,
                                       size=[sample_image_height, sample_image_width],
                                       image_path=sample_dir,
                                       inv_type='127')

                        # Model save
                        model.saver.save(s,
                                         results['model'] + '%d-%d.ckpt' % (idx, n_pg),
                                         global_step=global_step)

                    global_step += 1

    end_time = time.time() - start_time  # Clocking end

    # Elapsed time
    print("[+] Elapsed time {:.8f}s".format(end_time))
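# The PGGAN loop above relies on an `image_resize` helper that is not defined
# in this section. A minimal sketch of what it might look like, assuming a
# (N, H, W, C) float batch and a square target size `s`; the real helper's
# implementation and interpolation order may differ:
from scipy.ndimage import zoom as nd_zoom


def image_resize(batch, s):
    # Spline-resize a (N, H, W, C) batch to (N, s, s, C); the batch and
    # channel axes are left untouched (zoom factor 1.0).
    factor = float(s) / batch.shape[1]
    return nd_zoom(batch, zoom=[1., factor, factor, 1.])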
def main():
    start_time = time.time()  # Clocking start

    # GPU configure
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as s:
        # LAPGAN model
        # D/G models are the same as in the DCGAN model
        model = lapgan.LAPGAN(s, batch_size=train_step['batch_size'])

        # Initializing variables
        s.run(tf.global_variables_initializer())

        # Training, test data set
        dataset = DataSet(input_height=32, input_width=32, input_channel=3, name='cifar-10')
        dataset_iter = DataIterator(dataset.train_images, dataset.train_labels, train_step['batch_size'])

        step = 0
        cont = int(step / 750)
        for epoch in range(cont, cont + train_step['epoch']):
            for batch_images, batch_labels in dataset_iter.iterate():
                batch_images = batch_images.astype(np.float32) / 255.

                z = []
                for i in range(3):
                    z.append(np.random.uniform(-1., 1.,
                                               [train_step['batch_size'], model.z_noises[i]]).astype(np.float32))

                # Update D/G networks
                img_fake, _, _, img_coarse, d_loss_1, g_loss_1, \
                    _, _, _, _, _, d_loss_2, g_loss_2, \
                    _, _, _, _, d_loss_3, g_loss_3, \
                    _, _, _, _, _, _ = s.run([
                        model.g[0], model.d_reals_prob[0], model.d_fakes_prob[0],
                        model.x1_coarse, model.d_loss[0], model.g_loss[0],
                        model.x2_fine, model.g[1], model.d_reals_prob[1], model.d_fakes_prob[1],
                        model.x2_coarse, model.d_loss[1], model.g_loss[1],
                        model.x3_fine, model.g[2], model.d_reals_prob[2], model.d_fakes_prob[2],
                        model.d_loss[2], model.g_loss[2],
                        model.d_op[0], model.g_op[0],
                        model.d_op[1], model.g_op[1],
                        model.d_op[2], model.g_op[2],  # D/G ops
                    ],
                        feed_dict={
                            model.x1_fine: batch_images,                           # images
                            model.y: batch_labels,                                 # classes
                            model.z[0]: z[0], model.z[1]: z[1], model.z[2]: z[2],  # z-noises
                        })

                # Logging
                if step % train_step['logging_interval'] == 0:
                    batch_x = batch_images[:model.sample_num]
                    batch_y = batch_labels[:model.sample_num]

                    z = []
                    for i in range(3):
                        z.append(np.random.uniform(-1., 1.,
                                                   [model.sample_num, model.z_noises[i]]).astype(np.float32))

                    # Update D/G networks
                    img_fake, _, _, img_coarse, d_loss_1, g_loss_1, \
                        _, _, _, _, _, d_loss_2, g_loss_2, \
                        _, _, _, _, d_loss_3, g_loss_3, \
                        _, _, _, _, _, _, summary = s.run([
                            model.g[0], model.d_reals_prob[0], model.d_fakes_prob[0],
                            model.x1_coarse, model.d_loss[0], model.g_loss[0],
                            model.x2_fine, model.g[1], model.d_reals_prob[1], model.d_fakes_prob[1],
                            model.x2_coarse, model.d_loss[1], model.g_loss[1],
                            model.x3_fine, model.g[2], model.d_reals_prob[2], model.d_fakes_prob[2],
                            model.d_loss[2], model.g_loss[2],
                            model.d_op[0], model.g_op[0],
                            model.d_op[1], model.g_op[1],
                            model.d_op[2], model.g_op[2],
                            model.merged,
                        ],
                            feed_dict={
                                model.x1_fine: batch_x,                                # images
                                model.y: batch_y,                                      # classes
                                model.z[0]: z[0], model.z[1]: z[1], model.z[2]: z[2],  # z-noises
                            })

                    # Print loss
                    print("[+] Epoch %03d Step %05d => " % (epoch, step),
                          " D loss : {:.8f}".format(d_loss_1.mean()),
                          " G loss : {:.8f}".format(g_loss_1.mean()))

                    # Reconstruction: coarse image plus G's predicted residual
                    samples = img_fake + img_coarse

                    # Summary saver
                    model.writer.add_summary(summary, step)  # time saving

                    # Export image generated by model G
                    sample_image_height = model.sample_size
                    sample_image_width = model.sample_size
                    sample_dir = results['output'] + 'train_{0}_{1}.png'.format(epoch, step)

                    # Generated image save
                    iu.save_images(samples,
                                   size=[sample_image_height, sample_image_width],
                                   image_path=sample_dir)

                    # Model save
                    model.saver.save(s, results['model'], global_step=step)  # time saving

                step += 1

    end_time = time.time() - start_time  # Clocking end

    # Elapsed time
    print("[+] Elapsed time {:.8f}s".format(end_time))

    # Close tf.Session
    s.close()
def main():
    start_time = time.time()  # Clocking start

    height, width, channel = 128, 128, 3

    # Loading CelebA DataSet, from 'raw images' or 'h5'
    use_h5 = True
    if not use_h5:
        ds = DataSet(
            height=height,
            width=width,
            channel=channel,
            # ds_image_path="D:\\DataSet/CelebA/CelebA-%d.h5" % height,
            ds_label_path=os.path.join(cfg.celeba, "Anno/list_attr_celeba.txt"),
            ds_image_path=os.path.join(cfg.celeba, "Img/img_align_celeba/"),
            ds_type="CelebA",
            use_save=True,
            save_file_name=os.path.join(cfg.celeba, "CelebA-%d.h5" % height),
            save_type="to_h5",
            use_img_scale=False,
        )
    else:
        ds = DataSet(
            height=height,
            width=width,
            channel=channel,
            ds_image_path=os.path.join(cfg.celeba, "CelebA-%d.h5" % height),
            ds_label_path=os.path.join(cfg.celeba, "Anno/list_attr_celeba.txt"),
            # ds_image_path=os.path.join(cfg.celeba, "Img/img_align_celeba/"),
            ds_type="CelebA",
            use_save=False,
            # save_file_name=os.path.join(cfg.celeba, "CelebA-%d.h5" % height),
            # save_type="to_h5",
            use_img_scale=False,
        )

    num_images = ds.num_images

    # Saving sample images
    test_images = np.reshape(iu.transform(ds.images[:16], inv_type='127'),
                             (16, height, width, channel))
    iu.save_images(test_images,
                   size=[4, 4],
                   image_path=os.path.join(cfg.output, "sample.png"),
                   inv_type='127')

    ds_iter = DataIterator(x=ds.images,
                           y=None,
                           batch_size=train_step['batch_size'],
                           label_off=True)

    del ds  # free the full dataset; the iterator holds what it needs

    # GPU configure
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as s:
        # SAGAN Model
        model = sagan.SAGAN(s,
                            height=height,
                            width=width,
                            channel=channel,
                            batch_size=train_step['batch_size'],
                            use_gp=False,
                            use_hinge_loss=True)

        # Initializing
        s.run(tf.global_variables_initializer())

        print("[*] Reading checkpoints...")

        saved_global_step = 0
        ckpt = tf.train.get_checkpoint_state(cfg.model_path)
        if ckpt and ckpt.model_checkpoint_path:
            # Restores from checkpoint
            model.saver.restore(s, ckpt.model_checkpoint_path)

            saved_global_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
            print("[+] global step : %d" % saved_global_step, " successfully loaded")
        else:
            print('[-] No checkpoint file found')

        global_step = saved_global_step
        start_epoch = global_step // (num_images // model.batch_size)           # recover n_epoch
        ds_iter.pointer = saved_global_step % (num_images // model.batch_size)  # recover n_iter

        for epoch in range(start_epoch, train_step['epochs']):
            for batch_x in ds_iter.iterate():
                batch_x = iu.transform(batch_x, inv_type='127')
                batch_x = np.reshape(batch_x, (model.batch_size, model.height, model.width, model.channel))
                batch_z = np.random.uniform(-1., 1., [model.batch_size, model.z_dim]).astype(np.float32)

                # Update D network
                _, d_loss = s.run([model.d_op, model.d_loss],
                                  feed_dict={
                                      model.x: batch_x,
                                      model.z: batch_z,
                                  })

                # Update G network
                _, g_loss = s.run([model.g_op, model.g_loss],
                                  feed_dict={
                                      model.x: batch_x,
                                      model.z: batch_z,
                                  })

                if global_step % train_step['logging_interval'] == 0:
                    summary = s.run(model.merged,
                                    feed_dict={
                                        model.x: batch_x,
                                        model.z: batch_z,
                                    })

                    # Generating samples from G with sample noise
                    sample_z = np.random.uniform(-1., 1., [model.sample_num, model.z_dim]).astype(np.float32)
                    samples = s.run(model.g_test,
                                    feed_dict={
                                        model.z_test: sample_z,
                                    })

                    # is_mean, is_std = t.inception_score(iu.inverse_transform(samples, inv_type='127'))
                    # fid_score = t.fid_score(real_img=batch_x, fake_img=samples[:model.batch_size])

                    # Print loss
                    print("[+] Epoch %04d Step %08d => " % (epoch, global_step),
                          " D loss : {:.8f}".format(d_loss),
                          " G loss : {:.8f}".format(g_loss),
                          # " Inception Score : {:.2f} (±{:.2f})".format(is_mean, is_std),
                          # " FID Score : {:.2f}".format(fid_score)
                          )

                    # Summary saver
                    model.writer.add_summary(summary, global_step)

                    # Export image generated by model G
                    sample_image_height = model.sample_size
                    sample_image_width = model.sample_size
                    sample_dir = os.path.join(cfg.output, 'train_{:08d}.png'.format(global_step))

                    # Generated image save
                    iu.save_images(samples,
                                   size=[sample_image_height, sample_image_width],
                                   image_path=sample_dir,
                                   inv_type='127')

                    # Model save
                    model.saver.save(s, os.path.join(cfg.model_path, "SAGAN.ckpt"), global_step)

                global_step += 1

    end_time = time.time() - start_time  # Clocking end

    # Elapsed time
    print("[+] Elapsed time {:.8f}s".format(end_time))

    # Close tf.Session
    s.close()
def main():
    start_time = time.time()  # Clocking start

    # Loading CelebA DataSet
    ds = DataSet(
        height=64,
        width=64,
        channel=3,
        ds_image_path="/home/zero/hdd/DataSet/CelebA/CelebA-64.h5",
        ds_label_path="/home/zero/hdd/DataSet/CelebA/Anno/list_attr_celeba.txt",
        # ds_image_path="/home/zero/hdd/DataSet/CelebA/Img/img_align_celeba/",
        ds_type="CelebA",
        use_save=False,
        save_file_name="/home/zero/hdd/DataSet/CelebA/CelebA-64.h5",
        save_type="to_h5",
        use_img_scale=False,
        # img_scale="-1,1"
    )

    # Saving sample images
    test_images = np.reshape(iu.transform(ds.images[:100], inv_type='127'), (100, 64, 64, 3))
    iu.save_images(test_images,
                   size=[10, 10],
                   image_path=results['output'] + 'sample.png',
                   inv_type='127')

    ds_iter = DataIterator(x=ds.images,
                           y=None,
                           batch_size=train_step['batch_size'],
                           label_off=True)

    # GPU configure
    gpu_config = tf.GPUOptions(allow_growth=True)
    config = tf.ConfigProto(allow_soft_placement=True, gpu_options=gpu_config)

    with tf.Session(config=config) as s:
        # BEGAN Model
        model = began.BEGAN(s, batch_size=train_step['batch_size'], gamma=0.5)  # BEGAN

        # Initializing
        s.run(tf.global_variables_initializer())

        print("[*] Reading checkpoints...")

        saved_global_step = 0
        ckpt = tf.train.get_checkpoint_state('./model/')
        if ckpt and ckpt.model_checkpoint_path:
            # Restores from checkpoint
            model.saver.restore(s, ckpt.model_checkpoint_path)

            saved_global_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
            print("[+] global step : %d" % saved_global_step, " successfully loaded")
        else:
            print('[-] No checkpoint file found')

        global_step = saved_global_step
        start_epoch = global_step // (ds.num_images // model.batch_size)           # recover n_epoch
        ds_iter.pointer = saved_global_step % (ds.num_images // model.batch_size)  # recover n_iter

        for epoch in range(start_epoch, train_step['epoch']):
            for batch_x in ds_iter.iterate():
                batch_x = iu.transform(batch_x, inv_type='127')
                batch_x = np.reshape(batch_x, (model.batch_size, model.height, model.width, model.channel))
                batch_z = np.random.uniform(-1., 1., [model.batch_size, model.z_dim]).astype(np.float32)

                # Update D network
                _, d_loss = s.run([model.d_op, model.d_loss],
                                  feed_dict={
                                      model.x: batch_x,
                                      model.z: batch_z,
                                  })

                # Update G network
                _, g_loss = s.run([model.g_op, model.g_loss],
                                  feed_dict={
                                      model.z: batch_z,
                                  })

                # Update k_t
                _, k, m_global = s.run([model.k_update, model.k, model.m_global],
                                       feed_dict={
                                           model.x: batch_x,
                                           model.z: batch_z,
                                       })

                if global_step % train_step['logging_step'] == 0:
                    summary = s.run(model.merged,
                                    feed_dict={
                                        model.x: batch_x,
                                        model.z: batch_z,
                                    })

                    # Print loss
                    print("[+] Epoch %03d Step %07d =>" % (epoch, global_step),
                          " D loss : {:.6f}".format(d_loss),
                          " G loss : {:.6f}".format(g_loss),
                          " k : {:.6f}".format(k),
                          " M : {:.6f}".format(m_global))

                    # Summary saver
                    model.writer.add_summary(summary, global_step)

                    # Generating samples from G with sample noise
                    sample_z = np.random.uniform(-1., 1., [model.sample_num, model.z_dim]).astype(np.float32)
                    samples = s.run(model.g,
                                    feed_dict={
                                        model.z: sample_z,
                                    })

                    # Export image generated by model G
                    sample_image_height = model.sample_size
                    sample_image_width = model.sample_size
                    sample_dir = results['output'] + 'train_{0}.png'.format(global_step)

                    # Generated image save
                    iu.save_images(samples,
                                   size=[sample_image_height, sample_image_width],
                                   image_path=sample_dir,
                                   inv_type='127')

                    # Model save
                    model.saver.save(s, results['model'], global_step=global_step)

                # Learning Rate update
                if global_step and global_step % model.lr_update_step == 0:
                    s.run([model.g_lr_update, model.d_lr_update])

                global_step += 1

    end_time = time.time() - start_time  # Clocking end

    # Elapsed time
    print("[+] Elapsed time {:.8f}s".format(end_time))

    # Close tf.Session
    s.close()
def main():
    start_time = time.time()  # Clocking start

    # Loading CelebA DataSet with a subset of the attribute labels
    labels = [
        'Black_Hair', 'Blond_Hair', 'Blurry', 'Eyeglasses', 'Gray_Hair',
        'Male', 'Smiling', 'Wavy_Hair', 'Wearing_Hat', 'Young'
    ]
    ds = DataSet(
        height=64,
        width=64,
        channel=3,
        ds_image_path="/home/zero/hdd/DataSet/CelebA/CelebA-64.h5",
        ds_label_path="/home/zero/hdd/DataSet/CelebA/Anno/list_attr_celeba.txt",
        attr_labels=labels,
        # ds_image_path="D:\\DataSet/CelebA/Img/img_align_celeba/",
        ds_type="CelebA",
        use_save=False,
        save_file_name="D:\\DataSet/CelebA/CelebA-64.h5",
        save_type="to_h5",
        use_img_scale=False,
        # img_scale="-1,1"
    )

    # Saving sample images
    test_images = np.reshape(iu.transform(ds.images[:16], inv_type='127'), (16, 64, 64, 3))
    iu.save_images(test_images,
                   size=[4, 4],
                   image_path=results['output'] + 'sample.png',
                   inv_type='127')

    ds_iter = DataIterator(x=ds.images,
                           y=ds.labels,
                           batch_size=train_step['batch_size'],
                           label_off=False)

    # GPU configure
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as s:
        # InfoGAN Model
        model = infogan.InfoGAN(s,
                                height=64,
                                width=64,
                                channel=3,
                                batch_size=train_step['batch_size'],
                                n_categories=len(labels))

        # Fixed z-noise
        sample_z = np.random.uniform(-1., 1., [model.sample_num, model.z_dim]).astype(np.float32)

        # Initializing
        s.run(tf.global_variables_initializer())

        # Load model & Graph & Weights
        saved_global_step = 0
        ckpt = tf.train.get_checkpoint_state('./model/')
        if ckpt and ckpt.model_checkpoint_path:
            model.saver.restore(s, ckpt.model_checkpoint_path)

            saved_global_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
            print("[+] global step : %d" % saved_global_step, " successfully loaded")
        else:
            print('[-] No checkpoint file found')

        global_step = saved_global_step
        start_epoch = global_step // (ds.num_images // model.batch_size)           # recover n_epoch
        ds_iter.pointer = saved_global_step % (ds.num_images // model.batch_size)  # recover n_iter

        for epoch in range(start_epoch, train_step['epochs']):
            for batch_x, batch_y in ds_iter.iterate():
                batch_x = iu.transform(batch_x, inv_type='127')
                batch_x = np.reshape(batch_x, (model.batch_size, model.height, model.width, model.channel))
                batch_z = np.random.uniform(-1., 1., [model.batch_size, model.z_dim]).astype(np.float32)

                # Latent code c = (continuous factors, categorical factors)
                batch_z_con = gen_continuous(model.batch_size, model.n_continous_factor)
                batch_z_cat = gen_category(model.batch_size, model.n_categories)
                batch_c = np.concatenate((batch_z_con, batch_z_cat), axis=1)

                # Update D network
                _, d_loss = s.run([model.d_op, model.d_loss],
                                  feed_dict={
                                      model.c: batch_c,
                                      model.x: batch_x,
                                      model.z: batch_z,
                                  })

                # Update G network
                _, g_loss = s.run([model.g_op, model.g_loss],
                                  feed_dict={
                                      model.c: batch_c,
                                      model.x: batch_x,
                                      model.z: batch_z,
                                  })

                # Logging
                if global_step % train_step['logging_interval'] == 0:
                    summary = s.run(model.merged,
                                    feed_dict={
                                        model.c: batch_c,
                                        model.x: batch_x,
                                        model.z: batch_z,
                                    })

                    # Print loss
                    print("[+] Epoch %02d Step %08d => " % (epoch, global_step),
                          " D loss : {:.8f}".format(d_loss),
                          " G loss : {:.8f}".format(g_loss))

                    # Generating samples from G: sweep the first continuous
                    # factor over (-2, 2) and one-hot over the categories
                    sample_z_con = np.zeros((model.sample_num, model.n_continous_factor))
                    for i in range(10):
                        sample_z_con[10 * i:10 * (i + 1), 0] = np.linspace(-2, 2, 10)

                    sample_z_cat = np.zeros((model.sample_num, model.n_categories))
                    for i in range(10):
                        sample_z_cat[10 * i:10 * (i + 1), i] = 1

                    sample_c = np.concatenate((sample_z_con, sample_z_cat), axis=1)

                    samples = s.run(model.g,
                                    feed_dict={
                                        model.c: sample_c,
                                        model.z: sample_z,
                                    })

                    # Summary saver
                    model.writer.add_summary(summary, global_step)

                    # Export image generated by model G
                    sample_image_height = model.sample_size
                    sample_image_width = model.sample_size
                    sample_dir = results['output'] + 'train_{:08d}.png'.format(global_step)

                    # Generated image save
                    iu.save_images(samples,
                                   size=[sample_image_height, sample_image_width],
                                   image_path=sample_dir,
                                   inv_type='127')

                    # Model save
                    model.saver.save(s, results['model'], global_step)

                global_step += 1

    end_time = time.time() - start_time  # Clocking end

    # Elapsed time
    print("[+] Elapsed time {:.8f}s".format(end_time))

    # Close tf.Session
    s.close()
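# Each of these scripts is assumed to be run directly, with the configuration
# dicts (`train_step`, `results`, `cfg`) defined at module level; a standard
# entry point would be:
if __name__ == '__main__':
    main()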