def main(**kwargs):
    from types import SimpleNamespace
    _ = SimpleNamespace(**kwargs)
    loader = ImageLoader(_.folder)
    data = loader.setup(datalen=_.datalen)
    dcgan = DCGAN(loader.shape_x, loader.shape_y, loader.channels, data)
    dcgan.train(epochs=_.epochs, batch_size=_.batch_size, save_interval=50)
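# A minimal usage sketch (assumption): the snippet above expects keyword arguments, so a
# thin argparse wrapper like the one below is one way to drive it; the flag names simply
# mirror the attributes read from the SimpleNamespace and the defaults are illustrative.
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--folder', type=str, default='./images')
    parser.add_argument('--datalen', type=int, default=10000)
    parser.add_argument('--epochs', type=int, default=4000)
    parser.add_argument('--batch_size', type=int, default=32)
    main(**vars(parser.parse_args()))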
def run_test():
    model_object = DCGAN(dataset, label, data_object.input_size, class_name,
                         generator_arch, discriminator_arch, encoder_arch,
                         learning_rate, batch_size)
    if label is None:
        model_object.train(x, epochs)
    else:
        # keep only samples whose one-hot label matches the requested class
        index = list(np.where(y[:, label] == 1)[0])
        x_positive = x[index]
        model_object.train(x_positive, epochs)
def main(argv=None):
    log_dir, model_dir = generate_log_model_dirs(FLAGS.root_logdir, FLAGS.root_model_dir)
    create_path(log_dir)
    create_path(model_dir)
    tf.reset_default_graph()
    with tf.Session() as sess:
        dcgan_nn = DCGAN(sess, log_dir, model_dir)
        dcgan_nn.build_graph(FLAGS)
        dcgan_nn.train(FLAGS)
def init():
    dcgan = DCGAN()
    if path.exists("%s/generator.h5" % modelSaveLocation):
        print("Model is trained. Loading from %s" % modelSaveLocation)
        dcgan.loadWeights(modelSaveLocation)
        dcgan.generate("./images/generated/example.png")
    else:
        data = loadData()
        print("Model is not trained. Loaded %d images for training" % len(data))
        dcgan.train(data=data, epochs=2000, batch_size=32, save_interval=50)
        print("Model is trained. Saving to %s" % modelSaveLocation)
        dcgan.saveWeights(modelSaveLocation)
def main():
    flags = tf.app.flags
    flags.DEFINE_integer("epoch", 25, "Epoch to train [25]")
    flags.DEFINE_float("learning_rate", 0.0002, "Learning rate for adam [0.0002]")
    flags.DEFINE_float("beta1", 0.5, "Momentum term of adam [0.5]")
    FLAGS = flags.FLAGS

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        dcgan = DCGAN(sess)
        dcgan.train(FLAGS)
def run_gan_main(input_f, name='normal', generated_num=10000, output_dir='log',
                 epochs=10, show_flg=True, gan_type='naive_gan', time_str='', **kwargs):
    # step 1: obtain the training set
    train = TrafficDataset(input_f, transform=None, normalization_flg=False)
    print('\'%s\' train size : (%d, %d) used for training \'%s\' %s.'
          % (name, len(train.X), len(train.X[0]), name, gan_type))

    # step 2.1: initialize the GAN
    if gan_type == 'dcgan':
        gan_m = DCGAN(num_epochs=epochs, num_features=len(train.X[0]), batch_size=64,
                      show_flg=show_flg, output_dir=output_dir, GAN_name=name,
                      time_str=time_str)
    else:  # default gan_type
        gan_m = NaiveGAN(num_epochs=epochs, num_features=len(train.X[0]), batch_size=64,
                         show_flg=show_flg, output_dir=output_dir, GAN_name=name,
                         time_str=time_str)

    # step 2.2: train the GAN model
    print('\nTraining begins ...')
    gan_m.train(train)
    print('Train finished.')

    # step 3.1: dump the model
    model_file = dump_model(gan_m, os.path.join(output_dir, 'gan_%s_model.p' % name))
    # step 3.2: reload the model
    gan_m = load_model(model_file)

    # step 4: generate more data
    print('generated_num is', generated_num)
    gen_data = gan_m.generate_data(generated_num)
    output_f = save_data(np.asarray(gen_data).tolist(),
                         output_f=os.path.join(output_dir,
                                               'gan_%s_model' % name
                                               + '_generated_%s_samples.csv' % str(generated_num)))
    return output_f, gan_m.gan_loss_file, gan_m.gan_decision_file
def train(latent_dim, height, width, channels):
    (X_train, Y_train), (_, _) = tf.keras.datasets.mnist.load_data()
    # X_train = X_train[0: 2000]
    X_train = X_train.reshape((X_train.shape[0],) + (height, width, channels)).astype('float32')
    X_train = normalize(X_train)

    epochs = 20  # epochs = 2
    batch_size = 128
    iterations = X_train.shape[0] // batch_size

    dcgan = DCGAN(latent_dim, height, width, channels)
    for epoch in range(epochs):
        for iteration in range(iterations):
            real_images = X_train[iteration * batch_size:(iteration + 1) * batch_size]
            d_loss, g_loss = dcgan.train(real_images, batch_size)
            if (iteration + 1) % 10 == 0:
                print('{} / {}'.format(iteration + 1, iterations))
                print('discriminator loss: {}'.format(d_loss))
                print('generator loss: {}'.format(g_loss))
                print()
                with open('loss.txt', 'a') as f:
                    f.write(str(d_loss) + ',' + str(g_loss) + '\n')
        dcgan.save_weights('gan' + '_epoch' + str(epoch + 1) + '.h5')
        print('epoch' + str(epoch) + ' end')
        print()
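# A minimal sketch (assumption) of the `normalize` helper the loop above relies on:
# DCGANs with a tanh generator output typically scale pixels from [0, 255] to [-1, 1],
# matching the definition used further down in this collection.
import numpy as np

def normalize(X):
    return (X.astype('float32') - 127.5) / 127.5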
def main(_):
    pp = pprint.PrettyPrinter()
    pp.pprint(flags.FLAGS.__flags)

    if FLAGS.input_width is None:
        FLAGS.input_width = FLAGS.input_height
    if FLAGS.output_width is None:
        FLAGS.output_width = FLAGS.output_height

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
    # run_config = tf.compat.v1.ConfigProto
    # run_config.gpu_options.allow_growth = True
    # run_config.gpu_options.visible_device_list = ''

    with tf.compat.v1.Session() as sess:
        dcgan = DCGAN(
            sess,
            input_width=FLAGS.input_width,
            input_height=FLAGS.input_height,
            output_width=FLAGS.output_width,
            output_height=FLAGS.output_height,
            batch_size=FLAGS.batch_size,
            sample_num=FLAGS.sample_num,
            dataset_name=FLAGS.dataset,
            input_fname_pattern=FLAGS.input_fname_pattern,
            crop=FLAGS.crop,
            checkpoint_dir=FLAGS.checkpoint_dir,
            data_dir=FLAGS.data_dir)

        model_vars = tf.trainable_variables()
        slim.model_analyzer.analyze_vars(model_vars, print_info=True)

        if FLAGS.train:
            dcgan.train(FLAGS)
        else:
            if not dcgan.load(FLAGS.checkpoint_dir)[0]:
                raise Exception("[!] Train a model first, then run test mode")
            if FLAGS.predict:
                dcgan.predict(FLAGS.predict_dataset)
            else:
                dcgan.test()
def main():
    # Load MNIST data
    print("[INFO] Loading MNIST handwritten digits...\n")
    (X, _), (_, _) = mnist.load_data()
    images = X.reshape(X.shape[0], 28, 28, 1)

    # Initialize DCGAN object
    gan_model = DCGAN(data=images, learning_rate=2e-04, batch_size=128, latent_dim=100)

    # Train GAN model
    gan_model.train(epochs=100, batches_per_epoch=300, checkpoint_frequency=1,
                    save_path='.././save_data/')
def main():
    dcgan = DCGAN(s_size=s_size, batch_size=batch_size)
    train_im, total_imgs = load_image()
    total_batch = int(total_imgs / batch_size)
    losses = dcgan.loss(train_im)
    train_op = dcgan.train(losses, learning_rate=learning_rate)

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.33)
    config = tf.ConfigProto(gpu_options=gpu_options, device_count={"CPU": 8},
                            inter_op_parallelism_threads=1,
                            intra_op_parallelism_threads=1)

    with tf.Session(config=config) as sess:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        init = tf.global_variables_initializer()
        sess.run(init)

        g_saver = tf.train.Saver(dcgan.g.variables)
        d_saver = tf.train.Saver(dcgan.d.variables)
        if os.path.isdir(save_dir):
            g_saver.restore(sess, tf.train.latest_checkpoint(save_dir + '/g_model'))
            d_saver.restore(sess, tf.train.latest_checkpoint(save_dir + '/d_model'))
        else:
            os.mkdir(save_dir)

        sample_z = np.float32(
            np.random.uniform(-1, 1, [dcgan.batch_size, dcgan.z_dim]))
        images = dcgan.sample_images(5, 5, inputs=sample_z)

        print("Start training")
        for step in range(1, epochs + 1):
            start_time = time.time()
            for batch in range(total_batch):
                _, g_loss, d_loss = sess.run(
                    [train_op, losses[dcgan.g], losses[dcgan.d]])
            print("epochs {} loss = G: {:.8f}, D: {:.8f} run time: {:.4f} sec"
                  .format(step, g_loss, d_loss, time.time() - start_time))
            g_saver.save(sess, save_dir + '/g_model/g.ckpt', global_step=step)
            d_saver.save(sess, save_dir + '/d_model/d.ckpt', global_step=step)
            with open('./test/%05d.jpg' % step, 'wb') as f:
                f.write(sess.run(images))

        coord.request_stop()
        coord.join(threads)
import sys, os

import numpy as np
from PIL import Image

from dcgan import DCGAN

batch_size = 128
n_noise = 100

dcgan = DCGAN(batch_size, n_noise)

# load training images, resize to 64x64, and scale pixels to [-1, 1]
dataset = []
dir_name = 'apples'
for name in os.listdir(dir_name):
    if not name.startswith('.'):
        path = dir_name + '/' + name
        img = Image.open(path)
        img = img.resize((64, 64))
        img = np.array(img, dtype=np.float32)
        img = (img - 127.5) / 127.5
        dataset.append(img)
dataset = np.array(dataset)

dcgan.train(dataset, 1000)
def main(_):
    dcgan = DCGAN(batch_size=FLAGS.batch_size, s_size=32,
                  nb_channels=FLAGS.nb_channels)  # ssize6
    traindata = read_decode(FLAGS.data_dir, dcgan.batch_size)  # , dcgan.s_size
    BelO, BelF = tf.split(traindata, [512, 512], axis=2)
    traindata = BelO
    Certainty = tf.div(tf.add(1.0, tf.add(BelF, BelO)), 2)
    CertaintyMask = tf.to_int32(Certainty > 0.4)
    # sess = tf.Session()
    # with sess.as_default():
    #     print(CertaintyMask.get_shape())
    losses = dcgan.loss(traindata)

    # feature matching
    graph = tf.get_default_graph()
    features_g = tf.reduce_mean(
        graph.get_tensor_by_name('dg/d/conv4/outputs:0'), 0)
    features_t = tf.reduce_mean(
        graph.get_tensor_by_name('dt/d/conv4/outputs:0'), 0)
    losses[dcgan.g] += tf.multiply(tf.nn.l2_loss(features_g - features_t), 0.05)

    tf.summary.scalar('g_loss', losses[dcgan.g])
    tf.summary.scalar('d_loss', losses[dcgan.d])
    train_op = dcgan.train(losses, learning_rate=0.0001)
    summary_op = tf.summary.merge_all()

    g_saver = tf.train.Saver(dcgan.g.variables, max_to_keep=15)
    d_saver = tf.train.Saver(dcgan.d.variables, max_to_keep=15)
    g_checkpoint_path = os.path.join(FLAGS.log_dir, 'g.ckpt')
    d_checkpoint_path = os.path.join(FLAGS.log_dir, 'd.ckpt')
    g_checkpoint_restore_path = os.path.join(
        FLAGS.log_dir, 'g.ckpt-' + str(FLAGS.latest_ckpt))
    d_checkpoint_restore_path = os.path.join(
        FLAGS.log_dir, 'd.ckpt-' + str(FLAGS.latest_ckpt))

    with tf.Session() as sess:
        CertaintyMask = tf.squeeze(CertaintyMask)
        # CertaintyMask_npArray = CertaintyMask.eval()
        summary_writer = tf.summary.FileWriter(FLAGS.log_dir, graph=sess.graph)
        sess.run(tf.global_variables_initializer())

        # restore or initialize generator
        if os.path.exists(g_checkpoint_restore_path + '.meta'):
            print('Restoring variables:')
            for v in dcgan.g.variables:
                print(' ' + v.name)
            g_saver.restore(sess, g_checkpoint_restore_path)

        if FLAGS.is_train and not FLAGS.is_complete:
            # restore or initialize discriminator
            if os.path.exists(d_checkpoint_restore_path + '.meta'):
                print('Restoring variables:')
                for v in dcgan.d.variables:
                    print(' ' + v.name)
                d_saver.restore(sess, d_checkpoint_restore_path)

            # setup for monitoring
            if not os.path.exists(FLAGS.images_dir):
                os.makedirs(FLAGS.images_dir)
            if not os.path.exists(FLAGS.log_dir):
                os.makedirs(FLAGS.log_dir)
            sample_z = sess.run(
                tf.random_uniform([dcgan.batch_size, dcgan.z_dim],
                                  minval=-1.0, maxval=1.0))
            images = dcgan.sample_images(5, 5, inputs=sample_z)
            filename = os.path.join(FLAGS.images_dir, '000000.jpg')
            with open(filename, 'wb') as f:
                f.write(sess.run(images))

            tf.train.start_queue_runners(sess=sess)
            for itr in range(FLAGS.latest_ckpt + 1, FLAGS.max_itr):
                start_time = time.time()
                _, g_loss, d_loss = sess.run(
                    [train_op, losses[dcgan.g], losses[dcgan.d]])
                duration = time.time() - start_time
                print('step: %d, loss: (G: %.8f, D: %.8f), time taken: %.3f'
                      % (itr, g_loss, d_loss, duration))
                if itr % 5000 == 0:
                    # Images generated
                    filename = os.path.join(FLAGS.images_dir, '%06d.jpg' % itr)
                    with open(filename, 'wb') as f:
                        f.write(sess.run(images))
                    # Summary
                    summary_str = sess.run(summary_op)
                    summary_writer.add_summary(summary_str, itr)
                    # Checkpoints
                    g_saver.save(sess, g_checkpoint_path, global_step=itr)
                    d_saver.save(sess, d_checkpoint_path, global_step=itr)

        elif FLAGS.is_complete:
            # restore discriminator
            if os.path.exists(d_checkpoint_restore_path + '.meta'):
                print('Restoring variables:')
                for v in dcgan.d.variables:
                    print(' ' + v.name)
                d_saver.restore(sess, d_checkpoint_restore_path)

            # Directory to save completed images
            if not os.path.exists(FLAGS.complete_dir):
                os.makedirs(FLAGS.complete_dir)

            # Create mask
            if FLAGS.masktype == 'center':
                scale = 0.25
                mask = np.ones(dcgan.image_shape)
                sz = dcgan.image_size
                l = int(sz * scale)
                u = int(sz * (1.0 - scale))
                mask[l:u, l:u, :] = 0.0
            if FLAGS.masktype == 'random':
                fraction_masked = 0.8
                mask = np.ones(dcgan.image_shape)
                mask[np.random.random(dcgan.image_shape[:2]) < fraction_masked] = 0.0
            if FLAGS.masktype == 'Uncertainty':
                mask = np.reshape(CertaintyMask_npArray, dcgan.image_shape)

            # Read actual images
            originals = glob(os.path.join(FLAGS.complete_src, '*.jpg'))
            batch_mask = np.expand_dims(mask, axis=0)
            for idx in range(len(originals)):
                image_src = get_image(originals[idx], dcgan.image_size,
                                      nb_channels=FLAGS.nb_channels)
                if FLAGS.nb_channels == 3:
                    image = np.expand_dims(image_src, axis=0)
                elif FLAGS.nb_channels == 1:
                    image = np.expand_dims(np.expand_dims(image_src, axis=3), axis=0)

                # Save original image (y)
                filename = os.path.join(
                    FLAGS.complete_dir, 'original_image_{:02d}.jpg'.format(idx))
                imsave(image_src, filename)

                # Save corrupted image (y . M)
                filename = os.path.join(
                    FLAGS.complete_dir, 'corrupted_image_{:02d}.jpg'.format(idx))
                if FLAGS.nb_channels == 3:
                    masked_image = np.multiply(image_src, mask)
                    imsave(masked_image, filename)
                elif FLAGS.nb_channels == 1:
                    masked_image = np.multiply(
                        np.expand_dims(image_src, axis=3), mask)
                    imsave(masked_image[:, :, 0], filename)

                zhat = np.random.uniform(-1, 1, size=(1, dcgan.z_dim))
                v = 0
                momentum = 0.9
                lr = 0.01
                for i in range(0, 1001):
                    fd = {
                        dcgan.zhat: zhat,
                        dcgan.mask: batch_mask,
                        dcgan.image: image
                    }
                    run = [
                        dcgan.complete_loss, dcgan.grad_complete_loss, dcgan.G
                    ]
                    loss, g, G_imgs = sess.run(run, feed_dict=fd)

                    v_prev = np.copy(v)
                    v = momentum * v - lr * g[0]
                    zhat += -momentum * v_prev + (1 + momentum) * v
                    zhat = np.clip(zhat, -1, 1)

                    if i % 100 == 0:
                        filename = os.path.join(
                            FLAGS.complete_dir,
                            'hats_img_{:02d}_{:04d}.jpg'.format(idx, i))
                        if FLAGS.nb_channels == 3:
                            save_images(G_imgs[0, :, :, :], filename)
                        if FLAGS.nb_channels == 1:
                            save_images(G_imgs[0, :, :, 0], filename)
                        inv_masked_hat_image = np.multiply(
                            G_imgs, 1.0 - batch_mask)
                        completed = masked_image + inv_masked_hat_image
                        filename = os.path.join(
                            FLAGS.complete_dir,
                            'completed_{:02d}_{:04d}.jpg'.format(idx, i))
                        if FLAGS.nb_channels == 3:
                            save_images(completed[0, :, :, :], filename)
                        if FLAGS.nb_channels == 1:
                            save_images(completed[0, :, :, 0], filename)

        else:
            generated = sess.run(dcgan.sample_images(8, 8))
            if not os.path.exists(FLAGS.images_dir):
                os.makedirs(FLAGS.images_dir)
            filename = os.path.join(FLAGS.images_dir, 'generated_image.jpg')
            with open(filename, 'wb') as f:
                print('write to %s' % filename)
                f.write(generated)
def main(_):
    dcgan = DCGAN(batch_size=FLAGS.batch_size, s_size=6,
                  nb_channels=FLAGS.nb_channels)
    traindata = read_decode(FLAGS.data_dir, dcgan.batch_size, dcgan.s_size)
    losses = dcgan.loss(traindata)

    # feature matching
    graph = tf.get_default_graph()
    features_g = tf.reduce_mean(
        graph.get_tensor_by_name('dg/d/conv4/outputs:0'), 0)
    features_t = tf.reduce_mean(
        graph.get_tensor_by_name('dt/d/conv4/outputs:0'), 0)
    losses[dcgan.g] += tf.multiply(tf.nn.l2_loss(features_g - features_t), 0.05)

    tf.summary.scalar('g_loss', losses[dcgan.g])
    tf.summary.scalar('d_loss', losses[dcgan.d])
    train_op = dcgan.train(losses, learning_rate=0.0001)
    summary_op = tf.summary.merge_all()

    g_saver = tf.train.Saver(dcgan.g.variables, max_to_keep=15)
    d_saver = tf.train.Saver(dcgan.d.variables, max_to_keep=15)
    g_checkpoint_path = os.path.join(FLAGS.log_dir, 'g.ckpt')
    d_checkpoint_path = os.path.join(FLAGS.log_dir, 'd.ckpt')
    g_checkpoint_restore_path = os.path.join(
        FLAGS.log_dir, 'g.ckpt-' + str(FLAGS.latest_ckpt))
    d_checkpoint_restore_path = os.path.join(
        FLAGS.log_dir, 'd.ckpt-' + str(FLAGS.latest_ckpt))

    with tf.Session() as sess:
        summary_writer = tf.summary.FileWriter(FLAGS.log_dir, graph=sess.graph)
        sess.run(tf.global_variables_initializer())

        # restore or initialize generator
        if os.path.exists(g_checkpoint_restore_path + '.meta'):
            print('Restoring variables:')
            for v in dcgan.g.variables:
                print(' ' + v.name)
            g_saver.restore(sess, g_checkpoint_restore_path)

        if FLAGS.is_train:
            # restore or initialize discriminator
            if os.path.exists(d_checkpoint_restore_path + '.meta'):
                print('Restoring variables:')
                for v in dcgan.d.variables:
                    print(' ' + v.name)
                d_saver.restore(sess, d_checkpoint_restore_path)

            # setup for monitoring
            if not os.path.exists(FLAGS.images_dir):
                os.makedirs(FLAGS.images_dir)
            if not os.path.exists(FLAGS.log_dir):
                os.makedirs(FLAGS.log_dir)
            sample_z = sess.run(
                tf.random_uniform([dcgan.batch_size, dcgan.z_dim],
                                  minval=-1.0, maxval=1.0))
            images = dcgan.sample_images(5, 5, inputs=sample_z)
            filename = os.path.join(FLAGS.images_dir, '000000.jpg')
            with open(filename, 'wb') as f:
                f.write(sess.run(images))

            tf.train.start_queue_runners(sess=sess)
            for itr in range(FLAGS.latest_ckpt + 1, FLAGS.max_itr):
                start_time = time.time()
                _, g_loss, d_loss = sess.run(
                    [train_op, losses[dcgan.g], losses[dcgan.d]])
                duration = time.time() - start_time
                # append progress to a plain-text log (original used the Python 2 `print >>` form)
                with open('./console.log', 'a') as f1:
                    print('step: %d, loss: (G: %.8f, D: %.8f), time taken: %.3f'
                          % (itr, g_loss, d_loss, duration), file=f1)
                if itr % 5000 == 0:
                    # Images generated
                    filename = os.path.join(FLAGS.images_dir, '%06d.jpg' % itr)
                    with open(filename, 'wb') as f:
                        f.write(sess.run(images))
                    # Summary
                    summary_str = sess.run(summary_op)
                    summary_writer.add_summary(summary_str, itr)
                    # Checkpoints
                    g_saver.save(sess, g_checkpoint_path, global_step=itr)
                    d_saver.save(sess, d_checkpoint_path, global_step=itr)
        else:
            generated = sess.run(dcgan.sample_images(8, 8))
            if not os.path.exists(FLAGS.images_dir):
                os.makedirs(FLAGS.images_dir)
            filename = os.path.join(FLAGS.images_dir, 'generated_image.jpg')
            with open(filename, 'wb') as f:
                print('write to %s' % filename)
                f.write(generated)
import numpy as np
from keras.datasets import fashion_mnist

from image_helper import ImageHelper
from dcgan import DCGAN

(X, _), (_, _) = fashion_mnist.load_data()
X_train = X / 127.5 - 1.
X_train = np.expand_dims(X_train, axis=3)

image_helper = ImageHelper()
generative_adversarial_network = DCGAN(X_train[0].shape, 100, image_helper, 1)
generative_adversarial_network.train(20000, X_train, batch_size=32)
import os

import numpy as np
import tensorflow as tf

from dcgan import DCGAN

flags = tf.app.flags
flags.DEFINE_integer("epoch", 25, "Epoch to train [25]")
flags.DEFINE_float("learning_rate", 0.0002, "Learning rate for adam [0.0002]")
flags.DEFINE_float("beta1", 0.5, "Momentum term of adam [0.5]")
flags.DEFINE_integer("train_size", np.inf, "The size of train images [np.inf]")
flags.DEFINE_integer("batch_size", 64, "The size of batch images [64]")
flags.DEFINE_integer("image_size", 64, "The size of image to use")
flags.DEFINE_string("dataset", "lfw-aligned-64", "Dataset directory.")
flags.DEFINE_string("checkpoint_dir", "checkpoint", "Directory name to save the checkpoints [checkpoint]")
flags.DEFINE_string("sample_dir", "samples", "Directory name to save the image samples [samples]")
FLAGS = flags.FLAGS

if not os.path.exists(FLAGS.checkpoint_dir):
    os.makedirs(FLAGS.checkpoint_dir)
if not os.path.exists(FLAGS.sample_dir):
    os.makedirs(FLAGS.sample_dir)

config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
    dcgan = DCGAN(sess, image_size=FLAGS.image_size, batch_size=FLAGS.batch_size,
                  is_crop=False, checkpoint_dir=FLAGS.checkpoint_dir)
    dcgan.train(FLAGS)
def main(argv=None):
    # Creating an object of the DCGAN
    dcgan = DCGAN(s_size=10)

    # Call to function to read data in .tfrecord format
    traindata = inputs(dcgan.batch_size, dcgan.s_size)
    print('Train data', traindata)

    # Calculating the losses
    losses = dcgan.loss(traindata)

    # Logging the generator and discriminator losses
    tf.summary.scalar('g loss', losses[dcgan.g])
    tf.summary.scalar('d loss', losses[dcgan.d])

    # Minimize the generator and the discriminator losses
    train_op = dcgan.train(losses)
    summary_op = tf.summary.merge_all()

    # Creating objects to save the generator and discriminator states
    g_saver = tf.train.Saver(dcgan.g.variables)
    d_saver = tf.train.Saver(dcgan.d.variables)

    # Directories to store the generator and discriminator checkpoints
    g_checkpoint_path = os.path.join(FLAGS.logdir, 'gckpt/')
    print('G checkpoint path: ', g_checkpoint_path)
    d_checkpoint_path = os.path.join(FLAGS.logdir, 'dckpt/')
    print('D checkpoint path: ', d_checkpoint_path)
    if not os.path.exists(g_checkpoint_path):
        os.makedirs(g_checkpoint_path)
    if not os.path.exists(d_checkpoint_path):
        os.makedirs(d_checkpoint_path)

    with tf.Session() as sess:
        summary_writer = tf.summary.FileWriter(FLAGS.logdir, graph=sess.graph)
        newStepNo = 0

        # restore generator, or fall back to the fresh initialization
        sess.run(tf.global_variables_initializer())
        gckpt = tf.train.get_checkpoint_state(g_checkpoint_path)
        if gckpt and gckpt.model_checkpoint_path:
            g_saver.restore(sess, gckpt.model_checkpoint_path)
            print('Model restored from ' + gckpt.model_checkpoint_path)
            newStepCheck = gckpt.model_checkpoint_path
            newStepNo = int(newStepCheck.split('-')[1])

        # restore discriminator if a checkpoint exists
        dckpt = tf.train.get_checkpoint_state(d_checkpoint_path)
        if dckpt and dckpt.model_checkpoint_path:
            d_saver.restore(sess, dckpt.model_checkpoint_path)
            print('Model restored from ' + dckpt.model_checkpoint_path)

        # setup for monitoring
        sample_z = sess.run(tf.random_uniform([dcgan.batch_size, dcgan.z_dim],
                                              minval=-1.0, maxval=1.0))
        images = dcgan.sample_images(1, 1, inputs=sample_z)

        # start training
        tf.train.start_queue_runners(sess=sess)
        while newStepNo <= FLAGS.max_steps:
            start_time = time.time()
            _, g_loss, d_loss = sess.run([train_op, losses[dcgan.g], losses[dcgan.d]])
            duration = time.time() - start_time
            print('{}: step {:5d}, loss = (G: {:.8f}, D: {:.8f}) ({:.3f} sec/batch)'.format(
                datetime.now(), newStepNo, g_loss, d_loss, duration))

            # save generated images
            if newStepNo % 100 == 0:
                # summary
                summary_str = sess.run(summary_op)
                summary_writer.add_summary(summary_str, newStepNo)
                # sample images
                filename = os.path.join(FLAGS.images_dir, '%05d.png' % newStepNo)
                with open(filename, 'wb') as f:
                    f.write(sess.run(images))

            # save variables
            if newStepNo % 500 == 0:
                g_saver.save(sess, g_checkpoint_path + 'g.ckpt', global_step=newStepNo)
                print('Saved model for G in checkpoint path: ', g_checkpoint_path)
                d_saver.save(sess, d_checkpoint_path + 'd.ckpt', global_step=newStepNo)
                print('Saved model for D in checkpoint path: ', d_checkpoint_path)

            newStepNo = newStepNo + 1
from utils import *
import tensorflow as tf
from dcgan import DCGAN

if __name__ == '__main__':
    # imgs = load_anime_faces()
    # start training
    with tf.Session() as sess:
        dcgan = DCGAN(sess, height=64, width=64, channel=3,
                      checkpoint_dir='./check_points/checkpoint_face01',
                      sample_dir='./sample/face01_04',
                      sample_size=36)
        # for i in range(20):
        #     dir = './final_cuted_faces/faces_{}'.format(i)
        #     dcgan.train(dir)
        dir = './final_cuted_faces'
        dcgan.train(dir)
        # dcgan.complete(imgs[0:64, :, :, :], mask_type='center', out_dir='complete_test_2')
        # dcgan.sample('sample_test_minist_5', 60)
x_train, y_train = create_dataset(128, 128, nSlices=1000, resize=1,
                                  directory='FluidArt/')  # 3 channels = RGB
assert x_train.shape[0] > 0
x_train = x_train / 255

# plot a few samples to make sure the data looks good
fig, axs = plt.subplots(4, 4)
for i in range(4):
    for j in range(4):
        axs[i, j].imshow(x_train[np.random.randint(x_train.shape[0])])
        axs[i, j].axis('off')
plt.show()

dcgan = DCGAN(img_rows=x_train[0].shape[0],
              img_cols=x_train[0].shape[1],
              channels=x_train[0].shape[2],
              latent_dim=256,
              name='fluid_256_128')
try:
    dcgan.load_weights(
        generator_file="generator ({}).h5".format(dcgan.name),
        discriminator_file="discriminator ({}).h5".format(dcgan.name))
except (IOError, OSError):
    pass  # no saved weights yet; start training from scratch

dcgan.train(x_train, epochs=args.epochs, batch_size=32, save_interval=500)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 24 19:30:11 2019

@author: clytie
"""

if __name__ == "__main__":
    import os

    import cv2
    import numpy as np
    from tqdm import tqdm

    from dcgan import DCGAN

    image_path = os.listdir("faces")
    datas = []
    for path in tqdm(image_path):
        if "jpg" in path or "png" in path:
            datas.append(cv2.imread(f"faces/{path}"))
    datas = np.asarray(datas)

    img_dim = (96, 96, 3)
    dcgan = DCGAN(img_dim)
    dcgan.train(datas)
from dcgan import DCGAN

lr = 0.0001
root = '/Users/mac/Documents/GitHub/GAN/google/data/'
batch_size = 64

model = DCGAN()
model.load_dataset(root=root, batch_size=batch_size)
model.build_model(lr=lr, Epoch=50)
model.train()
max_checks_without_progress = 100   # stop once this many checks pass without improvement
check_since_last_progress = 0
best_loss_val = float('inf')        # best (lowest) discriminator loss seen so far
best_model_params = None            # parameters of the best model seen so far

print('Data Loading ...')
total_x = read_data()
print('Data Loaded!!!')

print('Learning Started!')
for epoch in range(epochs):
    epoch_stime = time.time()
    g_tot_loss, d_tot_loss = 0., 0.

    # train part
    for start_idx in range(0, 12000, batch_size):
        g_loss, d_loss, *_ = m.train(total_x[start_idx: start_idx + batch_size])
        g_tot_loss += g_loss / batch_size
        d_tot_loss += d_loss / batch_size

    if epoch % 10 == 0:
        # create_image(m.generate(), epoch + 1)
        save_image(m.sample_images(), sess, epoch + 1)

    # early stopping condition check
    if d_tot_loss < best_loss_val:
        best_loss_val = d_tot_loss
        check_since_last_progress = 0
        best_model_params = get_model_params()
        saver.save(sess, 'log/dcgan_v1.ckpt')
    else:
        check_since_last_progress += 1
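# A minimal sketch (assumption) of the `get_model_params` helper used for the early-stopping
# snapshot above, following the common TF1 pattern of caching variable values in memory;
# `restore_model_params` is the matching, hypothetical helper for rolling back to the best model.
import tensorflow as tf

def get_model_params():
    # snapshot every global variable's current value into a plain dict
    gvars = tf.global_variables()
    return {gvar.op.name: value
            for gvar, value in zip(gvars, tf.get_default_session().run(gvars))}

def restore_model_params(model_params):
    # push the cached values back through each variable's Assign op
    gvar_names = list(model_params.keys())
    assign_ops = {name: tf.get_default_graph().get_operation_by_name(name + '/Assign')
                  for name in gvar_names}
    init_values = {name: op.inputs[1] for name, op in assign_ops.items()}
    feed_dict = {init_values[name]: model_params[name] for name in gvar_names}
    tf.get_default_session().run(assign_ops, feed_dict=feed_dict)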
from dcgan import DCGAN

num_repeat = 3
for i in range(num_repeat):
    dcgan = DCGAN()
    dcgan.train()
def main(model, init_train, start_epoch, cycle, epochs, batch_size, save_intervals):
    model = model.upper()
    if model == 'DCGAN_1':
        my_model = DCGAN(name='DCGAN_1')
    elif model == 'DCGAN_2':
        my_model = DCGAN(name='DCGAN_2')
    elif model == 'DCGAN_3':
        my_model = DCGAN(name='DCGAN_3')
    elif model == 'VAE_1':
        my_model = VAE(name='VAE_1')
    elif model == 'VAE_2':
        my_model = VAE(name='VAE_2')
    elif model == 'VAE_3':
        my_model = VAE(name='VAE_3')
    elif model == 'VAE_4':
        my_model = VAE(name='VAE_4')
    else:
        print('The selected model {} is not in the list '
              '[DCGAN_1, DCGAN_2, DCGAN_3, VAE_1, VAE_2, VAE_3, VAE_4]'.format(model))

    print("Python main program for generating images using {}".format(model))

    ## If init_train, preprocess the scraped images and save them as a compressed .npz file;
    ## otherwise load the previously saved file.
    if init_train:
        print("Start initial process of building the {} model.".format(model))
        print("Do preprocessing by loading scraped images...")
        ### manually merged into merged_japanese, so take that subdirectory as datapath source:
        if False:
            ## select genre = "yakusha-e"
            image_resized_1 = preprocess(genre_or_style="yakusha-e", min_vals=[128, 128])
            ## select style = "Japanese Art"
            image_resized_2 = preprocess(genre_or_style="Japanese Art", min_vals=[128, 128])
            final_images_stacked = np.vstack((image_resized_1, image_resized_2))
            del image_resized_1, image_resized_2
            gc.collect()
        else:
            final_images_stacked = preprocess(genre_or_style="merged_japanese",
                                              min_vals=[128, 128])
        ## save the train data so that upcoming training cycles can load the file
        ## instead of calling preprocess() again
        try:
            print("Save preprocessed image data to ../data/train_data.npz in order to "
                  "retrieve it in upcoming training cycles.")
            np.savez_compressed(file="../data/train_data.npz", a=final_images_stacked)
        except:
            print("Could not save train data on machine for upcoming training cycles.")
    else:
        try:
            print("Load preprocessed image data from earlier training cycles.")
            final_images_stacked = np.load(file="../data/train_data.npz")["a"]
        except:
            ### manually merged into merged_japanese, so take that subdirectory as datapath source:
            if False:
                ## select genre = "yakusha-e"
                image_resized_1 = preprocess(genre_or_style="yakusha-e", min_vals=[128, 128])
                ## select style = "Japanese Art"
                image_resized_2 = preprocess(genre_or_style="Japanese Art", min_vals=[128, 128])
                final_images_stacked = np.vstack((image_resized_1, image_resized_2))
                del image_resized_1, image_resized_2
                gc.collect()
            else:
                final_images_stacked = preprocess(genre_or_style="merged_japanese",
                                                  min_vals=[128, 128])

    if init_train:
        print("Start initial training of the {} model:".format(model))
        print("There are {} images provided for training".format(len(final_images_stacked)))
        my_model.train(data=final_images_stacked, epochs=epochs, batch_size=batch_size,
                       save_intervals=save_intervals, init_train=init_train,
                       start_epoch=start_epoch, cycle=cycle)
    else:
        if model in ['DCGAN_1', 'DCGAN_2', 'DCGAN_3']:
            print("Using last epoch {} of generator and discriminator for the stacked "
                  "{} model:".format(start_epoch, model))
            generator_weights = "../model/{}/epoch_{}_generator.h5".format(model, start_epoch)
            discriminator_weights = "../model/{}/epoch_{}_discriminator.h5".format(model, start_epoch)
            # load generator weights
            my_model.generator.load_weights(filepath=generator_weights)
            # load discriminator weights
            my_model.discriminator.load_weights(filepath=discriminator_weights)
            # train the DCGAN starting from the last epoch's weights
            print("Training the {} model based on last epoch weights {}.".format(model, start_epoch))
        elif model in ['VAE_1', 'VAE_2', 'VAE_3', 'VAE_4']:
            print("Using last epoch {} of encoder and decoder for the stacked "
                  "{} model:".format(start_epoch, model))
            encoder_weights = "../model/{}/epoch_{}_encoder.h5".format(model, start_epoch)
            decoder_weights = "../model/{}/epoch_{}_decoder.h5".format(model, start_epoch)
            vae_weights = "../model/{}/epoch_{}_vae.h5".format(model, start_epoch)
            # load encoder weights
            my_model.encoder.load_weights(filepath=encoder_weights)
            # load decoder weights
            my_model.decoder.load_weights(filepath=decoder_weights)
            # load VAE weights
            my_model.vae.load_weights(filepath=vae_weights)
            # train the VAE starting from the last epoch's weights
            print("Training the {} model based on last epoch weights {}.".format(model, start_epoch))
        else:
            print('Selected model {} is not available'.format(model))
        my_model.train(data=final_images_stacked, epochs=epochs, batch_size=batch_size,
                       save_intervals=save_intervals, init_train=init_train,
                       start_epoch=start_epoch, cycle=cycle)
def main(_):
    dcgan = DCGAN(s_size=6)
    traindata = read_decode(dcgan.batch_size, dcgan.s_size)
    losses = dcgan.loss(traindata)

    # feature matching
    graph = tf.get_default_graph()
    features_g = tf.reduce_mean(
        graph.get_tensor_by_name('dg/d/conv4/outputs:0'), 0)
    features_t = tf.reduce_mean(
        graph.get_tensor_by_name('dt/d/conv4/outputs:0'), 0)
    losses[dcgan.g] += tf.multiply(tf.nn.l2_loss(features_g - features_t), 0.05)

    tf.summary.scalar('g loss', losses[dcgan.g])
    tf.summary.scalar('d loss', losses[dcgan.d])
    train_op = dcgan.train(losses, learning_rate=0.0001)
    summary_op = tf.summary.merge_all()

    g_saver = tf.train.Saver(dcgan.g.variables, max_to_keep=15)
    d_saver = tf.train.Saver(dcgan.d.variables, max_to_keep=15)
    g_checkpoint_path = os.path.join(FLAGS.log_dir, 'g.ckpt')
    d_checkpoint_path = os.path.join(FLAGS.log_dir, 'd.ckpt')
    g_checkpoint_restore_path = os.path.join(
        FLAGS.log_dir, 'g.ckpt-' + str(FLAGS.latest_ckpt))
    d_checkpoint_restore_path = os.path.join(
        FLAGS.log_dir, 'd.ckpt-' + str(FLAGS.latest_ckpt))

    with tf.Session() as sess:
        summary_writer = tf.summary.FileWriter(FLAGS.log_dir, graph=sess.graph)
        sess.run(tf.global_variables_initializer())

        # restore or initialize generator
        if os.path.exists(g_checkpoint_restore_path + '.meta'):
            print('Restoring variables:')
            for v in dcgan.g.variables:
                print(' ' + v.name)
            g_saver.restore(sess, g_checkpoint_restore_path)

        if FLAGS.is_train and not FLAGS.is_complete:
            # restore or initialize discriminator
            if os.path.exists(d_checkpoint_restore_path + '.meta'):
                print('Restoring variables:')
                for v in dcgan.d.variables:
                    print(' ' + v.name)
                d_saver.restore(sess, d_checkpoint_restore_path)

            # setup for monitoring
            if not os.path.exists(FLAGS.images_dir):
                os.makedirs(FLAGS.images_dir)
            if not os.path.exists(FLAGS.log_dir):
                os.makedirs(FLAGS.log_dir)
            sample_z = sess.run(
                tf.random_uniform([dcgan.batch_size, dcgan.z_dim],
                                  minval=-1.0, maxval=1.0))
            images = dcgan.sample_images(5, 5, inputs=sample_z)
            filename = os.path.join(FLAGS.images_dir, '000000.jpg')
            with open(filename, 'wb') as f:
                f.write(sess.run(images))

            tf.train.start_queue_runners(sess=sess)
            for itr in range(FLAGS.latest_ckpt + 1, FLAGS.max_itr):
                start_time = time.time()
                _, g_loss, d_loss = sess.run(
                    [train_op, losses[dcgan.g], losses[dcgan.d]])
                duration = time.time() - start_time
                print('step: %d, loss: (G: %.8f, D: %.8f), time taken: %.3f'
                      % (itr, g_loss, d_loss, duration))
                if itr % 5000 == 0:
                    # Images generated
                    filename = os.path.join(FLAGS.images_dir, '%06d.jpg' % itr)
                    with open(filename, 'wb') as f:
                        f.write(sess.run(images))
                    # Summary
                    summary_str = sess.run(summary_op)
                    summary_writer.add_summary(summary_str, itr)
                    # Checkpoints
                    g_saver.save(sess, g_checkpoint_path, global_step=itr)
                    d_saver.save(sess, d_checkpoint_path, global_step=itr)

        elif FLAGS.is_complete:
            # restore discriminator
            if os.path.exists(d_checkpoint_restore_path + '.meta'):
                print('Restoring variables:')
                for v in dcgan.d.variables:
                    print(' ' + v.name)
                d_saver.restore(sess, d_checkpoint_restore_path)

            # Directory to save completed images
            if not os.path.exists(FLAGS.complete_dir):
                os.makedirs(FLAGS.complete_dir)

            # Create a centered square mask
            scale = 0.25
            mask = np.ones(dcgan.image_shape)
            l = int(dcgan.image_size * scale)
            u = int(dcgan.image_size * (1.0 - scale))
            mask[l:u, l:u, :] = 0.0
            masks = np.expand_dims(mask, axis=0)

            # Read actual images
            images = glob(os.path.join(FLAGS.complete_src, '*.jpg'))
            for idx in range(len(images)):
                image_src = get_image(images[idx], dcgan.image_size)
                image = np.expand_dims(image_src, axis=0)

                # Save image after crop (y)
                orig_fn = os.path.join(
                    FLAGS.complete_dir, 'original_image_{:02d}.jpg'.format(idx))
                imsave(image_src, orig_fn)

                # Save corrupted image (y . M)
                corrupted_fn = os.path.join(
                    FLAGS.complete_dir, 'corrupted_image_{:02d}.jpg'.format(idx))
                masked_image = np.multiply(image_src, mask)
                imsave(masked_image, corrupted_fn)

                zhat = np.random.uniform(-1, 1, size=(1, dcgan.z_dim))
                v = 0
                momentum = 0.9
                lr = 0.01
                for i in range(0, 10001):
                    fd = {
                        dcgan.zhat: zhat,
                        dcgan.mask: masks,
                        dcgan.image: image
                    }
                    run = [
                        dcgan.complete_loss, dcgan.grad_complete_loss, dcgan.G
                    ]
                    loss, g, G_imgs = sess.run(run, feed_dict=fd)

                    v_prev = np.copy(v)
                    v = momentum * v - lr * g[0]
                    zhat += -momentum * v_prev + (1 + momentum) * v
                    zhat = np.clip(zhat, -1, 1)

                    if i % 100 == 0:
                        hats_fn = os.path.join(
                            FLAGS.complete_dir,
                            'hats_img_{:02d}_{:04d}.jpg'.format(idx, i))
                        save_images(G_imgs[0, :, :, :], hats_fn)
                        inv_masked_hat_image = np.multiply(G_imgs, 1.0 - masks)
                        completed = masked_image + inv_masked_hat_image
                        complete_fn = os.path.join(
                            FLAGS.complete_dir,
                            'completed_{:02d}_{:04d}.jpg'.format(idx, i))
                        save_images(completed[0, :, :, :], complete_fn)

        else:
            generated = sess.run(dcgan.sample_images(8, 8))
            if not os.path.exists(FLAGS.images_dir):
                os.makedirs(FLAGS.images_dir)
            filename = os.path.join(FLAGS.images_dir, 'generated_image.jpg')
            with open(filename, 'wb') as f:
                print('write to %s' % filename)
                f.write(generated)
def normalize(X):
    return (X - 127.5) / 127.5


def denormalize(X):
    return ((X + 1.0) / 2.0 * 255.0).astype(dtype=np.uint8)


if __name__ == '__main__':
    batch_size = 16
    epochs = 1000
    input_dim = 30
    g_optim = Adam(lr=0.0001, beta_1=0.5, beta_2=0.9)
    d_optim = Adam(lr=0.0001, beta_1=0.5, beta_2=0.9)

    ### 0. prepare data
    X_train, X_test, y_test = load_cucumber()
    X_train = normalize(X_train)
    X_test = normalize(X_test)
    input_shape = X_train[0].shape
    X_test_original = X_test.copy()

    ### 1. train generator & discriminator
    dcgan = DCGAN(input_dim, input_shape)
    dcgan.compile(g_optim, d_optim)
    g_losses, d_losses = dcgan.train(epochs, batch_size, X_train)
    with open('loss.csv', 'w') as f:
        for g_loss, d_loss in zip(g_losses, d_losses):
            f.write(str(g_loss) + ',' + str(d_loss) + '\n')
import os, sys, shutil
import argparse

import numpy as np

from dcgan import BatchGenerator, DCGAN

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--nBatch", "-b", dest="nBatch", type=int, default=64)
    parser.add_argument("--learnRate", "-r", dest="learnRate", type=float, default=1e-5)
    parser.add_argument("--saveFolder", "-s", dest="saveFolder", type=str, default="models")
    parser.add_argument("--reload", "-l", dest="reload", type=str, default=None)
    args = parser.parse_args()
    args.zdim = 49

    batch = BatchGenerator()
    gan = DCGAN(isTraining=True, imageSize=[28, 28], args=args)
    gan.train(f_batch=batch.getBatch)
def main(argv=None):
    """
    Main function that builds the input pipeline and trains the model.
    """
    if len(argv) < 2:
        print("Please input desired dataset in cmd line: `lsun` or `celeb`.")
        sys.exit()

    dcgan = DCGAN(batch_size=64, s_size=4)
    traindata = None
    if argv[1] == 'lsun':
        # load input pipeline for LSUN dataset
        traindata = load_data.lsun_inputs(dcgan.batch_size, dcgan.s_size)
    elif argv[1] == 'celeb':
        # load input pipeline for CelebA dataset
        traindata = load_data.celeb_inputs(dcgan.batch_size, dcgan.s_size)

    losses = dcgan.loss(traindata)

    # feature matching
    graph = tf.get_default_graph()
    features_g = tf.reduce_mean(graph.get_tensor_by_name('dg/d/conv4/outputs:0'), 0)
    features_t = tf.reduce_mean(graph.get_tensor_by_name('dt/d/conv4/outputs:0'), 0)
    # adding the regularization term
    losses[dcgan.g] += tf.multiply(tf.nn.l2_loss(features_t - features_g), 0.05)

    # train and summary
    tf.summary.scalar('g_loss', losses[dcgan.g])
    tf.summary.scalar('d_loss', losses[dcgan.d])
    train_op = dcgan.train(losses, learning_rate=0.0002)
    summary_op = tf.summary.merge_all()

    g_saver = tf.train.Saver(dcgan.g.variables)
    d_saver = tf.train.Saver(dcgan.d.variables)
    g_checkpoint_path = os.path.join(FLAGS.logdir, 'g.ckpt')
    d_checkpoint_path = os.path.join(FLAGS.logdir, 'd.ckpt')

    with tf.Session() as sess:
        summary_writer = tf.summary.FileWriter(FLAGS.logdir, graph=sess.graph)

        # restore or initialize generator
        sess.run(tf.global_variables_initializer())
        if os.path.exists(g_checkpoint_path):
            print('restore variables:')
            for v in dcgan.g.variables:
                print(' ' + v.name)
            g_saver.restore(sess, g_checkpoint_path)
        if os.path.exists(d_checkpoint_path):
            print('restore variables:')
            for v in dcgan.d.variables:
                print(' ' + v.name)
            d_saver.restore(sess, d_checkpoint_path)

        # setup for monitoring
        sample_z = sess.run(tf.random_uniform([dcgan.batch_size, dcgan.z_dim],
                                              minval=-1.0, maxval=1.0))
        images = dcgan.sample_images(inputs=sample_z)

        # start training
        tf.train.start_queue_runners(sess=sess)
        for step in range(FLAGS.max_steps):
            start_time = time.time()
            _, g_loss, d_loss = sess.run([train_op, losses[dcgan.g], losses[dcgan.d]])
            duration = time.time() - start_time
            print('{}: step {:5d}, loss = (G: {:.8f}, D: {:.8f}) ({:.3f} sec/batch)'.format(
                datetime.now(), step, g_loss, d_loss, duration))

            # save generated images
            if step % 100 == 0:
                # summary
                summary_str = sess.run(summary_op)
                summary_writer.add_summary(summary_str, step)
                # sample images
                filename = os.path.join(FLAGS.images_dir, '%05d.jpg' % step)
                with open(filename, 'wb') as f:
                    f.write(sess.run(images))

            # save variables
            if step % 500 == 0:
                g_saver.save(sess, g_checkpoint_path, global_step=step)
                d_saver.save(sess, d_checkpoint_path, global_step=step)
# tensorflow.app.flags.DEFINE_integer('num_examples_per_epoch_for_train', 5000,
#                                     """number of examples for train""")

def get_images_batch():
    # placeholder: should return a batch of training images as a tensor
    return []

dcgan = DCGAN(
    g_depths=[8192, 4096, 2048, 1024, 512, 256, 128],
    d_depths=[64, 128, 256, 512, 1024, 2048, 4096],
    s_size=4,
)

train_images = get_images_batch()
losses = dcgan.loss(train_images)
train_op = dcgan.train(losses)

with tensorflow.Session() as sess:
    sess.run(tensorflow.global_variables_initializer())
    for step in range(FLAGS.max_steps):
        _, g_loss_value, d_loss_value = sess.run(
            [train_op, losses[dcgan.g], losses[dcgan.d]])

images = dcgan.sample_images()
with tensorflow.Session() as sess:
    # restore trained variables
    generated = sess.run(images)
    with open('output.binary', 'wb') as f:
        f.write(generated)
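# A minimal sketch (assumption) of a concrete get_images_batch, using the old TF1
# queue-runner input pipeline the surrounding scripts rely on; the glob pattern, image
# size, and batch size are illustrative placeholders, and pixels are scaled to the tanh
# range [-1, 1]. Queue runners and local-variable initialization still have to be set up
# by the caller before this batch tensor can be evaluated.
import tensorflow as tf

def get_images_batch(pattern='data/*.jpg', batch_size=128, size=64):
    files = tf.train.match_filenames_once(pattern)
    filename_queue = tf.train.string_input_producer(files)
    _, value = tf.WholeFileReader().read(filename_queue)
    image = tf.image.decode_jpeg(value, channels=3)
    image = tf.image.resize_images(image, [size, size])
    image = tf.cast(image, tf.float32) / 127.5 - 1.0
    return tf.train.shuffle_batch([image], batch_size=batch_size,
                                  capacity=4 * batch_size,
                                  min_after_dequeue=batch_size)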
from keras.datasets import cifar10

from dcgan import DCGAN

if __name__ == '__main__':
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()

    # keep only birds (CIFAR-10 class 2), then scale images to [0, 1]
    x_train = x_train[(y_train == 2).reshape(-1)]
    x_train = x_train / 255

    dcgan = DCGAN(img_rows=x_train[0].shape[0],
                  img_cols=x_train[0].shape[1],
                  channels=x_train[0].shape[2],
                  latent_dim=128,
                  name='cifar10')
    dcgan.train(x_train, epochs=10001, batch_size=32, save_interval=500)