try:
    os.makedirs(IMAGES_DIR)
except OSError:
    pass

# placeholders for data going into the network
global_step = tf.Variable(0, name='global_step', trainable=False)
z = tf.placeholder(tf.float32, shape=(BATCH_SIZE, 100), name='z')

train_images_list = data_ops.loadData(DATA_DIR, DATASET)
filename_queue = tf.train.string_input_producer(train_images_list)
real_images = data_ops.read_input_queue(filename_queue, BATCH_SIZE)

# generated images
gen_images = netG(z, BATCH_SIZE)

# get the output from D on the real and fake data
errD_real = netD(real_images, BATCH_SIZE, SELU, NORM)
errD_fake = netD(gen_images, BATCH_SIZE, SELU, NORM, reuse=True)

# cost functions
errD = tf.reduce_mean(errD_real) - tf.reduce_mean(errD_fake)
errG = tf.reduce_mean(errD_fake)

# gradient penalty: interpolate between real and generated images,
# then measure the norm of D's gradient at the interpolated points
epsilon = tf.random_uniform([], 0.0, 1.0)
x_hat = real_images * epsilon + (1 - epsilon) * gen_images
d_hat = netD(x_hat, BATCH_SIZE, SELU, NORM, reuse=True)
gradients = tf.gradients(d_hat, x_hat)[0]
# the snippet was truncated here; summing over the H, W, C axes of the
# image tensor is the standard completion
slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), axis=[1, 2, 3]))
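# Hedged sketch (not in the original snippet): the usual WGAN-GP
# continuation -- fold the penalty into the critic loss and build the
# train ops. The penalty weight 10 and the Adam settings follow the
# WGAN-GP paper's defaults; the 'd_'/'g_' variable-name filters are an
# assumption about how netD/netG scope their variables.
gradient_penalty = tf.reduce_mean((slopes - 1.0) ** 2)
errD += 10.0 * gradient_penalty

t_vars = tf.trainable_variables()
d_vars = [v for v in t_vars if 'd_' in v.name]
g_vars = [v for v in t_vars if 'g_' in v.name]

G_train_op = tf.train.AdamOptimizer(learning_rate=1e-4, beta1=0.5, beta2=0.9) \
                     .minimize(errG, var_list=g_vars, global_step=global_step)
D_train_op = tf.train.AdamOptimizer(learning_rate=1e-4, beta1=0.5, beta2=0.9) \
                     .minimize(errD, var_list=d_vars)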
checkpoint_dir = sys.argv[1]

n = 15  # cols
m = 5   # rows
num_images = n * m

# white canvas with a 10-pixel gutter around each generated image
img_size = (112, 112, 3)
canvas = 255 * np.ones(
    (m * img_size[0] + (10 * m) + 10, n * img_size[1] + (10 * n) + 10, 3),
    dtype=np.uint8)

z = tf.placeholder(tf.float32, shape=(num_images, 1024), name='z')
generated_images = netG(z, num_images)

init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)

saver = tf.train.Saver()
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
    print "Restoring previous model..."
    try:
        saver.restore(sess, ckpt.model_checkpoint_path)
        print "Model restored"
    except:
        print "Could not restore model"
        raise
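# Hedged sketch (not in the original snippet): fill the canvas with
# samples and write it to disk. The 10-pixel offsets match the canvas
# arithmetic above; rescaling from [-1, 1] to [0, 255] assumes a
# tanh-output generator, and cv2 (with its BGR channel order) is an
# assumed dependency for saving the PNG.
import cv2

batch_z = np.random.normal(0.0, 1.0, size=(num_images, 1024)).astype(np.float32)
gen_imgs = sess.run(generated_images, feed_dict={z: batch_z})

idx = 0
for r in range(m):
    for c in range(n):
        img = (gen_imgs[idx] + 1.0) * 127.5  # [-1, 1] -> [0, 255]
        y = 10 + r * (img_size[0] + 10)
        x = 10 + c * (img_size[1] + 10)
        canvas[y:y + img_size[0], x:x + img_size[1], :] = img.astype(np.uint8)
        idx += 1
cv2.imwrite('canvas.png', canvas)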
# placeholders for data going into the network
global_step = tf.Variable(0, name='global_step', trainable=False)
z1 = tf.placeholder(tf.float32, shape=(BATCH_SIZE, 100), name='z1')
z2 = tf.placeholder(tf.float32, shape=(BATCH_SIZE, 100), name='z2')

train_images_list = data_ops.loadData(DATA_DIR, DATASET)
filename_queue = tf.train.string_input_producer(train_images_list)

# sample from the true data
real_images = data_ops.read_input_queue(filename_queue, BATCH_SIZE)

# dummy pass to create D's variables before the reuse=True calls below
dummy = netD(real_images, reuse=False)

# sample two independent batches from the generator
gen_images1 = netG(z1, BATCH_SIZE)
gen_images2 = netG(z2, BATCH_SIZE, reuse=True)

# define the critic: f(x) = ||h(x) - h(x_g2)|| - ||h(x)||
def critic(x):
    return tf.norm(netD(x, reuse=True) - netD(gen_images2, reuse=True), axis=1) \
         - tf.norm(netD(x, reuse=True), axis=1)

# sample epsilon from a uniform distribution
epsilon = tf.random_uniform([], 0.0, 1.0)

# interpolate between the real batch and the first generated batch
x_hat = epsilon * real_images + (1 - epsilon) * gen_images1
d_hat = critic(x_hat)

# norm of the critic's gradient at the interpolated points
ddx = tf.gradients(d_hat, x_hat)[0]
ddx = tf.norm(ddx, axis=1)
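# Hedged sketch (not in the original snippet): the standard Cramer GAN
# continuation -- the surrogate generator loss over the two independent
# generator samples, and the critic loss with the gradient penalty
# built from ddx above. The penalty weight 10.0 is an assumption
# following the WGAN-GP convention.
g_loss = tf.reduce_mean(
    tf.norm(netD(real_images, reuse=True) - netD(gen_images1, reuse=True), axis=1)
  + tf.norm(netD(real_images, reuse=True) - netD(gen_images2, reuse=True), axis=1)
  - tf.norm(netD(gen_images1, reuse=True) - netD(gen_images2, reuse=True), axis=1))

gradient_penalty = tf.reduce_mean(tf.square(ddx - 1.0)) * 10.0
d_loss = -tf.reduce_mean(critic(real_images) - critic(gen_images1)) + gradient_penalty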