def build(x):
    real_label = 1.0
    fake_label = 0.0

    tf.summary.histogram('input', x)
    tf.summary.image(
        'input_img',
        tf.cast(tf.clip_by_value(input_data.invert_norm(x), 0, 255), tf.uint8))

    # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
    # train with real
    optimizerD = tf.train.AdamOptimizer(learning_rate=opts.learning_rate,
                                        beta1=opts.adam_beta1,
                                        beta2=opts.adam_beta2,
                                        epsilon=opts.opt_epsilon)
    output_x = discriminator_net(x, True, opts)
    errD_real = criterion(real_label, output_x)
    tf.summary.scalar('errD_real', errD_real)

    # train with fake
    noise = tf.random.uniform([opts.batch_size, 1, 1, opts.nz], -1.0, 1.0)
    fake = generator_net(noise, True, opts)
    tf.summary.histogram('fake', fake)
    tf.summary.image(
        'fake_img',
        tf.cast(tf.clip_by_value(input_data.invert_norm(fake), 0, 255), tf.uint8))
    output_fake = discriminator_net(fake, True, opts)
    errD_fake = criterion(fake_label, output_fake)
    errD = errD_real + errD_fake
    tf.summary.scalar('errD_fake', errD_fake)
    tf.summary.scalar('errD', errD)

    # (2) Update G network: maximize log(D(G(z)))
    optimizerG = tf.train.AdamOptimizer(learning_rate=opts.learning_rate,
                                        beta1=opts.adam_beta1,
                                        beta2=opts.adam_beta2,
                                        epsilon=opts.opt_epsilon)
    errG = criterion(real_label, output_fake)
    tf.summary.scalar('errG', errG)

    t_vars = tf.trainable_variables()
    d_vars = [var for var in t_vars if 'Discriminator' in var.name]
    g_vars = [var for var in t_vars if 'Generator' in var.name]

    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    train_op_D = optimizerD.minimize(loss=errD, var_list=d_vars)
    # first update the D network, then update the G network twice
    with tf.control_dependencies([train_op_D]):
        train_op_G_1 = optimizerG.minimize(loss=errG, var_list=g_vars)
    with tf.control_dependencies([train_op_G_1]):
        train_op_G_2 = optimizerG.minimize(loss=errG, var_list=g_vars)
    with tf.control_dependencies(update_ops):
        # tf.group creates a new op inside this context, so the UPDATE_OPS
        # (batch-norm moving averages) actually run with the train op; a bare
        # assignment here would not pick up the control dependencies.
        train_op = tf.group(train_op_G_2)
    return train_op, errD_real, errD_fake, errG
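# --- Minimal training-loop sketch (not part of the original file) ---
# Assumptions, for illustration only: input_pipeline() is a hypothetical function
# returning a normalized image batch tensor, and opts.train_dir / opts.max_steps
# exist. The real project may drive build() differently; this only shows how the
# returned ops fit together.
def train_sketch():
    with tf.Graph().as_default():
        x = input_pipeline()  # hypothetical: [batch_size, H, W, opts.nc] float batch
        train_op, errD_real, errD_fake, errG = build(x)
        summary_op = tf.summary.merge_all()
        saver = tf.train.Saver()
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            writer = tf.summary.FileWriter(opts.train_dir, sess.graph)
            for step in range(opts.max_steps):
                # one run = one D step followed by two G steps (see the control
                # dependencies set up in build())
                _, d_real, d_fake, g, summ = sess.run(
                    [train_op, errD_real, errD_fake, errG, summary_op])
                writer.add_summary(summ, step)
                if step % 1000 == 0:
                    saver.save(sess, opts.train_dir + '/model.ckpt', global_step=step)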
def sample(filename='output.jpg'):
    with tf.Graph().as_default():
        batch_size = 12
        inoise = tf.random.uniform([batch_size, opts.nz], -1.0, 1.0)
        categorical_code = tf.fill((batch_size, 1), 9)
        categorical_code = tf.one_hot(categorical_code, opts.num_categorical)
        categorical_code = tf.reshape(categorical_code, (batch_size, -1))
        continuous_code = tf.random.normal((batch_size, opts.num_continuous))
        noise = tf.concat([inoise, categorical_code, continuous_code], 1)
        noise = tf.reshape(noise, (batch_size, 1, 1, -1))
        print(noise)
        # run the generator in inference mode (training=False)
        fake = generator_net(noise, False, opts)

        checkpoint_path = opts.sample_checkpoint_path
        if tf.gfile.IsDirectory(checkpoint_path):
            checkpoint_path = tf.train.latest_checkpoint(checkpoint_path)

        saver = tf.train.Saver()
        with tf.Session() as sess:
            saver.restore(sess, checkpoint_path)
            output = sess.run(fake)
            output = input_data.invert_norm(output)
            for idx, img in enumerate(output):
                if opts.nc == 1:
                    img = np.squeeze(img, -1)
                utils.imsave(str(idx + 1) + '_' + filename, img)
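# Note (illustrative, not in the original): the sampler above fixes the categorical
# code to class 9 for every row. A common InfoGAN visualization is to sweep one row
# per category instead, so each output image varies only in the learned discrete
# factor. Assuming batch_size == opts.num_categorical, that is a one-line change:
#
#     categorical_code = tf.one_hot(tf.range(opts.num_categorical), opts.num_categorical)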
def sample(filename='output.jpg'):
    with tf.Graph().as_default():
        # set training=True for good-quality images
        fake = generator_net(tf.constant(fixed_noise), True, opts)

        checkpoint_path = opts.sample_checkpoint_path
        if tf.gfile.IsDirectory(checkpoint_path):
            checkpoint_path = tf.train.latest_checkpoint(checkpoint_path)

        saver = tf.train.Saver()
        with tf.Session() as sess:
            saver.restore(sess, checkpoint_path)
            output = sess.run(fake)
            output = input_data.invert_norm(output)
            output = np.squeeze(output, 0)
            if opts.nc == 1:
                output = np.squeeze(output, -1)
            utils.imsave(filename, output)
def add_summary_img(name, img):
    tf.summary.image(
        name,
        tf.cast(tf.clip_by_value(input_data.invert_norm(img), 0, 255), tf.uint8))
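# add_summary_img factors out the invert_norm / clip / cast boilerplate that appears
# twice in build() above; with this helper those two image summaries reduce to:
#
#     add_summary_img('input_img', x)
#     add_summary_img('fake_img', fake)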