import sys
import shutil

# load_config and GAN are provided by the project's own modules.


def evaluate():
    if len(sys.argv) < 4:
        print("Usage: python3 evaluate.py [config suffix] [model name] "
              "[image files name1] [image files name2] ...")
        exit(-1)
    if len(sys.argv) == 4:
        print("Note: processing a single image at a time may be inefficient "
              "- try multiple inputs")
        print("(TODO: batch processing when images have the same resolution)")
        print()
    print("Initializing...")
    config_name = sys.argv[1]
    # Copy the model's saved config into place so load_config('tmp') picks it up.
    shutil.copy(
        'models/%s/%s/scripts/config_%s.py' %
        (config_name, sys.argv[2], config_name), 'config_tmp.py')
    cfg = load_config('tmp')
    cfg.name = sys.argv[1] + '/' + sys.argv[2]
    net = GAN(cfg, restore=True)
    net.restore(20000)  # restore the checkpoint saved at step 20000
    spec_files = sys.argv[3:]
    print('Processing files: {}'.format(spec_files))
    net.eval(spec_files=spec_files, step_by_step=True)
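# A minimal entry-point sketch, assuming this function lives in evaluate.py as
# its usage string suggests; the config, model, and file names in the example
# invocation are hypothetical.
#
#   python3 evaluate.py my_config my_model image1.png image2.png
#
if __name__ == '__main__':
    evaluate()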
import os
import pickle as pkl

import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from scipy.io import loadmat

# GAN, generator, download_data, preprocessor and utils are the project's own
# modules; real_size, z_size and learning_rate are module-level hyperparameters
# defined elsewhere in this script.


def main(FLAGS):
    if not os.path.isdir(FLAGS.data_dir):
        os.makedirs(FLAGS.data_dir)
    download_data(FLAGS.data_dir, FLAGS.train_data, FLAGS.test_data)

    '''--------Build net--------'''
    net = GAN(real_size, z_size, learning_rate,
              alpha=FLAGS.alpha, beta1=FLAGS.beta1)

    '''--------Load data--------'''
    train_data = loadmat(FLAGS.data_dir + '/' + FLAGS.train_data)
    test_data = loadmat(FLAGS.data_dir + '/' + FLAGS.test_data)
    dataset = preprocessor.Dataset(train_data, test_data)

    '''--------Train--------'''
    saver = tf.train.Saver()
    # Fixed noise vectors, reused every time we visualize generator samples.
    sample_z = np.random.uniform(-1, 1, size=(72, z_size))

    samples, losses = [], []
    steps = 0
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for e in range(FLAGS.epochs):
            for x, y in dataset.batches(FLAGS.batch_size):
                steps += 1

                # Sample random noise for G
                batch_z = np.random.uniform(
                    -1, 1, size=(FLAGS.batch_size, z_size))

                # Run optimizers
                _ = sess.run(net.d_opt, feed_dict={net.input_real: x,
                                                   net.input_z: batch_z})
                _ = sess.run(net.g_opt, feed_dict={net.input_z: batch_z,
                                                   net.input_real: x})

                if steps % FLAGS.print_every == 0:
                    # Every print_every steps, get the losses and print them out
                    train_loss_d = net.d_loss.eval({net.input_z: batch_z,
                                                    net.input_real: x})
                    train_loss_g = net.g_loss.eval({net.input_z: batch_z})

                    print('Epoch {}/{}...'.format(e + 1, FLAGS.epochs),
                          'Discriminator Loss: {:.4f}'.format(train_loss_d),
                          'Generator Loss: {:.4f}'.format(train_loss_g))
                    # Save losses to view after training
                    losses.append((train_loss_d, train_loss_g))

                if steps % FLAGS.show_every == 0:
                    # Run the generator in inference mode, reusing the trained
                    # weights, on the fixed noise sample_z.
                    gen_samples = sess.run(
                        generator(net.input_z, 3, reuse=True, training=False),
                        feed_dict={net.input_z: sample_z})
                    samples.append(gen_samples)
                    _ = utils.view_samples(-1, samples, 6, 12,
                                           figsize=(FLAGS.h_figsize,
                                                    FLAGS.v_figsize))
                    plt.show()

        if not os.path.isdir('./checkpoints'):
            os.makedirs('./checkpoints')  # Saver needs the directory to exist
        saver.save(sess, './checkpoints/generator.ckpt')

    with open('samples.pkl', 'wb') as f:
        pkl.dump(samples, f)

    # Plot the recorded discriminator/generator losses over training.
    fig, ax = plt.subplots()
    losses = np.array(losses)
    plt.plot(losses.T[0], label='Discriminator', alpha=0.5)
    plt.plot(losses.T[1], label='Generator', alpha=0.5)
    plt.title('Training Losses')
    plt.legend()
    plt.show()
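# A hedged sketch of a FLAGS definition covering exactly the fields main()
# reads above. This repo's actual flag mechanism and default values are not
# shown here, so everything below (argparse, the defaults, the SVHN-style
# .mat file names) is an illustrative assumption.
import argparse


def parse_flags():
    p = argparse.ArgumentParser()
    p.add_argument('--data_dir', default='data')
    p.add_argument('--train_data', default='train_32x32.mat')
    p.add_argument('--test_data', default='test_32x32.mat')
    p.add_argument('--alpha', type=float, default=0.2)    # leaky ReLU slope
    p.add_argument('--beta1', type=float, default=0.5)    # Adam beta1
    p.add_argument('--epochs', type=int, default=25)
    p.add_argument('--batch_size', type=int, default=128)
    p.add_argument('--print_every', type=int, default=10)
    p.add_argument('--show_every', type=int, default=100)
    p.add_argument('--h_figsize', type=int, default=10)
    p.add_argument('--v_figsize', type=int, default=5)
    return p.parse_args()


if __name__ == '__main__':
    main(parse_flags())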
def main():
    config_name = sys.argv[1]
    cfg = load_config(config_name)
    cfg.name = sys.argv[1] + '/' + sys.argv[2]
    # Resume training an existing model from its checkpoint.
    net = GAN(cfg, mode=RESTORE_TRAIN)
    net.train()
def main():
    config_name = sys.argv[1]
    cfg = load_config(config_name)
    cfg.name = sys.argv[1] + '/' + sys.argv[2]
    # Train a fresh model from scratch (no checkpoint restore).
    net = GAN(cfg, restore=False)
    net.train()
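# A minimal entry-point sketch for the two main() variants above, assuming
# each lives in its own training script and, judging by how sys.argv is read,
# is invoked as (script name is an assumption):
#
#   python3 train.py [config name] [model name]
#
if __name__ == '__main__':
    main()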