def main(_):
    """Entry point: build an RDN model inside a managed TF session and run it.

    The session is opened with a context manager so it is always closed,
    even if train()/test() raises.

    Fix: the original unconditionally called ``rdn.train(FLAGS)`` even when
    ``FLAGS.is_train`` was False; this now dispatches to train() or test()
    on ``rdn.is_train``, consistent with the other ``main`` in this file.
    """
    with tf.Session() as sess:
        rdn = RDN(sess,
                  image_size=FLAGS.image_size,
                  is_train=FLAGS.is_train,
                  scale=FLAGS.scale,
                  c_dim=FLAGS.c_dim,
                  batch_size=FLAGS.batch_size,
                  test_img=FLAGS.test_img,
                  D=FLAGS.D,
                  C=FLAGS.C,
                  G=FLAGS.G,
                  G0=FLAGS.G0,
                  kernel_size=FLAGS.kernel_size)
        if rdn.is_train:
            rdn.train(FLAGS)
        else:
            rdn.test(FLAGS)
def main(_):
    """Entry point: build an RDN model and train or test it per FLAGS.

    Fix: the original created ``tf.Session()`` inline and never closed it,
    leaking the session (and any device resources it holds). The session is
    now opened with a context manager, matching the other ``main`` in this
    file, so it is released even if train()/test() raises.
    """
    with tf.Session() as sess:
        rdn = RDN(sess,
                  image_size=FLAGS.image_size,
                  is_train=FLAGS.is_train,
                  scale=FLAGS.scale,
                  c_dim=FLAGS.c_dim,
                  batch_size=FLAGS.batch_size,
                  test_img=FLAGS.test_img,
                  D=FLAGS.D,
                  C=FLAGS.C,
                  G=FLAGS.G,
                  G0=FLAGS.G0,
                  kernel_size=FLAGS.kernel_size)
        if rdn.is_train:
            rdn.train(FLAGS)
        else:
            rdn.test(FLAGS)
# GAN (SRGAN-style) training setup and epoch loop for a generator/discriminator
# pair (netG / netD), defined elsewhere in this file along with
# generator_criterion, train_loader, and NUM_EPOCHS.
#
# NOTE(review): this chunk appears truncated — the banner below announces the
# discriminator update, but the code ends right after moving the inputs to the
# GPU; the actual D (and G) backward/step must live past this excerpt.

# Move models and the generator loss to the GPU when one is available.
if torch.cuda.is_available():
    netG.cuda()
    netD.cuda()
    generator_criterion.cuda()

# Separate Adam optimizers for generator and discriminator (library defaults).
optimizerG = optim.Adam(netG.parameters())
optimizerD = optim.Adam(netD.parameters())

# Per-epoch history of losses/scores, appended to as training progresses.
results = {'d_loss': [], 'g_loss': [], 'd_score': [], 'g_score': []}

for epoch in range(1, NUM_EPOCHS + 1):
    # tqdm wraps the loader to show a progress bar for this epoch.
    train_bar = tqdm(train_loader)
    # Running sums for this epoch; batch_sizes accumulates samples seen so
    # the sums can later be normalized into per-sample averages.
    running_results = {'batch_sizes': 0, 'd_loss': 0, 'g_loss': 0, 'd_score': 0, 'g_score': 0}

    netG.train()
    netD.train()
    # data = low-res input batch, target = high-res ground truth —
    # presumably; TODO confirm against the DataLoader definition.
    for data, target in train_bar:
        g_update_first = True
        batch_size = data.size(0)
        running_results['batch_sizes'] += batch_size

        ############################
        # (1) Update D network: maximize D(x)-1-D(G(z))
        ###########################
        # Variable() is the legacy (pre-0.4) PyTorch autograd wrapper.
        real_img = Variable(target)
        if torch.cuda.is_available():
            real_img = real_img.cuda()
        z = Variable(data)
        if torch.cuda.is_available():
            z = z.cuda()
import data
import argparse
from model import RDN

# ---- Command-line configuration -------------------------------------------
cli = argparse.ArgumentParser()
cli.add_argument("--dataset", default="data/General-100")
cli.add_argument("--testset", default="")
cli.add_argument("--imgsize", default=128, type=int)
cli.add_argument("--scale", default=4, type=int)
cli.add_argument("--globallayers", default=16, type=int)
cli.add_argument("--locallayers", default=8, type=int)
cli.add_argument("--featuresize", default=64, type=int)
cli.add_argument("--batchsize", default=16, type=int)
cli.add_argument("--savedir", default='saved_models')
cli.add_argument("--iterations", default=10000, type=int)
cli.add_argument("--usepre", default=0, type=int)
opts = cli.parse_args()

# ---- Data ------------------------------------------------------------------
# Load train/test images first; the low-resolution patch size is the
# high-resolution size divided by the upscaling factor.
data.load_dataset(opts.dataset, opts.testset, opts.imgsize)
lr_size = opts.imgsize // opts.scale

# ---- Model and training ----------------------------------------------------
net = RDN(lr_size, opts.globallayers, opts.locallayers, opts.featuresize, opts.scale)
net.set_data_fn(data.get_batch,
                (opts.batchsize, opts.imgsize, lr_size),
                data.get_test_set,
                (opts.imgsize, opts.scale))
net.train(opts.iterations, opts.savedir, opts.usepre)