def main(argv):
    os.environ['CUDA_VISIBLE_DEVICES'] = '%d' % FLAGS.gpu
    if FLAGS.name is None:
        FLAGS.name = FLAGS.model.lower()

    if FLAGS.model == 'CSGAN':
        model = CSGAN(FLAGS)
    elif FLAGS.model == 'ACGAN':
        model = ACGAN(FLAGS)
    elif FLAGS.model == 'CGAN':
        model = CGAN(FLAGS)

    if FLAGS.validate:
        model.validate(glasses=FLAGS.glasses, male=FLAGS.male)
    else:
        model.train(epochs=1000000, batch_size=FLAGS.batch_size,
                    sample_interval=200, start_point=FLAGS.load_model + 1)
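# A hypothetical invocation sketch for the entry point above, assuming the
# flags it reads (--gpu, --model, --name, --validate, --glasses, --male,
# --batch_size, --load_model) are defined elsewhere in the file with
# absl/tf-style FLAGS; the values shown are placeholders:
#
#   python main.py --gpu 0 --model ACGAN --batch_size 64
#
# Passing --validate calls model.validate() instead of starting training.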
from acgan import ACGAN
import torch
import torch.backends.cudnn
import torchvision.datasets as datasets
import torchvision.transforms as transforms
import torch.utils.data
import torch.optim as optim
import torch.nn as nn

device = 'cuda' if torch.cuda.is_available() else 'cpu'
torch.backends.cudnn.benchmark = True

ac_gan = ACGAN(100, 10, 1, device)
print(ac_gan)

dataset = datasets.MNIST(root='./data', download=True,
                         transform=transforms.Compose([
                             transforms.Resize(64),
                             transforms.ToTensor(),
                             transforms.Normalize((0.5,), (0.5,))
                         ]))
dataloader = torch.utils.data.DataLoader(dataset, batch_size=100,
                                         shuffle=True, num_workers=2)

src_criterion = nn.MSELoss()
c_criterion = nn.CrossEntropyLoss()
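# A minimal sketch of one AC-GAN update using the criteria defined above.
# The attribute names ac_gan.generator / ac_gan.discriminator, the
# (source score, class logits) return pair, and the optimizer settings are
# assumptions about the ACGAN class, not part of the snippet above.
opt_d = optim.Adam(ac_gan.discriminator.parameters(), lr=2e-4, betas=(0.5, 0.999))
opt_g = optim.Adam(ac_gan.generator.parameters(), lr=2e-4, betas=(0.5, 0.999))

for real_images, real_labels in dataloader:
    real_images, real_labels = real_images.to(device), real_labels.to(device)
    batch = real_images.size(0)
    ones = torch.ones(batch, 1, device=device)
    zeros = torch.zeros(batch, 1, device=device)

    # Discriminator step: score a real batch and a generated batch.
    noise = torch.randn(batch, 100, device=device)
    fake_labels = torch.randint(0, 10, (batch,), device=device)
    fake_images = ac_gan.generator(noise, fake_labels)
    src_real, cls_real = ac_gan.discriminator(real_images)
    src_fake, cls_fake = ac_gan.discriminator(fake_images.detach())
    d_loss = (src_criterion(src_real, ones) + src_criterion(src_fake, zeros)
              + c_criterion(cls_real, real_labels) + c_criterion(cls_fake, fake_labels))
    opt_d.zero_grad()
    d_loss.backward()
    opt_d.step()

    # Generator step: fool the source head and match the sampled class.
    src_fake, cls_fake = ac_gan.discriminator(fake_images)
    g_loss = src_criterion(src_fake, ones) + c_criterion(cls_fake, fake_labels)
    opt_g.zero_grad()
    g_loss.backward()
    opt_g.step()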
usage = np.expand_dims(usage, axis=-1)
gen = np.expand_dims(gen, axis=-1)
x = np.concatenate((usage, gen), axis=-1)

num_train = 365 * 3
x_train = x[:num_train]
x_val = x[num_train:]
month_label_train = month_label[:num_train]
month_label_val = month_label[num_train:]
day_label_train = day_label[:num_train]
day_label_val = day_label[num_train:]
print(x_train.shape, x_val.shape, month_label_train.shape, month_label_val.shape,
      day_label_train.shape, day_label_val.shape)

weight_path = 'weights/pecan' + '_user_' + str(user_id) + '_'
model = Model(input_dim=2, window_length=96, weight_path=weight_path)

if train:
    num_epoch = args['num_epoch']
    print(model.discriminator.summary())
    print(model.generator.summary())
    model.train([x_train, month_label_train, day_label_train],
                [x_val, month_label_val, day_label_val],
                num_epoch=num_epoch)
else:
    x_generated = model.generate_by_date(1461)
    usage_generated = x_generated[:, :, 0]
    gen_generated = x_generated[:, :, 1]
    usage_generated_recover = usage_recover(usage_generated)
    gen_generated_recover = gen_recover(gen_generated)
    data = np.stack((usage_generated_recover, gen_generated_recover),
def main():
    # parse arguments
    args = parse_args()
    if args is None:
        exit()

    if args.benchmark_mode:
        torch.backends.cudnn.benchmark = True

    # declare instance for GAN
    if args.gan_type == 'GAN':
        gan = GAN(args)
    elif args.gan_type == 'CGAN':
        gan = CGAN(args)
    elif args.gan_type == 'ACGAN':
        gan = ACGAN(args)
    elif args.gan_type == 'DSGAN':
        gan = DSGAN(args)
    elif args.gan_type == 'SNGAN':
        gan = SNGAN(args)
    else:
        raise Exception("[!] There is no option for " + args.gan_type)

    if args.mode == 'train':
        # launch the graph in a session
        gan.train()
        print(" [*] Training finished!")

        # visualize learned generator
        gan.visualize_results(args.epoch)
        print(" [*] Testing finished!")
    elif args.mode == 'evaluate':
        print(" [*] Compute the Lipschitz parameter")
        gan.get_lipschitz()
        print("")

        # print(" [*] Compute the inception score")
        # if args.dataset == 'mnist':
        #     model = SmallCNN()
        #     model.load_state_dict(torch.load('generative/pretrained/small_cnn/mnist.pt'))
        #     dataset = dset.MNIST(root='data/mnist/', train=False,
        #                          download=True, transform=transforms.ToTensor())
        #     img_size = 28
        #     n_class = 10
        # elif args.dataset == 'fashion-mnist':
        #     model = SmallCNN()
        #     model.load_state_dict(torch.load('generative/pretrained/small_cnn/fashion-mnist.pt'))
        #     dataset = dset.FashionMNIST(root='data/fashion-mnist/', train=False,
        #                                 download=True, transform=transforms.ToTensor())
        #     img_size = 28
        #     n_class = 10
        # elif args.dataset == 'cifar10':
        #     model = inception_v3(pretrained=True, transform_input=False)
        #     transform = transforms.Compose([transforms.ToTensor(),
        #                                     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
        #     dataset = dset.CIFAR10(root='data/cifar10/', download=True, transform=transform)
        #     img_size = 299
        #     n_class = 1000
        # else:
        #     raise Exception("[!] There is no option for " + args.dataset)
        # if args.gpu_mode:
        #     model = model.cuda()
        # model.eval()

        # print("Calculating Inception Score for original dataset...")
        # IS_origin = inception_score(IgnoreLabelDataset(dataset), model, cuda=args.gpu_mode,
        #                             batch_size=32, img_size=img_size, n_class=n_class, resize=True, splits=10)
        # print(IS_origin[0])

        # # test_sample_path = 'data/'+args.dataset+'/'+args.gan_type+'/'+'samples_test.npy'
        # # test_label_path = 'data/'+args.dataset+'/'+args.gan_type+'/'+'labels_test.npy'
        # test_path = 'data/'+args.dataset+'/'+args.gan_type+'/'+'test.npz'
        # dataset_acgan = CustomLabelDataset(test_path, args.input_size,
        #                                    args.input_size, args.channels, transform=transforms.ToTensor())
        # print("Calculating Inception Score for ACGAN...")
        # IS_gan = inception_score(IgnoreLabelDataset(dataset_acgan), model, cuda=args.gpu_mode,
        #                          batch_size=32, img_size=img_size, n_class=n_class, resize=True, splits=10)
        # print(IS_gan[0])

        # # save the inception score
        # IS_log = open(args.log_dir+'/'+args.dataset+'/'+args.gan_type+'/ACGAN_IS.txt', 'w')
        # print("%.4f, %.4f" % (IS_origin[0], IS_gan[0]), file=IS_log)
    elif args.mode == 'reconstruct':
        print(" [*] Reconstruct " + args.dataset + " dataset using " + args.gan_type)
        gan.reconstruct()
    else:
        raise Exception("[!] There is no option for " + args.mode)
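# Hypothetical command lines for the script above, assuming parse_args()
# defines --gan_type, --dataset, --mode, --epoch, and --benchmark_mode as the
# attribute accesses in main() suggest; values are placeholders:
#
#   python main.py --gan_type ACGAN --dataset mnist --mode train --epoch 50
#   python main.py --gan_type ACGAN --dataset mnist --mode evaluate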
def inference(FLAG):
    FLAG_save_dir = FLAG.save_dir
    FLAG_plot_dir = FLAG.plot_dir
    FLAG_n_dim = 100

    gan = ACGAN()
    gan.build(n_dim=FLAG_n_dim, shape=(64, 64, 3))

    def initialize_uninitialized(sess):
        global_vars = tf.global_variables()
        is_not_initialized = sess.run(
            [tf.is_variable_initialized(var) for var in global_vars])
        not_initialized_vars = [
            v for (v, f) in zip(global_vars, is_not_initialized) if not f
        ]
        if len(not_initialized_vars):
            sess.run(tf.variables_initializer(not_initialized_vars))

    def res_plot(samples, n_row, n_col):
        fig = plt.figure(figsize=(n_col * 2, n_row * 2))
        gs = gridspec.GridSpec(n_row, n_col)
        gs.update(wspace=0.05, hspace=0.05)
        for i, sample in enumerate(samples):
            ax = plt.subplot(gs[i])
            plt.axis('off')
            ax.set_xticklabels([])
            ax.set_yticklabels([])
            ax.set_aspect('equal')
            plt.imshow(sample.reshape(64, 64, 3))
        return fig

    with tf.Session() as sess:
        if FLAG_save_dir is not None:
            sess.run(tf.global_variables_initializer())
            saver = tf.train.Saver()
            ckpt = tf.train.get_checkpoint_state(FLAG_save_dir)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                print("Model restored %s" % ckpt.model_checkpoint_path)
                sess.run(tf.global_variables())
            else:
                print("No model checkpoint in %s" % FLAG_save_dir)
        else:
            sess.run(tf.global_variables_initializer())
            sess.run(tf.global_variables())
            print("Initialized")
        print("Plot saved in %s" % FLAG_plot_dir)

        # re-initialize
        initialize_uninitialized(sess)

        random_vec = np.random.uniform(-1, 1, [10, gan.n_dim]).astype(np.float32)
        random_vec = np.repeat(random_vec, 2, axis=0)
        aux_vec = np.expand_dims(np.repeat([0, 1], 10), axis=1)

        # plot
        np.random.seed(296)
        Xplot = sess.run(gan.G_image,
                         feed_dict={
                             gan.random_sample: random_vec,
                             gan.aux_labels: aux_vec,
                             gan.is_train: False
                         })
        fig = res_plot(Xplot, 2, 10)
        plt.savefig(os.path.join(FLAG_plot_dir, 'fig3_3.jpg'), bbox_inches='tight')
        plt.close(fig)
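# A minimal usage sketch, assuming FLAG is an argparse-style namespace whose
# save_dir and plot_dir fields match the attributes read by inference() above;
# the paths are placeholders.
if __name__ == '__main__':
    from argparse import Namespace
    inference(Namespace(save_dir='./checkpoints/acgan', plot_dir='./plots'))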
import argparse

parser = argparse.ArgumentParser(description='Main script')
parser.add_argument('--data_dir', type=str,
                    default='C:/Users/Jonas/Documents/GitHub/pokemon-generation/data/sprites')
parser.add_argument('--name', type=str, default='gan')
parser.add_argument('--type', type=str, default='dcgan', help='GAN Type')
parser.add_argument('--spectral_norm', action='store_true')
parser.add_argument('--epochs', type=int, default=1000)
parser.add_argument('--batch_size', type=int, default=64)
parser.add_argument('--sample_interval', type=int, default=5)
parser.add_argument('--label_column', type=str, default='type_1')
args = parser.parse_args()

from dcgan import DCGAN
from acgan import ACGAN

config = {'spectral_norm': args.spectral_norm}
if args.type == 'dcgan':
    model = DCGAN(name=args.name, config=config)
elif args.type == 'acgan':
    model = ACGAN(name=args.name, label_column=args.label_column, config=config)

model.fit(args.data_dir, args.epochs, args.batch_size, args.sample_interval)
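# Example invocations using the argparse flags defined above; the run names
# and values are placeholders:
#
#   python main.py --type acgan --name pokemon_acgan --label_column type_1 \
#       --spectral_norm --epochs 500 --batch_size 64 --sample_interval 5
#   python main.py --type dcgan --name pokemon_dcgan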
                                  to_file="{}/{}/{}/net_g.png".format(args.output_dir, model_name, date_str))
        tf.keras.utils.plot_model(m.d, show_shapes=True, expand_nested=True, dpi=150,
                                  to_file="{}/{}/{}/net_d.png".format(args.output_dir, model_name, date_str))
    except Exception as e:
        print(e)
    return logger


if __name__ == "__main__":
    utils.set_soft_gpu(args.soft_gpu)

    cifar = CIFAR(n_class=args.label_dim)
    (x_train, y_train), (x_test, y_test) = cifar.load()
    print("x_shape:", x_train.shape, " x_type:", x_train.dtype,
          " y_shape:", y_train.shape, " y_type:", y_train.dtype)

    model_name = args.model
    summary_writer = tf.summary.create_file_writer('{}/{}/{}'.format(args.output_dir, model_name, date_str))

    if model_name == "acgan":
        d = utils.get_ds(args.batch_size // 2, x_train, y_train)
        m = ACGAN(args.latent_dim, args.label_dim, x_train.shape[1:], a=-1, b=1, c=1,
                  summary_writer=summary_writer, lr=args.lr, beta1=args.beta1,
                  beta2=args.beta2, net=args.net)
        logger = init_logger(model_name, date_str, m)
        train(m, d)
    elif model_name == "acgangp":
        x_train, y_train = utils.convert_to_tensor(x_train, y_train)
        m = ACGANgp(args.latent_dim, args.label_dim, x_train.shape[1:], args.lambda_,
                    summary_writer=summary_writer, lr=args.lr, beta1=args.beta1,
                    beta2=args.beta2, net=args.net)
        logger = init_logger(model_name, date_str, m)
        traingp(m, x_train, y_train)
    else:
        raise ValueError("model name error")
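# A hypothetical invocation for the training script above, assuming its
# argparse setup (not shown here) exposes the attributes read in the
# __main__ block (--model, --latent_dim, --label_dim, --batch_size, --lr,
# --beta1, --beta2, --net, --output_dir, --soft_gpu); the file name and
# values are placeholders:
#
#   python train.py --model acgan --latent_dim 128 --label_dim 10 \
#       --batch_size 64 --lr 0.0002 --beta1 0.5 --beta2 0.999 --net resnet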