def check_dataset(dataset, dataroot, augment, download):
    """Look up a dataset by name and return its loaders and metadata.

    Args:
        dataset: One of ``"cifar10"``, ``"svhn"``, ``"mnist"``.
        dataroot: Directory where the dataset is stored / downloaded to.
        augment: Whether to apply training-time data augmentation.
        download: Whether to download the dataset if it is missing.

    Returns:
        Tuple ``(input_size, num_classes, train_dataset, test_dataset)``.

    Raises:
        ValueError: If ``dataset`` is not one of the supported names.
            (Previously an unknown name crashed with ``UnboundLocalError``
            at the return statement.)
    """
    if dataset == "cifar10":
        input_size, num_classes, train_dataset, test_dataset = get_CIFAR10(
            augment, dataroot, download)
    elif dataset == "svhn":
        input_size, num_classes, train_dataset, test_dataset = get_SVHN(
            augment, dataroot, download)
    elif dataset == "mnist":
        input_size, num_classes, train_dataset, test_dataset = get_MNIST(
            augment, dataroot, download)
    else:
        raise ValueError(f"unknown dataset: {dataset!r}")

    return input_size, num_classes, train_dataset, test_dataset
def check_dataset(dataset, dataroot, augment, download):
    """Look up a dataset by name and return its loaders and metadata.

    Args:
        dataset: One of ``'cifar10'``, ``'svhn'``, ``'mnist'``.
        dataroot: Directory where the dataset is stored / downloaded to.
        augment: Whether to apply training-time data augmentation.
            NOTE(review): for mnist this flag is ignored and ``False`` is
            passed instead — this matches the original code; confirm it is
            intentional.
        download: Whether to download the dataset if it is missing.

    Returns:
        Tuple ``(input_size, num_classes, train_dataset, test_dataset)``.

    Raises:
        ValueError: If ``dataset`` is not one of the supported names.
            (Previously an unknown name crashed with ``UnboundLocalError``
            at the return statement.)
    """
    if dataset == 'cifar10':
        input_size, num_classes, train_dataset, test_dataset = get_CIFAR10(
            augment, dataroot, download)
    elif dataset == 'svhn':
        input_size, num_classes, train_dataset, test_dataset = get_SVHN(
            augment, dataroot, download)
    elif dataset == 'mnist':
        # Original deliberately disables augmentation for MNIST.
        input_size, num_classes, train_dataset, test_dataset = get_MNIST(
            False, dataroot, download)
    else:
        raise ValueError(f"unknown dataset: {dataset!r}")

    return input_size, num_classes, train_dataset, test_dataset
def main(args, kwargs):
    """Load a trained Glow checkpoint and inspect its reconstruction quality.

    Samples a batch of images from the model, reconstructs them from the
    cached latent ``model._last_z`` with and without ``use_last_split``,
    prints the mean squared reconstruction errors, and plots reconstructions
    of both sampled and real MNIST data.

    Args:
        args: Namespace with ``output_folder`` (experiment directory holding
            ``hparams.json`` and the checkpoint) and ``model_name``
            (checkpoint filename).
        kwargs: Unused; kept for interface compatibility with the caller.
    """
    output_folder = args.output_folder
    model_name = args.model_name
    with open(os.path.join(output_folder, 'hparams.json')) as json_file:
        hparams = json.load(json_file)

    # Evaluation data: MNIST test split, no augmentation.
    image_shape, num_classes, _, test_mnist = get_MNIST(
        False, hparams['dataroot'], hparams['download'])
    test_loader = data.DataLoader(test_mnist, batch_size=32, shuffle=False,
                                  num_workers=6, drop_last=False)
    # Idiomatic form of test_loader.__iter__().__next__().
    x, y = next(iter(test_loader))
    x = x.to(device)

    model = Glow(image_shape, hparams['hidden_channels'], hparams['K'],
                 hparams['L'], hparams['actnorm_scale'],
                 hparams['flow_permutation'], hparams['flow_coupling'],
                 hparams['LU_decomposed'], num_classes, hparams['learn_top'],
                 hparams['y_condition'],
                 # Older checkpoints may predate these two hparams.
                 hparams.get('logittransform', False),
                 hparams.get('sn', False))

    model.load_state_dict(torch.load(os.path.join(output_folder, model_name)))
    model.set_actnorm_init()
    model = model.to(device)
    model = model.eval()

    with torch.no_grad():
        # Sample a batch; the forward pass caches its latent in model._last_z.
        images = model(y_onehot=None, temperature=1, batch_size=32,
                       reverse=True).cpu()
        better_dup_images = model(y_onehot=None, temperature=1,
                                  z=model._last_z, reverse=True,
                                  use_last_split=True).cpu()
        dup_images = model(y_onehot=None, temperature=1, z=model._last_z,
                           reverse=True).cpu()
        # NOTE(review): this call is identical to the dup_images call above,
        # so worse_dup_images == dup_images up to model nondeterminism —
        # confirm whether a different argument was intended.
        worse_dup_images = model(y_onehot=None, temperature=1,
                                 z=model._last_z, reverse=True).cpu()
        # Mean (over the batch) of per-image squared L2 reconstruction error.
        l2_err = torch.pow((images - dup_images).view(images.shape[0], -1),
                           2).sum(-1).mean()
        better_l2_err = torch.pow(
            (images - better_dup_images).view(images.shape[0], -1),
            2).sum(-1).mean()
        worse_l2_err = torch.pow(
            (images - worse_dup_images).view(images.shape[0], -1),
            2).sum(-1).mean()
        print(l2_err, better_l2_err, worse_l2_err)
        plot_imgs([images, dup_images, better_dup_images, worse_dup_images],
                  '_recons')

    # Reconstruct real data from its inferred latent (runs with gradients
    # enabled, matching the original code where no_grad was commented out).
    z, nll, y_logits = model(x, None)
    better_dup_images = model(y_onehot=None, temperature=1, z=z, reverse=True,
                              use_last_split=True).cpu()
    plot_imgs([x, better_dup_images], '_data_recons2')
    fpath = os.path.join(output_folder, '_recon_evoluation.png')
    pad = run_recon_evolution(model, x, fpath)
# Standard library.
import json
import os

# Third-party.
import matplotlib.pyplot as plt
import torch
import ipdb

# Project-local.
from datasets import get_CIFAR10, get_SVHN, get_MNIST, postprocess
from model import Glow

# Fix: torch and json were used below (torch.device / torch.load / json.load)
# but never imported in this block, which raises NameError unless they are
# imported elsewhere in the file.

device = torch.device("cuda")

# Experiment directory containing hparams.json and the checkpoint.
output_folder = '/scratch/gobi2/wangkuan/glow/db-gan'
# output_folder = '/scratch/gobi2/wangkuan/glow/mnist-1x1-affine-512-1e-2'
model_name = 'glow_model_1.pth'

with open(os.path.join(output_folder, 'hparams.json')) as json_file:
    hparams = json.load(json_file)

# MNIST test split, no augmentation.
image_shape, num_classes, _, test_mnist = get_MNIST(
    False, hparams['dataroot'], hparams['download'])

model = Glow(
    image_shape,
    hparams['hidden_channels'],
    hparams['K'],
    hparams['L'],
    hparams['actnorm_scale'],
    hparams['flow_permutation'],
    hparams['flow_coupling'],
    hparams['LU_decomposed'],
    num_classes,
    hparams['learn_top'],
    hparams['y_condition'],
    # Older checkpoints may predate the logittransform hparam.
    hparams.get('logittransform', False))

model.load_state_dict(torch.load(os.path.join(output_folder, model_name)))
model.set_actnorm_init()
model = model.to(device)
model = model.eval()
# Excitatory -> inhibitory ('Ae' -> 'Ai'): one-to-one connections.  The
# diagonal weight matrix gives each excitatory neuron a single link of
# strength c_excite to its paired inhibitory neuron.
# NOTE(review): torch.ones_like(torch.Tensor(n)) is an old idiom for
# torch.ones(n); torch.Tensor(n) itself is uninitialized — confirm the
# intent is simply an all-ones vector.
network.add_synapses(Synapses(
    network.groups['Ae'], network.groups['Ai'],
    w=torch.diag(c_excite * torch.ones_like(torch.Tensor(n_neurons)))), source='Ae', target='Ai')

# Inhibitory -> excitatory ('Ai' -> 'Ae'): all-to-all inhibition of strength
# -c_inhib, with the diagonal zeroed so a neuron does not inhibit its own
# excitatory partner (lateral inhibition).
network.add_synapses(Synapses(network.groups['Ai'], network.groups['Ae'], w=-c_inhib * \
    (torch.ones_like(torch.Tensor(n_neurons, n_neurons)) - torch.diag(1 \
    * torch.ones_like(torch.Tensor(n_neurons))))), source='Ai', target='Ae')

# network.add_monitor(Monitor(obj=network.groups['Ae'], state_vars=['v', 'theta']), name=('Ae', ('v', 'theta')))
# network.add_monitor(Monitor(obj=network.groups['Ai'], state_vars=['v']), name=('Ai', 'v'))

# Get training or test data from disk.
# NOTE(review): if mode is neither 'train' nor 'test', `data` is never bound
# and the next line raises NameError — presumably mode is validated upstream.
if mode == 'train':
    data = get_MNIST(train=True)
elif mode == 'test':
    data = get_MNIST(train=False)

# Images and labels.
X, y = data['X'], data['y']

# Count spikes from each neuron on each example (between update intervals).
outputs = torch.zeros_like(torch.Tensor(update_interval, n_neurons))

# Network simulation times.
image_time = time  # duration each image is presented
rest_time = rest   # rest period between images

# Voting schemes and neuron label assignments.
voting_schemes = ['all']
# Per-neuron spike rates for each of the 10 digit classes.
rates = torch.zeros_like(torch.Tensor(n_neurons, 10))