b = train_cond[sindex]
print(a.shape)
print(b.shape)

a = np.reshape(a, (-1, 32, 130))
b = np.reshape(b, (-1, 32, 12))
print(a.shape)
print(b.shape)

# Initialize the model for the chord-conditioned data
# (32 time steps, 130-dim melody input, 12-dim chord condition, two 128-dim latents).
model = VAE(130, 2048, 3, 12, 128, 128, 32)
model.eval()

# Load the pretrained weights, stripping the 'module.' prefix left over from
# DataParallel training so the keys match the single-device model.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dic = torch.load("vae/tr_chord.pt", map_location=device)
for name in list(dic.keys()):
    dic[name.replace('module.', '')] = dic.pop(name)
model.load_state_dict(dic)
model = model.to(device)
print(model)

# Encode: the encoder returns two distributions; use their means as the latent codes.
# Inputs are moved to the same device as the model before the forward pass.
a = torch.from_numpy(a).float().to(device)
b = torch.from_numpy(b).float().to(device)
res = model.encoder(a, b)
z1 = res[0].loc.detach().cpu().numpy()
z2 = res[1].loc.detach().cpu().numpy()
print(z1)
print(z1.shape)
print(z2)
print(z2.shape)

# Decode the latent means back through the decoder, conditioned on the same chords.
z1 = torch.from_numpy(z1).float().to(device)
z2 = torch.from_numpy(z2).float().to(device)
res = model.decoder(z1, z2, b)
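# Optional sanity check (a sketch, not part of the original script): if the decoder
# returns per-step logits over the 130 melody classes (shape (-1, 32, 130)), the
# reconstruction can be compared to the input by taking an argmax per time step.
# The exact return type depends on the VAE implementation, so unpack accordingly.
recon = res[0] if isinstance(res, tuple) else res
recon_idx = recon.detach().cpu().numpy().argmax(axis=-1)  # predicted class per step
orig_idx = a.detach().cpu().numpy().argmax(axis=-1)       # original one-hot classes
print("reconstruction accuracy:", (recon_idx == orig_idx).mean())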
import torch
import torchvision
from torchvision import transforms
from torchvision.utils import save_image
import numpy as np
from tqdm import trange

from sa.model import MnistClassifier
from vae.model import VAE

img_size = 28*28*1
torch.set_grad_enabled(False)  # nothing is trained here, so disable autograd globally

### Prep (e.g. Load Models) ###
vae = VAE(img_size=28*28, h_dim=1600, z_dim=400)
vae.load_state_dict(torch.load('./vae/models/MNIST_EnD.pth'))
vae.eval()
vae.cuda()

classifier = MnistClassifier(img_size=img_size)
classifier.load_state_dict(torch.load('./sa/models/MNIST_conv_classifier.pth'))
classifier.eval()
classifier.cuda()
print("models loaded...")

test_dataset = torchvision.datasets.MNIST(root='./data', train=False,
                                          transform=transforms.ToTensor(), download=False)
test_data_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=1, shuffle=True)
print("Data loader ready...")

### GA Params ###
gen_num = 500   # number of generations
pop_size = 50   # population size per generation
best_left = 20  # top candidates carried over to the next generation
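# Quick smoke test of the loaded models (a sketch with hypothetical method names:
# it assumes the VAE exposes a decode(z) that maps a z_dim=400 latent to a flattened
# 28*28 image, and that the classifier accepts a flattened image; adjust both calls
# to the actual vae.model / sa.model APIs).
z = torch.randn(1, 400).cuda()            # random latent matching z_dim=400
img = vae.decode(z).view(-1, 1, 28, 28)   # reshape the flat output to image form
logits = classifier(img.view(1, -1))      # flattened input is an assumption here
print("predicted digit:", logits.argmax(dim=1).item())
save_image(img, 'vae_sample.png')         # uses the save_image import above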