# Imports inferred from usage; project-level names (CVAE, TestData, opts,
# device, root, canvas) are assumed to be defined elsewhere in the script.
# `imread` and `fit` are assumed to come from imageio and PIL.ImageOps.
import os
from os.path import join

import numpy as np
import torch
from imageio import imread
from numpy import transpose
from PIL import Image, ImageTk
from PIL.ImageOps import fit
from tkinter import filedialog
from torchvision import transforms
from torchvision.utils import save_image


def printcoords():
    # Ask the user for an image, preprocess it to a 64x64 CHW array and save it.
    File = filedialog.askopenfilename(parent=root, title='Choose an image.')
    print('changing data...')
    data_img = []
    img = imread(File)
    img = Image.fromarray(img)
    img = fit(img, size=(64, 64))
    img = transpose(img, (2, 0, 1))
    data_img.append(img)
    np.save(opts.path, np.asarray(data_img))

    # Wrap the saved array in a dataset/dataloader and load the trained CVAE.
    test_dataset = TestData(label=opts.label, path=opts.path,
                            transform=transforms.ToTensor())
    dataloader = torch.utils.data.DataLoader(test_dataset, batch_size=1)
    cvae = CVAE(200).to(device)
    cvae.load_params(opts.CVAE_PATH)

    evaluation_dir = opts.CVAE_PATH + 'evalFolder'
    try:
        os.mkdir(evaluation_dir)
    except FileExistsError:
        print('evaluation folder already exists')

    cvae.eval()
    test_x, test_y = next(iter(dataloader))

    for i in range(3):
        # Reconstruct the input and decode two samples with opposite labels.
        test_rec, test_mean, test_log_var, test_predict = cvae(test_x, test_y)
        save_image(test_x.data, join(evaluation_dir, 'input.png'))
        save_image(test_rec.data, join(evaluation_dir, 'output_test.png'))
        x = test_x.data
        y = test_y
        mu, logVar, y = cvae.encode(x)
        z = cvae.reparameterization(mu, logVar)
        sample1 = cvae.decode(
            torch.LongTensor(np.ones(y.size(), dtype=int)).type_as(z), z)
        sample2 = cvae.decode(
            torch.LongTensor(np.zeros(y.size(), dtype=int)).type_as(z), z)
        save_image(sample1.cpu().data, join(evaluation_dir, 'sample1.png'))
        save_image(sample2.cpu().data, join(evaluation_dir, 'sample2.png'))

        # Paste the input and the two samples side by side into one row image.
        arr = ['input.png', 'sample1.png', 'sample2.png']
        toImage = Image.new('RGBA', (584, 128))
        for j in range(3):
            fromImge = Image.open(join(evaluation_dir, arr[j]))
            fromImge = fromImge.resize((128, 128), Image.LANCZOS)  # ANTIALIAS was removed in Pillow 10
            loc = (128 * j + 80, 0)
            toImage.paste(fromImge, loc)
        toImage.save('merged' + str(i) + '.png')

    # Stack the three rows vertically and display the result on the canvas.
    arr = ['merged0.png', 'merged1.png', 'merged2.png']
    toImage = Image.new('RGBA', (584, 384))
    for j in range(3):
        fromImge = Image.open(arr[j])
        loc = (0, 128 * j)
        toImage.paste(fromImge, loc)
    toImage.save('merged.png')

    filename = ImageTk.PhotoImage(Image.open('merged.png'))
    canvas.image = filename  # keep a reference so Tkinter does not garbage-collect it
    canvas.create_image(124, 10, anchor='nw', image=filename)
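
# The callback above references the module-level names `root` and `canvas`,
# which the original script creates elsewhere. A minimal sketch of that
# Tkinter scaffolding, assuming hypothetical widget sizes, window title and
# button label (not the author's actual GUI code):
if __name__ == '__main__':
    import tkinter as tk

    root = tk.Tk()
    root.title('CVAE image demo')                    # hypothetical title
    canvas = tk.Canvas(root, width=600, height=420)  # hypothetical size
    canvas.pack()
    tk.Button(root, text='Choose an image', command=printcoords).pack()
    root.mainloop()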
# Evaluation-only script: loads a pre-trained baseline net and CVAE from disk
# and visualizes their predictions. torch, get_data, BaselineNet, CVAE,
# visualize and generate_table are assumed to be imported/defined elsewhere.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Dataset
datasets, dataloaders, dataset_sizes = get_data(num_quadrant_inputs=1,
                                                batch_size=128)

baseline_net = BaselineNet(500, 500)
baseline_net.load_state_dict(
    torch.load('/Users/carlossouza/Downloads/baseline_net_q1.pth',
               map_location='cpu'))
baseline_net.eval()

cvae_net = CVAE(200, 500, 500, baseline_net)
cvae_net.load_state_dict(
    torch.load('/Users/carlossouza/Downloads/cvae_net_q1.pth',
               map_location='cpu'))
cvae_net.eval()

visualize(device=device,
          num_quadrant_inputs=1,
          pre_trained_baseline=baseline_net,
          pre_trained_cvae=cvae_net,
          num_images=10,
          num_samples=10)

# df = generate_table(
#     device=device,
#     num_quadrant_inputs=1,
#     pre_trained_baseline=baseline_net,
#     pre_trained_cvae=cvae_net,
#     num_particles=10,
#     col_name='{} quadrant'.format(1)
# )
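
# If the commented-out generate_table call above is enabled, the `df =`
# assignment suggests it returns a pandas DataFrame; a minimal sketch of
# consuming that result (the output file name is hypothetical):
df = generate_table(
    device=device,
    num_quadrant_inputs=1,
    pre_trained_baseline=baseline_net,
    pre_trained_cvae=cvae_net,
    num_particles=10,
    col_name='{} quadrant'.format(1),
)
print(df)                        # inspect baseline vs. CVAE scores in the console
df.to_csv('cvae_scores_q1.csv')  # hypothetical output path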