# ---------------------------------------------------------------------------
# Figure generation for a trained VAE:
#   fig1_3 -- original images vs. their reconstructions,
#   fig1_4 -- images decoded from random latent vectors,
#   fig1_5 -- 2-D t-SNE of the encoder means, coloured by label.
# Relies on `model`, `output_path`, `oirgin_imgs`, `recon_imgs`,
# `data_for_tsne` and `label_for_tsne` being defined earlier in the script.
# ---------------------------------------------------------------------------

# fig1_3: stack the collected reconstruction batches and save them alongside
# the original images, 10 images per row.
recon_imgs = torch.cat(recon_imgs)
compare = torch.cat((oirgin_imgs, recon_imgs.cpu().data))
save_image(compare.cpu(), output_path + '/fig1_3.jpg', nrow=10, normalize=True)

# fig1_4: decode 32 random latent vectors (dim 1024) drawn from N(0, I).
samples = [model.decode(Variable(torch.randn(1024).cuda())) for _ in range(32)]
imgs = torch.cat(samples)
save_image(imgs.cpu().data, output_path + '/fig1_4.jpg', nrow=8, normalize=True)

# fig1_5: embed the latent means with t-SNE (fixed random_state so the plot is
# reproducible) and scatter them coloured by their labels.
data_for_tsne = torch.cat(data_for_tsne)
label_for_tsne = torch.cat(label_for_tsne).numpy()
mu, _logvar = model.encode(Variable(data_for_tsne.cuda()))  # only the mean is plotted
latent_code = mu.cpu().data.numpy()
latent_embedded = TSNE(random_state=2).fit_transform(latent_code)
plt.figure()
plt.scatter(latent_embedded[:, 0], latent_embedded[:, 1], c=label_for_tsne)
plt.savefig(output_path + '/fig1_5.jpg')
# NOTE(review): this chunk is truncated on both sides of the visible view --
# the leading `))` closes a call that begins above, and the final
# save_image(...) call continues below.  Only comments are added here.
sum([len(raw_transitions[i]) for i in range(n_trajs)]))
# Flatten per-trajectory step records into consecutive (obs, next_obs) pairs.
transitions = []
for i in range(n_trajs):
    for t in range(len(raw_transitions[i]) - 1):
        # o / o_next: recorded observations at steps t and t+1;
        # true_s / true_s_next: ground-truth state stored in each step's
        # info dict under the 'state' key.
        o, o_next, true_s, true_s_next = raw_transitions[i][t][0], \
            raw_transitions[i][t + 1][0], \
            raw_transitions[i][t][1]['state'], \
            raw_transitions[i][t + 1][1]['state']
        # Skip pairs where either observation is all-zero -- presumably
        # padding/invalid frames; TODO confirm with the data-collection code.
        if o.sum() != 0 and o_next.sum() != 0:
            # No gradients needed: the encoder is only used for inference.
            with torch.no_grad():
                if not is_image:
                    # Non-image observations get placeholder latent codes.
                    s = [np.array([1])]
                    s_next = [np.array([1])]
                else:
                    # Assumes o is HWC (TODO confirm): transpose to CHW, add a
                    # batch dim, move to GPU, and take element [1] of
                    # model.encode's return value (presumably the latent code
                    # -- verify against the model definition).
                    s = model.encode(
                        Variable(
                            torch.cuda.FloatTensor(
                                np.transpose(o, (2, 0, 1))[None])))[1]
                    s_next = model.encode(
                        Variable(
                            torch.cuda.FloatTensor(
                                np.transpose(o_next, (2, 0, 1))[None])))[1]
                    if i == 0 and t == 0:
                        # One-off visual sanity check: decode the very first
                        # encoded frame and save it next to the raw input.
                        recon = model.decode(s)
                        # NOTE(review): debugger breakpoint left in the code --
                        # remove before running unattended.
                        import ipdb
                        ipdb.set_trace()
                        save_image(
                            torch.cat([
                                # Raw observation rescaled from [0, 255] to
                                # [0, 1] for saving.
                                torch.cuda.FloatTensor(
                                    np.transpose(o, (2, 0, 1))[None]).cpu() / 255,