state_dict = torch.load(f)
# Keys belonging to the fc1 layer; these are dropped and the load retried
# if they fail to match the current model (e.g. a shape mismatch).
discard = [x for x in state_dict if x.startswith('fc1')]
state = model.state_dict()
state.update(state_dict)
try:
    model.load_state_dict(state)
except Exception:
    # Retry without the fc1 weights.
    for key in discard:
        state_dict.pop(key)
    state = model.state_dict()
    state.update(state_dict)
    model.load_state_dict(state)
# Mark that an external checkpoint was loaded.
load_ext = True
if args.cuda:
    model.cuda()

# Summed (not averaged) reconstruction loss; `reduction='sum'` replaces the
# deprecated `size_average = False` attribute assignment.
reconstruction_function = nn.MSELoss(reduction='sum')


def loss_function(recon_x, x, mu, logvar):
    # Reconstruction term (MSE, despite the historical BCE name).
    BCE = reconstruction_function(recon_x, x)
    # KL divergence term; see Appendix B of the VAE paper:
    # Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
    # https://arxiv.org/abs/1312.6114
    # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
    KLD_element = mu.pow(2).add_(logvar.exp()).mul_(-1).add_(1).add_(logvar)
    KLD = torch.sum(KLD_element).mul_(-0.5)
    return BCE + KLD
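# A quick sanity check for loss_function. This is an illustrative sketch
# only: the batch size, image shape, and latent dimension below are
# assumptions, not values taken from this script.
#
#   x = torch.rand(8, 1, 28, 28)        # fake batch of images
#   recon_x = torch.rand(8, 1, 28, 28)  # fake reconstructions
#   mu = torch.zeros(8, 20)             # latent means
#   logvar = torch.zeros(8, 20)         # latent log-variances
#   loss = loss_function(recon_x, x, mu, logvar)  # scalar: summed MSE + KLD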
# Plot the KLD and MSE loss curves side by side.
plt.figure(figsize=(40, 10))
plt.subplot(121)
plt.title('KLD loss')
plt.plot(kld_history)
plt.subplot(122)
plt.title('MSE loss')
plt.plot(mse_history)
plt.savefig(output_path + '/fig1_2.jpg')

# Reload the trained VAE for evaluation.
model = VAE(64, 1e-5)
model.load_state_dict(torch.load('VAE/vae_state_model.pth'))
if cuda:
    model = model.cuda()

# Collect the first 50 test batches (and their labels) for a t-SNE
# projection of the latent space.
data_for_tsne = []
label_for_tsne = []
mse = 0
for (data, label) in test_dataloader:
    # if cuda:
    #     data = data.cuda()
    # recon_img, mu, logvar = model(data)
    # loss = model.loss_function(data, recon_img, mu, logvar)
    # mse += torch.sum(model.latest_loss()[0])
    if len(data_for_tsne) < 50:
        data_for_tsne.append(data)
        label_for_tsne.append(label)
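# Hedged sketch of the t-SNE step that the collection loop above prepares
# for. `model.encode` returning (mu, logvar) is an assumption about this
# VAE's interface, not something defined in this script.
#
#   from sklearn.manifold import TSNE
#
#   with torch.no_grad():
#       feats = torch.cat([model.encode(d.cuda() if cuda else d)[0].cpu()
#                          for d in data_for_tsne])
#   labels = torch.cat(label_for_tsne).numpy()
#   emb = TSNE(n_components=2).fit_transform(
#       feats.view(feats.size(0), -1).numpy())
#   plt.figure()
#   plt.scatter(emb[:, 0], emb[:, 1], c=labels, s=4)
#   plt.savefig(output_path + '/tsne.jpg')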