Example #1
 def train_loss(self, outputs, inputs):
     """Training loss.

     Expects:
         inputs['image']: original images
         outputs['recon_x']: reconstructed images
         outputs['mu']: latent mean
         outputs['logvar']: latent log variance
     """
     x = inputs['image']
     recon_x = outputs['recon_x']
     mu = outputs['mu']
     logvar = outputs['logvar']
     
     if self.use_gpu and not x.is_cuda:
         x = x.cuda()
         
     if self.use_gpu and not recon_x.is_cuda:
         recon_x = recon_x.cuda()
         
     if self.use_gpu and not mu.is_cuda:
         mu = mu.cuda()
         
     if self.use_gpu and not logvar.is_cuda:
         logvar = logvar.cuda()
     
     recon_loss = self.recon_loss(recon_x, x)
     KLD_loss, KLD_per_latent_dim, KLD_var = helper.KLD_loss(mu, logvar)
     total_loss = recon_loss + KLD_loss
     return {'total': total_loss, 'recon': recon_loss, 'KLD': KLD_loss}
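Note: `helper.KLD_loss` is not shown on this page. Below is a minimal sketch of the closed-form Gaussian KL term it presumably computes; the exact reductions and the definitions of the per-dimension and variance outputs are assumptions based on how the return values are used in these examples.

 import torch

 def KLD_loss(mu, logvar):
     # Closed-form KL divergence KL(N(mu, sigma^2) || N(0, I)) for a diagonal
     # Gaussian, where logvar = log(sigma^2). Assumed reductions: mean over
     # the batch, sum over latent dimensions.
     KLD_elementwise = -0.5 * (1 + logvar - mu.pow(2) - logvar.exp())  # (batch, n_latents)
     KLD_per_latent_dim = KLD_elementwise.mean(dim=0)                  # (n_latents,)
     KLD = KLD_per_latent_dim.sum()                                    # scalar
     KLD_var = KLD_elementwise.sum(dim=1).var()                        # variance over the batch (assumed)
     return KLD, KLD_per_latent_dim, KLD_var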
Example #2
 def valid_losses(self, outputs, inputs):
     x = inputs['image']
     recon_x = outputs['recon_x']
     mu = outputs['mu']
     logvar = outputs['logvar']
     
     if self.use_gpu and not x.is_cuda:
         x = x.cuda()
         
     if self.use_gpu and not recon_x.is_cuda:
         recon_x = recon_x.cuda()
         
     if self.use_gpu and not mu.is_cuda:
         mu = mu.cuda()
         
     if self.use_gpu and not logvar.is_cuda:
         logvar = logvar.cuda()
     
     BCE_loss = helper.BCE_with_digits_loss(recon_x, x)
     MSE_loss = helper.MSE_loss(recon_x, x)
     if self.reconstruction_dist == "bernouilli":  # (sic) spelling matches the config value
         recon_loss = BCE_loss
     elif self.reconstruction_dist == "gaussian":
         recon_loss = MSE_loss
     else:
         raise ValueError('unknown reconstruction_dist: {!r}'.format(self.reconstruction_dist))
     KLD_loss, KLD_per_latent_dim, KLD_var = helper.KLD_loss(mu, logvar)
     total_loss = recon_loss + KLD_loss
     
     return {'total': total_loss, 'recon': recon_loss, 'BCE': BCE_loss, 'MSE': MSE_loss, 'KLD': KLD_loss, 'KLD_var': KLD_var}
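Examples #1 and #4 call `self.recon_loss(recon_x, x)` without showing where it is bound. A minimal, hypothetical sketch of how it could be wired up at `__init__` time, using the same helpers as Example #2 (note that "digits" in `BCE_with_digits_loss` is presumably a typo for "logits" in the original helper name):

 if self.reconstruction_dist == "bernouilli":  # (sic) spelling matches the config value used above
     self.recon_loss = helper.BCE_with_digits_loss  # expects logits for recon_x
 elif self.reconstruction_dist == "gaussian":
     self.recon_loss = helper.MSE_loss
 else:
     raise ValueError('unknown reconstruction_dist: {!r}'.format(self.reconstruction_dist))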
Example #3
    def valid_losses(self, outputs, inputs):
        """Validation losses.

        Expects:
            inputs['image']: original images
            outputs['recon_x']: reconstructed images
            outputs['mu']: latent mean
            outputs['logvar']: latent log variance
        """
        x = inputs['image']
        recon_x = outputs['recon_x']
        mu = outputs['mu']
        logvar = outputs['logvar']

        if self.use_gpu and not x.is_cuda:
            x = x.cuda()

        if self.use_gpu and not recon_x.is_cuda:
            recon_x = recon_x.cuda()

        if self.use_gpu and not mu.is_cuda:
            mu = mu.cuda()

        if self.use_gpu and not logvar.is_cuda:
            logvar = logvar.cuda()

        BCE = helper.BCE_with_digits_loss(recon_x, x)
        KLD, KLD_per_latent_dim, KLD_var = helper.KLD_loss(mu, logvar)
        total = (BCE + self.beta *
                 (KLD + float(self.add_var_to_KLD_loss) * KLD_var))

        return {'total': total, 'BCE': BCE, 'KLD': KLD, 'KLD_var': KLD_var}
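Example #3's total matches the beta-VAE objective (Higgins et al., 2017), with an optional extra term that also penalizes the batch variance of the KL:

\mathrm{total} = \mathrm{BCE} + \beta \bigl( \mathrm{KLD} + \mathbb{1}[\texttt{add\_var\_to\_KLD\_loss}] \cdot \mathrm{KLD\_var} \bigr)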
Example #4
 def train_loss(self, outputs, inputs):
     x = inputs['image']
     recon_x = outputs['recon_x']
     mu = outputs['mu']
     logvar = outputs['logvar']
     
     if self.use_gpu and not x.is_cuda:
         x = x.cuda()
         
     if self.use_gpu and not recon_x.is_cuda:
         recon_x = recon_x.cuda()
         
     if self.use_gpu and not mu.is_cuda:
         mu = mu.cuda()
         
     if self.use_gpu and not logvar.is_cuda:
         logvar = logvar.cuda()
     
     recon_loss = self.recon_loss(recon_x, x)
     KLD_loss, KLD_per_latent_dim, KLD_var = helper.KLD_loss(mu, logvar)
     total_loss = recon_loss + self.gamma * (KLD_loss - self.C).abs()
     return {'total': total_loss, 'recon': recon_loss, 'KLD': KLD_loss}
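Example #4's `recon_loss + self.gamma * (KLD_loss - self.C).abs()` matches the capacity-controlled beta-VAE objective of Burgess et al. (2018), where the KL target C is typically annealed linearly during training. A hypothetical sketch of such a schedule (the attributes `C_min`, `C_max`, and `C_n_steps` are assumed names, not shown on this page):

 def update_encoding_capacity(self, n_iters):
     # Linearly increase the KL target C from C_min to C_max over C_n_steps
     # iterations, then hold it constant (hypothetical schedule).
     frac = min(float(n_iters) / self.C_n_steps, 1.0)
     self.C = self.C_min + frac * (self.C_max - self.C_min)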
Example #5
import os

import numpy as np
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader

# Project-local modules assumed by this example (not shown on this page):
# configuration, helper (loss functions), DatasetHDF5, and the ad package.

def test():
    # configuration file
    print("Loading the configuration ... \n")
    config = configuration.Config()
    model_init_params = config.model_init_params
    img_size = model_init_params['input_size']
    
    # load datasets 
    print("Loading the test dataset ... \n")
    test_dataset = DatasetHDF5(filepath=config.dataset_filepath,
                               split='test',
                               img_size=img_size)
    # NB: the reshape calls in the test loop below assume test_batch_size == 1
    test_loader = DataLoader(test_dataset,
                             batch_size=config.test_batch_size,
                             shuffle=True)

    # test_npz_filepath = config.test_npz_filepath
    # test_dataset_npz = np.load(test_npz_filepath)
    # test_batch_size = 1
    # test_dataset = Dataset(img_size)
    # test_dataset.update(test_dataset_npz['observations'].shape[0], torch.from_numpy(test_dataset_npz['observations']).float(), test_dataset_npz['labels'])
    # test_loader = DataLoader(test_dataset, batch_size=test_batch_size, shuffle=True)
    
    # load the trained model
    print("Loading the trained model ... \n")
    model_path = 'training/models/best_weight_model.pth'
    if os.path.exists(model_path):
        saved_model = torch.load(model_path, map_location='cpu')
        model_cls = getattr(ad.representations.static.pytorchnnrepresentation, saved_model['type'])
        model = model_cls(**saved_model['init_params'])
        model.load_state_dict(saved_model['state_dict'])
        model.eval()
        model.use_gpu = False
    else:
        raise ValueError('The model {!r} does not exist!'.format(model_path))
        
    # output files
    output_testing_folder = 'testing'
    if os.path.exists(output_testing_folder):
        print('WARNING: testing folder already exists')
    else:
        os.makedirs(output_testing_folder)
    
    output_test_dataset_filename = os.path.join(output_testing_folder, 'output_test_dataset.npz')
    if os.path.exists(output_test_dataset_filename):
        print('WARNING: the output test dataset already exists, skipping the forward pass on test dataset')
        return
    
    
    # prepare output arrays
    input_size = saved_model['init_params']['input_size']
    n_latents = saved_model['init_params']['n_latents']
    # len(test_loader) counts batches, not images; size the per-image arrays
    # from the dataset instead (the loop below assumes batch_size == 1)
    n_images = len(test_dataset)
    test_x = np.empty((n_images, input_size[0], input_size[1]))
    test_recon_x = np.empty((n_images, input_size[0], input_size[1]))
    test_y = np.empty(n_images)
    test_recon_y = np.empty((n_images, 3))
    test_mu = np.empty((n_images, n_latents))
    test_sigma = np.empty((n_images, n_latents))  # for an AE: mu=z, sigma=0
    test_MSE_loss = np.empty(n_images)
    test_BCE_loss = np.empty(n_images)
    test_KLD_loss = np.empty(n_images)
    test_KLD_loss_per_latent_dim = np.empty((n_images, n_latents))
    test_KLD_loss_var = np.empty(n_images)
    test_CE_loss = np.empty(n_images)
    
    
    # Loop over the test images:
    print("Testing the images ... \n")
    with torch.no_grad():
        idx = 0
    
        for data in test_loader:
            # input (deprecated Variable wrappers removed; not needed under torch.no_grad())
            input_img = data['image']
            input_label = data['label']
            test_x[idx,:,:] = input_img.cpu().data.numpy().reshape((input_size[0], input_size[1]))
            test_y[idx] = input_label.cpu().data.numpy()
            # forward pass outputs
            outputs = model(input_img)
            
            test_mu[idx,:] = outputs['mu'].cpu().data.numpy().reshape(n_latents)
            test_sigma[idx,:] = outputs['logvar'].exp().sqrt().cpu().data.numpy().reshape(n_latents)
            test_recon_x[idx,:,:] = torch.sigmoid(outputs['recon_x']).cpu().data.numpy().reshape((input_size[0], input_size[1]))
            # compute reconstruction losses
            MSE_loss = helper.MSE_loss(torch.sigmoid(outputs['recon_x']), input_img).cpu().data.numpy().reshape(1)
            BCE_loss = helper.BCE_with_digits_loss(outputs['recon_x'], input_img).cpu().data.numpy().reshape(1)
            
            if 'recon_y' in outputs:
                test_recon_y[idx,:] = F.softmax(outputs['recon_y'], dim=1).cpu().data.numpy().reshape(3)
                CE_loss = (F.cross_entropy(outputs['recon_y'], input_label, reduction='sum') / input_label.size()[0])
            else:
                test_recon_y[idx,:] = np.zeros(3)
                CE_loss = 0
                
            
            KLD_loss, KLD_loss_per_latent_dim, KLD_loss_var = helper.KLD_loss(outputs['mu'], outputs['logvar'])
            test_MSE_loss[idx] = MSE_loss
            test_BCE_loss[idx] = BCE_loss
            test_CE_loss[idx] = CE_loss
            test_KLD_loss[idx] = KLD_loss.cpu().data.numpy().reshape(1)
            test_KLD_loss_per_latent_dim[idx,:] = KLD_loss_per_latent_dim.cpu().data.numpy().reshape(n_latents)
            test_KLD_loss_var[idx] = KLD_loss_var.cpu().data.numpy().reshape(1)
    
            idx += 1
        
        
    # Save in the experiment test folder
    print("Saving the results ... \n")
    np.savez(output_test_dataset_filename,
             x=test_x, recon_x=test_recon_x, y=test_y, recon_y=test_recon_y,
             mu=test_mu, sigma=test_sigma,
             MSE_loss=test_MSE_loss, BCE_loss=test_BCE_loss,
             KLD_loss=test_KLD_loss, KLD_loss_per_latent_dim=test_KLD_loss_per_latent_dim,
             KLD_loss_var=test_KLD_loss_var, CE_loss=test_CE_loss)
    return
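The keys written by `np.savez` above are fixed, so the saved results can be reloaded for analysis. A minimal usage sketch:

 import numpy as np

 # Reload the arrays written by test() above.
 results = np.load('testing/output_test_dataset.npz')
 print(results.files)               # x, recon_x, y, recon_y, mu, sigma, ...
 print(results['mu'].shape)         # (n_test_images, n_latents)
 print(results['MSE_loss'].mean())  # average per-image reconstruction MSE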