import torch
import torch.nn.functional as F


def evaluate_IWAE(self, K, inputs, x_tilda, mu, log_variance):
    # Draw K reparameterized samples z = mu + sigma * eps per batch element.
    z_samples, mu_h1, sigma, eps_samples = self.reparametrize_iwae(K, mu, log_variance)

    # log q(z|x) up to an additive constant; shape (K, B) -- the first
    # dimension indexes the K importance samples.
    log_Qz_x = torch.sum(-0.5 * eps_samples ** 2 - torch.log(sigma), -1)

    # Decode each of the K samples separately.
    p_set = []
    for z in z_samples:
        p_set.append(self.decoder(z))
    p_x_given_z = torch.stack(p_set)  # (K, B, C, H, W)

    # log p(z) under the standard-normal prior, up to a constant; (K, B).
    log_P_z = torch.sum(-0.5 * z_samples ** 2, -1)

    # Per-sample Bernoulli reconstruction term. The original computed this
    # from the single reconstruction x_tilda, so all K importance weights
    # shared one likelihood and the stacked p_x_given_z was never used;
    # scoring each decoded sample fixes that (x_tilda is kept in the
    # signature for compatibility but is no longer read).
    cross_entropy = F.binary_cross_entropy(
        p_x_given_z, inputs.unsqueeze(0).expand_as(p_x_given_z), reduction='none')
    cross_entropy = cross_entropy.sum(-1).sum(-1).sum(-1)  # (K, B)

    # Normalized importance weights, computed stably by subtracting the
    # max log-weight before exponentiating; no gradient flows through them.
    log_weight = log_P_z - cross_entropy - log_Qz_x
    log_weight = log_weight - torch.max(log_weight, 0)[0]
    weight = torch.exp(log_weight)
    weight = weight / torch.sum(weight, 0)
    weight = weight.detach()  # replaces the deprecated Variable(...) wrapper

    # Importance-weighted bound, summed over samples and batch.
    loss = torch.sum(weight * (log_P_z - cross_entropy - log_Qz_x))
    return loss
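The helper reparametrize_iwae is called above but not shown in this snippet. A minimal sketch consistent with how its outputs are used (K independent noise draws per batch element; mu_h1 is never read, so it is simply returned as mu here) might look like this -- the body is an assumption, not the original implementation:

def reparametrize_iwae(self, K, mu, log_variance):
    # std from log-variance; shape (B, D)
    sigma = torch.exp(0.5 * log_variance)
    # K independent standard-normal draws; shape (K, B, D)
    eps_samples = torch.randn(K, *mu.shape, device=mu.device)
    # reparameterized samples z = mu + sigma * eps ~ q(z|x)
    z_samples = mu + sigma * eps_samples
    return z_samples, mu, sigma, eps_samples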
def loss_function(x_tilda, x, mu, log_variance):
    # Bernoulli reconstruction term, summed over all pixels and the batch.
    cross_entropy = F.binary_cross_entropy(x_tilda, x, reduction='sum')

    # Closed-form KL(q(z|x) || N(0, I)) for a diagonal Gaussian posterior:
    # -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2).
    kl_divergence = -0.5 * torch.sum(1 + log_variance - mu.pow(2) - log_variance.exp())

    # Negative ELBO: reconstruction loss plus KL regularizer.
    return cross_entropy + kl_divergence
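For context, this negative-ELBO loss would typically be driven by a training step like the sketch below; the model.encoder/model.decoder attribute names and the optimizer are assumptions, not part of the original snippet:

import torch

def train_step(model, optimizer, x):
    optimizer.zero_grad()
    mu, log_variance = model.encoder(x)
    # Reparameterization trick: z = mu + sigma * eps, eps ~ N(0, I),
    # so the sampling step stays differentiable w.r.t. mu and sigma.
    std = torch.exp(0.5 * log_variance)
    z = mu + std * torch.randn_like(std)
    x_tilda = model.decoder(z)  # Bernoulli probabilities in [0, 1]
    loss = loss_function(x_tilda, x, mu, log_variance)
    loss.backward()
    optimizer.step()
    return loss.item()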
Example #3
def Loss(x, rec_x, mean, logvar):
    # Reconstruction term; reduction='sum' replaces the deprecated
    # size_average=False argument.
    bce_loss = F.binary_cross_entropy(input=rec_x, target=x, reduction='sum')
    # Closed-form KL(N(mean, exp(logvar)) || N(0, I)).
    kld_loss = 0.5 * torch.sum(mean.pow(2) + logvar.exp() - logvar - 1)
    return bce_loss + kld_loss
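The kld_loss term is the closed-form KL divergence between the diagonal Gaussian posterior and the standard-normal prior. A quick sanity check against torch.distributions (a sketch, not part of the original snippet):

import torch
from torch.distributions import Normal, kl_divergence

mean = torch.randn(8, 20)
logvar = torch.randn(8, 20)

closed_form = 0.5 * torch.sum(mean.pow(2) + logvar.exp() - logvar - 1)
# Reference value from torch.distributions; scale is the std exp(0.5*logvar).
reference = kl_divergence(Normal(mean, (0.5 * logvar).exp()),
                          Normal(torch.zeros_like(mean),
                                 torch.ones_like(logvar))).sum()
assert torch.allclose(closed_form, reference, atol=1e-5)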
Example #4
def loss_function(x_tild, x, mu, log_sigma):
    # Flatten images to (batch, 784) so input and target shapes match.
    x = x.view(-1, 784)
    x_tild = x_tild.view(-1, 784)
    bce = F.binary_cross_entropy(x_tild, x, reduction='sum')
    # Despite the name, log_sigma is treated as the log-variance by this
    # formula. Written here in the standard form; the original returned the
    # same value as Bce - KL_D with the minus sign folded into KL_D.
    kl_d = -0.5 * torch.sum(1 + log_sigma - mu ** 2 - log_sigma.exp())
    return bce + kl_d
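A short usage sketch with MNIST-shaped dummy tensors (shapes and the latent dimension 20 are assumptions) showing what this version expects:

import torch

x = torch.rand(128, 1, 28, 28)       # images in [0, 1]; flattened inside
x_tild = torch.rand(128, 1, 28, 28)  # decoder probabilities, same shape
mu = torch.randn(128, 20)            # posterior mean
log_sigma = torch.randn(128, 20)     # posterior log-variance

loss = loss_function(x_tild, x, mu, log_sigma)  # scalar: summed BCE + KL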