Code example #1
    def transition(self, z, temperature):
        # Two fully connected layers, each followed by batch norm and ReLU.
        h1 = self.relu(self.bn5(self.fc_trans_1(z)))
        h2 = self.relu(self.bn6(self.fc_trans_1_1(h1)))

        # Mean and log-variance of the transition distribution.
        mu = self.fc_z_mu(h2)
        sigma = self.fc_z_sigma(h2)

        # Standard normal noise for the reparameterization trick.
        eps = Variable(mu.data.new(mu.size()).normal_())

        # Add the fixed, temperature-dependent offset log(args.sigma * temperature)
        # to the learned log-variance.
        if args.cuda:
            sigma = Variable(torch.log(torch.FloatTensor(1).fill_(args.sigma * temperature)).cuda()) + sigma  # TODO: Look into this
        else:
            sigma = Variable(torch.log(torch.FloatTensor(1).fill_(args.sigma * temperature))) + sigma

        # Reparameterized sample: z_new = mu + exp(0.5 * sigma) * eps,
        # then normalize across the batch and clamp.
        z_new = eps.mul(sigma.mul(0.5).exp_()).add_(mu)
        z_new = (z_new - z_new.mean(0)) / (0.001 + z_new.std(0))

        # Log-density of the original z under the reverse transition N(mu, exp(sigma)).
        log_p_reverse = log_normal2(z, mu, sigma, eps=1e-6).sum(1).mean()
        z_new = torch.clamp(z_new, min=-2, max=2)

        return z_new, log_p_reverse, sigma, h2
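All of these examples call log_normal2, which is not shown in the snippets. Below is a minimal sketch of what such a helper typically computes, an element-wise Gaussian log-density parameterized by mean and log-variance with an eps guard; this is an assumption inferred from the call sites, not the original implementation.

    import math
    import torch

    def log_normal2(x, mean, log_var, eps=1e-6):
        # Element-wise log N(x; mean, exp(log_var)); eps keeps the variance
        # strictly positive. Assumed implementation, not the original helper.
        log_var = torch.log(torch.exp(log_var) + eps)
        return -0.5 * (math.log(2 * math.pi) + log_var
                       + (x - mean) ** 2 / torch.exp(log_var))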
Code example #2

    def transition(self, z, temperature, step):
        # Five fully connected layers, each with a per-step batch norm and activation.
        h1 = self.act(self.bn8_list[step](self.fc_trans_1(z)))
        h2 = self.act(self.bn9_list[step](self.fc_trans_1_1(h1)))
        h3 = self.act(self.bn10_list[step](self.fc_trans_1_2(h2)))
        h4 = self.act(self.bn10_1_list[step](self.fc_trans_1_3(h3)))
        h5 = self.act(self.bn10_2_list[step](self.fc_trans_1_4(h4)))
        h5 = torch.clamp(h5, min=0, max=5)

        # Mean and log-variance heads; original author's note: why is no
        # non-linearity applied here?
        mu = self.bn6_list[step](self.fc_z_mu(h5))
        sigma = self.bn7_list[step](self.fc_z_sigma(h5))

        # Standard normal noise for the reparameterization trick.
        eps = Variable(mu.data.new(mu.size()).normal_())

        # Fixed, temperature-dependent noise scale sqrt(args.sigma * temperature).
        if args.cuda:
            sigma_ = Variable(torch.sqrt(torch.FloatTensor(1).fill_(args.sigma * temperature)).cuda())
        else:
            sigma_ = Variable(torch.sqrt(torch.FloatTensor(1).fill_(args.sigma * temperature)))

        # Reparameterized sample: z_new = mu + sqrt(args.sigma * temperature) * exp(0.5 * sigma) * eps,
        # then normalize across the batch.
        z_new = eps.mul(sigma.mul(0.5).exp_()).mul(sigma_).add_(mu)
        z_new = (z_new - z_new.mean(0)) / (0.001 + z_new.std(0))

        # Total log-variance of the reverse transition: log(args.sigma * temperature) + sigma.
        if args.cuda:
            sigma_ = Variable(torch.log(torch.FloatTensor(1).fill_(args.sigma * temperature)).cuda()) + sigma
        else:
            sigma_ = Variable(torch.log(torch.FloatTensor(1).fill_(args.sigma * temperature))) + sigma

        # Log-density of the original z under the reverse transition N(mu, exp(sigma_)).
        log_p_reverse = log_normal2(z, mu, sigma_, eps=1e-6).mean()
        z_new = torch.clamp(z_new, min=-4, max=4)

        return z_new, log_p_reverse, mu, sigma
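The snippets use the pre-0.4 Variable API and in-place tensor methods for the sampling step. For reference, here is a minimal sketch of the same reparameterization in current PyTorch, assuming sigma holds a log-variance and base_sigma plays the role of args.sigma; the names are illustrative, not from the original code.

    import torch

    def reparameterize(mu, log_var, base_sigma, temperature):
        # z_new = mu + sqrt(base_sigma * temperature) * exp(0.5 * log_var) * eps
        eps = torch.randn_like(mu)                 # standard normal noise
        scale = (base_sigma * temperature) ** 0.5  # fixed, temperature-dependent scale
        return mu + scale * torch.exp(0.5 * log_var) * eps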
Code example #3
    def transition(self, z, temperature):
        # Three fully connected layers with batch norm and ReLU; clamp the activations.
        h1 = self.relu(self.bn5(self.fc_trans_1(z)))
        h2 = self.relu(self.bn6(self.fc_trans_1_1(h1)))
        h3 = self.relu(self.bn7(self.fc_trans_1_2(h2)))
        h3 = torch.clamp(h3, min=0, max=5)

        # Mean and log-variance of the transition distribution.
        mu = self.fc_z_mu(h3)
        sigma = self.fc_z_sigma(h3)

        # Standard normal noise for the reparameterization trick.
        eps = Variable(mu.data.new(mu.size()).normal_())

        # Fixed, temperature-dependent noise scale sqrt(args.sigma * temperature).
        if args.cuda:
            sigma_ = Variable(
                torch.sqrt(torch.FloatTensor(1).fill_(args.sigma * temperature)).cuda())
        else:
            sigma_ = Variable(
                torch.sqrt(torch.FloatTensor(1).fill_(args.sigma * temperature)))

        # Reparameterized sample: z_new = mu + sqrt(args.sigma * temperature) * exp(0.5 * sigma) * eps,
        # then normalize across the batch.
        z_new = eps.mul(sigma.mul(0.5).exp_()).mul(sigma_).add_(mu)
        z_new = (z_new - z_new.mean(0)) / (0.001 + z_new.std(0))

        # Total log-variance of the reverse transition: log(args.sigma * temperature) + sigma.
        if args.cuda:
            sigma_ = Variable(
                torch.log(torch.FloatTensor(1).fill_(args.sigma * temperature)).cuda()) + sigma
        else:
            sigma_ = Variable(
                torch.log(torch.FloatTensor(1).fill_(args.sigma * temperature))) + sigma

        # Log-density of the original z under the reverse transition N(mu, exp(sigma_)).
        log_p_reverse = log_normal2(z, mu, sigma_, eps=1e-6).sum(1).mean()
        z_new = torch.clamp(z_new, min=-4, max=4)

        return z_new, log_p_reverse, sigma, h2
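The log_p_reverse term evaluates the density of the previous z under a Gaussian whose log-variance is the learned sigma shifted by log(args.sigma * temperature). An equivalent way to compute it with torch.distributions is sketched below, under the same log-variance assumption and with illustrative names.

    import torch
    from torch.distributions import Normal

    def reverse_log_prob(z, mu, log_var, base_sigma, temperature):
        # Adding log(base_sigma * temperature) to the log-variance is the same
        # as multiplying the variance by base_sigma * temperature.
        total_log_var = torch.log(torch.tensor(base_sigma * temperature)) + log_var
        std = torch.exp(0.5 * total_log_var)
        return Normal(mu, std).log_prob(z).sum(1).mean()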
Code example #4
        # (Fragment: tail of a transition() method, followed by the start of decode().)
        # Normalize the reparameterized sample across the batch.
        z_new = (z_new - z_new.mean(0)) / (0.001 + z_new.std(0))

        # Total log-variance of the reverse transition: log(args.sigma * temperature) + sigma.
        if args.cuda:
            sigma_ = Variable(torch.log(torch.FloatTensor(1).fill_(args.sigma * temperature)).cuda()) + sigma
        else:
            sigma_ = Variable(torch.log(torch.FloatTensor(1).fill_(args.sigma * temperature))) + sigma

        # Log-density of the original z under the reverse transition N(mu, exp(sigma_)).
        log_p_reverse = log_normal2(z, mu, sigma_, eps=1e-6).mean()
        z_new = torch.clamp(z_new, min=-4, max=4)
        return z_new, log_p_reverse, sigma, h2

    def decode(self, z_new):
        # Project the latent code with a fully connected layer, reshape the flat
        # vector into 256 feature maps of size 8x8, then continue with conv layers.
        d0 = self.relu(self.bn8(self.fc_z_x_1(z_new)))
        d0 = d0.view(-1, 256, 8, 8)
        d1 = self.relu(self.bn9(self.conv_z_x_2(d0)))
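The decode fragment relies on reshaping a fully connected output into feature maps before convolutional decoding. A self-contained sketch of that reshape pattern follows; the layer types and sizes here are illustrative assumptions, not the original fc_z_x_1 / conv_z_x_2 definitions.

    import torch
    import torch.nn as nn

    fc = nn.Linear(128, 256 * 8 * 8)
    up = nn.ConvTranspose2d(256, 128, kernel_size=4, stride=2, padding=1)

    z_new = torch.randn(16, 128)                     # a batch of latent codes
    d0 = torch.relu(fc(z_new)).view(-1, 256, 8, 8)   # flat vector -> (N, 256, 8, 8)
    d1 = torch.relu(up(d0))                          # upsampled to (N, 128, 16, 16)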
Code example #5
    def transition(self, z, temperature, step):
        # Three fully connected layers with per-step batch norm and activation.
        h = self.act(self.bn7_list[step](self.fc_trans_1(z)))
        h = self.act(self.bn8_list[step](self.fc_trans_2(h)))
        h = self.act(self.bn9_list[step](self.fc_trans_3(h)))

        # Two extra layers when more than three transition steps are configured.
        if self.args.transition_steps > 3:
            h = self.act(self.bn10_list[step](self.fc_trans_4(h)))
            h = self.act(self.bn11_list[step](self.fc_trans_5(h)))

        # Mean and log-variance heads. (An earlier variant applied
        # bn5_list/bn6_list batch norm here and clamped the activations.)
        mu = self.fc_z_mu(h)
        sigma = self.fc_z_sigma(h)

        # Standard normal noise for the reparameterization trick.
        eps = Variable(mu.data.new(mu.size()).normal_())

        # Fixed, temperature-dependent noise scale sqrt(self.args.sigma * temperature).
        if self.args.cuda:
            sigma_ = Variable(
                torch.sqrt(torch.FloatTensor(1).fill_(self.args.sigma * temperature)).cuda())
        else:
            sigma_ = Variable(
                torch.sqrt(torch.FloatTensor(1).fill_(self.args.sigma * temperature)))

        # Reparameterized sample: z_new = mu + sqrt(self.args.sigma * temperature) * exp(0.5 * sigma) * eps.
        # (Batch normalization of z_new is disabled in this variant.)
        z_new = eps.mul(sigma.mul(0.5).exp_()).mul(sigma_).add_(mu)

        # Total log-variance of the reverse transition: log(self.args.sigma * temperature) + sigma.
        if self.args.cuda:
            sigma_ = Variable(
                torch.log(torch.FloatTensor(1).fill_(self.args.sigma * temperature)).cuda()) + sigma
        else:
            sigma_ = Variable(
                torch.log(torch.FloatTensor(1).fill_(self.args.sigma * temperature))) + sigma

        # Log-density of the original z under the reverse transition N(mu, exp(sigma_)).
        log_p_reverse = log_normal2(z, mu, sigma_, eps=1e-6).mean()
        z_new = torch.clamp(z_new, min=-4, max=4)
        return z_new, log_p_reverse, mu, sigma
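Several of the examples index their batch-norm layers by step (bn7_list[step], bn8_list[step], and so on), so each walkback step keeps its own normalization statistics. A minimal sketch of how such per-step lists are usually registered, assuming nn.ModuleList and illustrative sizes rather than the original constructor:

    import torch.nn as nn

    class TransitionOperator(nn.Module):
        def __init__(self, dim_h=1024, num_steps=5):
            super().__init__()
            self.fc_trans_1 = nn.Linear(dim_h, dim_h)
            # ModuleList registers each per-step BatchNorm as a submodule, so
            # every step has its own running statistics and affine parameters.
            self.bn7_list = nn.ModuleList(
                [nn.BatchNorm1d(dim_h) for _ in range(num_steps)])
            self.act = nn.ReLU()

        def first_layer(self, z, step):
            return self.act(self.bn7_list[step](self.fc_trans_1(z)))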