Example #1
    def calculate_loss(self, x, beta=1., average=False):
        # pass through VAE
        x_mean, x_logvar, z1_q, z1_q_mean, z1_q_logvar, z2_q, z2_q_mean, z2_q_logvar, z1_p_mean, z1_p_logvar = self.forward(x)

        # RE
        if self.args.input_type == 'binary':
            RE = log_Bernoulli(x, x_mean, dim=1)
        elif self.args.input_type == 'gray' or self.args.input_type == 'continuous':
            RE = -log_Logistic_256(x, x_mean, x_logvar, dim=1)
        else:
            raise Exception('Wrong input type!')

        # KL
        log_p_z1 = log_Normal_diag(z1_q, z1_p_mean, z1_p_logvar, dim=1)
        log_q_z1 = log_Normal_diag(z1_q, z1_q_mean, z1_q_logvar, dim=1)
        log_p_z2 = self.log_p_z2(z2_q)
        log_q_z2 = log_Normal_diag(z2_q, z2_q_mean, z2_q_logvar, dim=1)
        KL = -(log_p_z1 + log_p_z2 - log_q_z1 - log_q_z2)

        # full loss
        loss = -RE + beta * KL

        if average:
            loss = torch.mean(loss)
            RE = torch.mean(RE)
            KL = torch.mean(KL)

        return loss, RE, KL
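
For reference, the objective these `calculate_loss` variants compute is a single-sample Monte-Carlo estimate of the negative beta-weighted ELBO for a two-level VAE:

\[
\mathcal{L}(x) = -\underbrace{\log p(x \mid z_1, z_2)}_{\mathrm{RE}}
+ \beta \underbrace{\bigl(\log q(z_1 \mid x, z_2) + \log q(z_2 \mid x) - \log p(z_1 \mid z_2) - \log p(z_2)\bigr)}_{\mathrm{KL}}
\]

Reading `beta` as a warm-up coefficient annealed from 0 to 1 during training is an assumption based on the docstrings below; the code itself does not say.
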
Example #2
    def calculate_loss(self, x, beta=1., average=False):
        '''
        :param x: input image(s)
        :param beta: a hyperparam for warmup
        :param average: whether to average loss or not
        :return: value of a loss function
        '''
        # pass through VAE
        x_mean, x_logvar, z1_q, z1_q_mean, z1_q_logvar, z2_q, z2_q_mean, z2_q_logvar, z1_p_mean, z1_p_logvar = self.forward(x)

        # RE
        if self.args.input_type == 'binary':
            RE = log_Bernoulli(x, x_mean, dim=1)
        elif self.args.input_type == 'gray' or self.args.input_type == 'continuous':
            RE = -log_Logistic_256(x, x_mean, x_logvar, dim=1)
        else:
            raise Exception('Wrong input type!')

        # KL
        log_p_z1 = log_Normal_diag(z1_q, z1_p_mean, z1_p_logvar, dim=1)
        log_q_z1 = log_Normal_diag(z1_q, z1_q_mean, z1_q_logvar, dim=1)
        log_p_z2 = self.log_p_z2(z2_q)
        log_q_z2 = log_Normal_diag(z2_q, z2_q_mean, z2_q_logvar, dim=1)
        KL = -(log_p_z1 + log_p_z2 - log_q_z1 - log_q_z2)

        loss = -RE + beta * KL

        if average:
            loss = torch.mean(loss)
            RE = torch.mean(RE)
            KL = torch.mean(KL)

        return loss, RE, KL
Example #3
    def calculate_loss(self, x, beta=1., average=False):
        # pass through VAE
        x_mean, x_logvar, z1_q, z1_q_mean, z1_q_logvar, z2_q, z2_q_mean, z2_q_logvar, z1_p_mean, z1_p_logvar = self.forward(x)

        # RE
        if self.args.input_type == 'binary':
            RE = log_Bernoulli(x, x_mean, dim=1)
        elif self.args.input_type == 'gray' or self.args.input_type == 'continuous':
            RE = -log_Logistic_256(x, x_mean, x_logvar, dim=1)
        else:
            raise Exception('Wrong input type!')

        # KL
        log_p_z1 = log_Normal_diag(z1_q, z1_p_mean, z1_p_logvar, dim=1)
        log_q_z1 = log_Normal_diag(z1_q, z1_q_mean, z1_q_logvar, dim=1)
        log_p_z2 = self.log_p_z2(z2_q)
        log_q_z2 = log_Normal_diag(z2_q, z2_q_mean, z2_q_logvar, dim=1)
        KL = -(log_p_z1 + log_p_z2 - log_q_z1 - log_q_z2)

        # full loss
        loss = -RE + beta * KL

        if average:
            loss = torch.mean(loss)
            RE = torch.mean(RE)
            KL = torch.mean(KL)

        return loss, RE, KL
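
A minimal usage sketch for these loss functions inside a training step; `model`, `optimizer`, `train_loader`, `epochs`, and `warmup_epochs` are hypothetical names, not taken from the source:

    # hypothetical training loop around calculate_loss (all names are assumptions)
    for epoch in range(1, epochs + 1):
        beta = min(1.0, float(epoch) / warmup_epochs)  # linear KL warm-up
        for x, _ in train_loader:
            optimizer.zero_grad()
            loss, RE, KL = model.calculate_loss(x, beta=beta, average=True)
            loss.backward()
            optimizer.step()
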
Example #4
    def calculate_loss(self, x, beta=1., average=False):
        '''
        :param x: input image(s)
        :param beta: a hyperparam for warmup
        :param average: whether to average loss or not
        :return: value of a loss function
        '''
        # pass through VAE
        x = x.view(-1, np.prod(self.args.input_size))
        x_mean, x_logvar, z_q, z_q_mean, z_q_logvar = self.forward(x)

        # RE
        if self.args.input_type == 'binary':
            RE = log_Bernoulli(x, x_mean, dim=1)
        elif self.args.input_type == 'gray' or self.args.input_type == 'continuous':
            RE = -log_Logistic_256(x, x_mean, x_logvar, dim=1)
        else:
            raise Exception('Wrong input type!')

        # KL
        log_p_z = self.log_p_z(z_q)
        log_q_z = log_Normal_diag(z_q, z_q_mean, z_q_logvar, dim=1)
        KL = -(log_p_z - log_q_z)
        # debugging guard: report NaNs coming out of the encoder
        if self.isnan(z_q_mean.data[0][0]):
            print("mean:")
            print(z_q_mean)
        if self.isnan(z_q_logvar.data[0][0]):
            print("var:")
            print(z_q_logvar)

        loss = -RE + beta * KL

        # FI (Fisher-information term); zeroed out when the flag is off
        FI, gamma = self.FI(x)
        if not self.args.FI:
            FI = FI * 0.

        # MI (mutual-information estimate); zeroed out when the flag is off
        MI = self.MI(x)
        if not self.args.MI:
            MI = MI * 0.

        if self.args.adv:
            # adversarial term: penalize the gap between exp(MI) and FI
            loss += self.args.ksi * (torch.exp(MI) - FI).abs()

        if average:
            loss = torch.mean(loss)
            RE = torch.mean(RE)
            KL = torch.mean(KL)
            FI = torch.mean(FI)
            MI = torch.mean(torch.exp(MI))

        return loss, RE, KL, FI, MI
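
The NaN guards in this variant call a `self.isnan` helper that the listing never defines; a minimal compatible sketch (an assumption, relying on NaN being the only value that compares unequal to itself):

    # hypothetical definition of the isnan helper used above
    def isnan(self, v):
        # v != v holds only when v is NaN
        return v != v
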
Example #5
    def log_q_x_vampprior(self, x):
        # shapes below: MB = minibatch size, C = number of pseudo-input components
        C = self.args.number_components

        # calculate params
        Z1 = self.means_z1(self.idle_input_z).view(-1, self.args.z1_size)
        Z2 = self.means_z2(self.idle_input_z).view(-1, self.args.z2_size)

        # calculate params for given data
        q_x_mean, q_x_logvar = self.p_x(z1=Z1, z2=Z2)  # C x M

        # expand x
        x_expand = x.unsqueeze(1)
        means = q_x_mean.unsqueeze(0)

        if self.args.input_type == 'binary':
            a = log_Bernoulli(x_expand, means, dim=2) - math.log(C)  # MB x C
        elif self.args.input_type == 'gray' or self.args.input_type == 'continuous':
            logvars = q_x_logvar.unsqueeze(0)
            a = -log_Logistic_256(x_expand, means, logvars, dim=2) - math.log(C)  # MB x C
        else:
            raise Exception('Wrong input type!')

        a_max, _ = torch.max(a, 1)  # MB
        # calculate log-sum-exp
        log_prior = a_max + torch.log(torch.sum(torch.exp(a - a_max.unsqueeze(1)), 1))  # MB

        return log_prior
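
`log_q_x_vampprior` scores x under a C-component mixture of decoders evaluated at the learned pseudo-input codes Z1, Z2, reduced with the numerically stable log-sum-exp that the last lines implement:

\[
\log q(x) = \log \frac{1}{C} \sum_{c=1}^{C} p\bigl(x \mid z_1^{(c)}, z_2^{(c)}\bigr)
= a_{\max} + \log \sum_{c=1}^{C} e^{a_c - a_{\max}},
\qquad a_c = \log p\bigl(x \mid z_1^{(c)}, z_2^{(c)}\bigr) - \log C .
\]
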
Example #6
    def calculate_loss(self, x, beta=1., average=False):
        '''
        :param x: input image(s)
        :param beta: a hyperparam for warmup
        :param average: whether to average loss or not
        :return: value of a loss function
        '''
        # pass through VAE
        x_mean, x_logvar, z_q, z_q_mean, z_q_logvar = self.forward(x)

        # RE
        if self.args.input_type == 'binary':
            RE = log_Bernoulli(x, x_mean, dim=1)
        elif self.args.input_type == 'multinomial':
            # NB: strictly a log-likelihood rather than a reconstruction error
            RE = log_Softmax(x, x_mean, dim=1)
        elif self.args.input_type == 'gray' or self.args.input_type == 'continuous':
            RE = -log_Logistic_256(x, x_mean, x_logvar, dim=1)
        else:
            raise Exception('Wrong input type!')

        # KL
        log_p_z = self.log_p_z(z_q)
        log_q_z = log_Normal_diag(z_q, z_q_mean, z_q_logvar, dim=1)
        KL = -(log_p_z - log_q_z)

        loss = -RE + beta * KL

        if average:
            loss = torch.mean(loss)
            RE = torch.mean(RE)
            KL = torch.mean(KL)

        return loss, RE, KL
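
A note on the `gray`/`continuous` branch that appears in every variant: the leading minus in `RE = -log_Logistic_256(...)` suggests the helper returns a negative log-likelihood, so that RE ends up on the same log-likelihood scale as `log_Bernoulli`. In VampPrior-style codebases this helper is typically the discretized logistic likelihood over 8-bit pixel bins (an assumption about its definition, which the listing does not show):

\[
\log p(x \mid \mu, s) = \log \Bigl[ \sigma\Bigl(\frac{x + 1/256 - \mu}{s}\Bigr) - \sigma\Bigl(\frac{x - \mu}{s}\Bigr) \Bigr],
\qquad s = e^{\texttt{x\_logvar}} .
\]
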
Example #7
    def calculate_loss(self, x, beta=1., average=False):
        '''
        :param x: input image(s)
        :param beta: a hyperparam for warmup
        :param average: whether to average loss or not
        :return: value of a loss function
        '''
        # pass through VAE
        x_mean, x_logvar, z_q, z_q_mean, z_q_logvar = self.forward(x)

        # RE
        if self.args.input_type == 'binary':
            RE = log_Bernoulli(x, x_mean, dim=1)
        elif self.args.input_type == 'gray' or self.args.input_type == 'continuous':
            RE = -log_Logistic_256(x, x_mean, x_logvar, dim=1)
        else:
            raise Exception('Wrong input type!')

        # KL
        log_p_z = self.log_p_z(z_q)
        log_q_z = log_Normal_diag(z_q, z_q_mean, z_q_logvar, dim=1)
        KL = -(log_p_z - log_q_z)

        loss = -RE + beta * KL

        if average:
            loss = torch.mean(loss)
            RE = torch.mean(RE)
            KL = torch.mean(KL)

        return loss, RE, KL
Example #8
    def calculate_loss(self, x, beta=1., average=False):
        '''
        :param x: input image(s)
        :param beta: a hyperparam for warmup
        :param average: whether to average loss or not
        :return: value of a loss function
        '''
        # pass through VAE
        x_mean, x_logvar, z_q, z_q_mean, z_q_logvar = self.forward(x)

        # RE
        if self.args.input_type == 'binary':
            RE = log_Bernoulli(x, x_mean, dim=1)
        elif self.args.input_type == 'gray' or self.args.input_type == 'continuous':
            RE = -log_Logistic_256(x, x_mean, x_logvar, dim=1)
        else:
            raise Exception('Wrong input type!')

        # KL
        log_p_z = self.log_p_z(z_q)
        log_q_z = log_Normal_diag(z_q, z_q_mean, z_q_logvar, dim=1)
        KL = -(log_p_z - log_q_z)
        # debugging guard: report NaNs coming out of the encoder
        if self.isnan(z_q_mean.data[0][0]):
            print("mean:")
            print(z_q_mean)
        if self.isnan(z_q_logvar.data[0][0]):
            print("var:")
            print(z_q_logvar)

        # FI (Fisher-information term); a zero constant when the flag is off
        if self.args.FI:
            FI, gamma = self.FI(x)
        else:
            FI = Variable(torch.zeros(1), requires_grad=False)
            if self.args.cuda:
                FI = FI.cuda()

        # MI (mutual-information estimate); a zero constant when the flag is off
        if self.args.MI:
            MI = self.MI(x)
        else:
            MI = Variable(torch.zeros(1), requires_grad=False)
            if self.args.cuda:
                MI = MI.cuda()

        loss = -RE + beta * KL

        if self.args.FI:
            loss -= torch.mean(FI * gamma, dim=1)

        if average:
            loss = torch.mean(loss)
            RE = torch.mean(RE)
            KL = torch.mean(KL)
            FI = torch.mean(torch.exp(torch.mean(FI)))
            MI = torch.mean(MI)

        return loss, RE, KL, FI, MI
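
Reading this variant's pieces together, the objective it minimizes (taken directly off the code, with the last term present only when `args.FI` is set) is:

\[
\mathcal{L}(x) = -\mathrm{RE} + \beta\,\mathrm{KL} - \operatorname{mean}(\mathrm{FI} \odot \gamma),
\]

where the last term corresponds to `torch.mean(FI * gamma, dim=1)`.
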
Example #9
    def calculate_loss(self, x, beta=1., average=False):
        # pass through VAE
        x_mean, x_logvar, z1_q, z1_q_mean, z1_q_logvar, z2_q, z2_q_mean, z2_q_logvar, z1_p_mean, z1_p_logvar = self.forward(x)

        # p(x|z)p(z)
        if self.args.input_type == 'binary':
            log_p_x_given_z = log_Bernoulli(x, x_mean, dim=1)
        elif self.args.input_type == 'gray' or self.args.input_type == 'continuous':
            log_p_x_given_z = -log_Logistic_256(x, x_mean, x_logvar, dim=1)
        else:
            raise Exception('Wrong input type!')

        log_p_z1 = log_Normal_diag(z1_q, z1_p_mean, z1_p_logvar, dim=1)
        log_p_z2 = self.log_p_z2(z2_q)
        log_p_z = log_p_z1 + beta * log_p_z2

        # q(z|x)q(x)
        log_q_z1 = log_Normal_diag(z1_q, z1_q_mean, z1_q_logvar, dim=1)
        log_q_z2 = log_Normal_diag(z2_q, z2_q_mean, z2_q_logvar, dim=1)
        log_q_z_given_x = log_q_z1 + log_q_z2

        if self.args.q_x_prior == "marginal":
            # q(x) is the marginal of p(x, z)
            log_q_x = log_p_x_given_z + log_p_z - log_q_z_given_x
        elif self.args.q_x_prior == "vampprior":
            # q(x) is a VampPrior mixture of p(x|u)
            log_q_x = self.log_q_x_vampprior(x)
        else:
            raise Exception('Wrong q_x prior!')

        RE = log_p_x_given_z
        KL = -(log_p_z1 + log_p_z2 - log_q_z1 - log_q_z2)

        # MIM loss
        loss = -0.5 * (log_p_x_given_z + log_p_z + beta *
                       (log_q_z_given_x + log_q_x))

        if average:
            loss = torch.mean(loss)
            RE = torch.mean(RE)
            KL = torch.mean(KL)

        # symmetric sampling
        if self.p_samp and (beta >= 1.0):

            z2_q = None
            z1_q = None

            # sample x ~ p(x|z) from the PixelCNN decoder
            z1_q, z2_q, x = self.generate_x(
                N=x.shape[0],
                return_z=True,
                z1=z1_q,
                z2=z2_q,
            )

            # discrete samples should have no gradients
            if self.args.input_type == 'binary':
                x = x.detach()

            # reshape to image dimensions
            x_shape = (-1, ) + tuple(self.args.input_size)
            x = x.view(x_shape)

            # z2 ~ q(z2 | x)
            z2_q_mean, z2_q_logvar = self.q_z2(x)
            # z1 ~ q(z1 | x, z2)
            z1_q_mean, z1_q_logvar = self.q_z1(x, z2_q)
            # p(z1 | z2)
            z1_p_mean, z1_p_logvar = self.p_z1(z2_q)
            # x_mean = p(x|z1,z2)
            x_mean, x_logvar = self.p_x(z1_q, z2_q)

            x = x.view((x.shape[0], -1))

            # p(x|z)p(z)
            if self.args.input_type == 'binary':
                log_p_x_given_z = log_Bernoulli(x, x_mean, dim=1)
            elif self.args.input_type == 'gray' or self.args.input_type == 'continuous':
                log_p_x_given_z = -log_Logistic_256(x, x_mean, x_logvar, dim=1)
            else:
                raise Exception('Wrong input type!')

            log_p_z1 = log_Normal_diag(z1_q, z1_p_mean, z1_p_logvar, dim=1)
            log_p_z2 = self.log_p_z2(z2_q)
            log_p_z = log_p_z1 + beta * log_p_z2

            # q(z|x)q(x)
            log_q_z1 = log_Normal_diag(z1_q, z1_q_mean, z1_q_logvar, dim=1)
            log_q_z2 = log_Normal_diag(z2_q, z2_q_mean, z2_q_logvar, dim=1)
            log_q_z_given_x = log_q_z1 + log_q_z2

            if self.args.q_x_prior == "marginal":
                # q(x) is the marginal of p(x, z)
                log_q_x = log_p_x_given_z
            elif self.args.q_x_prior == "vampprior":
                # q(x) is a VampPrior mixture of p(x|u)
                log_q_x = self.log_q_x_vampprior(x)
            else:
                raise Exception('Wrong q_x prior!')

            loss_p = -0.5 * (log_p_x_given_z + log_p_z + beta *
                             (log_q_z_given_x + log_q_x))

            # REINFORCE
            if self.args.input_type == 'binary':
                loss_p = loss_p + loss_p.detach() * log_p_x_given_z - (
                    loss_p * log_p_x_given_z).detach()

            # add the beta-weighted symmetric-sample term to the MIM loss
            loss += beta * loss_p.mean()

        return loss, RE, KL
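
The `loss` assembled above is a beta-weighted symmetric MIM objective: the average of the joint log-probability under the decoding path and, scaled by beta, the encoding path,

\[
\mathcal{L}_{\mathrm{MIM}}(x, z_1, z_2) = -\tfrac{1}{2} \Bigl[ \log p(x \mid z_1, z_2) + \log p(z_1, z_2) + \beta \bigl( \log q(z_1, z_2 \mid x) + \log q(x) \bigr) \Bigr],
\]

with log p(z1, z2) = log p(z1 | z2) + beta * log p(z2), exactly as the code weights it. The REINFORCE correction for binary inputs adds loss_p.detach() * log p(x|z) minus the detached value of their product, which leaves the loss value unchanged while restoring a score-function gradient through the discrete samples.
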