Example #1
    def calculate_likelihood(self, X, dir, mode='test', S=5000):
        # set auxiliary variables for number of training and test sets
        N_test = X.size(0)

        # init list
        likelihood_test = []

        MB = 100

        if S <= MB:
            R = 1
        else:
            R = S // MB  # number of minibatch passes (integer division)
            S = MB

        for j in range(N_test):
            if j % 100 == 0:
                print('{:.2f}%'.format(j / (1. * N_test) * 100))
            # Take x*
            x_single = X[j].unsqueeze(0)

            a = []
            for r in range(0, R):
                # Repeat x* S times (one row per importance sample)
                x = x_single.expand(S, x_single.size(1))

                # pass through VAE
                x_mean, x_logvar, z1_q, z1_q_mean, z1_q_logvar, z2_q, z2_q_mean, z2_q_logvar, z1_p_mean, z1_p_logvar = self.forward(
                    x)

                # RE: reconstruction term log p(x | z) under a Bernoulli decoder
                RE = log_Bernoulli(x, x_mean, dim=1)

                # KL: single-sample estimate of the KL terms for both latent levels
                log_p_z2 = self.log_p_z2(z2_q)
                log_p_z1 = log_Normal_diag(z1_q, z1_p_mean, z1_p_logvar, dim=1)
                log_q_z1 = log_Normal_diag(z1_q, z1_q_mean, z1_q_logvar, dim=1)
                log_q_z2 = log_Normal_diag(z2_q, z2_q_mean, z2_q_logvar, dim=1)
                KL = -(log_p_z1 + log_p_z2 - log_q_z1 - log_q_z2)

                a_tmp = (RE - KL)  # per-sample log importance weight: log p(x, z) - log q(z | x)

                a.append(a_tmp.cpu().data.numpy())

            # importance-sampling estimate: log p(x) ~ logsumexp(a) - log(number of samples)
            a = np.asarray(a)
            a = np.reshape(a, (a.shape[0] * a.shape[1], 1))
            likelihood_x = logsumexp(a)
            likelihood_test.append(likelihood_x - np.log(len(a)))  # len(a) == R * S total samples

        likelihood_test = np.array(likelihood_test)

        plot_histogram(-likelihood_test, dir, mode)

        return -np.mean(likelihood_test)
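For reference, the quantity accumulated above is the standard importance-sampling estimate of the marginal likelihood, log p(x) ≈ logsumexp_i(RE_i - KL_i) - log(total number of samples). A minimal standalone sketch of that final reduction, assuming scipy's logsumexp (the snippets here use their own logsumexp helper, so the import and the function name below are illustrative assumptions, not part of the original code):

import numpy as np
from scipy.special import logsumexp

def importance_sampled_log_px(log_weights):
    # log_weights[i] = RE_i - KL_i = log p(x, z_i) - log q(z_i | x)
    # for one importance sample z_i drawn for a single input x*.
    log_weights = np.asarray(log_weights).reshape(-1)
    # log p(x) ~= log( (1 / S) * sum_i exp(log_weights[i]) )
    return logsumexp(log_weights) - np.log(len(log_weights))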
Example #2
    def calculate_likelihood(self, X, dir, mode='test', S=5000, MB=100):
        # set auxiliary variables for number of training and test sets
        X = X.view(-1, np.prod(self.args.input_size))
        N_test = X.size(0)

        # init list
        likelihood_test = []

        if S <= MB:
            R = 1
        else:
            R = S // MB  # number of minibatch passes (integer division)
            S = MB

        for j in range(N_test):
            if j % 100 == 0:
                print('{:.2f}%'.format(j / (1. * N_test) * 100))
            # Take x*
            x_single = X[j].unsqueeze(0)

            a = []
            for r in range(0, R):
                # Repeat x* S times (one row per importance sample)
                x = x_single.expand(S, x_single.size(1)).contiguous()

                a_tmp, _, _, _, _ = self.calculate_loss(x)

                a.append(-a_tmp.cpu().data.numpy())

            # importance-sampling estimate: log p(x) ~ logsumexp(a) - log(number of samples)
            a = np.asarray(a)
            a = np.reshape(a, (a.shape[0] * a.shape[1], 1))
            likelihood_x = logsumexp(a)
            likelihood_test.append(likelihood_x - np.log(len(a)))

        likelihood_test = np.array(likelihood_test)

        plot_histogram(-likelihood_test, dir, mode)

        return -np.mean(likelihood_test)
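Unlike the first example, this variant calls .contiguous() after expand. That matters because torch's expand returns a zero-stride view rather than a copy, and some downstream operations require contiguous memory. A small self-contained illustration (the tensor shapes are chosen arbitrarily for the demonstration):

import torch

# x_single has shape (1, D); expand repeats it S times without copying
x_single = torch.randn(1, 4)
x = x_single.expand(5, x_single.size(1))
print(x.shape, x.stride())   # stride 0 along dim 0: a view, no extra memory
x = x.contiguous()           # materialise an actual copy for ops that need it
print(x.shape, x.stride())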
Example #3
    def calculate_likelihood(self, X, dir, mode='test', S=5000, MB=100):
        # set auxiliary variables for number of training and test sets
        N_test = X.size(0)

        # init list
        likelihood_test = []

        if S <= MB:
            R = 1
        else:
            R = S // MB  # number of minibatch passes (integer division)
            S = MB

        for j in range(N_test):
            if j % 100 == 0:
                print('{:.2f}%'.format(j / (1. * N_test) * 100))
            # Take x*
            x_single = X[j].unsqueeze(0)

            a = []
            for r in range(0, R):
                # Repeat x* S times (one row per importance sample)
                x = x_single.expand(S, x_single.size(1))

                a_tmp, _, _ = self.calculate_loss(x)

                a.append(-a_tmp.cpu().data.numpy())

            # importance-sampling estimate: log p(x) ~ logsumexp(a) - log(number of samples)
            a = np.asarray(a)
            a = np.reshape(a, (a.shape[0] * a.shape[1], 1))
            likelihood_x = logsumexp(a)
            likelihood_test.append(likelihood_x - np.log(len(a)))

        likelihood_test = np.array(likelihood_test)

        plot_histogram(-likelihood_test, dir, mode)

        return -np.mean(likelihood_test)
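The reduction at the end of each loop, logsumexp(a) - np.log(len(a)), is a log-mean-exp. Writing it out explicitly shows the max-shift trick that keeps it numerically stable (the helper name below is an assumption for illustration):

import numpy as np

def log_mean_exp(a):
    # Stable log(mean(exp(a))): subtracting the max before exponentiating
    # ensures the largest term becomes exp(0) = 1, so nothing overflows.
    a = np.asarray(a).reshape(-1)
    m = a.max()
    return m + np.log(np.mean(np.exp(a - m)))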
Example #4
    def calculate_likelihood(self,
                             data,
                             mode='valid',
                             s=5000,
                             display_rate=100):
        # set auxiliary variables for number of training and valid sets
        n_test = data.size(0)

        # init list
        likelihood = []

        mb = 500

        if s <= mb:
            r = 1
        else:
            r = s // mb  # number of minibatch passes (integer division)
            s = mb

        for j in range(n_test):
            if j % display_rate == 0:
                progress = 100. * j / (1. * n_test)
                running_nll = -np.mean(likelihood) if likelihood else float("nan")
                print("\revaluating likelihood: {}/{} ({:.2f}%), running NLL: {:.4f}".format(
                          j, n_test, progress, running_nll),
                      end="",
                      flush=True)
            # Take x*
            x_single = data[j].unsqueeze(0).view(self.input_shape[0],
                                                 self.input_size)
            a = []
            for _ in range(0, int(r)):
                # Repeat x* s times (one row per importance sample)
                x = x_single.expand(s, x_single.size(1))

                # pass through VAE
                if self.flavour in ["ccLinIAF", "hf", "vanilla", "normflow"]:
                    loss, rec, kl, _ = self.calculate_losses(x)
                elif self.flavour in [
                        "o-sylvester", "h-sylvester", "t-sylvester"
                ]:
                    reconstruction, z_mu, z_var, ldj, z0, zk = self(x)
                    loss, rec, kl = calculate_losses(reconstruction, x, z_mu,
                                                     z_var, z0, zk, ldj)
                else:
                    raise ValueError(
                        "{} is not a known flavour, quitting.".format(self.flavour))

                a.append(loss.cpu().data.numpy())

            # importance-sampling reduction: logsumexp over the collected terms, minus log of their count
            a = np.asarray(a)
            a = np.reshape(a, (a.shape[0], 1))
            likelihood_x = logsumexp(a)
            likelihood.append(likelihood_x - np.log(len(a)))

        likelihood = np.array(likelihood)

        plot_histogram(-likelihood, self.model_history_path, mode)

        return -np.mean(likelihood)
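All four variants bound memory the same way: the s (or S) importance samples are evaluated in r passes of at most mb (or MB) rows each, since x* is expanded to one row per sample. A standalone sketch of that split, under the same convention as above (the helper name is an assumption). Note that when the sample budget is not a multiple of the minibatch size, the remainder is silently dropped, exactly as in the snippets:

def split_importance_samples(num_samples, minibatch):
    # Returns (passes, per_pass): per_pass rows are expanded at a time,
    # and passes * per_pass importance samples are used in total.
    if num_samples <= minibatch:
        return 1, num_samples
    return num_samples // minibatch, minibatch

# e.g. split_importance_samples(5000, 100) == (50, 100)  (first three examples)
#      split_importance_samples(5000, 500) == (10, 500)  (last example)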