コード例 #1
0
    def set_up(self):
        """Construct a randomly-rotated Gaussian target.

        Eigenvalues of the covariance are drawn from a gamma distribution
        and rotated by a random orthogonal matrix; the resulting covariance
        is passed around via its Cholesky factor. Stores the target
        log-density, its gradient, a single-sample generator and N oracle
        samples on the instance.
        """
        # gamma-distributed eigenvalues for the covariance spectrum
        eigenvalues = np.random.gamma(shape=1, size=self.D)

        # rotate the diagonal spectrum with a random orthogonal matrix
        rotation = qmult(np.eye(self.D))
        covariance = rotation.T.dot(np.diag(eigenvalues)).dot(rotation)

        # all downstream calls work with the Cholesky factor
        chol = np.linalg.cholesky(covariance)

        # target log-density and its gradient
        self.logq = lambda x: log_gaussian_pdf(
            x, Sigma=chol, is_cholesky=True, compute_grad=False)
        self.dlogq = lambda x: log_gaussian_pdf(
            x, Sigma=chol, is_cholesky=True, compute_grad=True)

        # draws one starting state from the target
        self.q_sample = lambda: sample_gaussian(
            N=1, mu=np.zeros(self.D), Sigma=chol, is_cholesky=True)[0]

        logger.info("N=%d, D=%d" % (self.N, self.D))

        # oracle sample set from the target
        self.Z = sample_gaussian(self.N,
                                 mu=np.zeros(self.D),
                                 Sigma=chol,
                                 is_cholesky=True)
コード例 #2
0
    def set_up(self):
        """Set up an isotropic Gaussian target with variance self.sigma_q.

        Stores the target log-density, its gradient, a single-sample
        generator and N oracle samples on the instance; the covariance is
        handled via its Cholesky factor throughout.
        """
        # Cholesky factor of sigma_q * identity
        chol = np.linalg.cholesky(np.eye(self.D) * self.sigma_q)

        # target log-density and its gradient
        self.logq = lambda x: log_gaussian_pdf(
            x, Sigma=chol, is_cholesky=True, compute_grad=False)
        self.dlogq = lambda x: log_gaussian_pdf(
            x, Sigma=chol, is_cholesky=True, compute_grad=True)

        # draws one starting state from the target
        self.q_sample = lambda: sample_gaussian(
            N=1, mu=np.zeros(self.D), Sigma=chol, is_cholesky=True)[0]

        logger.info("N=%d, D=%d" % (self.N, self.D))

        # oracle sample set from the target
        self.Z = sample_gaussian(self.N,
                                 mu=np.zeros(self.D),
                                 Sigma=chol,
                                 is_cholesky=True)
コード例 #3
0
ファイル: gaussian_tests.py プロジェクト: afcarl/kamiltonian
def test_isotropic_zero_mean_equals_log_gaussian_pdf():
    """IsotropicZeroMeanGaussian agrees with the generic Gaussian log-pdf.

    sigma=sqrt(2) corresponds to the full covariance 2*I, so both
    implementations must evaluate to the same log-density at a random point.
    """
    D = 2
    x = np.random.randn(D)
    gaussian = IsotropicZeroMeanGaussian(sigma=np.sqrt(2))
    # reference: zero-mean Gaussian with full covariance 2*I (not a Cholesky)
    reference = log_gaussian_pdf(x,
                                 mu=np.zeros(D),
                                 Sigma=np.eye(D) * 2,
                                 is_cholesky=False,
                                 compute_grad=False)
    assert_close(reference, gaussian.log_pdf(x))
コード例 #4
0
ファイル: HABCJob.py プロジェクト: afcarl/kamiltonian
    def grad(self, theta):
        """Estimate the gradient of the log-posterior at theta.

        Combines an SPSA estimate of the likelihood gradient with the exact
        prior gradient, and keeps a Welford-style running estimate of the
        covariance of the likelihood-gradient estimator as a side effect.

        Returns the sum of the estimated likelihood gradient and the prior
        gradient.
        """
        logger.debug("Entering")

        # refresh the likelihood term for the current parameter
        self._update(theta)

        log_lik = lambda t: log_gaussian_pdf(
            t, self.mu, self.L, is_cholesky=True)

        # stochastic (simultaneous-perturbation) gradient estimate
        grad_lik_est = SPSA(log_lik,
                            theta,
                            stepsize=5.,
                            num_repeats=self.num_spsa_repeats)
        grad_prior = self.abc_target.prior.grad(theta)

        # Welford online update of mean and scatter matrix of the estimator
        self.grad_cov_est_n += 1
        residual = grad_lik_est - self.grad_cov_est_mean
        self.grad_cov_est_mean += residual / self.grad_cov_est_n
        self.grad_cov_est_M2 += np.outer(residual,
                                         grad_lik_est - self.grad_cov_est_mean)

        # unbiased covariance needs at least two observations
        if self.grad_cov_est_n > 1:
            self.grad_cov_est = self.grad_cov_est_M2 / (self.grad_cov_est_n -
                                                        1)
            logger.debug("Variance grad_0: %.4f" % self.grad_cov_est[0, 0])

        logger.debug("Leaving")
        return grad_lik_est + grad_prior
コード例 #5
0
ファイル: banana_target.py プロジェクト: afcarl/kamiltonian
    # NOTE(review): fragment — the enclosing function's def line is outside
    # this excerpt, so Z, K, b, sigma, lmbda, N and D come from outer scope.

    # score-matching fit of the RKHS density estimate
    C = _compute_C_sym(Z, K, sigma)
    a = score_matching_sym(Z, sigma, lmbda, K, b, C)
    # training objective and 5-fold cross-validated objective
    J = _objective_sym(Z, sigma, lmbda, a, K, b, C)
    J_xval = np.mean(xvalidate(Z, 5, sigma, lmbda, K))
    print("N=%d, sigma: %.2f, lambda: %.2f, J(a)=%.2f, XJ(a)=%.2f" % \
            (N, sigma, lmbda, J, J_xval))

    # estimated log-density and its gradient built from the fitted weights a
    kernel = lambda X, Y=None: gaussian_kernel(X, Y, sigma=sigma)
    kernel_grad = lambda x, X=None: gaussian_kernel_grad(x, X, sigma)
    logq_est = lambda x: log_pdf_estimate(x, a, Z, kernel)
    dlogq_est = lambda x: log_pdf_estimate_grad(x, a, Z, kernel_grad)

    # momentum: isotropic Gaussian with variance 0.1, via its Cholesky factor
    Sigma_p = np.eye(D) * .1
    L_p = np.linalg.cholesky(Sigma_p)
    logp = lambda x: log_gaussian_pdf(
        x, Sigma=L_p, compute_grad=False, is_cholesky=True)
    dlogp = lambda x: log_gaussian_pdf(
        x, Sigma=L_p, compute_grad=True, is_cholesky=True)
    p_sample = lambda: sample_gaussian(
        N=1, mu=np.zeros(D), Sigma=L_p, is_cholesky=True)[0]

    # starting state: random momentum, position offset in the first two dims
    p0 = p_sample()
    q0 = np.zeros(D)
    q0[:2] = np.array([0, -3])

    # parameters
    num_steps = 1500
    step_size = .1

    # plotting grid (np.linspace default: 50 points)
    Xs_q = np.linspace(-20, 20)
コード例 #6
0
ファイル: TrajectoryJob.py プロジェクト: afcarl/kamiltonian
    def compute_trajectory(self, random_start_state=None):
        """Simulate one trajectory under the exact and estimated gradients.

        Restores (or records) the NumPy RNG state for reproducibility, runs
        a leapfrog trajectory with the true gradient and a second one of the
        same length with the estimated gradient, and compares average
        acceptance probabilities and trajectory log-determinants.

        Returns (acc_mean, acc_est_mean, log_det, log_det_est, steps_taken,
        random_start_state).
        """
        logger.debug("Entering")

        # either restore a supplied RNG state or remember the current one
        if random_start_state is None:
            random_start_state = np.random.get_state()
        else:
            np.random.set_state(random_start_state)

        # momentum: isotropic Gaussian with variance sigma_p, via Cholesky
        chol_p = np.linalg.cholesky(np.eye(self.D) * self.sigma_p)
        self.logp = lambda x: log_gaussian_pdf(
            x, Sigma=chol_p, compute_grad=False, is_cholesky=True)
        self.dlogp = lambda x: log_gaussian_pdf(
            x, Sigma=chol_p, compute_grad=True, is_cholesky=True)
        self.p_sample = lambda: sample_gaussian(
            N=1, mu=np.zeros(self.D), Sigma=chol_p, is_cholesky=True)[0]

        # subclass hook: defines self.logq, self.dlogq, self.q_sample, self.Z
        self.set_up()

        grad_estimate = self.update_density_estimate()

        # optionally randomize the trajectory length
        if self.max_steps is None:
            trajectory_steps = self.num_steps
        else:
            trajectory_steps = np.random.randint(self.num_steps,
                                                 self.max_steps + 1)

        logger.info("Simulating trajectory for at least L=%d steps of size %.2f" % \
                     (self.num_steps, self.step_size))
        # draw initial momentum and position
        momentum0 = self.p_sample()
        position0 = self.q_sample()

        Qs, Ps = leapfrog(position0, self.dlogq, momentum0, self.dlogp,
                          self.step_size, trajectory_steps)

        # integrate again with the estimated gradient for the same length
        steps_taken = len(Qs)
        Qs_est, Ps_est = leapfrog(position0, grad_estimate, momentum0,
                                  self.dlogp, self.step_size, steps_taken)
        logger.info("%d steps taken" % steps_taken)

        logger.info("Computing average acceptance probabilities")
        log_acc = compute_log_accept_pr(position0, momentum0, Qs, Ps,
                                        self.logq, self.logp)
        log_acc_est = compute_log_accept_pr(position0, momentum0, Qs_est,
                                            Ps_est, self.logq, self.logp)
        acc_mean = np.mean(np.exp(log_acc))
        acc_est_mean = np.mean(np.exp(log_acc_est))
        # acceptance over the last 10% of the trajectory (logged only)
        tail_start = int(len(log_acc) * 0.9)
        acc_mean10 = np.mean(np.exp(log_acc[tail_start:]))
        acc_est_mean10 = np.mean(np.exp(log_acc_est[tail_start:]))

        logger.info("Computing average volumes")
        log_det = compute_log_det_trajectory(Qs, Ps)
        log_det_est = compute_log_det_trajectory(Qs_est, Ps_est)

        logger.info("Average acceptance prob: %.2f, %.2f" %
                    (acc_mean, acc_est_mean))
        logger.info("Average acceptance prob (last 10 percent): %.2f, %.2f" %
                    (acc_mean10, acc_est_mean10))
        logger.info("Log-determinant: %.2f, %.2f" % (log_det, log_det_est))

        logger.debug("Leaving")
        return acc_mean, acc_est_mean, log_det, log_det_est, steps_taken, random_start_state
コード例 #7
0
    def compute_trajectory(self, random_start_state=None):
        """Run leapfrog trajectories under the exact and estimated gradients.

        Fits a score-matching density estimate in the RKHS from the oracle
        samples self.Z, simulates a leapfrog trajectory with the true
        gradient and a second one of the same length with the estimated
        gradient, then compares average acceptance probabilities and
        trajectory log-determinants.

        Parameters
        ----------
        random_start_state :
            Optional NumPy RNG state to reproduce a previous run; if None,
            the current state is recorded and returned.

        Returns
        -------
        (acc_mean, acc_est_mean, log_det, log_det_est, steps_taken,
         random_start_state)
        """
        logger.debug("Entering")

        if random_start_state is not None:
            np.random.set_state(random_start_state)
        else:
            random_start_state = np.random.get_state()

        # momentum: isotropic Gaussian with variance sigma_p, via Cholesky
        L_p = np.linalg.cholesky(np.eye(self.D) * self.sigma_p)
        self.logp = lambda x: log_gaussian_pdf(
            x, Sigma=L_p, compute_grad=False, is_cholesky=True)
        self.dlogp = lambda x: log_gaussian_pdf(
            x, Sigma=L_p, compute_grad=True, is_cholesky=True)
        # bug fix: this sampler lambda was assigned twice in a row; the
        # redundant duplicate assignment has been removed
        self.p_sample = lambda: sample_gaussian(
            N=1, mu=np.zeros(self.D), Sigma=L_p, is_cholesky=True)[0]

        # set up target and momentum densities and gradients
        self.set_up()

        logger.info("Learning kernel bandwidth")
        sigma = select_sigma_grid(self.Z, lmbda=self.lmbda, log2_sigma_max=15)
        logger.info("Using lmbda=%.2f, sigma: %.2f" % (self.lmbda, sigma))

        logger.info("Computing kernel matrix")
        K = gaussian_kernel(self.Z, sigma=sigma)

        logger.info("Estimate density in RKHS")
        b = _compute_b_sym(self.Z, K, sigma)
        C = _compute_C_sym(self.Z, K, sigma)
        a = score_matching_sym(self.Z, sigma, self.lmbda, K, b, C)

        # gradient of the estimated log-density
        kernel_grad = lambda x, X=None: gaussian_kernel_grad(x, X, sigma)
        dlogq_est = lambda x: log_pdf_estimate_grad(x, a, self.Z, kernel_grad)

        logger.info("Simulating trajectory for L=%d steps of size %.2f" % \
                     (self.num_steps, self.step_size))
        # starting state
        p0 = self.p_sample()
        q0 = self.q_sample()

        Qs, Ps = leapfrog(q0, self.dlogq, p0, self.dlogp, self.step_size,
                          self.num_steps, self.max_steps)

        # run second integrator for same amount of steps
        steps_taken = len(Qs)
        logger.info("%d steps taken" % steps_taken)
        Qs_est, Ps_est = leapfrog(q0, dlogq_est, p0, self.dlogp,
                                  self.step_size, steps_taken)

        logger.info("Computing average acceptance probabilities")
        log_acc = compute_log_accept_pr(q0, p0, Qs, Ps, self.logq, self.logp)
        log_acc_est = compute_log_accept_pr(q0, p0, Qs_est, Ps_est, self.logq,
                                            self.logp)
        acc_mean = np.mean(np.exp(log_acc))
        acc_est_mean = np.mean(np.exp(log_acc_est))

        logger.info("Computing average volumes")
        log_det = compute_log_det_trajectory(Qs, Ps)
        log_det_est = compute_log_det_trajectory(Qs_est, Ps_est)

        logger.info("Average acceptance prob: %.2f, %.2f" %
                    (acc_mean, acc_est_mean))
        logger.info("Log-determinant: %.2f, %.2f" % (log_det, log_det_est))

        logger.debug("Leaving")
        return acc_mean, acc_est_mean, log_det, log_det_est, steps_taken, random_start_state
コード例 #8
0
ファイル: abc_skew_normal.py プロジェクト: afcarl/kamiltonian
 def grad(self, x):
     """Return the gradient of the Gaussian log-pdf at x.

     Uses mean self.mu and the Cholesky factor self.L of the covariance.
     """
     return log_gaussian_pdf(x,
                             mu=self.mu,
                             Sigma=self.L,
                             is_cholesky=True,
                             compute_grad=True)
コード例 #9
0
ファイル: abc_skew_normal.py プロジェクト: afcarl/kamiltonian
 def log_pdf(self, x):
     """Return the Gaussian log-density at x (mean self.mu, Cholesky self.L)."""
     return log_gaussian_pdf(x, mu=self.mu, Sigma=self.L, is_cholesky=True)
コード例 #10
0
def prior_log_pdf(x):
    """Log-density of an isotropic Gaussian prior N(0, 5*I) evaluated at x."""
    dim = len(x)
    # zero mean written directly instead of 0. * ones(dim)
    return log_gaussian_pdf(x, mu=np.zeros(dim), Sigma=np.eye(dim) * 5)
コード例 #11
0
ファイル: interactive.py プロジェクト: afcarl/kamiltonian
        # NOTE(review): fragment — the enclosing function's def line is
        # outside this excerpt; plot_pdf feeds update_plot in that scope.
        plot_pdf = True
    else:
        plot_pdf = False

    update_plot(0)


if __name__ == "__main__":
    D = 2

    # true target log density: anisotropic covariance with strongly
    # correlated first two dimensions
    Sigma = np.diag(np.linspace(0.01, 1, D))
    Sigma[:2, :2] = np.array([[1, .95], [.95, 1]])
    #     Sigma = np.eye(D)
    L = np.linalg.cholesky(Sigma)
    dlogq = lambda x: log_gaussian_pdf(
        x, Sigma=L, is_cholesky=True, compute_grad=True)
    logq = lambda x: log_gaussian_pdf(
        x, Sigma=L, is_cholesky=True, compute_grad=False)

    # sample density
    mu = np.zeros(D)
    N = 200
    np.random.seed(0)
    # NOTE(review): the Gaussian samples are immediately overwritten by the
    # banana samples on the next line — the first draw only advances the RNG.
    Z = sample_gaussian(N, mu, Sigma=L, is_cholesky=True)
    Z = sample_banana(N, D)
    #     print np.sum(Z) * np.std(Z) * np.sum(Z**2) * np.std(Z**2)

    # plotting grids (np.linspace default: 50 points)
    Xs = np.linspace(-15, 15)
    Ys = np.linspace(-10, 5)

    plot_true()
コード例 #12
0
from kmc.score_matching.random_feats.estimator import log_pdf_estimate_grad, \
    log_pdf_estimate
from kmc.score_matching.random_feats.gaussian_rkhs import sample_basis, \
    score_matching_sym, feature_map_grad_single, feature_map_single, objective
from kmc.tools.convergence_stats import autocorr
import matplotlib.pyplot as plt
import numpy as np
from scripts.tools.plotting import evaluate_density_grid, plot_array, \
    plot_2d_trajectory, evaluate_gradient_grid


# target: standard Gaussian in D dimensions, handled via its Cholesky factor
D = 2
bananicity = 0.03  # NOTE(review): unused in this excerpt — presumably consumed later
L = np.linalg.cholesky(np.eye(D))
dlogq = lambda x: log_gaussian_pdf(x, Sigma=L, is_cholesky=True, compute_grad=True)
logq = lambda x: log_gaussian_pdf(x, Sigma=L, is_cholesky=True, compute_grad=False)

# oracle samples
N = 500
Z = sample_gaussian(N, mu=np.zeros(D), Sigma=L, is_cholesky=True)

# fit density in RKHS from oracle samples
sigma = 0.5
# bug fix: gamma was assigned the identical expression twice; the redundant
# duplicate assignment has been removed
gamma = 0.5 * (sigma ** 2)
lmbda = 0.0008
m = N  # number of random basis features equals the sample size
omega, u = sample_basis(D, m, gamma)
theta = score_matching_sym(Z, lmbda, omega, u)
# estimated log-density via the random feature map
logq_est = lambda x: log_pdf_estimate(feature_map_single(x, omega, u), theta)